repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
synnick/roboronya | roboronya/plugins/cholify.py | 1 | 1871 | import random
from roboronya.plugins.plugin import Plugin
class Cholificator(Plugin):
description = 'Roboronya will use her *Automated Cholification Algorithm* (Patent Pending) to translate your text to a more sophisticated language.'
name = 'cholify'
@Plugin.requires_args
def run(roboronya, conv, cmd_args, **kwargs):
def _cholify(words):
choloWords = []
for word in words:
choloWord = ''
oldChar = ''
for char in word.lower():
if char == 'y':
choloWord += 'ii'
elif char == 't':
choloWord += 'th'
elif char == 'u' and (oldChar == 'q'):
choloWord += random.choice(['kh', 'k'])
elif (char == 'i' or char == 'e') and oldChar == 'c':
choloWord = choloWord[:-1]
choloWord += random.choice(['s', 'z']) + char
elif char == 'h' and oldChar == 'c':
choloWord = choloWord[:-1]
choloWord += random.choice(['zh', 'sh'])
elif char == 'c':
choloWord += 'k'
elif char == 's':
choloWord += 'z'
elif char == 'v':
choloWord += 'b'
elif char == 'b':
choloWord += 'v'
elif char == 'q':
pass
else:
choloWord += char
oldChar = char
choloWords.append(choloWord)
return choloWords
return roboronya.send_message(
conv,
' '.join(_cholify(cmd_args)),
**kwargs
)
| mit | 2,323,757,040,166,518,300 | 36.42 | 152 | 0.405665 | false |
geeag/kafka | tests/kafkatest/tests/core/reassign_partitions_test.py | 4 | 5559 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.mark import parametrize
from ducktape.utils.util import wait_until
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest
from kafkatest.utils import is_int
import random
class ReassignPartitionsTest(ProduceConsumeValidateTest):
"""
These tests validate partition reassignment.
Create a topic with few partitions, load some data, trigger partition re-assignment with and without broker failure,
check that partition re-assignment can complete and there is no data loss.
"""
def __init__(self, test_context):
""":type test_context: ducktape.tests.test.TestContext"""
super(ReassignPartitionsTest, self).__init__(test_context=test_context)
self.topic = "test_topic"
self.zk = ZookeeperService(test_context, num_nodes=1)
self.kafka = KafkaService(test_context, num_nodes=4, zk=self.zk, topics={self.topic: {
"partitions": 20,
"replication-factor": 3,
'configs': {"min.insync.replicas": 2}}
})
self.num_partitions = 20
self.timeout_sec = 60
self.producer_throughput = 1000
self.num_producers = 1
self.num_consumers = 1
def setUp(self):
self.zk.start()
def min_cluster_size(self):
# Override this since we're adding services outside of the constructor
return super(ReassignPartitionsTest, self).min_cluster_size() + self.num_producers + self.num_consumers
def clean_bounce_some_brokers(self):
"""Bounce every other broker"""
for node in self.kafka.nodes[::2]:
self.kafka.restart_node(node, clean_shutdown=True)
def reassign_partitions(self, bounce_brokers):
partition_info = self.kafka.parse_describe_topic(self.kafka.describe_topic(self.topic))
self.logger.debug("Partitions before reassignment:" + str(partition_info))
# jumble partition assignment in dictionary
seed = random.randint(0, 2 ** 31 - 1)
self.logger.debug("Jumble partition assignment with seed " + str(seed))
random.seed(seed)
# The list may still be in order, but that's ok
shuffled_list = range(0, self.num_partitions)
random.shuffle(shuffled_list)
for i in range(0, self.num_partitions):
partition_info["partitions"][i]["partition"] = shuffled_list[i]
self.logger.debug("Jumbled partitions: " + str(partition_info))
# send reassign partitions command
self.kafka.execute_reassign_partitions(partition_info)
if bounce_brokers:
# bounce a few brokers at the same time
self.clean_bounce_some_brokers()
# Wait until finished or timeout
wait_until(lambda: self.kafka.verify_reassign_partitions(partition_info), timeout_sec=self.timeout_sec, backoff_sec=.5)
@parametrize(security_protocol="PLAINTEXT", bounce_brokers=True)
@parametrize(security_protocol="PLAINTEXT", bounce_brokers=False)
def test_reassign_partitions(self, bounce_brokers, security_protocol):
"""Reassign partitions tests.
        Setup: 1 zk, 4 kafka nodes, 1 topic with partitions=20, replication-factor=3, and min.insync.replicas=2
- Produce messages in the background
- Consume messages in the background
- Reassign partitions
- If bounce_brokers is True, also bounce a few brokers while partition re-assignment is in progress
- When done reassigning partitions and bouncing brokers, stop producing, and finish consuming
- Validate that every acked message was consumed
"""
self.kafka.security_protocol = security_protocol
self.kafka.interbroker_security_protocol = security_protocol
new_consumer = False if self.kafka.security_protocol == "PLAINTEXT" else True
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka, self.topic, throughput=self.producer_throughput)
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, self.topic, new_consumer=new_consumer, consumer_timeout_ms=60000, message_validator=is_int)
self.kafka.start()
self.run_produce_consume_validate(core_test_action=lambda: self.reassign_partitions(bounce_brokers))
| apache-2.0 | 136,403,551,534,332,140 | 49.536364 | 182 | 0.679799 | false |
dkmatt0/banking-alpha-angularjs | backend/json-bank.py | 1 | 1645 | #!/usr/bin/env python3
import simplejsondb as sjdb
import json
# user { id, nickname, name, password, address, }
#
# session { id, fk_user, last_login, last_activity, }
#
# payee { id, name, desc, }
#
# category { id, name, desc, parent_id, }
#
# mode { id, name, }
#
# bank { id, name, bic_code, sort_code, address, phone, mail, website,
# contact_name, contact_mail, contact_phone, contact_fax, note, }
#
# account { id, bank, user, name, iban, number,
# min_authorised, min_desired, note, }
#
# transaction { id, account, payee, category, mode, date, order,
# amount, balance, number, note, scheduled, hidden, }
#
# scheduler { id, transaction, next, frequency, automatic_add, }
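# Illustrative sketch only (not part of the original script): one "transaction"
# record shaped after the schema comments above. Field names come from those
# comments; the values are invented sample data.
example_transaction = {
    'id': 1, 'account': 1, 'payee': 2, 'category': 3, 'mode': 1,
    'date': '2013-01-05', 'order': 1, 'amount': '20.65', 'balance': '20.65',
    'number': '', 'note': '', 'scheduled': False, 'hidden': False,
}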
with open('data/config.json', 'r') as f:
config_data = json.load(f)
test = sjdb.SimpleJsonDB('test.json', config_data=config_data)
# test.set_indent(2)
test.save('transaction', amount='20.65') # 1
test.save_db()
# print(test.save('transaction', amount='30.47')) # 2
# print(test.save('transaction', convert=False, amount='40.86')) # 3
# print(test.save('transaction', amount='10.20')) # 4
# print(test.save('transaction', id=4, date='2013-01-05')) # 4
# print('# 1 ################')
# print(test.show('transaction'))
# print('# 2 ################')
# test.move('transaction', 3, 0)
# print('# 3 ################')
# print(test.show('transaction'))
# import time
# starttt = time.clock()
# for i in range(10000):
# start = time.clock()
# test.save('transaction')
# print(i, time.clock() - start)
# print(time.clock() - starttt)
test.save_db()
| agpl-3.0 | -7,484,684,051,327,849,000 | 27.375 | 72 | 0.590881 | false |
h4ck3rm1k3/gcc_py_introspector | gcc/tree/attic/query_function_example.py | 1 | 42931 |
import prefix
import types
import json
import pprint  # pprint.pprint() is called throughout this script
from graphviz import Digraph
from SPARQLWrapper import SPARQLWrapper, XML, N3, JSONLD, JSON, POST, GET, SELECT, CONSTRUCT, ASK, DESCRIBE
from SPARQLWrapper.Wrapper import _SPARQL_DEFAULT, _SPARQL_XML, _SPARQL_JSON, _SPARQL_POSSIBLE, _RDF_XML, _RDF_N3, _RDF_JSONLD, _RDF_POSSIBLE
from SPARQLWrapper.SPARQLExceptions import QueryBadFormed
# special tree, name only
fdecl = {
'name' : 'function decl tree',
'exprs' : {
'node:function_decl': {
'fld:body': {'skip': 'yes'},
'fld:args': {'node:parm_decl': '45'},
'fld:mngl': {'node:identifier_node': '528'},
'fld:name': {'node:identifier_node': '3082'},
},
}
}
just_vals = {
'name' : 'just values tree',
'exprs' : {
'node:function_decl': {
'fld:body': {'skip': 'yes'},
'fld:args': {'node:parm_decl': '45'},
'fld:mngl': {'node:identifier_node': '528'},
'fld:name': {'node:identifier_node': '3082'},
},
}
}
stree = {
'name' : 'addr expr tree',
'exprs':
{
'node:addr_expr': {
'fld:type': {
'node:function_decl': fdecl, #this could contain an entire function
}
}
}
}
tree = {
'name' : 'main tree',
'exprs':
{
'node:addr_expr': {
'fld:OP0': {
'node:pointer_type': '90'
},
'fld:type': {
#u'node:function_decl': u'78', this could contain an entire function
'node:string_cst': '9',
'node:var_decl': '3'
}
},
'node:array_ref': {'fld:OP0': {'node:component_ref': '3'},
'fld:OP1': {'node:var_decl': '3'}},
'node:bind_expr': {'fld:body': {'node:return_expr': '30',
'node:statement_list': '24'},
'fld:vars': {'node:var_decl': '21'}},
'node:bit_and_expr': {'fld:OP0': {'node:array_ref': '1',
'node:component_ref': '2',
'node:convert_expr': '4',
'node:nop_expr': '3',
'node:parm_decl': '2',
'node:plus_expr': '3'},
'fld:OP1': {'node:bit_not_expr': '1',
'node:integer_cst': '13',
'node:var_decl': '1'}},
'node:bit_ior_expr': {'fld:OP0': {'node:array_ref': '1',
'node:bit_and_expr': '3',
'node:bit_ior_expr': '1',
'node:nop_expr': '1'},
'fld:OP1': {'node:bit_and_expr': '2',
'node:lshift_expr': '3',
'node:var_decl': '1'}},
'node:bit_not_expr': {'fld:OP0': {'node:var_decl': '1'}},
'node:call_expr': {'fld:E0': {'node:ge_expr': '6',
'node:integer_cst': '10',
'node:nop_expr': '23',
'node:parm_decl': '18',
'node:var_decl': '7'},
'fld:E1': {'node:integer_cst': '12',
'node:nop_expr': '13',
'node:parm_decl': '8',
'node:var_decl': '2'},
'fld:E2': {'node:integer_cst': '8',
'node:parm_decl': '6',
'node:var_decl': '2'},
'fld:E3': {'node:integer_cst': '5',
'node:parm_decl': '2'},
'fld:fn': {'node:addr_expr': '76',
'node:parm_decl': '1'}},
'node:case_label_expr': {'fld:low': {'node:integer_cst': '4'},
'fld:name': {'node:label_decl': '5'}},
'node:component_ref': {'fld:OP0': {'node:indirect_ref': '25',
'node:var_decl': '1'},
'fld:OP1': {'node:field_decl': '26'}},
'node:compound_expr': {'fld:OP0': {'node:modify_expr': '2'},
'fld:OP1': {'node:integer_cst': '2'}},
'node:cond_expr': {'fld:OP0': {'node:eq_expr': '12',
'node:gt_expr': '2',
'node:le_expr': '2',
'node:lt_expr': '2',
'node:ne_expr': '28',
'node:truth_andif_expr': '14',
'node:truth_orif_expr': '4'},
'fld:OP1': {'node:bind_expr': '2',
'node:call_expr': '16',
'node:cond_expr': '1',
'node:convert_expr': '2',
'node:goto_expr': '12',
'node:modify_expr': '9',
'node:nop_expr': '5',
'node:statement_list': '17'},
'fld:OP2': {'node:call_expr': '4',
'node:cond_expr': '3',
'node:goto_expr': '12',
'node:integer_cst': '2',
'node:nop_expr': '6',
'node:parm_decl': '2',
'node:return_expr': '1'}},
'node:const_decl': {#u'fld:chain': {u'node:const_decl': u'462',
# u'node:type_decl': u'26'},
'fld:cnst': {'node:integer_cst': '488'},
'fld:name': {'node:identifier_node': '488'},
#u'fld:scpe': {u'node:translation_unit_decl': u'488'}
},
'node:convert_expr': {'fld:OP0': {'node:addr_expr': '1',
'node:call_expr': '1',
'node:parm_decl': '9',
'node:rshift_expr': '3'}},
'node:eq_expr': {'fld:OP0': {'node:call_expr': '2',
'node:nop_expr': '16',
'node:parm_decl': '1',
'node:var_decl': '6'},
'fld:OP1': {'node:integer_cst': '12',
'node:nop_expr': '7',
'node:parm_decl': '6'}},
'node:field_decl': {
#u'fld:bpos': {u'node:integer_cst': u'562'},
#u'fld:chain': {u'node:field_decl': u'427'},
'fld:name': {'node:identifier_node': '545'},
'fld:orig': {'node:field_decl': '2'},
#u'fld:size': {u'node:integer_cst': u'562'}
},
'node:function_decl': {'fld:args': {'node:parm_decl': '45'},
'fld:body': {'node:bind_expr': '51'},
#u'fld:chain': {u'node:function_decl': u'3059',
# u'node:type_decl': u'3',
# u'node:var_decl': u'19'},
'fld:mngl': {'node:identifier_node': '528'},
'fld:name': {'node:identifier_node': '3082'},
#u'fld:scpe': {u'node:translation_unit_decl': u'2767'}
},
'node:ge_expr': {'fld:OP0': {'node:component_ref': '6'},
'fld:OP1': {'node:component_ref': '6'}},
'node:goto_expr': {'fld:labl': {'node:label_decl': '46'}},
'node:gt_expr': {'fld:OP0': {'node:var_decl': '2'},
'fld:OP1': {'node:integer_cst': '2'}},
'node:indirect_ref': {'fld:OP0': {'node:call_expr': '2',
'node:nop_expr': '3',
'node:parm_decl': '38',
'node:pointer_plus_expr': '18',
'node:postincrement_expr': '7',
'node:var_decl': '15'}},
'node:label_decl': {'fld:name': {'node:identifier_node': '1'},
#u'fld:scpe': {u'node:function_decl': u'47'}
},
'node:label_expr': {'fld:name': {'node:label_decl': '42'}},
'node:le_expr': {'fld:OP0': {'node:nop_expr': '1',
'node:parm_decl': '1',
'node:plus_expr': '2'},
'fld:OP1': {'node:integer_cst': '4'}},
'node:lshift_expr': {'fld:OP0': {'node:bit_and_expr': '3',
'node:integer_cst': '3'},
'fld:OP1': {'node:bit_and_expr': '3',
'node:integer_cst': '3'}},
'node:lt_expr': {'fld:OP0': {'node:var_decl': '2'},
'fld:OP1': {'node:integer_cst': '1',
'node:var_decl': '1'}},
'node:modify_expr': {'fld:OP0': {'node:array_ref': '2',
'node:indirect_ref': '11',
'node:parm_decl': '1',
'node:result_decl': '50',
'node:var_decl': '49'},
'fld:OP1': {'node:bit_and_expr': '1',
'node:bit_ior_expr': '4',
'node:call_expr': '18',
'node:compound_expr': '2',
'node:cond_expr': '14',
'node:convert_expr': '4',
'node:indirect_ref': '1',
'node:integer_cst': '34',
'node:modify_expr': '1',
'node:ne_expr': '3',
'node:nop_expr': '6',
'node:parm_decl': '2',
'node:plus_expr': '1',
'node:pointer_plus_expr': '1',
'node:postincrement_expr': '1',
'node:preincrement_expr': '1',
'node:trunc_div_expr': '1',
'node:var_decl': '18'}},
'node:mult_expr': {'fld:OP0': {'node:nop_expr': '2',
'node:var_decl': '1'},
'fld:OP1': {'node:integer_cst': '2',
'node:parm_decl': '1'}},
'node:ne_expr': {'fld:OP0': {'node:bit_and_expr': '3',
'node:call_expr': '9',
'node:component_ref': '1',
'node:modify_expr': '2',
'node:nop_expr': '25',
'node:parm_decl': '1',
'node:var_decl': '18'},
'fld:OP1': {'node:integer_cst': '48',
'node:parm_decl': '11'}},
'node:nop_expr': {'fld:OP0': {'node:addr_expr': '13',
'node:array_ref': '1',
'node:bit_ior_expr': '1',
'node:call_expr': '7',
'node:component_ref': '2',
'node:convert_expr': '3',
'node:indirect_ref': '40',
'node:modify_expr': '3',
'node:mult_expr': '3',
'node:nop_expr': '3',
'node:parm_decl': '24',
'node:plus_expr': '3',
'node:postincrement_expr': '3',
'node:var_decl': '31'}},
'node:parm_decl': {'fld:chain': {'node:parm_decl': '48'},
'fld:name': {'node:identifier_node': '93'},
#u'fld:scpe': {u'node:function_decl': u'93'},
#u'fld:size': {u'node:integer_cst': u'93'}
}
,
'node:plus_expr': {'fld:OP0': {'node:nop_expr': '2',
'node:parm_decl': '6',
'node:var_decl': '2'},
'fld:OP1': {'node:integer_cst': '9',
'node:var_decl': '1'}},
'node:pointer_plus_expr': {'fld:OP0': {'node:indirect_ref': '2',
'node:parm_decl': '17'},
'fld:OP1': {'node:integer_cst': '1',
'node:nop_expr': '18'}},
'node:postdecrement_expr': {'fld:OP0': {'node:var_decl': '1'},
'fld:OP1': {'node:integer_cst': '1'}},
'node:postincrement_expr': {'fld:OP0': {'node:component_ref': '6',
'node:indirect_ref': '1',
'node:parm_decl': '2',
'node:var_decl': '3'},
'fld:OP1': {'node:integer_cst': '12'}},
'node:preincrement_expr': {'fld:OP0': {'node:parm_decl': '3',
'node:var_decl': '9'},
'fld:OP1': {'node:integer_cst': '12'}},
'node:result_decl': {
#u'fld:scpe': {u'node:function_decl': u'49'},
# u'fld:size': {u'node:integer_cst': u'49'}
},
'node:return_expr': {'fld:expr': {'node:modify_expr': '50'}},
'node:rshift_expr': {'fld:OP0': {'node:parm_decl': '3'},
'fld:OP1': {'node:integer_cst': '3'}},
'node:statement_list': {'fld:E0': {'node:call_expr': '4',
'node:case_label_expr': '1',
'node:decl_expr': '21',
'node:goto_expr': '2',
'node:modify_expr': '14'},
'fld:E1': {'node:call_expr': '4',
'node:case_label_expr': '1',
'node:cond_expr': '7',
'node:decl_expr': '8',
'node:goto_expr': '12',
'node:label_expr': '4',
'node:modify_expr': '4',
'node:postincrement_expr': '1',
'node:switch_expr': '1'},
'fld:E10': {'node:cond_expr': '2',
'node:label_expr': '1',
'node:modify_expr': '2'},
'fld:E11': {'node:call_expr': '1',
'node:cond_expr': '1',
'node:modify_expr': '1',
'node:postdecrement_expr': '1',
'node:return_expr': '1'},
'fld:E12': {'node:cond_expr': '1',
'node:goto_expr': '1',
'node:modify_expr': '1',
'node:return_expr': '1'},
'fld:E13': {'node:case_label_expr': '1',
'node:label_expr': '1',
'node:modify_expr': '1'},
'fld:E14': {'node:call_expr': '1',
'node:cond_expr': '2'},
'fld:E15': {'node:label_expr': '1',
'node:return_expr': '1'},
'fld:E16': {'node:return_expr': '1'},
'fld:E2': {'node:call_expr': '2',
'node:case_label_expr': '1',
'node:cond_expr': '3',
'node:convert_expr': '1',
'node:decl_expr': '2',
'node:goto_expr': '2',
'node:label_expr': '8',
'node:modify_expr': '4',
'node:preincrement_expr': '2',
'node:return_expr': '6'},
'fld:E3': {'node:call_expr': '2',
'node:cond_expr': '4',
'node:decl_expr': '2',
'node:label_expr': '3',
'node:modify_expr': '4',
'node:preincrement_expr': '6'},
'fld:E4': {'node:call_expr': '2',
'node:cond_expr': '6',
'node:decl_expr': '1',
'node:label_expr': '7',
'node:modify_expr': '1',
'node:preincrement_expr': '3',
'node:return_expr': '1'},
'fld:E5': {'node:call_expr': '1',
'node:cond_expr': '7',
'node:goto_expr': '3',
'node:label_expr': '4',
'node:modify_expr': '5'},
'fld:E6': {'node:call_expr': '1',
'node:cond_expr': '3',
'node:goto_expr': '1',
'node:label_expr': '10',
'node:modify_expr': '3',
'node:return_expr': '2'},
'fld:E7': {'node:bind_expr': '1',
'node:case_label_expr': '1',
'node:cond_expr': '3',
'node:goto_expr': '1',
'node:label_expr': '1',
'node:modify_expr': '3',
'node:return_expr': '6'},
'fld:E8': {'node:cond_expr': '3',
'node:label_expr': '2',
'node:modify_expr': '2',
'node:return_expr': '1'},
'fld:E9': {'node:cond_expr': '4',
'node:modify_expr': '1'}},
'node:switch_expr': {'fld:body': {'node:statement_list': '1'},
'fld:cond': {'node:var_decl': '1'}},
'node:tree_list': {'fld:chan': {'node:tree_list': '2714'},
'fld:purp': {'node:identifier_node': '488'},
'fld:valu': {'node:integer_cst': '488'}},
'node:trunc_div_expr': {'fld:OP0': {'node:nop_expr': '3',
'node:plus_expr': '1'},
'fld:OP1': {'node:integer_cst': '4'}},
'node:truth_andif_expr': {'fld:OP0': {'node:eq_expr': '1',
'node:ne_expr': '13',
'node:truth_andif_expr': '6'},
'fld:OP1': {'node:eq_expr': '2',
'node:le_expr': '2',
'node:ne_expr': '15',
'node:truth_and_expr': '1'}},
'node:truth_orif_expr': {'fld:OP0': {'node:eq_expr': '4',
'node:truth_orif_expr': '2'},
'fld:OP1': {'node:eq_expr': '6'}},
'node:type_decl': {#u'fld:chain': {u'node:const_decl': u'26',
# u'node:function_decl': u'5',
# u'node:type_decl': u'460'},
'fld:name': {'node:identifier_node': '318'},
#u'fld:scpe': {u'node:translation_unit_decl': u'449'}
},
'node:var_decl': {#u'fld:chain': {u'node:function_decl': u'18',
# u'node:label_decl': u'1',
# u'node:var_decl': u'106'},
'fld:init': {'node:indirect_ref': '3',
'node:integer_cst': '6',
'node:lshift_expr': '3',
'node:trunc_div_expr': '3',
'node:var_decl': '2'},
'fld:name': {'node:identifier_node': '146'},
#u'fld:scpe': {u'node:function_decl': u'34',
# u'node:translation_unit_decl': u'112'},
#u'fld:size': {u'node:integer_cst': u'134'}
},
'node:enumeral_type': {
#{u'fld:csts': {u'node:tree_list': u'31'},
'fld:max': {'node:integer_cst': '31'},
'fld:min': {'node:integer_cst': '31'},
'fld:name': {'node:identifier_node': '9',
'node:type_decl': '5'},
'fld:size': {'node:integer_cst': '31'},
#u'fld:unql': {u'node:enumeral_type': u'5'}
},
'node:integer_type': {'fld:max': {'node:integer_cst': '188'},
'fld:min': {'node:integer_cst': '188'},
'fld:name': {'node:identifier_node': '2',
'node:type_decl': '157'},
'fld:size': {'node:integer_cst': '188'},
#u'fld:unql': {u'node:integer_type': u'144'}
},
'node:pointer_type': {'fld:name': {'node:type_decl': '17'},
'fld:ptd': {'node:array_type': '7',
'node:function_type': '77',
'node:integer_type': '40',
'node:pointer_type': '18',
'node:real_type': '6',
'node:record_type': '129',
'node:union_type': '2',
'node:vector_type': '3',
'node:void_type': '9'},
'fld:size': {'node:integer_cst': '291'},
'fld:unql': {'node:pointer_type': '62'}},
},
# here are the types of objects that are ignored
'types': {
'node:array_ref': {'fld:type': {'node:integer_type': '3'}},
'node:array_type': {'fld:domn': {'node:integer_type': '49'},
'fld:elts': {'node:integer_type': '36',
'node:pointer_type': '7',
'node:record_type': '14'},
'fld:name': {'node:type_decl': '8'},
#u'fld:size': {u'node:integer_cst': u'49'},
'fld:unql': {'node:array_type': '12'}},
'node:bind_expr': {'fld:type': {'node:void_type': '54'}},
'node:bit_and_expr': {'fld:type': {'node:integer_type': '15'}},
'node:bit_ior_expr': {'fld:type': {'node:integer_type': '6'}},
'node:bit_not_expr': {'fld:type': {'node:integer_type': '1'}},
'node:boolean_type': {'fld:name': {'node:type_decl': '1'},
'fld:size': {'node:integer_cst': '1'}},
'node:call_expr': {'fld:type': {'node:integer_type': '46',
'node:pointer_type': '12',
'node:real_type': '1',
'node:void_type': '18'}},
'node:case_label_expr': {'fld:type': {'node:void_type': '5'}},
'node:complex_type': {'fld:name': {'node:type_decl': '4'},
'fld:size': {'node:integer_cst': '5'}},
'node:component_ref': {'fld:type': {'node:array_type': '3',
'node:enumeral_type': '1',
'node:integer_type': '2',
'node:pointer_type': '20'}},
'node:compound_expr': {'fld:type': {'node:integer_type': '2'}},
'node:cond_expr': {'fld:type': {'node:integer_type': '11',
'node:pointer_type': '3',
'node:void_type': '50'}},
'node:const_decl': {'fld:type': {'node:enumeral_type': '488'}},
'node:convert_expr': {'fld:type': {'node:integer_type': '11',
'node:pointer_type': '2',
'node:void_type': '1'}},
'node:decl_expr': {'fld:type': {'node:void_type': '34'}},
'node:enumeral_type': {'fld:csts': {'node:tree_list': '31'},
#u'fld:max': {u'node:integer_cst': u'31'},
#u'fld:min': {u'node:integer_cst': u'31'},
#u'fld:name': {u'node:identifier_node': u'9',
# u'node:type_decl': u'5'},
#u'fld:size': {u'node:integer_cst': u'31'},
'fld:unql': {'node:enumeral_type': '5'}},
'node:eq_expr': {'fld:type': {'node:integer_type': '25'}},
'node:pointer_type': {
'fld:name': {'node:type_decl': '17'},
'fld:ptd': {'node:array_type': '7',
'node:function_type': '77',
'node:integer_type': '40',
'node:pointer_type': '18',
'node:real_type': '6',
'node:record_type': '129',
'node:union_type': '2',
'node:vector_type': '3',
'node:void_type': '9'},
'fld:size': {'node:integer_cst': '291'},
'fld:unql': {'node:pointer_type': '62'}},
'node:field_decl': {
#u'fld:scpe': {u'node:record_type': u'459',
# u'node:union_type': u'103'},
'fld:type': {'node:array_type': '42',
'node:enumeral_type': '4',
'node:integer_type': '290',
'node:pointer_type': '169',
'node:real_type': '2',
'node:record_type': '29',
'node:union_type': '26'}},
'node:function_decl': {'fld:type': {'node:function_type': '3082'}},
'node:function_type': {'fld:name': {'node:type_decl': '45'},
'fld:prms': {'node:tree_list': '1102'},
'fld:retn': {'node:boolean_type': '22',
'node:complex_type': '13',
'node:integer_type': '487',
'node:pointer_type': '310',
'node:real_type': '66',
'node:record_type': '4',
'node:vector_type': '58',
'node:void_type': '154'},
'fld:size': {'node:integer_cst': '1114'},
'fld:unql': {'node:function_type': '51'}},
'node:ge_expr': {'fld:type': {'node:integer_type': '6'}},
'node:goto_expr': {'fld:type': {'node:void_type': '46'}},
'node:gt_expr': {'fld:type': {'node:integer_type': '2'}},
'node:indirect_ref': {'fld:type': {'node:integer_type': '47',
'node:pointer_type': '11',
'node:record_type': '25'}},
'node:integer_cst': {'fld:type': {'node:integer_type': '455',
'node:pointer_type': '12'}},
'node:integer_type': {'fld:max': {'node:integer_cst': '188'},
'fld:min': {'node:integer_cst': '188'},
'fld:name': {'node:identifier_node': '2',
'node:type_decl': '157'},
'fld:size': {'node:integer_cst': '188'},
'fld:unql': {'node:integer_type': '144'}},
'node:label_decl': {'fld:type': {'node:void_type': '47'}},
'node:label_expr': {'fld:type': {'node:void_type': '42'}},
'node:le_expr': {'fld:type': {'node:integer_type': '4'}},
'node:lshift_expr': {'fld:type': {'node:integer_type': '6'}},
'node:lt_expr': {'fld:type': {'node:integer_type': '2'}},
'node:modify_expr': {'fld:type': {'node:integer_type': '76',
'node:pointer_type': '36',
'node:real_type': '1'}},
'node:mult_expr': {'fld:type': {'node:integer_type': '3'}},
'node:ne_expr': {'fld:type': {'node:integer_type': '59'}},
'node:nop_expr': {'fld:type': {'node:integer_type': '103',
'node:pointer_type': '34'}},
'node:parm_decl': {'fld:argt': {'node:integer_type': '49',
'node:pointer_type': '44'},
'fld:type': {'node:integer_type': '49',
'node:pointer_type': '44'}},
'node:plus_expr': {'fld:type': {'node:integer_type': '10'}},
'node:pointer_plus_expr': {'fld:type': {'node:pointer_type': '19'}},
'node:postdecrement_expr': {'fld:type': {'node:integer_type': '1'}},
'node:postincrement_expr': {'fld:type': {'node:integer_type': '1',
'node:pointer_type': '11'}},
'node:preincrement_expr': {'fld:type': {'node:integer_type': '7',
'node:pointer_type': '5'}},
'node:real_type': {'fld:name': {'node:type_decl': '9'},
'fld:size': {'node:integer_cst': '9'},
'fld:unql': {'node:real_type': '2'}},
'node:record_type': {'fld:flds': {'node:field_decl': '177'},
'fld:name': {'node:identifier_node': '89',
'node:type_decl': '69'},
'fld:size': {'node:integer_cst': '177'},
'fld:unql': {'node:record_type': '79'}},
'node:reference_type': {'fld:refd': {'node:pointer_type': '1'},
'fld:size': {'node:integer_cst': '1'}},
'node:result_decl': {'fld:type': {'node:integer_type': '41',
'node:pointer_type': '7',
'node:real_type': '1'}},
'node:return_expr': {'fld:type': {'node:void_type': '51'}},
'node:rshift_expr': {'fld:type': {'node:integer_type': '3'}},
'node:string_cst': {'fld:type': {'node:array_type': '9'}},
'node:switch_expr': {'fld:type': {'node:integer_type': '1'}},
'node:tree_list': {'fld:valu': {'node:boolean_type': '9',
'node:complex_type': '12',
'node:enumeral_type': '15',
'node:integer_type': '811',
'node:pointer_type': '1227',
'node:real_type': '89',
'node:record_type': '3',
'node:reference_type': '3',
'node:union_type': '6',
'node:vector_type': '105',
'node:void_type': '4'}},
'node:trunc_div_expr': {'fld:type': {'node:integer_type': '4'}},
'node:truth_and_expr': {'fld:type': {'node:integer_type': '1'}},
'node:truth_andif_expr': {'fld:type': {'node:integer_type': '20'}},
'node:truth_orif_expr': {'fld:type': {'node:integer_type': '6'}},
'node:type_decl': {'fld:type': {'node:array_type': '8',
'node:boolean_type': '1',
'node:complex_type': '5',
'node:enumeral_type': '31',
'node:function_type': '45',
'node:integer_type': '161',
'node:pointer_type': '17',
'node:real_type': '8',
'node:record_type': '167',
'node:union_type': '48',
'node:void_type': '2'}},
'node:union_type': {'fld:flds': {'node:field_decl': '50'},
'fld:name': {'node:identifier_node': '5',
'node:type_decl': '13'},
'fld:size': {'node:integer_cst': '50'},
'fld:unql': {'node:union_type': '14'}},
'node:var_decl': {'fld:type': {'node:array_type': '14',
'node:integer_type': '95',
'node:pointer_type': '30',
'node:record_type': '7'}},
'node:vector_type': {'fld:size': {'node:integer_cst': '12'},
'fld:unql': {'node:vector_type': '1'}},
'node:void_type': {'fld:name': {'node:type_decl': '5'},
'fld:unql': {'node:void_type': '4'}}}}
f = {}
skip= {
'fld:source_file' :1 # dont need this in the document
}
def query(s):
results = prefix.q( """
SELECT ?a ?p ?o ?t WHERE {
<%s> ?p ?o.
optional {
?o rdf:type ?t.
}
}
""" % s)
d={
'node_id' : prefix.clean(s)
}
dt={
'node_id' : None # literal has no type...
}
#pprint.pprint(results)
for x in results['results']['bindings']:
v = prefix.clean(x['o']['value'])
t = None
if 't' in x:
t = prefix.clean(x['t']['value'])
else:
#pprint.pprint(x)
pass # have no node type
k = x['p']['value']
k = prefix.clean(k)
if k not in d:
if k not in skip:
d[k]=v # the value of the field
dt[k]=t # the domain type of the field object
else:
#d[k]=[d[k],v]
raise Exception("duplicate")
pprint.pprint({'query_results':d}, depth=2)
return d, dt
import types
def recurse_ref(s, subtree):
print("RECURSE for %s\n" % s)
print("using subtree : %s" % subtree['name'])
d,dt = query(s)
pprint.pprint({"Got from db":d})
if 'rdf:type' not in d:
return d
st = d['rdf:type']
#print "st" + str(st)
#pprint.pprint(dt)
found = False
if not 'exprs' in subtree:
pprint.pprint({"bad subtree": subtree}, depth=2)
raise Exception()
lookup = subtree['exprs']
for k in d:
r = None # result of the field
ot = dt[k]
v = d[k]
u = prefix.tg +v
if type(st) is dict:
            print('skip ' + str(st))
pprint.pprint({
'case': 'is type',
'k' :k,
'ot' :ot,
'st' : st
}, depth=2)
#pprint.pprint(dt)
#pass # no type
elif not ot : # no type, a literal
if k.startswith('fld:'):
r = prefix.clean(v) # just a literal
pprint.pprint({
'case': 'is literal',
'k' :k,
'dt': dt,
'ot' :ot,
'st' : st
}, depth=2)
found = True
else:
pprint.pprint({
'case': 'is no field',
'k' :k,
'ot' :ot,
'st' : st,
'r' : r,
'v' : v,
}, depth=2)
r = v # we need to store the type field
found = True
elif st in lookup:
if k in lookup[st]:
if ot in lookup[st][k]:
subtree = lookup[st][k]
if type(subtree) is dict:
if 'exprs' in subtree:
r = recurse_ref(u, subtree)
pprint.pprint({"Subtree":r}, depth=2)
else:
r = recurse_ref(u, tree)
pprint.pprint({"tree":r}, depth=2)
else:
r = recurse_ref(u, tree)
pprint.pprint({"tree2":r}, depth=2)
found = True
else:
pass # skip
if not found:
r = recurse_ref(u, just_vals ) # just get one level of info for types and such
pprint.pprint({
"missing" : True,
'k' :k,
'ot' :ot,
'st' : st,
'u' :u,
'r' :r
}, depth=2)
d[k]=r
pprint.pprint({"rec found":d}, depth=2)
return (d)
# print out what field types occur
def start():
t = {}
results = prefix.q( """
SELECT ?a WHERE {
?a fld:srcp 'eval.c:216'.
?a fld:name [ fld:string 'parse_command'].
?a rdf:type nt:function_decl.
}
""")
for x in results['results']['bindings']:
print(x['a']['value'])
r= recurse_ref(x['a']['value'],tree)
o = open("data/body2.py","w")
o.write("deep={v2}".format(v2=pprint.pformat(r)))
o.close()
start()
| gpl-2.0 | 5,027,496,312,322,403,000 | 53.828863 | 141 | 0.313759 | false |
Blake-Latchford/Bugs2 | rules/hexgrid.py | 1 | 1215 | from rules.hexcell import HexCell
class HexGrid:
"""A grid of HexCells.
Registered cells are preserved. As this represents the entire infinite hex
plane, any cell can be gotten. However, only registered cells are guaranteed
to be identical on future calls.
"""
def __init__(self):
self._registered_cells = {}
def get_cell(self, q, r):
"""Get the cell at the specified coordinates. If no cell is registered
at that location, create a temporary new cell."""
coords = (q, r)
if coords in self._registered_cells:
return self._registered_cells[coords]
return HexCell(q, r)
def register_cell(self, hex_cell):
"""Register a hex cell to be retained in the grid."""
assert (hex_cell.q, hex_cell.r) not in self._registered_cells
self._registered_cells[(hex_cell.q, hex_cell.r)] = hex_cell
def unregister_cell(self, hex_cell):
assert self._registered_cells[(hex_cell.q, hex_cell.r)] is hex_cell
self._registered_cells.pop((hex_cell.q, hex_cell.r), None)
def reset(self):
self._registered_cells = {}
def __repr__(self):
return repr(self._registered_cells)
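# Minimal usage sketch (not part of the original module); it assumes HexCell
# is constructed as HexCell(q, r), exactly as get_cell() does above.
if __name__ == "__main__":
    grid = HexGrid()
    cell = grid.get_cell(2, -1)       # unregistered: a fresh temporary object
    grid.register_cell(cell)          # from now on the same object is returned
    assert grid.get_cell(2, -1) is cell
    grid.unregister_cell(cell)
    grid.reset()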
| gpl-3.0 | 6,787,040,432,161,741,000 | 30.973684 | 80 | 0.633745 | false |
ollej/shoutbridge | src/bridges/XmppPyBridge.py | 1 | 7001 | # -*- coding: utf-8 -*-
"""
The MIT License
Copyright (c) 2010 Olle Johansson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import os
import xmpp
from xmpp.protocol import NodeProcessed, Error, ERR_ITEM_NOT_FOUND, NS_VERSION, NS_COMMANDS
import time
import re
from bridges.XmppBridge import *
from utils.utilities import *
class XmppPyBridge(XmppBridge):
login = ""
passwd = ""
room = ""
host = ""
port = 5222
discoName = "Shoutbridge"
shoutbox = None
roster = []
def __init__(self, sbox, cfg):
"""
Instantiate an XMPP bridge using XMPP login details and a shoutbox object.
"""
self.shoutbox = sbox
self.login = cfg.xmpp_login
self.passwd = cfg.xmpp_pass
self.host = cfg.xmpp_host
if cfg.xmpp_port:
self.port = cfg.xmpp_port
self.room = cfg.xmpp_room
# Make an XMPP connection
self.make_connection()
# Register handlers
self.register_handlers()
def __del__(self):
if self.cl:
self.cl.disconnect()
def make_connection(self):
"""
Make an XMPP connection and authorize the user.
"""
self.jid = xmpp.protocol.JID(self.login)
debug = xmpp.debug.Debug() #['always', 'nodebuilder']
self.cl = xmpp.Client(self.jid.getDomain(), debug=debug)
self.con = self.cl.connect()
#self.cl = xmpp.client.Component(self.jid.getDomain(), self.port, debug=debug)
#self.con = self.cl.connect((self.jid.getDomain(), self.port))
if not self.con:
raise BridgeConnectionError
print 'Connected with', self.con
self.auth = self.cl.auth(self.jid.getNode(), self.passwd, resource=self.jid.getResource())
if not self.auth:
raise BridgeAuthenticationError
print 'Authenticated using', self.auth
def register_handlers(self):
"""
Register message handlers
"""
self.cl.RegisterHandler('iq', self.handle_iq)
self.cl.RegisterHandler('presence', self.handle_presence)
self.cl.RegisterHandler('message', self.handle_message)
self.disco = xmpp.browser.Browser()
self.disco.PlugIn(self.cl)
self.disco.setDiscoHandler(self.xmpp_base_disco,node='', jid=self.login)
# Disco Handlers
def xmpp_base_disco(self, con, event, type):
fromjid = event.getFrom().__str__()
to = event.getTo()
node = event.getQuerynode();
#Type is either 'info' or 'items'
if to == self.login:
if node == None:
if type == 'info':
return {
'ids': [
{'category': 'gateway', 'type': 'smtp', 'name': self.discoName}],
'features': [NS_VERSION, NS_COMMANDS]}
if type == 'items':
return []
else:
self.cl.send(Error(event, ERR_ITEM_NOT_FOUND))
raise NodeProcessed
else:
self.cl.send(Error(event, MALFORMED_JID))
raise NodeProcessed
def handle_iq(self, conn, iq_node):
"""
Handler for processing some "get" query from custom namespace
"""
print "Iq stanza received:", iq_node.getType(), iq_node.getFrom().getResource()
reply = iq_node.buildReply('result')
# ... put some content into reply node
conn.send(reply)
raise NodeProcessed # This stanza is fully processed
def handle_presence(self, conn, pres):
nick = pres.getFrom().getResource()
type = pres.getType()
print "Presence stanza received:", nick, type
if type == 'unavailable':
if nick in self.roster:
self.roster.remove(nick)
print "Adding to roster:", nick
else:
if nick not in self.roster:
self.roster.append(nick)
print "Removing from roster:", nick
def handle_message(self, conn, mess):
"""
Handle an XMPP message.
"""
type = mess.getType()
fromjid = mess.getFrom().getStripped()
nick = mess.getFrom().getResource()
print "Message stanza received:", fromjid, '/', nick, type
if type in ['message', 'chat', None]:
# and fromjid == self.remotejid:
text = mess.getBody()
try:
user = self.shoutbox.getUserByLogin(fromjid)
except ShoutboxUserNotFoundError:
# Default to anonymous user with JID as username
user = User(1, nick, '', '')
self.shoutbox.sendShout(user, text)
def clean_message(self, text):
"""
Clean text of unwanted content.
"""
text = strip_tags(text)
return text
def send_message(self, tojid, text):
"""
Send an text as XMPP message to tojid
"""
try:
id = self.cl.send(xmpp.protocol.Message(tojid, text))
print 'Sent message with id', id
except UnicodeDecodeError:
print "Unicode Decode Error: " + text
def process_shoutbox_messages(self):
msgs = self.shoutbox.readShouts()
for m in msgs:
text = self.clean_message(m.text)
text = "%s <%s> %s" % (m.time, m.name, text)
self.send_message(self.room, text)
def listen(self):
"""
Start listening on XMPP and Shoutbox, relaying messages.
"""
try:
while 1:
print "Loop..."
# Process incoming XMPP messages.
self.cl.Process(5)
# Process shoutbox messages.
self.process_shoutbox_messages()
# Sleep before next loop iteration.
#time.sleep(1)
# Reconnect to XMPP if necessary.
if not self.cl.isConnected():
self.cl.reconnectAndReauth()
except KeyboardInterrupt:
print "Exiting..."
| mit | 4,904,962,323,956,999,000 | 32.821256 | 98 | 0.584202 | false |
Diti24/python-ivi | ivi/agilent/agilent437B.py | 1 | 14103 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
from .. import pwrmeter
import time
Units = set(['dBm', 'Watts'])
class agilent437B(ivi.Driver, pwrmeter.Base, pwrmeter.ManualRange,
pwrmeter.DutyCycleCorrection, pwrmeter.AveragingCount,
pwrmeter.ZeroCorrection, pwrmeter.Calibration,
pwrmeter.ReferenceOscillator):
"Agilent 437B RF power meter"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '437B')
super(agilent437B, self).__init__(*args, **kwargs)
self._channel_count = 1
self._identity_description = "Agilent 437B RF power meter driver"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = "Agilent Technologies"
self._identity_instrument_model = ""
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 3
self._identity_specification_minor_version = 0
self._identity_supported_instrument_models = ['437B']
self._init_channels()
def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
"Opens an I/O session to the instrument."
super(agilent437B, self)._initialize(resource, id_query, reset, **keywargs)
# interface clear
if not self._driver_operation_simulate:
self._clear()
# check ID
if id_query and not self._driver_operation_simulate:
id = self.identity.instrument_model
id_check = self._instrument_id
id_short = id[:len(id_check)]
if id_short != id_check:
raise Exception("Instrument ID mismatch, expecting %s, got %s", id_check, id_short)
# reset
if reset:
self.utility_reset()
def _load_id_string(self):
if self._driver_operation_simulate:
self._identity_instrument_manufacturer = "Not available while simulating"
self._identity_instrument_model = "Not available while simulating"
self._identity_instrument_firmware_revision = "Not available while simulating"
else:
lst = self._ask("*IDN?").split(",")
self._identity_instrument_manufacturer = lst[0]
self._identity_instrument_model = lst[1]
self._identity_instrument_firmware_revision = lst[3]
self._set_cache_valid(True, 'identity_instrument_manufacturer')
self._set_cache_valid(True, 'identity_instrument_model')
self._set_cache_valid(True, 'identity_instrument_firmware_revision')
def _get_identity_instrument_manufacturer(self):
if self._get_cache_valid():
return self._identity_instrument_manufacturer
self._load_id_string()
return self._identity_instrument_manufacturer
def _get_identity_instrument_model(self):
if self._get_cache_valid():
return self._identity_instrument_model
self._load_id_string()
return self._identity_instrument_model
def _get_identity_instrument_firmware_revision(self):
if self._get_cache_valid():
return self._identity_instrument_firmware_revision
self._load_id_string()
return self._identity_instrument_firmware_revision
def _utility_disable(self):
pass
def _utility_error_query(self):
error_code = 0
error_message = "No error"
#if not self._driver_operation_simulate:
# error_code, error_message = self._ask(":system:error?").split(',')
# error_code = int(error_code)
# error_message = error_message.strip(' "')
return (error_code, error_message)
def _utility_lock_object(self):
pass
def _utility_reset(self):
if not self._driver_operation_simulate:
self._write("*RST")
self._clear()
self.driver_operation.invalidate_all_attributes()
def _utility_reset_with_defaults(self):
self._utility_reset()
def _utility_self_test(self):
code = 0
message = "Self test passed"
if not self._driver_operation_simulate:
code = int(self._ask("*TST?"))
if code != 0:
message = "Self test failed"
return (code, message)
def _utility_unlock_object(self):
pass
def _init_channels(self):
try:
super(agilent437B, self)._init_channels()
except AttributeError:
pass
self._channel_name = list()
self._channel_averaging_count_auto = list()
self._channel_correction_frequency = list()
self._channel_offset = list()
self._channel_range_auto = list()
self._channel_units = list()
for i in range(self._channel_count):
self._channel_name.append("channel%d" % (i+1))
self._channel_averaging_count_auto.append(True)
self._channel_correction_frequency.append(50e6)
self._channel_offset.append(0.0)
self._channel_range_auto.append(True)
self._channel_units.append('dBm')
self.channels._set_list(self._channel_name)
def _get_channel_averaging_count_auto(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_averaging_count_auto[index]
def _set_channel_averaging_count_auto(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = bool(value)
if not value:
raise ivi.ValueNotSupportedException()
self._channel_averaging_count_auto[index] = value
def _get_channel_correction_frequency(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_correction_frequency[index]
def _set_channel_correction_frequency(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write("FR%eEN" % (value))
self._channel_correction_frequency[index] = value
self._set_cache_valid(index=index)
def _get_channel_offset(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_offset[index]
def _set_channel_offset(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write("OS%eEN" % (value))
self._channel_offset[index] = value
self._set_cache_valid(index=index)
def _get_channel_range_auto(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_range_auto[index]
def _set_channel_range_auto(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = bool(value)
self._channel_range_auto[index] = value
def _get_channel_units(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_units[index]
def _set_channel_units(self, index, value):
index = ivi.get_index(self._channel_name, index)
if value not in Units:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
if value == 'dBm':
self._write("LG")
elif value == 'Watts':
self._write("LN")
self._channel_units[index] = value
self._set_cache_valid(index=index)
def _get_measurement_measurement_state(self):
return self._measurement_measurement_state
def _measurement_abort(self):
self._clear()
pass
def _measurement_configure(self, operator, operand1, operand2):
pass
def _measurement_fetch(self):
if self._driver_operation_simulate:
return
val = self._read()
return float(val)
def _measurement_initiate(self):
if self._driver_operation_simulate:
return
self._write("TR1")
def _measurement_read(self, maximum_time):
self._measurement_initiate()
return self._measurement_fetch()
def _get_channel_range_lower(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_range_lower[index]
def _set_channel_range_lower(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
self._channel_range_lower[index] = value
def _get_channel_range_upper(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_range_upper[index]
def _set_channel_range_upper(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
self._channel_range_upper[index] = value
def _get_channel_duty_cycle_enabled(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_duty_cycle_enabled[index]
def _set_channel_duty_cycle_enabled(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = bool(value)
if not self._driver_operation_simulate:
self._write("DC%d" % int(value))
self._channel_duty_cycle_enabled[index] = value
self._set_cache_valid(index=index)
def _get_channel_duty_cycle_value(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_duty_cycle_value[index]
def _set_channel_duty_cycle_value(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write("DY%eEN" % (value))
self._channel_duty_cycle_value[index] = value
self._set_cache_valid(index=index)
def _get_channel_averaging_count(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_averaging_count[index]
def _set_channel_averaging_count(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = int(value)
if not self._driver_operation_simulate:
self._write("FM%eEN" % (value))
self._channel_averaging_count[index] = value
self._set_cache_valid(index=index)
def _get_channel_zero_state(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_zero_state[index]
def _channel_zero(self, index):
index = ivi.get_index(self._channel_name, index)
if self._driver_operation_simulate:
return
self._write("CS")
self._write("ZE")
it = 0
while True:
val = self._read_stb()
if val & 2:
break
if val & 8 or it > 20:
return
time.sleep(0.5)
self._channel_zero_state[index] = 'complete'
def _get_channel_calibration_state(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_calibration_state[index]
def _channel_calibrate(self, index):
index = ivi.get_index(self._channel_name, index)
if self._driver_operation_simulate:
return
self._write("CS")
self._write("CLEN")
it = 0
while True:
val = self._read_stb()
if val & 2:
break
if val & 8 or it > 20:
return
time.sleep(0.5)
self._channel_calibration_state[index] = 'complete'
def _get_reference_oscillator_enabled(self):
return self._reference_oscillator_enabled
def _set_reference_oscillator_enabled(self, value):
value = bool(value)
if not self._driver_operation_simulate:
self._write("OC%d" % int(value))
self._reference_oscillator_enabled = value
self._set_cache_valid()
def _get_reference_oscillator_frequency(self):
return self._reference_oscillator_frequency
def _set_reference_oscillator_frequency(self, value):
value = float(value)
value = 50e6 # fixed at 50 MHz
self._reference_oscillator_frequency = value
def _get_reference_oscillator_level(self):
return self._reference_oscillator_level
def _set_reference_oscillator_level(self, value):
value = float(value)
value = 0.0 # fixed at 1.00 mW (0 dBm)
self._reference_oscillator_level = value
| mit | -4,515,614,092,387,864,600 | 35.726563 | 99 | 0.612139 | false |
CSysTeam/SecurityPackage | MainAlgorithms/Monoalphabetics/Monoalphabetic.py | 1 | 3288 |
class Monoalphabetic:
""" Frequency Information:
E 12.51%
T 9.25
A 8.04
O 7.60
I 7.26
N 7.09
S 6.54
R 6.12
H 5.49
L 4.14
D 3.99
C 3.06
U 2.71
M 2.53
F 2.30
P 2.00
G 1.96
W 1.92
Y 1.73
B 1.54
V 0.99
K 0.67
X 0.19
J 0.16
Q 0.11
Z 0.09
"""
def analyse(self, plainText: str, cipherText: str) -> str:
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        mainKey = "-" * len(LETTERS)  # '-' marks letters whose mapping is still unknown
temp = plainText.upper()
i = 0
while i < len(plainText):
index = LETTERS.index(temp[i])
mainKey = mainKey[:index] + cipherText[i] + mainKey[index+1:]
i = i + 1
temp = mainKey
temp = temp.upper()
i = 0
while i < len(temp):
if temp[i] == '-':
index = LETTERS.index(temp[i - 1])
if index == 25:
index = -1
temp = temp[:i] + LETTERS[index + 1] + temp[i + 1:]
i = i + 1
temp = temp.lower()
return temp
def decrypt(self, cipherText: str, key: str) -> str:
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
temp = cipherText.upper()
KEY = key.upper()
plain = ""
i = 0
while i < len(cipherText):
index = KEY.index(temp[i])
plain += LETTERS[index]
i = i + 1
plain = plain.lower()
return plain
def encrypt(self, plainText: str, key: str) -> str:
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
temp = plainText.upper()
i = 0
EncryptedText = ""
while i < len(plainText):
index = LETTERS.index(temp[i])
EncryptedText += key[index]
i = i + 1
EncryptedText = EncryptedText.upper()
return EncryptedText
def analyseUsingCharFrequency(self, cipher: str) -> str:
freqInfo = "ETAOINSRHLDCUMFPGWYBVKXJQZ"
newTemp = "-" * len(cipher)
temp = cipher.upper()
dictionary = {}
for letters in temp:
dictionary[letters] = 0
for letters in temp:
dictionary[letters] += 1
dictionary = sorted(dictionary.items(), reverse=True, key=lambda x: x[1])
#print("len: ", len(temp))
for position in range(0, len(temp)):
#print("position: ", position)
#print(dictionary[position])
if position >= len(dictionary) - 1:
break
#print("dict: ", dictionary[1][0])
i = 0
while i < len(dictionary):
#print(len(dictionary))
#print(dictionary[i][0])
j = 0
while j < len(temp):
#print("temp: ", temp[j],"dict: ", dictionary[i][0])
if temp[j] == dictionary[i][0]:
#print("..", temp[j:])
newTemp = newTemp[:j] + freqInfo[i] + newTemp[j + 1:]
#print("tmp: ", temp)
j = j + 1
i = i + 1
return newTemp
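# Minimal usage sketch (not part of the original module). The key is any
# 26-letter substitution alphabet; the one below is just an example value.
if __name__ == "__main__":
    mono = Monoalphabetic()
    key = "QWERTYUIOPASDFGHJKLZXCVBNM"
    encrypted = mono.encrypt("attack", key)
    print(encrypted)                      # substituted, upper-case ciphertext
    print(mono.decrypt(encrypted, key))   # round-trips back to "attack"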
| gpl-3.0 | 1,636,525,540,505,321,200 | 27.097345 | 81 | 0.439173 | false |
crazcalm/Py3.4_exploration | DataStructures/Trees/PriorityQueue.py | 1 | 8766 | """
Priority Queue with Binary Heaps:
---------------------------------
Introduction:
------------
A priority queue acts like a queue in that you can dequeue and item by
removing it from the front. However, in a priority queue the logical order
of items inside the queue is determined by their priority. The highest priority
items are at the front of the queue and the lowest priority items are at the
back.
The classic way to implement a priority queue is using a data structure
called a binary heap. A binary heap will allow us to both enqueue and dequeue
items in O(logn).
The binary heap is interesting to study because when we diagram the heap it
looks a lot like a tree, but when we implement it we only use a single list
as an internal representation.
The binary heap has two common variations: the 'min heap,' in which the
smallest key is always at the front, and the 'max heap,' in which the largest
key is always at the front.
In this section, we will implement the min heap.
Basic Operations List:
---------------------
BinaryHeap(): creates a new, empty binary heap.
insert(k): adds a new item to the heap.
findMin(): returns the item with the minimum key value, leaving item in the heap.
delMin(): returns the item with the minimum key value, removing the item from the list.
isEmpty(): returns true if the heap is empty, false otherwise.
size(): returns the number of items in the heap.
buildHeap(list): builds a new heap from a list of keys.
The Structure Property:
-----------------------
In order to make our heap work efficiently, we will take advantage of the
logarithmic nature of the tree to represent our heap.
In order to guarantee logarithmic performance, we must keep our tree balanced.
A balanced binary tree has roughly the same number of nodes in the left and right
subtrees of the root.
In our heap implementation we keep the tree balanced by creating a 'complete
binary tree.'
A complete binary tree is a tree in which each level has all of its nodes.
The exception to this is the bottom of the tree, which we fill in from left to
right.
Interesting Property:
---------------------
Another interesting property of a complete tree is that we can represent it
using a single list. We do not need to use nodes and references or even lists
of lists.
Because the tree is complete, the left child of the parent (at position p) is
is the node that is found a position 2p in the list. Similarly, the right child
of the parent is at position 2p+1 in the list.
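As a concrete illustration (an added example, not from the original text): the
complete tree with root 5, second level 9 and 11, and third level 14, 18, 19, 21
is stored as the list [0, 5, 9, 11, 14, 18, 19, 21]. The children of the node at
position 2 (the 9) sit at positions 2*2 = 4 and 2*2+1 = 5 (the 14 and 18), and
the parent of position 5 is found back at position 5 // 2 = 2.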
The Heap Order Property:
------------------------
The 'heap order property' is as follows: In a heap, for every node x with
parent p, the key in p is smaller than or equal to the key in x.
Heap Operations:
----------------
We will begin our implementation of a binary heap with the constructor.
Since the entire binary heap can be represented by a single list, all the constructor
will do is initialize the list and an attribute currentSize to keep track of the
current size of the heap.
You will notice that an empty binary heap has a single zero as the first element
of heapList and that this zero is not used, but is there so that a simple
integer can be used in later methods.
Insert method:
--------------
The next method we will implement in 'insert.'
The easiest, and most efficient, way to add an item to a list is to simply
append the item to the end of the list. The good news about appending is that
it guarantees that we will maintain the complete tree property.
The bad news is that we will very likely violate the heap structure property.
However, it is possible to write a method that will allow us to regain the heap
structure property by comparing the newly added items with its parent.
If the newly added item is less than its parent, then we can swap the item
with its parent.
Notice that when we percolate an item up, we are restoring the heap property
between the newly added item and the parent. We are also perserving the heap
property for any siblings.
delMin method notes:
--------------------
Since the heap property requires that the root of the tree be the smallest
item in the tree, finding the minimum item is easy. The hard part of delMin
is restoring full compliance with the heap structure and heap order properties
after the root has been removed.
We can restore our heap in two steps.
1. We will restore the root item by taking the last item in the list and moving
it to the root position.
2. We will restore the heap order property by pushng the new root node down
the tree to its proper position.
In order to maintain the heap order property, all we need to do is swap the
root with its smallest child less than the root. After the initial swap, we may
repeat the swapping process with a node and its children until the node is swapped
into a position on the tree where it is already less than both children.
The code for percolating a node down the tree is found in the 'percDown' and
'minChild' methods.
buildHeap method:
-----------------
To finish our discussion of binary heaps, we will look at a method to build
an entire heap from a list of keys.
If we start with an entire list then we can build the whole heap in O(n)
operations.
We will start from the middle of the list. Although we start out in the middle
of the tree and work our way back towards the root, the percDown method enusres that
the largest child is always down the tree. Beacuse it is a complete binary tree,
any nodes past the halfway point will be leaves and therefore have no children.
"""
class BinaryHeap:
"""
A priority queue acts like a queue in that you can dequeue and item by
removing it from the front. However, in a priority queue the logical order
of items inside the queue is determined by their priority. The highest priority
items are at the front of the queue and the lowest priority items are at the
back.
"""
def __init__(self):
"""
You will notice that an empty binary heap has a single zero as the first element
of heapList and that this zero is not used, but is there so that a simple
integer can be used in later methods.
"""
self.heapList = [0]
self.currentSize = 0
def percUp(self, i):
"""
Compares the newly inserted item with its parent. If the item is less
than its parents, then they will be switched.
"""
while i // 2 > 0:
if self.heapList[i] < self.heapList[i // 2]:
tmp = self.heapList[i // 2]
self.heapList[i // 2] = self.heapList[i]
self.heapList[i] = tmp
i = i // 2
def insert(self, k):
"""
Inserts a new item to the binary heap
"""
self.heapList.append(k)
self.currentSize = self.currentSize + 1
self.percUp(self.currentSize)
def percDown(self, i):
"""
Moves the root of the binary heap (or subtree of the heap) down to its
proper place in the tree.
"""
while(i * 2) <= self.currentSize:
mc = self.minChild(i)
if self.heapList[i] > self.heapList[mc]:
tmp = self.heapList[i]
self.heapList[i] = self.heapList[mc]
self.heapList[mc] = tmp
i = mc
def minChild(self, i):
"""
Returns the index of the min Child
"""
if i * 2 + 1 > self.currentSize:
return i*2
else:
if self.heapList[i*2] < self.heapList[i*2+1]:
return i*2
else:
return i*2+1
def delMin(self):
"""
Returns the smallest item in the Binary Heap
"""
retval = self.heapList[1]
self.heapList[1] = self.heapList[self.currentSize]
self.currentSize = self.currentSize - 1
self.heapList.pop()
self.percDown(1)
return retval
def buildHeap(self, alist):
"""
Builds a Binary Heap from a list
"""
i = len(alist) // 2
self.currentSize = len(alist)
self.heapList = [0] + alist[:]
while (i>0):
self.percDown(i)
i = i - 1
def isEmpty(self):
return self.currentSize == 0
if __name__ == "__main__":
test = [5,7,3,11]
test2 = [9,6,5,2,3]
print("Binary heap test 1:\n\n")
bh = BinaryHeap()
for x in test:
bh.insert(x)
while not bh.isEmpty():
print(bh.delMin())
print("Binary heap test 2: \n\n")
bh2 = BinaryHeap()
bh2.buildHeap(test2)
while not bh2.isEmpty():
print(bh2.delMin())
| mit | 7,038,202,377,744,154,000 | 32.330798 | 89 | 0.66256 | false |
averainy/averainy | python/wechat_test.py | 1 | 7797 | #!/usr/bin/python
#coding=utf-8
import xml.dom.minidom
def get_tagname():
doc = xml.dom.minidom.parseString(input_xml_string)
class msg_parse:
def __init__(self,msg):
self.doc = xml.dom.minidom.parseString(msg)
def _getData(self,tagName):
nodes=self.doc.getElementsByTagName(tagName)
if nodes:
return nodes[0].childNodes[0].data
else:
return None
def getFromUserName(self):
return self._getData("FromUserName")
def getToUserName(self):
return self._getData("ToUserName")
def getCreateTime(self):
return self._getData("CreateTime")
def getMsgType(self):
return self._getData("MsgType")
def getContent(self):
return self._getData("Content")
def getMsgId(self):
return self._getData("MsgId")
def getPicUrl(self):
return self._getData("PicUrl")
def getMediaId(self):
return self._getData("MediaId")
def getFormat(self):
return self._getData("Format")
def getMediaId(self):
return self._getData("MediaId")
def getThumbMediaId(self):
return self._getData("ThumbMediaId")
def getLocation_X(self):
return self._getData("Location_X")
def getLocation_Y(self):
return self._getData("Location_Y")
def getScale(self):
return self._getData("Scale")
def getLabel(self):
return self._getData("Label")
def getTitle(self):
return self._getData("Title")
def getDescription(self):
return self._getData("Description")
def getUrl(self):
return self._getData("Url")
def getEvent(self):
return self._getData("Event")
def getEventKey(self):
return self._getData("EventKey")
def getTicket(self):
return self._getData("Ticket")
def getLatitude(self):
return self._getData("Latitude")
def getLongitude(self):
return self._getData("Longitude")
def getPrecision(self):
return self._getData("Precision")
def getTicket(self):
return self._getData("Ticket")
def getTicket(self):
return self._getData("Ticket")
if __name__ == "__main__":
# 文本消息
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[ffdfdromUser]]></FromUserName>
<CreateTime>1348831860</CreateTime>
<MsgType><![CDATA[text]]></MsgType>
<Content><![CDATA[this is a test]]></Content>
<MsgId>1234567890123456</MsgId>
</xml>"""
abc=msg_parse(res)
print abc.getFromUserName()
print abc.getToUserName()
print abc.getCreateTime()
print abc.getMsgType()
print abc.getContent()
print abc.getMsgId()
# 图片消息
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>1348831860</CreateTime>
<MsgType><![CDATA[image]]></MsgType>
<PicUrl><![CDATA[this is a url]]></PicUrl>
<MediaId><![CDATA[media_id]]></MediaId>
<MsgId>1234567890123456</MsgId>
</xml>"""
abc=msg_parse(res)
print abc.getPicUrl()
print abc.getMediaId()
# 语音消息
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>1357290913</CreateTime>
<MsgType><![CDATA[voice]]></MsgType>
<MediaId><![CDATA[media_id]]></MediaId>
<Format><![CDATA[Format]]></Format>
<MsgId>1234567890123456</MsgId>
</xml>"""
abc=msg_parse(res)
print abc.getFormat()
# 视频消息
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>1357290913</CreateTime>
<MsgType><![CDATA[video]]></MsgType>
<MediaId><![CDATA[media_id]]></MediaId>
<ThumbMediaId><![CDATA[thumb_media_id]]></ThumbMediaId>
<MsgId>1234567890123456</MsgId>
</xml>"""
abc=msg_parse(res)
print abc.getThumbMediaId()
# 地理位置消息
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>1351776360</CreateTime>
<MsgType><![CDATA[location]]></MsgType>
<Location_X>23.134521</Location_X>
<Location_Y>113.358803</Location_Y>
<Scale>20</Scale>
<Label><![CDATA[位置信息]]></Label>
<MsgId>1234567890123456</MsgId>
</xml> """
abc=msg_parse(res)
print abc.getLocation_X()
print abc.getLocation_Y()
print abc.getScale()
print abc.getLabel()
# 链接消息
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>1351776360</CreateTime>
<MsgType><![CDATA[link]]></MsgType>
<Title><![CDATA[公众平台官网链接]]></Title>
<Description><![CDATA[公众平台官网链接]]></Description>
<Url><![CDATA[url]]></Url>
<MsgId>1234567890123456</MsgId>
</xml> """
abc=msg_parse(res)
print abc.getTitle()
print abc.getDescription()
print abc.getUrl()
# 关注/取消关注事件
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[subscribe]]></Event>
</xml>"""
abc=msg_parse(res)
print abc.getEvent()
# 扫描带参数二维码事件
# 用户未关注时,进行关注后的事件推送
res="""<xml><ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[subscribe]]></Event>
<EventKey><![CDATA[qrscene_123123]]></EventKey>
<Ticket><![CDATA[TICKET]]></Ticket>
</xml>"""
abc=msg_parse(res)
print abc.getEventKey()
print abc.getTicket()
# 用户已关注时的事件推送
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[SCAN]]></Event>
<EventKey><![CDATA[SCENE_VALUE]]></EventKey>
<Ticket><![CDATA[TICKET]]></Ticket>
</xml>"""
abc=msg_parse(res)
print abc.getEventKey()
print abc.getTicket()
# 上报地理位置事件
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[LOCATION]]></Event>
<Latitude>23.137466</Latitude>
<Longitude>113.352425</Longitude>
<Precision>119.385040</Precision>
</xml>"""
abc=msg_parse(res)
print abc.getLatitude()
print abc.getLongitude()
print abc.getPrecision()
# 点击菜单拉取消息时的事件推送
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[CLICK]]></Event>
<EventKey><![CDATA[EVENTKEY]]></EventKey>
</xml>"""
abc=msg_parse(res)
print abc.getMsgType()
print abc.getEvent()
print abc.getEventKey()
# 点击菜单跳转链接时的事件推送
res="""<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[VIEW]]></Event>
<EventKey><![CDATA[www.qq.com]]></EventKey>
</xml>"""
abc=msg_parse(res)
print abc.getMsgType()
print abc.getEvent()
print abc.getEventKey()
| gpl-2.0 | -3,903,680,879,921,830,400 | 31.786957 | 59 | 0.618486 | false |
ttanner/kryptomime | docs/conf.py | 1 | 8229 | # -*- coding: utf-8 -*-
#
# kryptomime documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 20 12:30:16 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join('..')))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'kryptomime'
copyright = u'2013, Thomas Tanner'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'kryptomimedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'kryptomime.tex', u'kryptomime Documentation',
u'Thomas Tanner', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'kryptomime', u'kryptomime Documentation',
[u'Thomas Tanner'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'kryptomime', u'kryptomime Documentation',
u'Thomas Tanner', 'kryptomime', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Autodoc settings ----------------------------------------------------------
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members', 'show-inheritance', 'undoc-members', 'show-hidden']
autoclass_content = 'both'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| lgpl-3.0 | 3,036,455,155,016,638,500 | 31.784861 | 106 | 0.701665 | false |
mjordan/pkppln | server.py | 1 | 1777 | #!/usr/bin/env python
import sys
import bottle
from bottle import Bottle, request, error, response, Response
from os.path import abspath, dirname
import logging
sys.path.append(dirname(abspath(__file__)))
import pkppln
from webapp.admin.terms_server import TermsApp
from webapp.sword.sword_server import SwordServer
from webapp.static.static_server import StaticApp
from webapp.feeds.feed_server import FeedsApp
from webapp.admin.journal_server import JournalsApp
def after_request():
if request.path.startswith('/static'):
return
try:
route_name = request.route.callback.__name__
except:
route_name = '(unknown)'
try:
pkppln.log_message(" - ".join([
'finished', request.get('REMOTE_ADDR'),
request.method, request.path,
type(request.app).__name__ + "#" + route_name
]), logging.INFO)
except:
pass
def before_request():
# pkppln.log_message(" - ".join([
# 'starting', request.get('REMOTE_ADDR'),
# request.method, request.path]))
pkppln.initialize()
static_path = dirname(abspath(__file__)) + '/static'
application = bottle.default_app()
application.add_hook('before_request', before_request)
application.add_hook('after_request', after_request)
application.mount('/static/', StaticApp('Static', static_path))
application.mount('/admin/terms/', TermsApp('Terms'))
application.mount('/admin/journals/', JournalsApp('JournalsApp'))
application.mount('/feeds/', FeedsApp('Feeds'))
application.mount('/api/sword/2.0/', SwordServer('SWORD'))
if __name__ == '__main__':
if len(sys.argv) == 2:
pkppln.config_file_name = sys.argv[1]
bottle.debug(True)
application.run(host='127.0.0.1', port=9999, reloader=True)
| gpl-3.0 | 2,497,417,321,384,419,300 | 28.616667 | 65 | 0.66798 | false |
marshallmcdonnell/interactive_plotting | matplotlib/draggable_legend_code.py | 1 | 3140 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as _plt
class DraggableLegend:
def __init__(self, legend):
self.legend = legend
self.gotLegend = False
legend.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)
legend.figure.canvas.mpl_connect('pick_event', self.on_picker)
legend.figure.canvas.mpl_connect('button_release_event', self.on_release)
legend.set_picker(self.my_legend_picker)
#----------------------------------------------------#
# Connected event handlers
def on_motion(self, event):
if self.gotLegend:
dx = event.x - self.mouse_x
dy = event.y - self.mouse_y
loc_in_canvas = self.legend_x + dx, self.legend_y + dy
loc_in_norm_axes = self.legend.parent.transAxes.inverted().transform_point(loc_in_canvas)
self.legend._loc = tuple(loc_in_norm_axes)
self.legend.figure.canvas.draw()
def my_legend_picker(self, legend, event):
return self.legend.legendPatch.contains(event)
def on_picker(self, event):
if event.artist == self.legend:
# left-click
if event.mouseevent.button == 1:
self._move_legend(event)
# mouse button pressed
if event.mouseevent.button == 2:
pass
# right-click
if event.mouseevent.button == 3:
self._hideLegend()
# mouse up
if event.mouseevent.button == 'up':
self._scaleUpLegendFont()
# mouse down
if event.mouseevent.button == 'down':
self._scaleDownLegendFont()
def on_release(self, event):
if self.gotLegend:
self.gotLegend = False
#----------------------------------------------------#
# Utility functions
def _move_legend(self,event):
bbox = self.legend.get_window_extent()
self.mouse_x = event.mouseevent.x
self.mouse_y = event.mouseevent.y
self.legend_x = bbox.xmin
self.legend_y = bbox.ymin
self.gotLegend = 1
def _scaleUpLegendFont(self,size_step=4):
size = self.legend.get_texts()[0].get_fontsize()
size += size_step
_plt.setp(self.legend.get_texts(), fontsize=size) #legend 'list' fontsize
self.legend.figure.canvas.draw()
def _scaleDownLegendFont(self,size_step=4):
size = self.legend.get_texts()[0].get_fontsize()
size -= size_step
_plt.setp(self.legend.get_texts(), fontsize=size) #legend 'list' fontsize
self.legend.figure.canvas.draw()
def _hideLegend(self):
if self.legend.get_visible():
self.legend.set_visible(False)
else:
self.legend.set_visible(True)
self.legend.figure.canvas.draw()
figure = _plt.figure()
ax = figure.add_subplot(111)
scatter = ax.scatter(np.random.randn(100), np.random.randn(100), label='hi')
legend = ax.legend()
legend = DraggableLegend(legend)
_plt.show()
| mit | -4,343,959,866,343,381,500 | 31.040816 | 101 | 0.560828 | false |
gromitsun/sim-xrf | python/snr/pysnip.py | 1 | 2468 | import numpy as np
from scipy.optimize import curve_fit
def FWHM(x, noise=100, fano=0.114):
sigma = np.sqrt((noise / 2.3548) ** 2 + 3.58 * fano * x)
return 2.3548 * sigma
def fit_FWHM(x, F):
def _FWHM(x, noise, fano):
return (noise / 2.3548) ** 2 + 3.58 * fano * x
popt, pcov = curve_fit(_FWHM, x, (F / 2.3548) ** 2, p0=[100, 0.114])
return popt
def energy_to_channel(energy, offset=2.97, gain=12.26952):
return 1. * (energy - offset) / gain
# # # Low statistics digital filter
def lsdf(E, y, FWHM=FWHM,
f=1.5,
A=75,
M=10,
r=1.3):
def _reduce(x, length_start):
for i in range(length_start):
length = length_start - i
if x < length:
raise IndexError
L = y[x - length:x].sum()
R = y[x + 1:x + length + 1].sum()
S = y[x] + L + R
slope = (R + 1.) / (L + 1.)
if S < M or S < A * np.sqrt(y[x]) or (1. / r <= slope <= r):
return S / (2. * length + 1)
print 'Not found for x = %d!' % x
return y[x]
y_out = y.copy()
for x in range(len(E)):
try:
len_0 = int(energy_to_channel(f * FWHM(E[x]), E[0], E[1] - E[0]))
y_out[x] = _reduce(x, len_0)
except IndexError:
pass
return y_out
# # # Peak-clipping
def snip(E, y, FWHM=FWHM, offset=0., gain=10., **kwargs):
det = kwargs.get('detector')
loops = kwargs.get('loops', 24)
end_loops = kwargs.get('end_loops', 8)
reduce_factor = kwargs.get('reduce_factor', np.sqrt(2))
factor = kwargs.get('factor', 2)
if det is not None:
FWHM = det.response.FWHM
offset = det.channel.offset
gain = det.channel.gain
def G(y):
return np.log(np.log(y + 1) + 1)
def w(x, factor=2):
return energy_to_channel(factor * FWHM(E[x]), offset=offset, gain=gain)
def G_inv(z):
return np.exp(np.exp(z) - 1) - 1
z_out = G(y)
for i in range(loops):
if i >= loops - end_loops:
factor /= 1. * reduce_factor
z = z_out.copy()
for x in range(len(E)):
try:
_w = w(x, factor=factor)
if _w > x:
raise IndexError
z_bar = (z[x + _w] + z[x - _w]) / 2.
z_out[x] = min(z[x], z_bar)
except IndexError:
pass
return G_inv(z_out)
| mit | 2,221,596,909,784,448,800 | 25.537634 | 79 | 0.480146 | false |
wesm/statsmodels | scikits/statsmodels/sandbox/tsa/examples/ex_mle_garch.py | 1 | 10649 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 01:01:50 2010
Author: josef-pktd
latest result
-------------
all are very close
garch0 has different parameterization of constant
ordering of parameters is different
seed 2780185
h.shape (2000,)
Optimization terminated successfully.
Current function value: 2093.813397
Iterations: 387
Function evaluations: 676
ggres.params [-0.6146253 0.1914537 0.01039355 0.78802188]
Optimization terminated successfully.
Current function value: 2093.972953
Iterations: 201
Function evaluations: 372
ggres0.params [-0.61537527 0.19635128 4.00706058]
Warning: Desired error not necessarily achieveddue to precision loss
Current function value: 2093.972953
Iterations: 51
Function evaluations: 551
Gradient evaluations: 110
ggres0.params [-0.61537855 0.19635265 4.00694669]
Optimization terminated successfully.
Current function value: 2093.751420
Iterations: 103
Function evaluations: 187
[ 0.78671519 0.19692222 0.61457171]
-2093.75141963
Final Estimate:
LLH: 2093.750 norm LLH: 2.093750
omega alpha1 beta1
0.7867438 0.1970437 0.6145467
long run variance comparison
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
R
>>> 0.7867438/(1- 0.1970437- 0.6145467)
4.1757097302897526
Garch (gjr) asymetric, longrun var ?
>>> 1/(1-0.6146253 - 0.1914537 - 0.01039355) * 0.78802188
4.2937548579245242
>>> 1/(1-0.6146253 - 0.1914537 + 0.01039355) * 0.78802188
3.8569053452140345
Garch0
>>> (1-0.61537855 - 0.19635265) * 4.00694669
0.7543830449902722
>>> errgjr4.var() #for different random seed
4.0924199964716106
todo: add code and verify, check for longer lagpolys
"""
import numpy as np
from numpy.testing import assert_almost_equal
import matplotlib.pyplot as plt
import numdifftools as ndt
import scikits.statsmodels.api as sm
from scikits.statsmodels.sandbox import tsa
from scikits.statsmodels.sandbox.tsa.garch import * # local import
nobs = 1000
examples = ['garch', 'rpyfit']
if 'garch' in examples:
err,h = generate_kindofgarch(nobs, [1.0, -0.95], [1.0, 0.1], mu=0.5)
plt.figure()
plt.subplot(211)
plt.plot(err)
plt.subplot(212)
plt.plot(h)
#plt.show()
seed = 3842774 #91234 #8837708
seed = np.random.randint(9999999)
print 'seed', seed
np.random.seed(seed)
ar1 = -0.9
err,h = generate_garch(nobs, [1.0, ar1], [1.0, 0.50], mu=0.0,scale=0.1)
# plt.figure()
# plt.subplot(211)
# plt.plot(err)
# plt.subplot(212)
# plt.plot(h)
# plt.figure()
# plt.subplot(211)
# plt.plot(err[-400:])
# plt.subplot(212)
# plt.plot(h[-400:])
#plt.show()
garchplot(err, h)
garchplot(err[-400:], h[-400:])
np.random.seed(seed)
errgjr,hgjr, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.5,0]], mu=0.0,scale=0.1)
garchplot(errgjr[:nobs], hgjr[:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
garchplot(errgjr[-400:nobs], hgjr[-400:nobs], 'GJR-GARCH(1,1) Simulation - symmetric')
np.random.seed(seed)
errgjr2,hgjr2, etax = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr2[:nobs], hgjr2[:nobs], 'GJR-GARCH(1,1) Simulation')
garchplot(errgjr2[-400:nobs], hgjr2[-400:nobs], 'GJR-GARCH(1,1) Simulation')
np.random.seed(seed)
errgjr3,hgjr3, etax3 = generate_gjrgarch(nobs, [1.0, ar1],
[[1,0],[0.1,0.9],[0.1,0.9],[0.1,0.9]], mu=0.0,scale=0.1)
garchplot(errgjr3[:nobs], hgjr3[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr3[-400:nobs], hgjr3[-400:nobs], 'GJR-GARCH(1,3) Simulation')
np.random.seed(seed)
errgjr4,hgjr4, etax4 = generate_gjrgarch(nobs, [1.0, ar1],
[[1., 1,0],[0, 0.1,0.9],[0, 0.1,0.9],[0, 0.1,0.9]],
mu=0.0,scale=0.1)
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation')
garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
varinno = np.zeros(100)
varinno[0] = 1.
errgjr5,hgjr5, etax5 = generate_gjrgarch(100, [1.0, -0.],
[[1., 1,0],[0, 0.1,0.8],[0, 0.05,0.7],[0, 0.01,0.6]],
mu=0.0,scale=0.1, varinnovation=varinno)
garchplot(errgjr5[:20], hgjr5[:20], 'GJR-GARCH(1,3) Simulation')
#garchplot(errgjr4[-400:nobs], hgjr4[-400:nobs], 'GJR-GARCH(1,3) Simulation')
#plt.show()
seed = np.random.randint(9999999) # 9188410
print 'seed', seed
x = np.arange(20).reshape(10,2)
x3 = np.column_stack((np.ones((x.shape[0],1)),x))
y, inp = miso_lfilter([1., 0],np.array([[-2.0,3,1],[0.0,0.0,0]]),x3)
nobs = 1000
warmup = 1000
np.random.seed(seed)
ar = [1.0, -0.7]#7, -0.16, -0.1]
#ma = [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]]
ma = [[1., 0, 0],[0, 0.8,0.0]] #,[0, 0.9,0.0]]
# errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, [1.0, -0.99],
# [[1., 1, 0],[0, 0.6,0.1],[0, 0.1,0.1],[0, 0.1,0.1]],
# mu=0.2, scale=0.25)
errgjr4,hgjr4, etax4 = generate_gjrgarch(warmup+nobs, ar, ma,
mu=0.4, scale=1.01)
errgjr4,hgjr4, etax4 = errgjr4[warmup:], hgjr4[warmup:], etax4[warmup:]
garchplot(errgjr4[:nobs], hgjr4[:nobs], 'GJR-GARCH(1,3) Simulation - DGP')
ggmod = Garch(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.6, 0.1, 0.2, 0.0])
ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
print 'ggres.params', ggres.params
garchplot(ggmod.errorsest, ggmod.h, title='Garch estimated')
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, maxiter=2000)
print 'ggres0.params', ggres0.params
ggmod0 = Garch0(errgjr4-errgjr4.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod.nma = 1
start_params = np.array([-0.6, 0.2, 0.1])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, method='bfgs', maxiter=2000)
print 'ggres0.params', ggres0.params
g11res = optimize.fmin(lambda params: -loglike_GARCH11(params, errgjr4-errgjr4.mean())[0], [0.93, 0.9, 0.2])
print g11res
llf = loglike_GARCH11(g11res, errgjr4-errgjr4.mean())
print llf[0]
if 'rpyfit' in examples:
from rpy import r
r.library('fGarch')
f = r.formula('~garch(1, 1)')
fit = r.garchFit(f, data = errgjr4-errgjr4.mean(), include_mean=False)
if 'rpysim' in examples:
from rpy import r
f = r.formula('~garch(1, 1)')
#fit = r.garchFit(f, data = errgjr4)
x = r.garchSim( n = 500)
print 'R acf', tsa.acf(np.power(x,2))[:15]
arma3 = Arma(np.power(x,2))
arma3res = arma3.fit(start_params=[-0.2,0.1,0.5],maxiter=5000)
print arma3res.params
arma3b = Arma(np.power(x,2))
arma3bres = arma3b.fit(start_params=[-0.2,0.1,0.5],maxiter=5000, method='bfgs')
print arma3bres.params
xr = r.garchSim( n = 100)
x = np.asarray(xr)
ggmod = Garch(x-x.mean())
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.6, 0.1, 0.2, 0.0])
ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
print 'ggres.params', ggres.params
g11res = optimize.fmin(lambda params: -loglike_GARCH11(params, x-x.mean())[0], [0.6, 0.6, 0.2])
print g11res
llf = loglike_GARCH11(g11res, x-x.mean())
print llf[0]
garchplot(ggmod.errorsest, ggmod.h, title='Garch estimated')
fit = r.garchFit(f, data = x-x.mean(), include_mean=False, trace=False)
print r.summary(fit)
'''based on R default simulation
model = list(omega = 1e-06, alpha = 0.1, beta = 0.8)
nobs = 1000
(with nobs=500, gjrgarch doesn't do well
>>> ggres = ggmod.fit(start_params=np.array([-0.6, 0.1, 0.2, 0.0]), maxiter=1000)
Optimization terminated successfully.
Current function value: -448.861335
Iterations: 385
Function evaluations: 690
>>> print 'ggres.params', ggres.params
ggres.params [ -7.75090330e-01 1.57714749e-01 -9.60223930e-02 8.76021411e-07]
rearranged
8.76021411e-07 1.57714749e-01(-9.60223930e-02) 7.75090330e-01
>>> print g11res
[ 2.97459808e-06 7.83128600e-01 2.41110860e-01]
>>> llf = loglike_GARCH11(g11res, x-x.mean())
>>> print llf[0]
442.603541936
Log Likelihood:
-448.9376 normalized: -4.489376
omega alpha1 beta1
1.01632e-06 1.02802e-01 7.57537e-01
'''
''' the following is for errgjr4-errgjr4.mean()
ggres.params [-0.54510407 0.22723132 0.06482633 0.82325803]
Final Estimate:
LLH: 2065.56 norm LLH: 2.06556
mu omega alpha1 beta1
0.07229732 0.83069480 0.26313883 0.53986167
ggres.params [-0.50779163 0.2236606 0.00700036 1.154832
Final Estimate:
LLH: 2116.084 norm LLH: 2.116084
mu omega alpha1 beta1
-4.759227e-17 1.145404e+00 2.288348e-01 5.085949e-01
run3
DGP
0.4/?? 0.8 0.7
gjrgarch:
ggres.params [-0.45196579 0.2569641 0.02201904 1.11942636]
rearranged
const/omega ma1/alpha1 ar1/beta1
1.11942636 0.2569641(+0.02201904) 0.45196579
g11:
[ 1.10262688 0.26680468 0.45724957]
-2055.73912687
R:
Final Estimate:
LLH: 2055.738 norm LLH: 2.055738
mu omega alpha1 beta1
-1.665226e-17 1.102396e+00 2.668712e-01 4.573224e-01
fit = r.garchFit(f, data = errgjr4-errgjr4.mean())
rpy.RPy_RException: Error in solve.default(fit$hessian) :
Lapack routine dgesv: system is exactly singular
run4
DGP:
mu=0.4, scale=1.01
ma = [[1., 0, 0],[0, 0.8,0.0]], ar = [1.0, -0.7]
maybe something wrong with simulation
gjrgarch
ggres.params [-0.50554663 0.24449867 -0.00521004 1.00796791]
rearranged
1.00796791 0.24449867(-0.00521004) 0.50554663
garch11:
[ 1.01258264 0.24149155 0.50479994]
-2056.3877404
R include_constant=False
Final Estimate:
LLH: 2056.397 norm LLH: 2.056397
omega alpha1 beta1
1.0123560 0.2409589 0.5049154
'''
erro,ho, etaxo = generate_gjrgarch(20, ar, ma, mu=0.04, scale=0.01,
varinnovation = np.ones(20))
if 'sp500' in examples:
import tabular as tb
import scikits.timeseries as ts
a = tb.loadSV(r'C:\Josef\work-oth\gspc_table.csv')
s = ts.time_series(a[0]['Close'][::-1],
dates=ts.date_array(a[0]['Date'][::-1],freq="D"))
sp500 = a[0]['Close'][::-1]
sp500r = np.diff(np.log(sp500))
#plt.show()
| bsd-3-clause | -5,336,992,565,001,653,000 | 31.269697 | 108 | 0.6302 | false |
JCardenasRdz/Machine-Learning-4-MRI | Infection_vs_Inflammation/Code/Process_Data.py | 1 | 2713 | # Import Modules as needed
import numpy as np
#import seaborn as sn
import pandas as pd
from pylab import *
from mylocal_functions import *
# ======== T2 MSME============= #
# Make list of all T2.txt files
T2_list = get_ipython().getoutput('ls ../Study_03_CBA/*T2.txt')
# Allocate variables needed for analysis
T2DF=pd.DataFrame()
TR=np.linspace(.012,.012*12,12)
# Fit T2 and construct dataframe
for names in T2_list:
#Convert txt file to array
YDataMatrix=txt_2_array(names)
#Estimate T2
T2time=fitT2(TR,YDataMatrix)
#convert to data frame
df_T2=pd.DataFrame(T2time.T,columns=["Infected","Healthy_R","St_Inf","Healthy_L"])
#df_T2=pd.DataFrame(T2time.T,columns=["ROI-1","ROI-2","ROI-3","ROI-4"])
df_info=name_2_df(names)
df_final=pd.concat([df_T2,df_info], axis=1)
T2DF=T2DF.append(df_final,ignore_index=True)
# Plot T2 Density ROIs 1 and 2
#T2DF[T2DF.Slice==1].iloc[:,:4].plot.density(); title("Slice 01"); xlim((0.025,.15))
#T2DF[T2DF.Slice==2].iloc[:,:4].plot.density(); title("Slice 02"); xlim((0.025,.15))
#T2DF[T2DF.Slice==3].iloc[:,:4].plot.density(); title("Slice 03"); xlim((0.025,.15))
#T2DF[T2DF.Slice==4].iloc[:,:4].plot.density(); title("Slice 04"); xlim((0.025,.15))
#T2DF[T2DF.Slice==5].iloc[:,:4].plot.density(); title("Slice 05"); xlim((0.025,.15))
# ======== CEST============= #
# Make list of all T2.txt files
CEST_list=get_ipython().getoutput('ls ../Study_03_CBA/*CEST.txt')
CEST_DF=pd.DataFrame()
Z=np.zeros((4,110))
def normalize_data(DataMatrix):
rows,cols = DataMatrix.shape
newData = np.zeros_like(DataMatrix)
for row in range(rows):
newData[row,:]=DataMatrix[row,:]/DataMatrix[row,8]
return newData
for names in CEST_list:
#Convert txt file to array
D=txt_2_array(names);
Zn=normalize_data(D.T)
Z=np.concatenate((Z,Zn))
Z=Z[4::,9::]
# define offsets in ppm
a1=np.linspace(-55,-50,9)
ppm=np.linspace(-8,8,101)
full_ppm = np.concatenate((a1, ppm))
# fit CEST data.
y=Z[12,:]
p=fit_L2_scale(ppm,y)
Yhat=Lscale(ppm,p[0],p[1],p[2],p[3],p[4],p[5],p[6]);
plt.figure(figsize=(10,6))
plt.plot(ppm,y,'o',label='Signal');
plt.plot(ppm,1-Yhat,'-',label='Fit');
plt.legend()
## ====== BUILD CEST Predictors ======== #####
CEST_predictors=np.zeros_like(Z)
rows,cols = CEST_predictors.shape
Tissue_Class=np.zeros((4,rows))
for i in range(rows):
p=fit_L2_scale(ppm,Z[i,:])
CEST_predictors[i,:]=Lscale(ppm,p[0],p[1],p[2],p[3],p[4],p[5],p[6]);
Tissue_Class=np.zeros((64,1))
for i in range(4):
Tissue_Class[i::4]=i
CEST_Dataframe=pd.DataFrame(CEST_predictors)
CEST_Dataframe["Tissue_Class"]=Tissue_Class
pd.DataFrame.to_csv(CEST_Dataframe,"CEST_infections.csv",header=True,index=False)
| mit | 3,666,962,277,034,696,000 | 28.172043 | 86 | 0.647991 | false |
s-tar/just-a-chat | modules/chat/chat.py | 1 | 5141 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'mr.S'
from kernel.module import Module
from kernel.server import app
from bottle import jinja2_template as template, request
from entities.s_chat import Chat
from entities.s_message import Message
from kernel.validator import Validator
from kernel.socket import Rooms
import opengraph
import urlparse
import kernel.widget
module = Module('chat', route="/chat")
@app.route('/')
@app.route('/chat/<chat_id:int>')
def main(chat_id=None):
user = request.user.get()
if user:
chats = request.db(Chat).get_all_by_user(user)
current_chat = request.db(Chat).get_by_id(chat_id) if chat_id else None
return template('page', {'content': template('chat/main', {
'chats': chats,
'chat_id': chat_id,
'current_chat': current_chat,
'current_is_new': current_chat and current_chat not in chats
})})
return template('page', {'content': template('index')})
@module.post('/new')
def new_chat():
user = request.user.get()
if user:
data = request.forms
v = Validator(data)
v.field("chat_name").required(message='Назовите как-то чат')
if v.is_valid():
data = v.valid_data
chat = Chat()
chat.name = data.get("chat_name")
chat.members.append(user)
request.db.add(chat)
request.db.flush()
request.db.commit()
Rooms.get('user.'+str(user.usr_id)).emit('chat.new', {
'chat': chat.as_dict(),
'chat_item': kernel.widget.get('chat.item', {'chat': chat})})
return {"status": "ok"}
return {"status": "fail", "errors": v.errors}
@module.post('/<chat_id:int>/join')
def join_chat(chat_id=None):
user = request.user.get()
if user:
chat = request.db(Chat).get_by_id(chat_id)
if chat and user not in chat.members:
chat.members.append(user)
request.db.add(chat)
request.db.commit()
Rooms.get('user.'+str(user.usr_id)).emit('chat.join', { "chat": chat.as_dict(),
"chat_item": kernel.widget.get('chat.item', {'chat': chat})})
new_message(request.db, chat, '%s %s присоединяется к чату.' % (user.usr_firstname, user.usr_lastname), user, True)
return {"status": "ok",
"chat": chat.as_dict(),
"chat_item": kernel.widget.get('chat.item', {'chat': chat}),
"messages": kernel.widget.get('chat.messages', {'chat_id': chat.id})
}
return {"status": "fail"}
@module.post('/<chat_id:int>/leave')
def leave_chat(chat_id=None):
user = request.user.get()
if user:
chat = request.db(Chat).get_by_id(chat_id)
if chat:
chat.members.remove(user)
if len(chat.members) == 0:
chat.deleted = True
request.db.add(chat)
request.db.commit()
new_message(request.db, chat, '%s %s покидает чат.' % (user.usr_firstname, user.usr_lastname), user, True)
return {"status": "ok"}
return {"status": "fail"}
@module.post('/new_message')
def new_message_route():
user = request.user.get()
if user:
data = request.forms
v = Validator(data)
v.field("chat_id").integer()
if v.valid_data.get('chat_id'):
data = v.valid_data
chat = request.db(Chat).get_by_id(data.get('chat_id'))
if chat:
text = data.get('message').strip()
new_message(request.db, chat, text, user)
return {"status": "ok"}
return {"status": "fail"}
@module.post('/search')
def search():
user = request.user.get()
text = request.forms.get('text')
chats = request.db().query(Chat).filter(Chat.deleted == False, Chat.name.ilike(text.strip()+'%'), ~Chat.members.contains(user)).all()
return {
'chats': [c.as_dict() for c in chats],
'chat_items': [kernel.widget.get('chat.item', {'chat': chat}) for chat in chats]
}
def to_url(text):
text = 'http://'+text if text.startswith('www.') else text
return text if text.startswith('http://') or text.startswith('https://') else None
def new_message(db, chat, text, user, system=False):
data = None
url = to_url(text)
if url:
try:
og = opengraph.OpenGraph(url=url)
text = url
data = str(og if og.is_valid() else {})
except:
data = str({})
message = Message()
message.chat = chat
message.text = text
message.data = data
message.sender = user
message.is_system = system
chat.messages.append(message)
db.add(chat)
db.flush()
db.commit()
for member in chat.members:
Rooms.get('user.'+str(member.usr_id)).emit('chat.new_message', {
'is_sender': member.usr_id == user.usr_id,
'message': message.as_dict(),
'message_item': kernel.widget.get('chat.message', {'message': message})}) | mit | -6,152,623,473,492,803,000 | 32.090909 | 137 | 0.564475 | false |
BeataBak/project-euler-problems | 008.py | 1 | 3404 | """
Project Euler Problem 8
=======================
Find the greatest product of thirteen consecutive digits in the 1000-digit
number.
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
"""
from functools import reduce
BIG_CUBE = ''.join("""
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
""".split())
def scoper(s, width, pos=0):
"""
Takes a string and a width, and yields consecutive chunks of `s`
to the size of `width` until we hit the end of `s`.
"""
while True:
yield s[pos:pos + width]
if pos + width == len(s):
break
pos += 1
def product_of_string(s):
"""
Takes a string containing integers and returns the product.
"""
return reduce(lambda x, y: x * y, [int(i) for i in s])
def main(length=13):
return max([product_of_string(s) for s in scoper(BIG_CUBE, length)])
def test_scoper():
assert list(scoper('Beata', 2)) == ['Be', 'ea', 'at', 'ta']
assert list(scoper('Beata', 3)) == ['Bea', 'eat', 'ata']
def test_product_of_string():
assert product_of_string('245') == 40
def test_main():
assert main(4) == 5832
print(main())
| mit | 7,358,781,582,324,919,000 | 35.602151 | 74 | 0.765864 | false |
CMUSV-VisTrails/WorkflowRecommendation | vistrails/db/versions/v1_0_0/domain/log.py | 1 | 3092 | ###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from auto_gen import DBLog as _DBLog
from auto_gen import DBAbstraction, DBModule, DBGroup, DBLoopExec, \
DBGroupExec, DBModuleExec
from id_scope import IdScope
import copy
class DBLog(_DBLog):
def __init__(self, *args, **kwargs):
_DBLog.__init__(self, *args, **kwargs)
self.id_scope = IdScope(1,
{DBLoopExec.vtType: 'item_exec',
DBModuleExec.vtType: 'item_exec',
DBGroupExec.vtType: 'item_exec',
DBAbstraction.vtType: DBModule.vtType,
DBGroup.vtType: DBModule.vtType})
def __copy__(self):
return DBLog.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = _DBLog.do_copy(self, new_ids, id_scope, id_remap)
cp.__class__ = DBLog
cp.id_scope = copy.copy(self.id_scope)
return cps
@staticmethod
def update_version(old_obj, trans_dict, new_obj=None):
if new_obj is None:
new_obj = DBLog()
new_obj = _DBLog.update_version(old_obj, trans_dict, new_obj)
new_obj.update_id_scope()
return new_obj
def update_id_scope(self):
pass
| bsd-3-clause | -260,436,497,900,556,860 | 42.549296 | 79 | 0.635834 | false |
Yukarumya/Yukarum-Redfoxes | testing/marionette/harness/marionette_harness/tests/unit/test_mouse_action.py | 1 | 4981 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette_driver.by import By
from marionette_driver.keys import Keys
from marionette_driver.marionette import Actions
from marionette_harness import MarionetteTestCase
class TestMouseAction(MarionetteTestCase):
def setUp(self):
MarionetteTestCase.setUp(self)
if self.marionette.session_capabilities["platformName"] == "darwin":
self.mod_key = Keys.META
else:
self.mod_key = Keys.CONTROL
self.action = Actions(self.marionette)
def test_click_action(self):
test_html = self.marionette.absolute_url("test.html")
self.marionette.navigate(test_html)
link = self.marionette.find_element(By.ID, "mozLink")
self.action.click(link).perform()
self.assertEqual("Clicked", self.marionette.execute_script(
"return document.getElementById('mozLink').innerHTML"))
def test_clicking_element_out_of_view_succeeds(self):
# The action based click doesn"t check for visibility.
test_html = self.marionette.absolute_url("hidden.html")
self.marionette.navigate(test_html)
el = self.marionette.find_element(By.ID, "child")
self.action.click(el).perform()
def test_double_click_action(self):
test_html = self.marionette.absolute_url("double_click.html")
self.marionette.navigate(test_html)
el = self.marionette.find_element(By.ID, "one-word-div")
self.action.double_click(el).perform()
el.send_keys(self.mod_key + "c")
rel = self.marionette.find_element(By.ID, "input-field")
rel.send_keys(self.mod_key + "v")
self.assertEqual("zyxw", rel.get_property("value"))
def test_context_click_action(self):
test_html = self.marionette.absolute_url("javascriptPage.html")
self.marionette.navigate(test_html)
click_el = self.marionette.find_element(By.ID, "resultContainer")
def context_menu_state():
with self.marionette.using_context("chrome"):
cm_el = self.marionette.find_element(By.ID, "contentAreaContextMenu")
return cm_el.get_property("state")
self.assertEqual("closed", context_menu_state())
self.action.context_click(click_el).perform()
self.wait_for_condition(lambda _: context_menu_state() == "open")
with self.marionette.using_context("chrome"):
self.marionette.find_element(By.ID, "main-window").send_keys(Keys.ESCAPE)
self.wait_for_condition(lambda _: context_menu_state() == "closed")
def test_middle_click_action(self):
test_html = self.marionette.absolute_url("clicks.html")
self.marionette.navigate(test_html)
self.marionette.find_element(By.ID, "addbuttonlistener").click()
el = self.marionette.find_element(By.ID, "showbutton")
self.action.middle_click(el).perform()
self.wait_for_condition(lambda _: el.get_property("innerHTML") == "1")
def test_chrome_click(self):
self.marionette.navigate("about:blank")
data_uri = "data:text/html,<html></html>"
with self.marionette.using_context("chrome"):
urlbar = self.marionette.find_element(By.ID, "urlbar")
urlbar.send_keys(data_uri)
go_button = self.marionette.find_element(By.ID, "urlbar-go-button")
self.action.click(go_button).perform()
self.wait_for_condition(lambda mn: mn.get_url() == data_uri)
def test_chrome_double_click(self):
self.marionette.navigate("about:blank")
test_word = "quux"
with self.marionette.using_context("chrome"):
urlbar = self.marionette.find_element(By.ID, "urlbar")
self.assertEqual("", urlbar.get_property("value"))
urlbar.send_keys(test_word)
self.assertEqual(urlbar.get_property("value"), test_word)
(self.action.double_click(urlbar).perform()
.key_down(self.mod_key)
.key_down("x").perform())
self.assertEqual(urlbar.get_property("value"), "")
def test_chrome_context_click_action(self):
self.marionette.set_context("chrome")
def context_menu_state():
cm_el = self.marionette.find_element(By.ID, "tabContextMenu")
return cm_el.get_property("state")
currtab = self.marionette.execute_script("return gBrowser.selectedTab")
self.assertEqual("closed", context_menu_state())
self.action.context_click(currtab).perform()
self.wait_for_condition(lambda _: context_menu_state() == "open")
(self.marionette.find_element(By.ID, "main-window")
.send_keys(Keys.ESCAPE))
self.wait_for_condition(lambda _: context_menu_state() == "closed")
| mpl-2.0 | 1,838,045,678,706,669,000 | 42.692982 | 85 | 0.642441 | false |
pypa/warehouse | warehouse/utils/db/windowed_query.py | 1 | 2050 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Taken from "Theatrum Chemicum" at
# https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes/WindowedRangeQuery
from sqlalchemy import and_, func, text
def column_windows(session, column, windowsize):
"""
Return a series of WHERE clauses against a given column that break it into
windows.
Result is an iterable of tuples, consisting of ((start, end), whereclause),
where (start, end) are the ids.
Requires a database that supports window functions, i.e. Postgresql,
SQL Server, Oracle.
Enhance this yourself ! Add a "where" argument so that windows of just a
subset of rows can be computed.
"""
def int_for_range(start_id, end_id):
if end_id:
return and_(column >= start_id, column < end_id)
else:
return column >= start_id
q = session.query(
column, func.row_number().over(order_by=column).label("rownum")
).from_self(column)
if windowsize > 1:
q = q.filter(text("rownum %% %d=1" % windowsize))
intervals = [row[0] for row in q]
while intervals:
start = intervals.pop(0)
if intervals:
end = intervals[0]
else:
end = None
yield int_for_range(start, end)
def windowed_query(q, column, windowsize):
"""
Break a Query into windows on a given column.
"""
for whereclause in column_windows(q.session, column, windowsize):
for row in q.filter(whereclause).order_by(column):
yield row
| apache-2.0 | 1,703,204,328,040,316,700 | 30.060606 | 79 | 0.666829 | false |
UfSoft/ISPManCCP | extra-packages/pyperl-1.0.1d/t/apply.py | 1 | 2941 | import perl
#if (perl.MULTI_PERL):
# print "1..0"
# raise SystemExit
print "1..14"
def ok(a, b=None):
return "a=" + str(a) + ", b=" + str(b)
perl.eval("""
use Python qw(apply);
$| = 1;
sub {
my $f = shift;
# First some tests that are expected to blow up
eval {
apply($f);
};
#print $@;
# XXX For some strange reason =~ is not to force $@ to stingify, so
# I had to help it with "$@" =~.
# Hmmm, something to fix some other time :-(
print "not " unless "$@" =~ /^python\.<type 'exceptions.TypeError'>: ok\(\) takes at least 1 argument \(0 given\)/;
print "ok 1\n";
eval {
apply($f, undef);
};
#print $@;
print "not " unless "$@" =~ /^python\.<type 'exceptions.TypeError'>: ok\(\) takes at least 1 argument \(0 given\)/;
print "ok 2\n";
eval {
apply($f, undef, undef);
};
#print $@;
print "not " unless "$@" =~ /^python\.<type 'exceptions.TypeError'>: ok\(\) takes at least 1 argument \(0 given\)/;
print "ok 3\n";
eval {
apply($f, undef, undef, undef);
};
#print $@;
print "not " unless "$@" =~ /^Too many arguments at \(eval 1\) line \d+./;
print "ok 4\n";
eval {
apply($f, [1,2,3]);
};
#print $@;
print "not " unless "$@" =~ /^python\.<type 'exceptions.TypeError'>: ok\(\) takes at most 2 arguments \(3 given\)/;
print "ok 5\n";
eval {
apply($f, [], {b => 2});
};
#print $@;
print "not " unless "$@" =~ /^python\.<type 'exceptions.TypeError'>: ok\(\) takes at least 1 non-keyword argument \(0 given\)/;
print "ok 6\n";
eval {
apply($f, [1], {a => 2});
};
#print $@;
print "not " unless "$@" =~ /^python\.<type 'exceptions.TypeError'>: ok\(\) got multiple values for keyword argument 'a'/;
print "ok 7\n";
eval {
apply($f, [], {a => 2, b => 3, c => 4});
};
#print $@;
print "not " unless "$@" =~ /^python\.<type 'exceptions.TypeError'>: ok\(\) got an unexpected keyword argument 'c'/;
print "ok 8\n";
eval {
apply($f, 1);
};
#print $@;
print "not " unless "$@" =~ /^/;
print "ok 9\n";
# Then some tests that are expected to work
$res = apply($f, undef, { a => 101, b => 102 });
#print "$res\\n";
print "not " unless $res eq "a=101, b=102";
print "ok 10\n";
$res = apply($f, undef, { a => 101 });
#print "$res\\n";
print "not " unless $res eq "a=101, b=None";
print "ok 11\n";
$res = apply($f, [101, 102]);
#print "$res\\n";
print "not " unless $res eq "a=101, b=102";
print "ok 12\n";
$res = apply($f, Python::list(101, 102), Python::dict());
#print "$res\\n";
print "not " unless $res eq "a=101, b=102";
print "ok 13\n";
$res = apply($f, [], Python::dict(a => 101));
#print "$res\\n";
print "not " unless $res eq "a=101, b=None";
print "ok 14\n";
}
""")(ok)
| bsd-3-clause | -2,264,924,902,036,801,500 | 24.136752 | 131 | 0.50119 | false |
jdgillespie91/trackerSpend | data/expenditure/submit_automated_expenditure.py | 1 | 8161 | # This script adds any spend that occurs regularly on a monthly basis.
import datetime
import gspread
import json
import logging
import os
import requests
import smtplib
import sys
from configs import config
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from oauth2client.client import SignedJwtAssertionCredentials
class Script:
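    """Describes the running script: today's datetime plus this file's directory, base filename and path."""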
def __init__(self):
self.today = datetime.datetime.today()
self.directory = os.path.dirname(__file__)
self.filename = os.path.splitext(os.path.basename(__file__))[0]
self.path = os.path.join(self.directory, self.filename)
class Flag(Script):
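    """A '<category>_<YYYY-MM-DD>.flag' file in the flags/ directory; it can be created, removed and checked for existence."""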
def __init__(self, entry):
Script.__init__(self) # Change this to super if more pythonic.
self.today = self.today.strftime('%Y-%m-%d')
self.directory = os.path.join(self.directory, 'flags')
self.filename = '{0}_{1}.flag'.format(entry.category, self.today)
self.path = os.path.join(self.directory, self.filename)
def exists(self):
if os.path.isfile(self.path):
return True
else:
return False
def touch(self):
open(self.path, 'w').close()
def untouch(self):
os.remove(self.path)
class Entry:
def __init__(self, amount, category, peer_pressure, notes, frequency, due_date, active):
self.amount = amount
self.category = category
self.peer_pressure = peer_pressure
self.notes = notes
self.frequency = frequency
self.due_date = due_date
self.active = active
class Form:
def __init__(self, entry):
self.amount = entry.amount
self.category = entry.category
self.peer_pressure = entry.peer_pressure
self.notes = entry.notes
self.conf = config.Config('expenditure_form')
self.submission = {'entry.1788911046': self.amount,
'entry.22851461': '__other_option__',
'entry.22851461.other_option_response': self.category,
'entry.2106932303': self.peer_pressure,
'entry.1728679999': self.notes}
self.response_code = None
def submit(self):
response = requests.post(self.conf.url, self.submission)
self.response_code = response.status_code
def email(self, success):
# The following code is based on
# http://stackoverflow.com/questions/778202/smtplib-and-gmail-python-script-problems
# http://en.wikibooks.org/wiki/Python_Programming/Email
# I need to troubleshoot and test for errors.
message = MIMEMultipart()
message['From'] = self.conf.sender
message['To'] = self.conf.recipient
message['Subject'] = 'Expenditure Submission Update (Automated Email)'
if success:
body = 'The following entry has been submitted.\n\nAmount: {0}\nCategory: {1}\nPeer pressure: {2}\n' \
'Notes: {3}\n'.format(self.amount, self.category, self.peer_pressure, self.notes)
else:
body = 'The following entry failed submission.\n\nAmount: {0}\nCategory: {1}\nPeer pressure: {2}\n' \
'Notes: {3}\n'.format(self.amount, self.category, self.peer_pressure, self.notes)
message.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(self.conf.username, self.conf.password)
server.sendmail(self.conf.sender, self.conf.recipient, message.as_string())
server.close()
# Initialise the Entry class based on a list row.
def create_entry(row):
category = row[1]
peer_pressure = row[2]
notes = row[3]
frequency = row[4]
active = True if row[6] == 'Yes' else False
# We assign zero to both amount and due_date if either are invalid types. We do this silently because the email
# confirmation will contain the details of the submission and highlight any issues that need to be addressed.
try:
amount = float(row[0])
due_date = int(row[5])
except (TypeError, ValueError):
amount = 0
due_date = 0
entry = Entry(amount, category, peer_pressure, notes, frequency, due_date, active)
return entry
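# Example (hypothetical sheet row):
#   create_entry(['9.99', 'Rent', 'No', 'Paid by card', 'Monthly', '1', 'Yes'])
# returns an Entry with amount=9.99, due_date=1 and active=True; invalid amount or
# due date values silently fall back to 0, as noted above.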
def create_logger(script):
today = script.today.strftime('%Y-%m-%d_%H:%M:%S')
directory = os.path.join(script.directory, 'logs')
filename = '{0}_{1}.log'.format(script.filename, today)
path = os.path.join(directory, filename)
logger = logging.getLogger('logger')
logger.setLevel(logging.DEBUG)
# Add file handler to logger.
file_handler = logging.FileHandler(path)
formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s', '%Y-%m-%d %H:%M:%S')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.debug('Log file created: {0}\n'.format(path))
# Add smtp handler to logger.
# smtp_handler = logging.handlers.SMTPHandler(... # Complete this
# logger.debug('SMTP functionality configured.')
return logger
def parse_entries_sheet():
conf = config.Config('expenditure_entries')
json_key = json.load(open(conf.key))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], bytes(json_key['private_key'], 'UTF-8'), scope)
session = gspread.authorize(credentials)
workbook = session.open_by_key(conf.workbook)
worksheet = workbook.worksheet(conf.worksheet)
# Parse row-by-row until an empty row is encountered (data starts on second row).
row_index = 2
entries = []
while worksheet.row_values(row_index) and row_index <= worksheet.row_count:
row = worksheet.row_values(row_index)
entry = create_entry(row)
entries.append(entry)
row_index += 1
return entries
if __name__ == '__main__':
script = Script()
logger = create_logger(script)
logger.info('Processing entries sheet.')
entries = parse_entries_sheet()
logger.info('Entries sheet processed.\n')
for entry in entries:
logger.info('Processing entry: {0}.'.format(entry.category))
if entry.active:
logger.info('Entry is active. Continuing...')
flag = Flag(entry)
if not flag.exists():
logger.info('The flag file does not exist. Touching...')
flag.touch()
if entry.frequency == 'Monthly':
if entry.due_date == script.today.day: # Think about introducing a "today" variable. I don't think it's logical to include "today" in the Script class.
logger.info('An entry is required. Submitting...')
form = Form(entry)
form.submit()
if form.response_code == requests.codes.ok: # Have this as try: form.submit() as opposed to if/else (will read better).
logger.info('The submission was accepted. Moving to next entry.\n')
form.email(success=True)
else:
logger.info('The submission was not accepted. '
'Removing flag file and moving to next entry.\n')
form.email(success=False)
flag.untouch()
else:
                        logger.info('A submission is not required today. '
                                    'Removing flag file and moving to next entry.\n')
flag.untouch()
else:
logger.info('{0} spend is not yet implemented. '
'Removing flag file and moving to next entry.\n'.format(entry.frequency))
flag.untouch()
continue
else:
logger.info('The flag file exists. Moving to next entry.\n')
else:
logger.info('Entry is inactive. Moving to next entry.\n')
logger.info('End of script.')
sys.exit(0)
| mit | -8,319,522,157,626,210,000 | 36.782407 | 172 | 0.603357 | false |
a710128/Lesson9 | API/course.py | 1 | 3339 | import re
class CourseException(Exception):
def __init__(self, msg, err):
super(CourseException, self).__init__()
self.msg = msg
self.err = err
def __str__(self):
return "CourseError : " + self.msg
def __repr__(self):
        return '<CourseException msg : "%s", errcode : %d>' % (self.msg, self.err)
def courseTimeParser(timeStr):
assert isinstance(timeStr, str), "Parameter type error"
timeStr, _ = re.subn('\([^)]*\)', '', timeStr)
ret = []
for item in timeStr.split(","):
if item == '':
continue
ws, pd = item.split('-')
ret.append((int(ws), int(pd)))
return ret
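# Example (hypothetical input): courseTimeParser('3-2(note),5-1') strips the
# parenthesised note and returns [(3, 2), (5, 1)].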
class Course:
def __init__(self, **kwargs):
if 'kch' in kwargs and 'kxh' in kwargs:
self.kch = kwargs['kch']
self.kxh = kwargs['kxh']
elif 'kid' in kwargs:
vs = kwargs['kid'].split(':')
if len(vs) != 2:
raise CourseException("Wrong Course id parameter", 0)
self.kch = vs[0]
self.kxh = vs[1]
else:
raise CourseException("Invalid parameters when Course __init__!", 1)
self.name = ''
self.teacher = ''
self.time = []
self.score = 0
self.feature = ''
self.other = ''
params = {
'name': 'Unknown',
'teacher': 'Unknown',
'time': [],
'score': 0,
'feature': '',
'other': ''
}
for key in params:
if key in kwargs:
if isinstance(kwargs[key], type(params[key])):
self.__dict__[key] = kwargs[key]
else:
raise CourseException("Invalid parameters when Course __init__!", 1)
else:
self.__dict__[key] = params[key]
for item in self.time:
if (not isinstance(item, tuple)) or len(item) != 2 or (not isinstance(item[0], int)) or (not isinstance(item[1], int)):
raise CourseException("Invalid parameters when Course __init__!", 1)
def __eq__(self, other):
if self.kxh == '*' or other.kxh == '*':
return self.kch == other.kch
return self.kch == other.kch and self.kxh == other.kxh
def timeClash(self, other):
if isinstance(other, tuple):
for time in self.time:
if time == other:
return True
return False
elif isinstance(other, Course):
for time in self.time:
if other.timeClash(time):
return True
return False
else:
raise CourseException("Invalid parameters when Course timeClash!", 2)
def __str__(self):
ret = 'Course: %s:%s; Time : ' % (self.kch, self.kxh)
first = True
for wk, pd in self.time:
if first:
first = False
else:
ret += ','
ret += '%d-%d' % (wk, pd)
ret += '; Name: %s; Teacher: %s; Score: %d; Feature: %s; Other: %s' % (self.name, self.teacher, self.score, self.feature, self.other)
return ret
def __repr__(self):
return "<" + self.__str__() + ">"
def __hash__(self):
return hash(self.kch + ":" + self.kxh)
| mit | 2,873,179,843,956,898,300 | 29.633028 | 141 | 0.48248 | false |
robwarm/gpaw-symm | gpaw/cluster.py | 1 | 6122 | """Extensions to the ase Atoms class
"""
import numpy as np
from ase import Atoms
from ase.io import read, write
from ase.data import covalent_radii
from ase.calculators.neighborlist import NeighborList
class Cluster(Atoms):
"""A class for cluster structures
to enable simplified manipulation"""
def __init__(self, *args, **kwargs):
self.data = {}
if len(args) > 0:
filename = args[0]
if isinstance(filename, str):
self.read(filename, kwargs.get('filetype'))
return
else:
Atoms.__init__(self, [])
if kwargs.get('filename') is not None:
filename = kwargs.pop('filename')
Atoms.__init__(self, *args, **kwargs)
self.read(filename, kwargs.get('filetype'))
else:
Atoms.__init__(self, *args, **kwargs)
def extreme_positions(self):
"""get the extreme positions of the structure"""
pos = self.get_positions()
return np.array([np.minimum.reduce(pos), np.maximum.reduce(pos)])
def find_connected(self, index, dmax=None, scale=1.5):
"""Find the atoms connected to self[index] and return them.
If dmax is not None:
Atoms are defined to be connected if they are nearer than dmax
to each other.
If dmax is None:
Atoms are defined to be connected if they are nearer than the
sum of their covalent radii * scale to each other.
"""
# set neighbor lists
neighborlist = []
if dmax is None:
# define neighbors according to covalent radii
radii = scale * covalent_radii[self.get_atomic_numbers()]
for atom in self:
positions = self.positions - atom.position
distances = np.sqrt(np.sum(positions**2, axis=1))
radius = scale * covalent_radii[atom.number]
neighborlist.append(np.where(distances < radii + radius)[0])
else:
# define neighbors according to distance
nl = NeighborList([0.5 * dmax] * len(self), skin=0)
nl.update(self)
for i, atom in enumerate(self):
neighborlist.append(list(nl.get_neighbors(i)[0]))
connected = list(neighborlist[index])
isolated = False
while not isolated:
isolated = True
for i in connected:
for j in neighborlist[i]:
if j in connected:
pass
else:
connected.append(j)
isolated = False
atoms = Cluster()
for i in connected:
atoms.append(self[i])
return atoms
def minimal_box(self, border=0, h=None, multiple=4):
"""The box needed to fit the structure in.
The structure is moved to fit into the box [(0,x),(0,y),(0,z)]
with x,y,z > 0 (fitting the ASE constriction).
The border argument can be used to add a border of empty space
around the structure.
If h is set, the box is extended to ensure that box/h is
a multiple of 'multiple'.
This ensures that GPAW uses the desired h.
The shift applied to the structure is returned.
"""
if len(self) == 0:
return None
extr = self.extreme_positions()
# add borders
        if isinstance(border, list):
b = border
else:
b = [border, border, border]
for c in range(3):
extr[0][c] -= b[c]
extr[1][c] += b[c] - extr[0][c] # shifted already
# check for multiple of 4
if h is not None:
if not hasattr(h, '__len__'):
h = np.array([h, h, h])
for c in range(3):
# apply the same as in paw.py
L = extr[1][c] # shifted already
N = np.ceil(L / h[c] / multiple) * multiple
# correct L
dL = N * h[c] - L
# move accordingly
extr[1][c] += dL # shifted already
extr[0][c] -= dL / 2.
# move lower corner to (0, 0, 0)
shift = tuple(-1. * np.array(extr[0]))
self.translate(shift)
self.set_cell(tuple(extr[1]))
return shift
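    # Usage sketch (not part of the original code):
    #   mol = Cluster('molecule.xyz')
    #   shift = mol.minimal_box(border=4.0, h=0.2)
    # moves the atoms so the cell encloses them with 4 Ang of vacuum on each side
    # and box lengths compatible with the requested grid spacing h.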
def get(self, name):
"""General get"""
attr = 'get_' + name
        if hasattr(self, attr):
            return getattr(self, attr)()
        elif name in self.data:
            return self.data[name]
else:
return None
def set(self, name, data):
"""General set"""
attr = 'set_' + name
if hasattr(self, attr):
getattr(self, attr)(data)
else:
self.data[name] = data
def read(self, filename, format=None):
"""Read the structure from some file. The type can be given
or it will be guessed from the filename."""
self.__init__(read(filename, format=format))
return len(self)
def write(self, filename=None, format=None, repeat=None):
"""Write the structure to file.
Parameters
----------
format: string
can be given or it will be guessed from the filename
repeat: array, eg.: [1,0,1]
can be used to repeat the structure
"""
if filename is None:
if format is None:
raise RuntimeError('Please specify either filename or format.')
else:
filename = self.get_name() + '.' + format
out = self
if repeat is None:
out = self
else:
out = Cluster([])
cell = self.get_cell().diagonal()
for i in range(repeat[0] + 1):
for j in range(repeat[1] + 1):
for k in range(repeat[2] + 1):
copy = self.copy()
copy.translate(np.array([i, j, k]) * cell)
out += copy
write(filename, out, format)
| gpl-3.0 | -2,736,821,158,500,044,300 | 30.556701 | 79 | 0.516335 | false |
cggh/DQXServer | responders/recordinfo.py | 1 | 1698 | # This file is part of DQXServer - (C) Copyright 2014, Paul Vauterin, Ben Jeffery, Alistair Miles <[email protected]>
# This program is free software licensed under the GNU Affero General Public License.
# You can find a copy of this license in LICENSE in the top directory of the source code or at <http://opensource.org/licenses/AGPL-3.0>
import DQXDbTools
import DQXUtils
from DQXDbTools import DBCOLESC
from DQXDbTools import DBTBESC
import config
def response(returndata):
mytablename = returndata['tbname']
encodedquery = returndata['qry']
databaseName = None
if 'database' in returndata:
databaseName = returndata['database']
with DQXDbTools.DBCursor(returndata, databaseName, read_timeout=config.TIMEOUT) as cur:
whc = DQXDbTools.WhereClause()
        whc.ParameterPlaceHolder = '%s'  # NOTE!: MySQL PyODBC seems to require this nonstandard coding
whc.Decode(encodedquery)
whc.CreateSelectStatement()
sqlquery = "SELECT * FROM {0} WHERE {1}".format(
DBTBESC(mytablename),
whc.querystring_params
)
if DQXDbTools.LogRequests:
DQXUtils.LogServer('###QRY:'+sqlquery)
DQXUtils.LogServer('###PARAMS:'+str(whc.queryparams))
cur.execute(sqlquery, whc.queryparams)
therow = cur.fetchone()
if therow is None:
returndata['Error'] = 'Record not found'
else:
data={}
colnr=0
for column in cur.description:
data[column[0]] = str(therow[colnr])
colnr += 1
returndata['Data'] = data
return returndata
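# Example (hypothetical request data): response({'tbname': 'samples', 'qry': encoded_query})
# returns the same dict with a 'Data' mapping of column names to the matching
# record's values, or an 'Error' entry when no record matches.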
| agpl-3.0 | -3,628,282,154,136,333,300 | 34.913043 | 136 | 0.630153 | false |
hudora/huDjango | hudjango/management/commands/couchdb-init.py | 1 | 1234 | # encoding: utf-8
import couchdb
from optparse import make_option
from hudjango.management.couchdb.support import CouchDBBaseCommand
from django.core.management.base import CommandError
class Command(CouchDBBaseCommand):
help = """ Creates a new couchdb database. """
option_list = CouchDBBaseCommand.option_list + (
make_option('--purge', action='store_true', help='Delete existing database [default: %default]'),
)
def handle(self, *args, **options):
# get the name of the database to create
if len(args) != 1:
raise CommandError("You need to specify exactly one argument as database name")
database = args[0]
# drop a possibly existing database if the user wants us to.
couch = self._server(options)
if options['purge']:
try:
couch.delete(database)
except couchdb.client.ResourceNotFound:
pass
# then create the new database
try:
couch.create(database)
except couchdb.client.PreconditionFailed, exception:
raise CommandError("%s: %s" % (database, str(exception)))
print "database '%s' created succesfully" % database
| bsd-2-clause | -6,668,120,594,051,540,000 | 35.294118 | 105 | 0.636143 | false |
capitalone/cloud-custodian | tools/c7n_mailer/c7n_mailer/replay.py | 1 | 5458 | """
Allow local testing of mailer and templates by replaying an SQS message.
MESSAGE_FILE input is a file containing the exact base64-encoded, gzipped
data that's enqueued to SQS via :py:meth:`c7n.actions.Notify.send_sqs`.
Alternatively, with -p|--plain specified, the file will be assumed to be
JSON data that can be loaded directly.
"""
import argparse
import base64
import json
import logging
import os
import zlib
import yaml
import boto3
import jsonschema
from c7n_mailer.cli import CONFIG_SCHEMA
from c7n_mailer.email_delivery import EmailDelivery
from c7n_mailer.utils import setup_defaults
from c7n_mailer.utils_email import get_mimetext_message
logger = logging.getLogger(__name__)
class MailerTester:
def __init__(self, msg_file, config, msg_plain=False, json_dump_file=None):
if not os.path.exists(msg_file):
raise RuntimeError("File does not exist: %s" % msg_file)
logger.debug('Reading message from: %s', msg_file)
with open(msg_file, 'r') as fh:
raw = fh.read()
logger.debug('Read %d byte message', len(raw))
if msg_plain:
raw = raw.strip()
else:
logger.debug('base64-decoding and zlib decompressing message')
raw = zlib.decompress(base64.b64decode(raw))
if json_dump_file is not None:
with open(json_dump_file, 'wb') as fh: # pragma: no cover
fh.write(raw)
self.data = json.loads(raw)
logger.debug('Loaded message JSON')
self.config = config
self.session = boto3.Session()
def run(self, dry_run=False, print_only=False):
emd = EmailDelivery(self.config, self.session, logger)
addrs_to_msgs = emd.get_to_addrs_email_messages_map(self.data)
logger.info('Would send email to: %s', addrs_to_msgs.keys())
if print_only:
mime = get_mimetext_message(
self.config,
logger,
self.data,
self.data['resources'],
['[email protected]']
)
logger.info('Send mail with subject: "%s"', mime['Subject'])
print(mime.get_payload(None, True).decode('utf-8'))
return
if dry_run:
for to_addrs, mimetext_msg in addrs_to_msgs.items():
print('-> SEND MESSAGE TO: %s' % '; '.join(to_addrs))
print(mimetext_msg.get_payload(None, True).decode('utf-8'))
return
# else actually send the message...
for to_addrs, mimetext_msg in addrs_to_msgs.items():
logger.info('Actually sending mail to: %s', to_addrs)
emd.send_c7n_email(self.data, list(to_addrs), mimetext_msg)
def setup_parser():
parser = argparse.ArgumentParser('Test c7n-mailer templates and mail')
parser.add_argument('-c', '--config', required=True)
parser.add_argument('-d', '--dry-run', dest='dry_run', action='store_true',
default=False,
help='Log messages that would be sent, but do not send')
parser.add_argument('-T', '--template-print', dest='print_only',
action='store_true', default=False,
help='Just print rendered templates')
parser.add_argument('-t', '--templates', default=None, type=str,
help='message templates folder location')
parser.add_argument('-p', '--plain', dest='plain', action='store_true',
default=False,
help='Expect MESSAGE_FILE to be a plain string, '
'rather than the base64-encoded, gzipped SQS '
'message format')
parser.add_argument('-j', '--json-dump-file', dest='json_dump_file',
type=str, action='store', default=None,
help='If dump JSON of MESSAGE_FILE to this path; '
'useful to base64-decode and gunzip a message')
parser.add_argument('MESSAGE_FILE', type=str,
help='Path to SQS message dump/content file')
return parser
def session_factory(config):
return boto3.Session(
region_name=config['region'],
profile_name=config.get('profile'))
def main():
parser = setup_parser()
options = parser.parse_args()
module_dir = os.path.dirname(os.path.abspath(__file__))
default_templates = [
os.path.abspath(os.path.join(module_dir, 'msg-templates')),
os.path.abspath(os.path.join(module_dir, '..', 'msg-templates')),
os.path.abspath('.')
]
templates = options.templates
if templates:
default_templates.append(
os.path.abspath(os.path.expanduser(os.path.expandvars(templates)))
)
log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.DEBUG, format=log_format)
logging.getLogger('botocore').setLevel(logging.WARNING)
with open(options.config) as fh:
config = yaml.load(fh.read(), Loader=yaml.SafeLoader)
jsonschema.validate(config, CONFIG_SCHEMA)
setup_defaults(config)
config['templates_folders'] = default_templates
tester = MailerTester(
options.MESSAGE_FILE, config, msg_plain=options.plain,
json_dump_file=options.json_dump_file
)
tester.run(options.dry_run, options.print_only)
if __name__ == '__main__':
main()
| apache-2.0 | -7,262,002,555,862,658,000 | 37.43662 | 80 | 0.601502 | false |
dbolgheroni/rswtch | rswtch-legacy.py | 1 | 6564 | #!/usr/bin/env python2.7
#
# Copyright (c) 2016, Daniel Bolgheroni.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import cmd
import signal
import shlex
from time import sleep
from pyfirmata import Arduino, serial
from conf import Config
class Sh(cmd.Cmd):
prompt = 'rswtch> '
intro = 'type \'help\' to see available commands'
def default(self, line):
print(line + ": not found")
def do_EOF(self, line):
exit(0)
# overwrite help, since commands are simple, do not need independent
# help for each command
def do_help(self, line):
print("{0:<16} {1}".format("COMMAND", "DESCRIPTION"))
print("{0:<16} {1}".format("annotate n \"c\"", "annotate c in channel n (use quotes)"))
print("{0:<16} {1}".format("down n", "turn off the n channel"))
print("{0:<16} {1}".format("help", "this help"))
print("{0:<16} {1}".format("reset n", "turn the n channel off and on again after 2 seconds"))
print("{0:<16} {1}".format("status", "display the status of all channels, including annotations"))
print("{0:<16} {1}".format("toggle n", "turn the n channel off if its on, and vice-versa"))
print("{0:<16} {1}".format("up n", "turn on the n channel"))
### commands
# up
def do_up(self, line):
parser = shlex.shlex(line)
c = parser.get_token()
try:
channels[c].up()
except KeyError:
print("no channel")
# down
def do_down(self, line):
parser = shlex.shlex(line)
c = parser.get_token()
try:
channels[c].down()
except KeyError:
print("no channel")
# toggle
def do_toggle(self, line):
parser = shlex.shlex(line)
c = parser.get_token()
try:
channels[c].toggle()
except KeyError:
print("no channel")
# reset
def do_reset(self, line):
parser = shlex.shlex(line)
c = parser.get_token()
try:
channels[c].reset()
except KeyError:
print("no channel")
# status
def do_status(self, line):
status()
def do_annotate(self, line):
parser = shlex.shlex(line, posix=True)
c = parser.get_token()
try:
channels[c].annotation = parser.get_token()
except KeyError:
print("no channel")
# quit
def do_quit(self, line):
exit(0)
# handle ^C
@staticmethod
def handle_sigint(signum, frame):
exit(0)
class Channel():
    # the relay module uses inverted logic, so write
    # 1 to bring the pin down and 0 to bring it up
def __init__(self, pin, boardname):
self.__pin = pin
self.boardname = boardname
self.annotation = None
# up by default
self.__pin.write(0)
def up(self):
self.__pin.write(0)
def down(self):
self.__pin.write(1)
def toggle(self):
if self.__pin.read() == 0:
self.__pin.write(1)
else:
self.__pin.write(0)
def reset(self):
self.__pin.write(1)
sleep(2)
self.__pin.write(0)
@property
def status(self):
return 'up' if self.__pin.read() == 0 else 'down'
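# Usage sketch (pin wiring as configured in __main__ below):
#   ch = Channel(board.get_pin('d:9:o'), 'somehost')
#   ch.toggle()   # invert the relay state
#   ch.reset()    # power-cycle: off, wait 2 s, back on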
def status():
print("{0:>2} {1:<6} {2:<20.20} {3:<40.40}"
.format("CH", "STATUS", "BOARD", "ANNOTATION"))
print("{0:>2} {1:<6} {2:<20.20} {3:<40.40}"
.format("1", ch1.status, ch1.boardname, ch1.annotation))
print("{0:>2} {1:<6} {2:<20.20} {3:<40.40}"
.format("2", ch2.status, ch2.boardname, ch2.annotation))
print("{0:>2} {1:<6} {2:<20.20} {3:<40.40}"
.format("3", ch3.status, ch3.boardname, ch3.annotation))
print("{0:>2} {1:<6} {2:<20.20} {3:<40.40}"
.format("4", ch4.status, ch4.boardname, ch4.annotation))
if __name__ == '__main__':
opts = argparse.ArgumentParser()
opts.add_argument("-v", action="store_true",
help="shows board Firmata firmware version")
opts.add_argument("-f",
help="specify config file")
opts.add_argument("dev", help="serial device")
args = opts.parse_args()
# init Firmata module
try:
board = Arduino(args.dev)
except serial.serialutil.SerialException:
print("could not open port {0}".format(args.dev))
exit(1)
# try to get board firmata version
# this fails most of the times
if args.v:
v = board.get_firmata_version()
try:
print("{0}.{1}".format(v[0], v[1]))
exit(0)
except (NameError, TypeError):
print("could not get board firmata version")
exit(1)
# handle configuration file
if args.f:
config = Config(args.f)
else:
config = Config()
# turn off board led
led = board.get_pin('d:13:o')
led.write(0)
# configuring pins
ch1 = Channel(board.get_pin('d:9:o'), config.get_boardname(1))
ch2 = Channel(board.get_pin('d:8:o'), config.get_boardname(2))
ch3 = Channel(board.get_pin('d:7:o'), config.get_boardname(3))
ch4 = Channel(board.get_pin('d:6:o'), config.get_boardname(4))
channels = {'1': ch1, '2': ch2, '3': ch3, '4': ch4}
# start shell
signal.signal(signal.SIGINT, Sh.handle_sigint)
Sh().cmdloop()
| bsd-2-clause | 5,560,561,793,451,545,000 | 29.530233 | 106 | 0.599939 | false |
kevgliss/lemur | lemur/tests/vectors.py | 1 | 14798 | from lemur.common.utils import parse_certificate
VALID_USER_HEADER_TOKEN = {
'Authorization': 'Basic ' + 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOjE1MjE2NTIwMjIsImV4cCI6MjM4NTY1MjAyMiwic3ViIjoxfQ.uK4PZjVAs0gt6_9h2EkYkKd64nFXdOq-rHsJZzeQicc',
'Content-Type': 'application/json'
}
VALID_ADMIN_HEADER_TOKEN = {
'Authorization': 'Basic ' + 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpYXQiOjE1MjE2NTE2NjMsInN1YiI6MiwiYWlkIjoxfQ.wyf5PkQNcggLrMFqxDfzjY-GWPw_XsuWvU2GmQaC5sg',
'Content-Type': 'application/json'
}
VALID_ADMIN_API_TOKEN = {
'Authorization': 'Basic ' + 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOjIsImFpZCI6MSwiaWF0IjoxNDM1MjMzMzY5fQ.umW0I_oh4MVZ2qrClzj9SfYnQl6cd0HGzh9EwkDW60I',
'Content-Type': 'application/json'
}
INTERNAL_VALID_LONG_STR = """
-----BEGIN CERTIFICATE-----
MIID1zCCAr+gAwIBAgIBATANBgkqhkiG9w0BAQsFADCBjDELMAkGA1UEBhMCVVMx
CzAJBgNVBAgMAkNBMRAwDgYDVQQHDAdBIHBsYWNlMRcwFQYDVQQDDA5sb25nLmxp
dmVkLmNvbTEQMA4GA1UECgwHRXhhbXBsZTETMBEGA1UECwwKT3BlcmF0aW9uczEe
MBwGCSqGSIb3DQEJARYPamltQGV4YW1wbGUuY29tMB4XDTE1MDYyNjIwMzA1MloX
DTQwMDEwMTIwMzA1MlowgYwxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEQMA4G
A1UEBwwHQSBwbGFjZTEXMBUGA1UEAwwObG9uZy5saXZlZC5jb20xEDAOBgNVBAoM
B0V4YW1wbGUxEzARBgNVBAsMCk9wZXJhdGlvbnMxHjAcBgkqhkiG9w0BCQEWD2pp
bUBleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKeg
sqb0HI10i2eRSx3pLeA7JoGdUpud7hy3bGws/1HgOSpRMin9Y65DEpVq2Ia9oir7
XOJLpSTEIulnBkgDHNOsdKVYHDR6k0gUisnIKSl2C3IgKHpCouwiOvvVPwd3PExg
17+d7KLBIu8LpG28wkXKFU8vSz5i7H4i/XCEChnKJ4oGJuGAJJM4Zn022U156pco
97aEAc9ZXR/1dm2njr4XxCXmrnKCYTElfRhLkmxtv+mCi6eV//5d12z7mY3dTBkQ
EG2xpb5DQ+ITQ8BzsKcPX80rz8rTzgYFwaV3gUg38+bgka/JGJq8HgBuNnHv5CeT
1T/EoZTRYW2oPfOgQK8CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B
Af8EBAMCAQYwHQYDVR0OBBYEFIuDY73dQIhj2nnd4DG2SvseHVVaMA0GCSqGSIb3
DQEBCwUAA4IBAQBk/WwfoWYdS0M8rz5tJda/cMdYFSugUbTn6JJdmHuw6RmiKzKG
8NzfSqBR6m8MWdSTuAZ/chsUZH9YEIjS9tAH9/FfUFBrsUE7TXaUgpNBm4DBLLfl
fj5xDmEyj17JPN/C36amQ9eU5BNesdCx9EkdWLyVJaM50HFRo71W0/FrpKZyKK68
XPhd1z9w/xgfCfYhe7PjEmrmNPN5Tgk5TyXW+UUhOepDctAv2DBetptcx+gHrtW+
Ygk1wptlt/tg7uUmstmXZA4vTPx83f4P3KSS3XHIYFIyGFWUDs23C20K6mmW1iXa
h0S8LN4iv/+vNFPNiM1z9X/SZgfbwZXrLsSi
-----END CERTIFICATE-----
"""
INTERNAL_VALID_LONG_CERT = parse_certificate(INTERNAL_VALID_LONG_STR)
INTERNAL_INVALID_STR = """
-----BEGIN CERTIFICATE-----
MIIEFTCCAv2gAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwgYwxCzAJBgNVBAYTAlVT
MQswCQYDVQQIDAJDQTEQMA4GA1UEBwwHQSBwbGFjZTEXMBUGA1UEAwwObG9uZy5s
aXZlZC5jb20xEDAOBgNVBAoMB0V4YW1wbGUxEzARBgNVBAsMCk9wZXJhdGlvbnMx
HjAcBgkqhkiG9w0BCQEWD2ppbUBleGFtcGxlLmNvbTAeFw0xNTA2MjYyMDM2NDha
Fw0xNTA2MjcyMDM2NDhaMGkxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEQMA4G
A1UEBxMHQSBwbGFjZTEQMA4GA1UEChMHRXhhbXBsZTETMBEGA1UECxMKT3BlcmF0
aW9uczEUMBIGA1UEAxMLZXhwaXJlZC5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IB
DwAwggEKAoIBAQCcSMzRxB6+UONPqYMy1Ojw3Wi8DIpt9USnSR60I8LiEuRK2ayr
0RMjLJ6sBEgy/hISEqpLgTsciDpxwaTC/WNrkT9vaMcwfiG3V0Red8zbKHQzC+Ty
cLRg9wbC3v613kaIZCQCoE7Aouru9WbVPmuRoasfztrgksWmH9infQbL4TDcmcxo
qGaMn4ajQTVAD63CKnut+CULZIMBREBVlSTLiOO7qZdTrd+vjtLWvdXVPcWLSBrd
Vpu3YnhqqTte+DMzQHwY7A2s3fu4Cg4H4npzcR+0H1H/B5z64kxqZq9FWGIcZcz7
0xXeHN9UUKPDSTgsjtIzKTaIOe9eML3jGSU7AgMBAAGjgaIwgZ8wDAYDVR0TAQH/
BAIwADAOBgNVHQ8BAf8EBAMCBaAwFgYDVR0lAQH/BAwwCgYIKwYBBQUHAwEwHQYD
VR0OBBYEFKwBYaxCLxK0csmV319rbRdqDllWMEgGA1UdHwRBMD8wPaA7oDmGN2h0
dHA6Ly90ZXN0LmNsb3VkY2EuY3JsLm5ldGZsaXguY29tL2xvbmdsaXZlZENBL2Ny
bC5wZW0wDQYJKoZIhvcNAQELBQADggEBADFngqsMsGnNBWknphLDvnoWu5MTrpsD
AgN0bktv5ACKRWhi/qtCmkEf6TieecRMwpQNMpE50dko3LGGdWlZRCI8wdH/zrw2
8MnOeCBxuS1nB4muUGjbf4LIbtuwoHSESrkfmuKjGGK9JTszLL6Hb9YnoFefeg8L
T7W3s8mm5bVHhQM7J9tV6dz/sVDmpOSuzL8oZkqeKP+lWU6ytaohFFpbdzaxWipU
3+GobVe4vRqoF1kwuhQ8YbMbXWDK6zlrT9pjFABcQ/b5nveiW93JDQUbjmVccx/u
kP+oGWtHvhteUAe8Gloo5NchZJ0/BqlYRCD5aAHcmbXRsDid9mO4ADU=
-----END CERTIFICATE-----
"""
INTERNAL_INVALID_CERT = parse_certificate(INTERNAL_INVALID_STR)
INTERNAL_VALID_SAN_STR = """
-----BEGIN CERTIFICATE-----
MIIESjCCAzKgAwIBAgICA+kwDQYJKoZIhvcNAQELBQAwgYwxCzAJBgNVBAYTAlVT
MQswCQYDVQQIDAJDQTEQMA4GA1UEBwwHQSBwbGFjZTEXMBUGA1UEAwwObG9uZy5s
aXZlZC5jb20xEDAOBgNVBAoMB0V4YW1wbGUxEzARBgNVBAsMCk9wZXJhdGlvbnMx
HjAcBgkqhkiG9w0BCQEWD2ppbUBleGFtcGxlLmNvbTAeFw0xNTA2MjYyMDU5MDZa
Fw0yMDAxMDEyMDU5MDZaMG0xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEQMA4G
A1UEBxMHQSBwbGFjZTEQMA4GA1UEChMHRXhhbXBsZTETMBEGA1UECxMKT3BlcmF0
aW9uczEYMBYGA1UEAxMPc2FuLmV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEA2Nq5zFh2WiqtNIPssdSwQ9/00j370VcKPlOATLqK24Q+
dr2hWP1WlZJ0NOoPefhoIysccs2tRivosTpViRAzNJXigBHhxe8ger0QhVW6AXIp
ov327N689TgY4GzRrwqavjz8cqussIcnEUr4NLLsU5AvXE7e3WxYkkskzO497UOI
uCBtWdCXZ4cAGhtVkkA5uQHfPsLmgRVoUmdMDt5ZmA8HhLX4X6vkT3oGIhdGCw6T
W+Cu7PfYlSaggSBbBniU0YKTFLfGLkYFZN/b6bxzvt6CTJLoVFAYXyLJwUvd3EAm
u23HgUflIyZNG3xVPml/lah0OIX7RtSigXUSLm7lYwIDAQABo4HTMIHQMAwGA1Ud
EwEB/wQCMAAwDgYDVR0PAQH/BAQDAgWgMBYGA1UdJQEB/wQMMAoGCCsGAQUFBwMB
MC8GA1UdEQQoMCaCEWV4YW1wbGUyLmxvbmcuY29tghFleGFtcGxlMy5sb25nLmNv
bTAdBgNVHQ4EFgQUiiIyclcBIfJ5PE3OCcTXwzJAM+0wSAYDVR0fBEEwPzA9oDug
OYY3aHR0cDovL3Rlc3QuY2xvdWRjYS5jcmwubmV0ZmxpeC5jb20vbG9uZ2xpdmVk
Q0EvY3JsLnBlbTANBgkqhkiG9w0BAQsFAAOCAQEAgcTioq70B/aPWovNTy+84wLw
VX1q6bCdH3FJwAv2rc28CHp5mCGdR6JqfT/H/CbfRwT1Yh/5i7T5kEVyz+Dp3+p+
AJ2xauHrTvWn0QHQYbUWICwkuZ7VTI9nd0Fry1FQI1EeKiCmyrzNljiN2l+GZw6i
NJUpVNtwRyWRzB+yIx2E9wyydqDFH+sROuQok7EgzlQileitPrF4RrkfIhQp2/ki
YBrY/duF15YpoMKAlFhDBh6R9/nb5kI2n3pY6I5h6LEYfLStazXbIu61M8zu9TM/
+t5Oz6rmcjohL22+sEmmRz86dQZlrBBUxX0kCQj6OAFB4awtRd4fKtkCkZhvhQ==
-----END CERTIFICATE-----
"""
INTERNAL_VALID_SAN_CERT = parse_certificate(INTERNAL_VALID_SAN_STR)
INTERNAL_VALID_WILDCARD_STR = """
-----BEGIN CERTIFICATE-----
MIIEHDCCAwSgAwIBAgICA+owDQYJKoZIhvcNAQELBQAwgYwxCzAJBgNVBAYTAlVT
MQswCQYDVQQIDAJDQTEQMA4GA1UEBwwHQSBwbGFjZTEXMBUGA1UEAwwObG9uZy5s
aXZlZC5jb20xEDAOBgNVBAoMB0V4YW1wbGUxEzARBgNVBAsMCk9wZXJhdGlvbnMx
HjAcBgkqhkiG9w0BCQEWD2ppbUBleGFtcGxlLmNvbTAeFw0xNTA2MjYyMTEzMTBa
Fw0yMDAxMDEyMTEzMTBaMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEQMA4G
A1UEBxMHQSBwbGFjZTEQMA4GA1UEChMHRXhhbXBsZTETMBEGA1UECxMKT3BlcmF0
aW9uczEbMBkGA1UEAxQSKi50ZXN0LmV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0B
AQEFAAOCAQ8AMIIBCgKCAQEA0T7OEY9FxMIdhe1CwLc+TbDeSfDN6KRHlp0I9MwK
3Pre7A1+1vmRzLiS5qAdOh3Oexelmgdkn/fZUFI+IqEVJwmeUiq13Kib3BFnVtbB
N1RdT7rZF24Bqwygf1DHAekEBYdvu4dGD/gYKsLYsSMD7g6glUuhTbgR871updcV
USYJ801y640CcHjai8UCLxpqtkP/Alob+/KDczUHbhdxYgmH34aQgxC8zg+uzuq6
bIqUAc6SctI+6ArXOqri7wSMgZUnogpF4R5QbCnlDfSzNcNxJFtGp8cy7CNWebMd
IWgBYwee8i8S6Q90B2QUFD9EGG2pEZldpudTxWUpq0tWmwIDAQABo4GiMIGfMAwG
A1UdEwEB/wQCMAAwDgYDVR0PAQH/BAQDAgWgMBYGA1UdJQEB/wQMMAoGCCsGAQUF
BwMBMB0GA1UdDgQWBBTH2KIECrqPHMbsVysGv7ggkYYZGDBIBgNVHR8EQTA/MD2g
O6A5hjdodHRwOi8vdGVzdC5jbG91ZGNhLmNybC5uZXRmbGl4LmNvbS9sb25nbGl2
ZWRDQS9jcmwucGVtMA0GCSqGSIb3DQEBCwUAA4IBAQBjjfur2B6BcdIQIouwhXGk
IFE5gUYMK5S8Crf/lpMxwHdWK8QM1BpJu9gIo6VoM8uFVa8qlY8LN0SyNyWw+qU5
Jc8X/qCeeJwXEyXY3dIYRT/1aj7FCc7EFn1j6pcHPD6/0M2z0Zmj+1rWNBJdcYor
pCy27OgRoJKZ6YhEYekzwIPeFPL6irIN9xKPnfH0b2cnYa/g56DyGmyKH2Kkhz0A
UGniiUh4bAUuppbtSIvUTsRsJuPYOqHC3h8791JZ/3Sr5uB7QbCdz9K14c9zi6Z1
S0Xb3ZauZJQI7OdHeUPDRVq+8hcG77sopN9pEYrIH08oxvLX2US3GqrowjOxthRa
-----END CERTIFICATE-----
"""
INTERNAL_VALID_WILDCARD_CERT = parse_certificate(INTERNAL_VALID_WILDCARD_STR)
EXTERNAL_VALID_STR = """
-----BEGIN CERTIFICATE-----
MIIFHzCCBAegAwIBAgIQGFWCciDWzbOej/TbAJN0WzANBgkqhkiG9w0BAQsFADCB
pDELMAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8w
HQYDVQQLExZGT1IgVEVTVCBQVVJQT1NFUyBPTkxZMR8wHQYDVQQLExZTeW1hbnRl
YyBUcnVzdCBOZXR3b3JrMTQwMgYDVQQDEytTeW1hbnRlYyBDbGFzcyAzIFNlY3Vy
ZSBTZXJ2ZXIgVEVTVCBDQSAtIEc0MB4XDTE1MDYyNDAwMDAwMFoXDTE1MDYyNTIz
NTk1OVowgYMxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDQUxJRk9STklBMRIwEAYD
VQQHDAlMb3MgR2F0b3MxFjAUBgNVBAoMDU5ldGZsaXgsIEluYy4xEzARBgNVBAsM
Ck9wZXJhdGlvbnMxHjAcBgNVBAMMFXR0dHQyLm5ldGZsaXh0ZXN0Lm5ldDCCASIw
DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALwMY/yod9YGLKLCzbbsSUBWm4ZC
DfcgbUNL3JLtZaFCaOeUPLa4YNqty+9ACXBLYPNMm+dgsRHix8N2uwtZrGazHILK
qey96eSTosPsvKFt0KLNpUl8GC/YxA69L128SJgFaaq5Dr2Mp3NP0rt0RIz5luPj
Oae0hkGOS8uS0dySlAmfOw2OsJY3gCw5UHcmpcCHpO2f7uU+tWKmgfz4U/PpQ0kz
WVJno+JhcaXIximtiLreCNF1LpraAjrcZJ+ySJwYaLaYMiJoFkdXUtKJcyqmkbA3
Splt7N4Hb8c+5aXv225uQYCh0HXQeMyBotlaIrAddP5obrtjxhXBxB4ysEcCAwEA
AaOCAWowggFmMCAGA1UdEQQZMBeCFXR0dHQyLm5ldGZsaXh0ZXN0Lm5ldDAJBgNV
HRMEAjAAMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB
BQUHAwIwYQYDVR0gBFowWDBWBgZngQwBAgIwTDAjBggrBgEFBQcCARYXaHR0cHM6
Ly9kLnN5bWNiLmNvbS9jcHMwJQYIKwYBBQUHAgIwGRoXaHR0cHM6Ly9kLnN5bWNi
LmNvbS9ycGEwHwYDVR0jBBgwFoAUNI9UtT8KH1K6nLJl7bqLCGcZ4AQwKwYDVR0f
BCQwIjAgoB6gHIYaaHR0cDovL3NzLnN5bWNiLmNvbS9zcy5jcmwwVwYIKwYBBQUH
AQEESzBJMB8GCCsGAQUFBzABhhNodHRwOi8vc3Muc3ltY2QuY29tMCYGCCsGAQUF
BzAChhpodHRwOi8vc3Muc3ltY2IuY29tL3NzLmNydDANBgkqhkiG9w0BAQsFAAOC
AQEAQuIfyBltvCZ9orqNdS6PUo2PaeUgJzkmdDwbDVd7rTwbZIwGZXZjeKseqMSb
L+r/jN6DWrScVylleiz0N/D0lSUhC609dQKuicGpy3yQaXwhfYZ6duxrW3Ii/+Vz
pFv7DnG3JPZjIXCmVhQVIv/8oaV0bfUF/1mrWRFwZiBILxa7iaycRhjusJEVRtzN
Ot/qkLluHO0wbEHnASV4P9Y5NuR/bliuFS/DeRczofNS78jJuZrGvl2AqS/19Hvm
Bs63gULVCqWygt5KEbv990m/XGuRMaXuHzHCHB4v5LRM30FiFmqCzyD8d+btzW9B
1hZ5s3rj+a6UwvpinKJoPfgkgg==
-----END CERTIFICATE-----
"""
EXTERNAL_CERT = parse_certificate(EXTERNAL_VALID_STR)
PRIVATE_KEY_STR = """
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAnEjM0cQevlDjT6mDMtTo8N1ovAyKbfVEp0ketCPC4hLkStms
q9ETIyyerARIMv4SEhKqS4E7HIg6ccGkwv1ja5E/b2jHMH4ht1dEXnfM2yh0Mwvk
8nC0YPcGwt7+td5GiGQkAqBOwKLq7vVm1T5rkaGrH87a4JLFph/Yp30Gy+Ew3JnM
aKhmjJ+Go0E1QA+twip7rfglC2SDAURAVZUky4jju6mXU63fr47S1r3V1T3Fi0ga
3Vabt2J4aqk7XvgzM0B8GOwNrN37uAoOB+J6c3EftB9R/wec+uJMamavRVhiHGXM
+9MV3hzfVFCjw0k4LI7SMyk2iDnvXjC94xklOwIDAQABAoIBAGeykly5MeD70OgB
xPEMfoebkav88jklnekVxk6mz9+rw1i6+CyFLJqRN7NRoApdtOXTBrXUyMEUzxq9
7zIGaVptZNbqggh2GK8LM20vNnlQbVGVmdMX30fbgNv6lK1eEBTdxVsMvVRqhVIK
+LGTmlJmICKZ4XdTS9v/k4UGm2TZPCt2pvrNzIpT7TIm2QybCbZoOPY8SHx0U8c5
lmtdqmIsy2JPNSOsOCiJgzQIvkR/fMGWFgNE4fEHsHAfubgpK97TGzwLiFRmlTb+
QUDaz0YbwhF+5bQjHtaGUGATcg5bvV1UWBUvp+g4gRIfwzG+3PAGacYE/djouAdG
PHbxuCkCgYEAz/LsgMgsaV3arlounviSwc8wG9WcI5gbYw5qwX0P57ZoxS7EBAGu
yYtudurJrU9SfsSV44GL11UzBcAGOeS0btddrcMiNBhc7fY7P/1xaufQ3GjG06/v
kH4gOjzsGSTJliZ709g4J6hnMCxz0O0PS31Qg5cBD8UG8xO7/AV0is0CgYEAwGWy
A6YPinpZuenaxrivM5AcVDWmj7aeC29M63l/GY+O5LQH2PKVESH0vL5PvG3LkrCR
SUbaMKdKR0wnZsJ89z21eZ54ydUgj41bZJczl8drxcY0GSajj6XZXGTUjtoVrWsB
A0kJbjsrpd+8J316Y9iCgpopmbVd965pUHe4ACcCgYAamJlDB1cWytgzQHmB/4zV
mOgwRyvHKacnDir9QD+OhTf1MDwFvylZwamJMBJHRkPozr/U7zaxfcYe0CZ7tRKW
spjapoBzZUJNdRay4nllEO0Xo5b6cCAVvOvmRvBzbs8Rky53M8pK2DEKakUNzaQN
JaPskJ2kJLD02etLGm+DaQKBgQCTI/NNmQ2foUzHw1J+0jWjoJ4ZxOI6XLZoFlnk
aInMuZ7Vx92MjJF2hdqPEpkWiX28FO839EjgFsDW4CXuD+XUjEwi1BCagzWgs8Hm
n0Bk3q3MlnW3mnZSYMtoPvDUw3L6qrAenBfrRrNt6zsRlIQqoiXFzjLsi+luh+Oh
F74P1wKBgQCPQGKLUcfAvjIcZp4ECH0K8sBEmoEf8pceuALZ3H5vneYDzqMDIceo
t5Gpocpt77LJnNiszXSerj/KjX2MflY5xUXeekWowLVTBOK5+CZ8+XBIgBt1hIG3
XKxcRgm/Va4QMEAnec0qXfdTVJaJiAW0bdKwKRRrrbwcTdNRGibdng==
-----END RSA PRIVATE KEY-----
"""
INTERNAL_CERTIFICATE_A_STR = """
-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIBATANBgkqhkiG9w0BAQsFADB5MQswCQYDVQQGEwJVUzET
MBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJTG9zIEdhdG9zMRYwFAYDVQQK
DA1OZXRmbGl4LCBJbmMuMRMwEQYDVQQLDApPcGVyYXRpb25zMRQwEgYDVQQDDAtB
Y29tbW9uTmFtZTAeFw0xNjA2MjkyMjE0NDdaFw0zNjA2MjkyMjE0NDdaMHkxCzAJ
BgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRIwEAYDVQQHDAlMb3MgR2F0
b3MxFjAUBgNVBAoMDU5ldGZsaXgsIEluYy4xEzARBgNVBAsMCk9wZXJhdGlvbnMx
FDASBgNVBAMMC0Fjb21tb25OYW1lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEAtkyvL6EqSgYSJX11635Hb8FBG/8Wey6C2KtG7M+GXvGCsSmfNqQMeZdf
W9Avxelkstp5/K+ilVJJ2TJRelu1yVUUkQcrP7imgf7CxKQAnPz2oXQImLFbm7OS
1zKA+qwtLGrId3vVQaotUtdI+wxx0YE66pyfOhQJsVOeuYwG8CCxnAj/lXeNLA1t
n39A8FLfj9nxjvZWWm2z8qXO2IYOWEMOOel1zixhypeJoTD2cJHDKNlUnXN4q5ej
psD4ehLFXIPXsKJv5XOtNYB9UHB3moXlEOuKAquRzBOfTP+rUYyfbHmzCN4eXekp
R6vze49hlg8QdCNjVY6jHRrOuVKGuwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAt
rE2Ee6a0zRlJHiuP5Zr61s6ZnwIsPN5sjo3pFJ/goHeNWbq+02FUJLXROtxSMlo8
jLYpnQbm3Qoyd0KjGn9myP1vqBL6Yzf9dRI2li9XYmavxU7OK/KJtBo/Wnw3DVT5
jxYrn4YKJU9+T0hr57bWUQ7HjMNojwBcgglzPN9KOtfTfbPEUIeoRpCjeyjwBUSN
nrTDiYPV+XI4LAyDmuR7esSvm2+0h6C0dmUbVspkxBaKFEYUKIYaZbEFEBsyZGri
qDIyu9HSvu2MJ2lVxfMNsW+IYG74DOqJQsIFP+7hrfdPoMGm4GvAiHR1IuSmq+sf
L0Ew8hy0GG3nZ6uXLW7q
-----END CERTIFICATE-----
"""
INTERNAL_PRIVATE_KEY_A_STR = """
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAtkyvL6EqSgYSJX11635Hb8FBG/8Wey6C2KtG7M+GXvGCsSmf
NqQMeZdfW9Avxelkstp5/K+ilVJJ2TJRelu1yVUUkQcrP7imgf7CxKQAnPz2oXQI
mLFbm7OS1zKA+qwtLGrId3vVQaotUtdI+wxx0YE66pyfOhQJsVOeuYwG8CCxnAj/
lXeNLA1tn39A8FLfj9nxjvZWWm2z8qXO2IYOWEMOOel1zixhypeJoTD2cJHDKNlU
nXN4q5ejpsD4ehLFXIPXsKJv5XOtNYB9UHB3moXlEOuKAquRzBOfTP+rUYyfbHmz
CN4eXekpR6vze49hlg8QdCNjVY6jHRrOuVKGuwIDAQABAoIBACYPnqfwGzc3S0Se
jCctx1Zy39grixMO4+y+3eEFdwWNoP7CNOagm6YrT5KIxeCpWQfqi3uRY/2PH7IE
SnSkfzDY3aFmAMaeE82iViHeJ+6e9hNBeaX/qaO5e1gIyFsN5aSXauFfbmf2Ut4v
6qHXuE/Ijnd7WdczZc6rKcGNlck+f/QtsZhYEYbgHT3Nrt0ztlvkdrcyRIxZTeS7
7gvVWrVv6rviTobi/ZkeM9pqe5bbLuWgb/ArvI52pJwaUcz9LPGo+miank6e4gAd
cTudoREtBKVgXROhTSz33mdjjUTCDGdtILTztDSgLpJXYT0w2h1zmfV7t4tztzzQ
xW5LVCECgYEA33YG/gaZbfH6szC/heilojelrIG+n7GjsqpfMqGFofYNBAswUC3w
qZdeXxqGZEXC8mx8CufDhC50vJv353WAHaFFJcwy2QeGvHfPAZ4ZQ68o9XLeva4t
M6+ZtOiaK8u/mzxq43Jj7FbXmxxlJXY3B0uWdWpKGsPRTmSaUw0lKPECgYEA0NhG
74C6zRgHY2Eq2Qq7+NtlvpzUtVtalhiDoCEpDMhjzLUTBNy6yMsSdP8SyCy9O7Ng
rrXJdgKHvpjnJyUvB3hhEAurPIPWJArEfEHAF+V8mHY8f58xZqgHRsYsH3tWHYx4
2lzmposTES5KKV4xsYbjjyzXX+WNdaOkC4JBCmsCgYEA3j2JKL0xfMordmlmIWzG
xnWnnNCQ4EwQrVGKSlWgDPsj6MCj9Sorbs9veRBtVm6XOvkvyLzFk8GMMkTAIf+X
QmCw362daIF2vBw/0bEGGW2sQ6hR5L3EkOH08ZpgMmx6DI7jE4Ah5txbpBVydvaC
Ngw0AGSMfOABW4DshurM6VECgYEAxeH3rJ2r4gL/lSGPaOGr5At2Z1rQjRqHRarq
pQJmk+8X6PI1mCjRbspDrcm2cSc7EmNPm5sxzXhuSKE2fLfVzN06EusLkCZW9AWj
0Ry3t6zBFvEJN9+N/nf9lQjW6+mAWjUsmbLm9SzXnzLeID5ZFZ365kGVvQ6Tr8Cj
AiikGgsCgYEAlYGNwBKWClm797YVyPhmqrFX4T9Hpxc7oC3vVwd96tAbLlSrW8r5
o6ynBW1bG+qfjx9GyThgudvRtB+0vTSShrT5GftLCyMtOiYSHkGEvMOGFBuowzoz
3i841gR9+cwA0S1hy7fC0PDmTo0xC91JocwesPQ023MmECPfu6Frzog=
-----END RSA PRIVATE KEY-----
"""
CSR_STR = """
-----BEGIN CERTIFICATE REQUEST-----
MIIC1zCCAb8CAQAwczEUMBIGA1UEAwwLQUNvbW1vbk5hbWUxFTATBgNVBAoMDG9y
Z2FuaXphdGlvbjEOMAwGA1UECwwFZ3VuaXQxCzAJBgNVBAYTAlVTMRMwEQYDVQQI
DApDYWxpZm9ybmlhMRIwEAYDVQQHDAlzb21ld2hlcmUwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDNnY+Ap+V9+Eg/PAtd7bq27D7tDvbL10AysNUSazy7
gJyHfJyE3oiXm28zjFNzRQ35qhsCFpWg8M36FpdP9fIFG9sVXV/ye+YNBkZ2aTJi
RnbErZcy8qc+2MRd2JKE9g0pISp9hAEeEPLTwSoGqf5VqOaBehBqL5OKNUr7JAxV
TIH1oVU87w/6xg/WsUiyPo49WXxF/3DZNP1UOTYiffxIiARhTb9EtlXpt5iOlic3
w/vBX6qsH++XJIus2WE+ABlAVUQTCvc6bgpu4zjc8nlm3ClqkAKcxn2ubEder+Fh
hagMYGsbYG+/IWrKYN6S0BjE26tNMiOlmIebimjEdFpnAgMBAAGgHzAdBgkqhkiG
9w0BCQ4xEDAOMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggEBAE5OKI/n
b1ZRJDL4SpjWggRjfwBdYmb96lGH0aGDoVUP9UUusLzpWLtutkgr9Hh29agSsLZF
j535NeXHf+Jc4UyR288WQVJthgAT1e5+jBNPxz4IcTnDW7ZMJLGm495XaKi6Krcg
+8Qn2+h04jBTbN2Z9+MXGak0B8ycrbDx/FYL4KgBJRvS805d43zC6L1aUfRbpZgN
QeQoBdLhFNB1kAYSWCyETwRQOeGEphBJYBPcXsQVBWbMtLpbhjRZ1uTVZEFIh8Oa
zm3Cn4Ul8DO26w9QS4fmZjmnPOZFXYMWoOR6osHzb62PWQ8FBMqXcdToBV2Q9Iw4
PiFAxlc0tVjlLqQ=
-----END CERTIFICATE REQUEST-----
"""
| apache-2.0 | 904,782,322,918,498,400 | 52.422383 | 174 | 0.930666 | false |
T-R0D/JustForFun | aoc2016/aoc2016/day18/solution.py | 1 | 2158 | # This file is part of aoc2016.
#
# aoc2016 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# aoc2016 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with aoc2016. If not, see <http://www.gnu.org/licenses/>.
SAFE_TILE = '.'
TRAP_TILE = '^'
def part_one(puzzle_input):
return count_safe_tiles_in_room(first_row_of_tiles=puzzle_input, n_rows=40)
def part_two(puzzle_input):
return count_safe_tiles_in_room(first_row_of_tiles=puzzle_input, n_rows=400000)
def count_safe_tiles_in_room(first_row_of_tiles, n_rows):
current_row = list(first_row_of_tiles)
n_safe_tiles = count_safe_tiles(current_row)
for _ in range(n_rows - 1):
current_row = decode_next_row_of_tiles(current_row)
        n_safe_tiles += count_safe_tiles(current_row)
return n_safe_tiles
def count_safe_tiles(row_of_tiles):
    n_safe_tiles = 0
    for tile in row_of_tiles:
        if tile == SAFE_TILE:
            n_safe_tiles += 1
    return n_safe_tiles
def decode_next_row_of_tiles(input_row):
new_row = ['' for _ in range(len(input_row))]
new_row[0] = determine_tile(SAFE_TILE, input_row[0], input_row[1])
new_row[-1] = determine_tile(input_row[-2], input_row[-1], SAFE_TILE)
for i in range(1, len(input_row) - 1):
new_row[i] = determine_tile(*input_row[i - 1: i + 2])
return new_row
def determine_tile(left, center, right):
if (left == TRAP_TILE and center == SAFE_TILE and right == SAFE_TILE) or \
(left == SAFE_TILE and center == SAFE_TILE and right == TRAP_TILE) or \
(left == TRAP_TILE and center == TRAP_TILE and right == SAFE_TILE) or \
(left == SAFE_TILE and center == TRAP_TILE and right == TRAP_TILE):
return TRAP_TILE
return SAFE_TILE
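# Worked example (puzzle sample, not part of the original file): a tile becomes a
# trap exactly when its upper-left and upper-right parents differ, so the row
# '..^^.' is followed by '.^^^^' and then '^^..^'.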
| gpl-2.0 | 2,805,949,098,795,429,000 | 34.966667 | 83 | 0.667285 | false |
joshmoore/openmicroscopy | components/tools/OmeroWeb/omeroweb/webclient/controller/impexp.py | 1 | 1058 | #!/usr/bin/env python
#
#
#
# Copyright (c) 2008-2011 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
from webclient.controller import BaseController
class BaseImpexp(BaseController):
def __init__(self, conn, **kw):
BaseController.__init__(self, conn)
self.eContext['breadcrumb'] = ['Import']
| gpl-2.0 | 1,196,257,949,167,562,000 | 32.0625 | 79 | 0.724953 | false |
wenbinf/ndkale | kale/tests/test_task.py | 1 | 9275 | """Module testing the kale.task module."""
import mock
import unittest
from kale import exceptions
from kale import task
from kale import test_utils
class TaskFailureTestCase(unittest.TestCase):
"""Test handle_failure logic."""
def _create_patch(self, name):
"""Helper method for creating scoped mocks."""
patcher = mock.patch(name)
patch = patcher.start()
self.addCleanup(patcher.stop)
return patch
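    # e.g. pre_run = self._create_patch('kale.task.Task._pre_run') returns a started
    # mock that is stopped automatically at the end of the test via addCleanup.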
def testRunWorker(self):
"""Test running a task."""
setup_env = self._create_patch(
'kale.task.Task._setup_task_environment')
pre_run = self._create_patch('kale.task.Task._pre_run')
post_run = self._create_patch('kale.task.Task._post_run')
clean_env = self._create_patch(
'kale.task.Task._clean_task_environment')
task_inst = test_utils.new_mock_task(task_class=test_utils.MockTask)
task_args = [1, 'a']
task_inst.run(*task_args)
setup_env.assert_called_once_with()
pre_run.assert_called_once_with(*task_args)
post_run.assert_called_once_with(*task_args)
clean_env.assert_called_once_with(
task_id='mock_task', task_name='kale.test_utils.MockTask')
def testRunWorkerFailTask(self):
"""Test running a task."""
setup_env = self._create_patch(
'kale.task.Task._setup_task_environment')
pre_run = self._create_patch('kale.task.Task._pre_run')
post_run = self._create_patch('kale.task.Task._post_run')
clean_env = self._create_patch(
'kale.task.Task._clean_task_environment')
task_inst = test_utils.new_mock_task(task_class=test_utils.FailTask)
task_inst._start_time = 1
task_args = [1, 'a']
with self.assertRaises(exceptions.TaskException) as exc_ctxt_mngr:
task_inst.run(*task_args)
setup_env.assert_called_once_with()
pre_run.assert_called_once_with(*task_args)
assert not post_run.called, '_post_run should not have been called.'
clean_env.assert_called_once_with(
task_id='fail_task', task_name='kale.test_utils.FailTask',
exc=exc_ctxt_mngr.exception)
self.assertTrue(task_inst._end_time > 0)
self.assertTrue(task_inst._task_latency_sec > 0)
def testTaskUnrecoverableException(self):
"""Task task failing with unrecoverable exception."""
task_inst = test_utils.new_mock_task(task_class=test_utils.FailTask)
message = test_utils.MockMessage(task_inst)
with mock.patch(
'kale.task.Task._report_permanent_failure') as fail_func:
exc = SyntaxError('Unrecoverable Error')
retried = test_utils.FailTask.handle_failure(message, exc)
self.assertFalse(retried)
fail_func.assert_called_once_with(
message, exc, task.PERMANENT_FAILURE_UNRECOVERABLE, True)
def testTaskNoRetries(self):
"""Task task failing with retries disabled."""
task_inst = test_utils.new_mock_task(
task_class=test_utils.FailTaskNoRetries)
message = test_utils.MockMessage(task_inst)
with mock.patch(
'kale.task.Task._report_permanent_failure') as fail_func:
exc = exceptions.TaskException('Exception')
retried = test_utils.FailTaskNoRetries.handle_failure(message, exc)
self.assertFalse(retried)
fail_func.assert_called_once_with(
message, exc, task.PERMANENT_FAILURE_NO_RETRY, True)
def testTaskRetriesExceeded(self):
"""Task task failing with retries exceeded."""
task_inst = test_utils.new_mock_task(task_class=test_utils.FailTask)
message = test_utils.MockMessage(
task_inst, retry_num=test_utils.FailTask.max_retries)
with mock.patch(
'kale.task.Task._report_permanent_failure') as fail_func:
exc = exceptions.TaskException('Exception')
retried = test_utils.FailTask.handle_failure(message, exc)
self.assertFalse(retried)
fail_func.assert_called_once_with(
message, exc, task.PERMANENT_FAILURE_RETRIES_EXCEEDED, False)
def testTaskRetries(self):
"""Task task failing with retries exceeded."""
task_inst = test_utils.new_mock_task(task_class=test_utils.FailTask)
message = test_utils.MockMessage(
task_inst, retry_num=test_utils.FailTask.max_retries)
with mock.patch(
'kale.task.Task._report_permanent_failure') as fail_func:
exc = exceptions.TaskException('Exception')
retried = test_utils.FailTask.handle_failure(message, exc)
self.assertFalse(retried)
fail_func.assert_called_once_with(
message, exc, task.PERMANENT_FAILURE_RETRIES_EXCEEDED, False)
def testTaskRuntimeExceeded(self):
"""Task task failing from timeout."""
task_inst = test_utils.new_mock_task(task_class=test_utils.FailTask)
sample_values = [
(i, test_utils.FailTask._get_delay_sec_for_retry(i)) for i in
range(task_inst.max_retries)]
payload = {
'args': [],
'kwargs': {},
'app_data': {}}
for retry, delay_sec in sample_values:
with mock.patch(
'kale.publisher.Publisher.publish') as publish_func:
message = test_utils.MockMessage(task_inst, retry_num=retry)
retried = test_utils.FailTask.handle_failure(
message, exceptions.TaskException('Exception'))
self.assertTrue(retried)
publish_func.assert_called_once_with(
test_utils.FailTask, message.task_id, payload,
current_retry_num=(retry + 1), delay_sec=delay_sec)
retry = retry + 1
with mock.patch(
'kale.task.Task._report_permanent_failure') as fail_func:
exc = exceptions.TaskException('Exception')
message = test_utils.MockMessage(task_inst, retry_num=retry)
retried = test_utils.FailTask.handle_failure(message, exc)
self.assertFalse(retried)
fail_func.assert_called_once_with(
message, exc, task.PERMANENT_FAILURE_RETRIES_EXCEEDED, False)
def testTargetRuntimeExceeded(self):
"""Task task target runtime exceeded."""
task_inst = test_utils.new_mock_task(
task_class=test_utils.SlowButNotTooSlowTask)
with mock.patch(
'kale.task.Task._alert_runtime_exceeded') as time_exceeded:
task_inst.run()
self.assertTrue(time_exceeded.called)
def testBlacklistedTaskFails(self):
"""Test that a blacklisted task raises an exception."""
setup_env = self._create_patch(
'kale.task.Task._setup_task_environment')
pre_run = self._create_patch('kale.task.Task._pre_run')
run_task = self._create_patch('kale.task.Task.run_task')
clean_env = self._create_patch(
'kale.task.Task._clean_task_environment')
check_blacklist = self._create_patch('kale.task.Task._check_blacklist')
raised_exc = exceptions.BlacklistedException()
check_blacklist.side_effect = raised_exc
task_inst = test_utils.new_mock_task(task_class=test_utils.MockTask)
task_inst._start_time = 1
task_args = [1, 'a']
with self.assertRaises(exceptions.BlacklistedException):
task_inst.run(*task_args)
setup_env.assert_called_once_with()
pre_run.assert_called_once_with(*task_args)
self.assertFalse(run_task.called)
clean_env.assert_called_once_with(
task_id='mock_task', task_name='kale.test_utils.MockTask',
exc=raised_exc)
def testBlacklistedTaskNoRetries(self):
"""Test that a blacklisted task raises an exception."""
setup_env = self._create_patch(
'kale.task.Task._setup_task_environment')
pre_run = self._create_patch('kale.task.Task._pre_run')
run_task = self._create_patch('kale.task.Task.run_task')
clean_env = self._create_patch(
'kale.task.Task._clean_task_environment')
check_blacklist = self._create_patch('kale.task.Task._check_blacklist')
raised_exc = exceptions.BlacklistedException()
check_blacklist.side_effect = raised_exc
mock_message = test_utils.new_mock_message(
task_class=test_utils.MockTask)
task_inst = mock_message.task_inst
task_inst._start_time = 1
task_args = [1, 'a']
with self.assertRaises(exceptions.BlacklistedException):
task_inst.run(*task_args)
setup_env.assert_called_once_with()
pre_run.assert_called_once_with(*task_args)
self.assertFalse(run_task.called)
clean_env.assert_called_once_with(
task_id='mock_task', task_name='kale.test_utils.MockTask',
exc=raised_exc)
        # Check that the failure is treated as permanent (the task is not retried).
permanent_failure = not task_inst.__class__.handle_failure(
mock_message, raised_exc)
self.assertTrue(permanent_failure)
| bsd-2-clause | 2,734,338,449,871,767,600 | 40.039823 | 79 | 0.618976 | false |
pyfa-org/Pyfa | gui/builtinViews/implantEditor.py | 1 | 12699 | import re
# noinspection PyPackageRequirements
import wx
# noinspection PyPackageRequirements
from wx.lib.buttons import GenBitmapButton
import gui.builtinMarketBrowser.pfSearchBox as SBox
import gui.display as d
from gui.bitmap_loader import BitmapLoader
from gui.marketBrowser import SearchBox
from service.market import Market
def stripHtml(text):
text = re.sub('<\s*br\s*/?\s*>', '\n', text)
text = re.sub('</?[^/]+?(/\s*)?>', '', text)
return text
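# Example (hypothetical attribute text):
#   stripHtml('Adds a <b>3%</b> bonus<br>to scan strength') == 'Adds a 3% bonus\nto scan strength'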
class BaseImplantEditorView(wx.Panel):
def addMarketViewImage(self, iconFile):
if iconFile is None:
return -1
bitmap = BitmapLoader.getBitmap(iconFile, "icons")
if bitmap is None:
return -1
else:
return self.availableImplantsImageList.Add(bitmap)
def __init__(self, parent):
wx.Panel.__init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.TAB_TRAVERSAL)
self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW))
pmainSizer = wx.BoxSizer(wx.HORIZONTAL)
availableSizer = wx.BoxSizer(wx.VERTICAL)
self.searchBox = SearchBox(self)
self.itemView = ItemView(self)
self.itemView.Hide()
availableSizer.Add(self.searchBox, 0, wx.EXPAND)
availableSizer.Add(self.itemView, 1, wx.EXPAND)
self.availableImplantsTree = wx.TreeCtrl(self, wx.ID_ANY, style=wx.TR_DEFAULT_STYLE | wx.TR_HIDE_ROOT)
root = self.availableRoot = self.availableImplantsTree.AddRoot("Available")
self.availableImplantsImageList = wx.ImageList(16, 16)
self.availableImplantsTree.SetImageList(self.availableImplantsImageList)
availableSizer.Add(self.availableImplantsTree, 1, wx.EXPAND)
pmainSizer.Add(availableSizer, 1, wx.ALL | wx.EXPAND, 5)
buttonSizer = wx.BoxSizer(wx.VERTICAL)
buttonSizer.AddStretchSpacer()
self.btnAdd = GenBitmapButton(self, wx.ID_ADD, BitmapLoader.getBitmap("fit_add_small", "gui"),
style=wx.BORDER_NONE)
buttonSizer.Add(self.btnAdd, 0)
self.btnRemove = GenBitmapButton(self, wx.ID_REMOVE, BitmapLoader.getBitmap("fit_delete_small", "gui"),
style=wx.BORDER_NONE)
buttonSizer.Add(self.btnRemove, 0)
buttonSizer.AddStretchSpacer()
pmainSizer.Add(buttonSizer, 0, wx.EXPAND, 0)
characterImplantSizer = wx.BoxSizer(wx.VERTICAL)
self.pluggedImplantsTree = AvailableImplantsView(self)
characterImplantSizer.Add(self.pluggedImplantsTree, 1, wx.ALL | wx.EXPAND, 5)
pmainSizer.Add(characterImplantSizer, 1, wx.EXPAND, 5)
self.SetSizer(pmainSizer)
self.hoveredLeftTreeTypeID = None
self.hoveredRightListRow = None
# Populate the market tree
sMkt = Market.getInstance()
for mktGrp in sMkt.getImplantTree():
iconId = self.addMarketViewImage(sMkt.getIconByMarketGroup(mktGrp))
childId = self.availableImplantsTree.AppendItem(root, mktGrp.name, iconId, data=mktGrp.ID)
if sMkt.marketGroupHasTypesCheck(mktGrp) is False:
self.availableImplantsTree.AppendItem(childId, "dummy")
self.availableImplantsTree.SortChildren(self.availableRoot)
# Bind the event to replace dummies by real data
self.availableImplantsTree.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.expandLookup)
self.availableImplantsTree.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.itemSelected)
self.availableImplantsTree.Bind(wx.EVT_MOTION, self.OnLeftTreeMouseMove)
self.availableImplantsTree.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeftTreeMouseLeave)
self.itemView.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.itemSelected)
self.pluggedImplantsTree.Bind(wx.EVT_MOTION, self.OnRightListMouseMove)
# Bind add & remove buttons
self.btnAdd.Bind(wx.EVT_BUTTON, self.itemSelected)
self.btnRemove.Bind(wx.EVT_BUTTON, self.removeItem)
# We update with an empty list first to set the initial size for Layout(), then update later with actual
# implants for character. This helps with sizing issues.
self.pluggedImplantsTree.update([])
self.bindContext()
self.Layout()
self.update()
def bindContext(self):
# Binds self.contextChanged to whatever changes the context
raise NotImplementedError()
def getImplantsFromContext(self):
""" Gets list of implants from current context """
raise NotImplementedError()
def addImplantToContext(self, item):
""" Adds implant to the current context"""
raise NotImplementedError()
def removeImplantFromContext(self, implant):
""" Removes implant from the current context"""
raise NotImplementedError()
def update(self):
"""Updates implant list based off the current context"""
self.implants = self.getImplantsFromContext()[:]
self.implants.sort(key=lambda i: int(i.getModifiedItemAttr("implantness")))
self.pluggedImplantsTree.update(self.implants)
def contextChanged(self, event):
self.update()
event.Skip()
def expandLookup(self, event):
tree = self.availableImplantsTree
sMkt = Market.getInstance()
parent = event.Item
child, _ = tree.GetFirstChild(parent)
text = tree.GetItemText(child)
if text == "dummy" or text == "itemdummy":
tree.Delete(child)
# if the dummy item is a market group, replace with actual market groups
if text == "dummy":
# Add 'real stoof!' instead
currentMktGrp = sMkt.getMarketGroup(tree.GetItemData(parent), eager="children")
for childMktGrp in sMkt.getMarketGroupChildren(currentMktGrp):
iconId = self.addMarketViewImage(sMkt.getIconByMarketGroup(childMktGrp))
childId = tree.AppendItem(parent, childMktGrp.name, iconId, data=childMktGrp.ID)
if sMkt.marketGroupHasTypesCheck(childMktGrp) is False:
tree.AppendItem(childId, "dummy")
else:
tree.AppendItem(childId, "itemdummy")
# replace dummy with actual items
if text == "itemdummy":
currentMktGrp = sMkt.getMarketGroup(tree.GetItemData(parent))
items = sMkt.getItemsByMarketGroup(currentMktGrp)
for item in items:
iconId = self.addMarketViewImage(item.iconID)
tree.AppendItem(parent, item.name, iconId, data=item)
tree.SortChildren(parent)
def itemSelected(self, event):
if event.EventObject is self.btnAdd:
# janky fix that sets EventObject so that we don't have similar code elsewhere.
if self.itemView.IsShown():
event.EventObject = self.itemView
else:
event.EventObject = self.availableImplantsTree
if event.EventObject is self.itemView:
curr = event.EventObject.GetFirstSelected()
while curr != -1:
item = self.itemView.items[curr]
self.addImplantToContext(item)
curr = event.EventObject.GetNextSelected(curr)
else:
root = self.availableImplantsTree.GetSelection()
if not root.IsOk():
return
nchilds = self.availableImplantsTree.GetChildrenCount(root)
if nchilds == 0:
item = self.availableImplantsTree.GetItemData(root)
self.addImplantToContext(item)
else:
event.Skip()
return
self.update()
def removeItem(self, event):
pos = self.pluggedImplantsTree.GetFirstSelected()
if pos != -1:
self.removeImplantFromContext(self.implants[pos])
self.update()
# Due to https://github.com/wxWidgets/Phoenix/issues/1372 we cannot set tooltips on the
# tree itself; work around this with the following two methods by setting the tooltip
# on the parent window
def OnLeftTreeMouseMove(self, event):
event.Skip()
treeItemId, _ = self.availableImplantsTree.HitTest(event.Position)
if not treeItemId:
if self.hoveredLeftTreeTypeID is not None:
self.hoveredLeftTreeTypeID = None
self.SetToolTip(None)
return
item = self.availableImplantsTree.GetItemData(treeItemId)
isImplant = getattr(item, 'isImplant', False)
if not isImplant:
if self.hoveredLeftTreeTypeID is not None:
self.hoveredLeftTreeTypeID = None
self.SetToolTip(None)
return
if self.hoveredLeftTreeTypeID == item.ID:
return
if self.ToolTip is not None:
self.SetToolTip(None)
else:
self.hoveredLeftTreeTypeID = item.ID
toolTip = wx.ToolTip(stripHtml(item.description))
toolTip.SetMaxWidth(self.GetSize().Width)
self.SetToolTip(toolTip)
def OnLeftTreeMouseLeave(self, event):
event.Skip()
self.SetToolTip(None)
def OnRightListMouseMove(self, event):
event.Skip()
row, _, col = self.pluggedImplantsTree.HitTestSubItem(event.Position)
if row != self.hoveredRightListRow:
if self.pluggedImplantsTree.ToolTip is not None:
self.pluggedImplantsTree.SetToolTip(None)
else:
self.hoveredRightListRow = row
try:
implant = self.implants[row]
except IndexError:
self.pluggedImplantsTree.SetToolTip(None)
else:
toolTip = wx.ToolTip(stripHtml(implant.item.description))
toolTip.SetMaxWidth(self.pluggedImplantsTree.GetSize().Width)
self.pluggedImplantsTree.SetToolTip(toolTip)
class AvailableImplantsView(d.Display):
DEFAULT_COLS = ["attr:implantness",
"Base Name"]
def __init__(self, parent):
d.Display.__init__(self, parent, style=wx.LC_SINGLE_SEL)
self.Bind(wx.EVT_LEFT_DCLICK, parent.removeItem)
class ItemView(d.Display):
DEFAULT_COLS = ["Base Icon",
"Base Name"]
def __init__(self, parent):
d.Display.__init__(self, parent)
self.parent = parent
self.searchBox = parent.searchBox
self.hoveredRow = None
self.items = []
# Bind search actions
self.searchBox.Bind(SBox.EVT_TEXT_ENTER, self.scheduleSearch)
self.searchBox.Bind(SBox.EVT_SEARCH_BTN, self.scheduleSearch)
self.searchBox.Bind(SBox.EVT_CANCEL_BTN, self.clearSearch)
self.searchBox.Bind(SBox.EVT_TEXT, self.scheduleSearch)
self.Bind(wx.EVT_MOTION, self.OnMouseMove)
def clearSearch(self, event=None):
if self.IsShown():
self.parent.availableImplantsTree.Show()
self.Hide()
self.parent.Layout()
if event:
self.searchBox.Clear()
self.items = []
self.update(self.items)
def scheduleSearch(self, event=None):
sMkt = Market.getInstance()
search = self.searchBox.GetLineText(0)
# Make sure we do not count wildcards as search symbol
realsearch = search.replace('*', '').replace('?', '')
# Show nothing if query is too short
if len(realsearch) < 3:
self.clearSearch()
return
sMkt.searchItems(search, self.populateSearch, 'implants')
def populateSearch(self, itemIDs):
if not self.IsShown():
self.parent.availableImplantsTree.Hide()
self.Show()
self.parent.Layout()
items = Market.getItems(itemIDs)
items = [i for i in items if i.group.name != 'Booster']
self.items = sorted(list(items), key=lambda i: i.name)
self.update(self.items)
def OnMouseMove(self, event):
event.Skip()
row, _, col = self.HitTestSubItem(event.Position)
if row != self.hoveredRow:
if self.ToolTip is not None:
self.SetToolTip(None)
else:
self.hoveredRow = row
try:
item = self.items[row]
except IndexError:
self.SetToolTip(None)
else:
toolTip = wx.ToolTip(stripHtml(item.description))
toolTip.SetMaxWidth(self.GetSize().Width)
self.SetToolTip(toolTip)
| gpl-3.0 | 8,534,346,245,961,577,000 | 36.35 | 112 | 0.62438 | false |
bchareyre/ratchet | py/ymport.py | 1 | 14686 | """
Import geometry from various formats ('import' is python keyword, hence the name 'ymport').
"""
from yade.wrapper import *
from yade import utils
try:
from minieigen import *
except ImportError:
from miniEigen import *
def textExt(fileName,format='x_y_z_r',shift=Vector3.Zero,scale=1.0,**kw):
"""Load sphere coordinates from file in specific format, returns a list of corresponding bodies; that may be inserted to the simulation with O.bodies.append().
:param str filename: file name
:param str format: the name of output format. Supported `x_y_z_r`(default), `x_y_z_r_matId`
:param [float,float,float] shift: [X,Y,Z] parameter moves the specimen.
:param float scale: factor scales the given data.
:param \*\*kw: (unused keyword arguments) is passed to :yref:`yade.utils.sphere`
:returns: list of spheres.
Lines starting with # are skipped
"""
infile = open(fileName,"r")
lines = infile.readlines()
infile.close()
ret=[]
for line in lines:
data = line.split()
if (data[0] == "#format"):
format=data[1]
continue
elif (data[0][0] == "#"): continue
if (format=='x_y_z_r'):
pos = Vector3(float(data[0]),float(data[1]),float(data[2]))
ret.append(utils.sphere(shift+scale*pos,scale*float(data[3]),**kw))
elif (format=='x_y_z_r_matId'):
pos = Vector3(float(data[0]),float(data[1]),float(data[2]))
ret.append(utils.sphere(shift+scale*pos,scale*float(data[3]),material=int(data[4]),**kw))
elif (format=='id_x_y_z_r_matId'):
pos = Vector3(float(data[1]),float(data[2]),float(data[3]))
ret.append(utils.sphere(shift+scale*pos,scale*float(data[4]),material=int(data[5]),**kw))
else:
raise RuntimeError("Please, specify a correct format output!");
return ret
def textClumps(fileName,shift=Vector3.Zero,discretization=0,orientation=Quaternion((0,1,0),0.0),scale=1.0,**kw):
"""Load clumps-members from file, insert them to the simulation.
:param str filename: file name
:param str format: the name of output format. Supported `x_y_z_r`(default), `x_y_z_r_clumpId`
:param [float,float,float] shift: [X,Y,Z] parameter moves the specimen.
:param float scale: factor scales the given data.
:param \*\*kw: (unused keyword arguments) is passed to :yref:`yade.utils.sphere`
:returns: list of spheres.
Lines starting with # are skipped
"""
infile = open(fileName,"r")
lines = infile.readlines()
infile.close()
ret=[]
curClump=[]
newClumpId = -1
for line in lines:
data = line.split()
if (data[0][0] == "#"): continue
pos = orientation*Vector3(float(data[0]),float(data[1]),float(data[2]))
if (newClumpId<0 or newClumpId==int(data[4])):
idD = curClump.append(utils.sphere(shift+scale*pos,scale*float(data[3]),**kw))
newClumpId = int(data[4])
else:
newClumpId = int(data[4])
ret.append(O.bodies.appendClumped(curClump,discretization=discretization))
curClump=[]
idD = curClump.append(utils.sphere(shift+scale*pos,scale*float(data[3]),**kw))
if (len(curClump) != 0):
ret.append(O.bodies.appendClumped(curClump,discretization=discretization))
# Set the mask to a clump the same as the first member of it
for i in range(len(ret)):
O.bodies[ret[i][0]].mask = O.bodies[ret[i][1][0]].mask
return ret
def text(fileName,shift=Vector3.Zero,scale=1.0,**kw):
"""Load sphere coordinates from file, returns a list of corresponding bodies; that may be inserted to the simulation with O.bodies.append().
:param string filename: file which has 4 colums [x, y, z, radius].
:param [float,float,float] shift: [X,Y,Z] parameter moves the specimen.
:param float scale: factor scales the given data.
:param \*\*kw: (unused keyword arguments) is passed to :yref:`yade.utils.sphere`
:returns: list of spheres.
Lines starting with # are skipped
"""
return textExt(fileName=fileName,format='x_y_z_r',shift=shift,scale=scale,**kw)
def stl(file, dynamic=None,fixed=True,wire=True,color=None,highlight=False,noBound=False,material=-1):
""" Import geometry from stl file, return list of created facets."""
imp = STLImporter()
facets=imp.ymport(file)
for b in facets:
b.shape.color=color if color else utils.randomColor()
b.shape.wire=wire
b.shape.highlight=highlight
pos=b.state.pos
utils._commonBodySetup(b,0,Vector3(0,0,0),material=material,pos=pos,noBound=noBound,dynamic=dynamic,fixed=fixed)
b.aspherical=False
return facets
def gts(meshfile,shift=(0,0,0),scale=1.0,**kw):
""" Read given meshfile in gts format.
:Parameters:
`meshfile`: string
name of the input file.
`shift`: [float,float,float]
[X,Y,Z] parameter moves the specimen.
`scale`: float
factor scales the given data.
`**kw`: (unused keyword arguments)
is passed to :yref:`yade.utils.facet`
:Returns: list of facets.
"""
import gts,yade.pack
surf=gts.read(open(meshfile))
surf.scale(scale)
surf.translate(shift)
return yade.pack.gtsSurface2Facets(surf,**kw)
def gmsh(meshfile="file.mesh",shift=Vector3.Zero,scale=1.0,orientation=Quaternion((0,1,0),0.0),**kw):
""" Imports geometry from mesh file and creates facets.
:Parameters:
`shift`: [float,float,float]
[X,Y,Z] parameter moves the specimen.
`scale`: float
factor scales the given data.
`orientation`: quaternion
orientation of the imported mesh
`**kw`: (unused keyword arguments)
is passed to :yref:`yade.utils.facet`
:Returns: list of facets forming the specimen.
mesh files can be easily created with `GMSH <http://www.geuz.org/gmsh/>`_.
Example added to :ysrc:`examples/regular-sphere-pack/regular-sphere-pack.py`
Additional examples of mesh-files can be downloaded from
http://www-roc.inria.fr/gamma/download/download.php
"""
infile = open(meshfile,"r")
lines = infile.readlines()
infile.close()
nodelistVector3=[]
findVerticesString=0
while (lines[findVerticesString].split()[0] != 'Vertices'): # Find the string with the number of Vertices
findVerticesString+=1
findVerticesString+=1
numNodes = int(lines[findVerticesString].split()[0])
for i in range(numNodes):
nodelistVector3.append(Vector3(0.0,0.0,0.0))
id = 0
for line in lines[findVerticesString+1:numNodes+findVerticesString+1]:
data = line.split()
nodelistVector3[id] = orientation*Vector3(float(data[0])*scale,float(data[1])*scale,float(data[2])*scale)+shift
id += 1
findTriangleString=findVerticesString+numNodes
while (lines[findTriangleString].split()[0] != 'Triangles'): # Find the string with the number of Triangles
findTriangleString+=1
findTriangleString+=1
numTriangles = int(lines[findTriangleString].split()[0])
triList = []
for i in range(numTriangles):
triList.append([0,0,0,0])
tid = 0
for line in lines[findTriangleString+1:findTriangleString+numTriangles+1]:
data = line.split()
id1 = int(data[0])-1
id2 = int(data[1])-1
id3 = int(data[2])-1
triList[tid][0] = tid
triList[tid][1] = id1
triList[tid][2] = id2
triList[tid][3] = id3
tid += 1
ret=[]
for i in triList:
a=nodelistVector3[i[1]]
b=nodelistVector3[i[2]]
c=nodelistVector3[i[3]]
ret.append(utils.facet((a, b, c), **kw))
return ret
def gengeoFile(fileName="file.geo",shift=Vector3.Zero,scale=1.0,orientation=Quaternion((0,1,0),0.0),**kw):
""" Imports geometry from LSMGenGeo .geo file and creates spheres.
Since 2012 the package is available in Debian/Ubuntu and known as python-demgengeo
http://packages.qa.debian.org/p/python-demgengeo.html
:Parameters:
`filename`: string
file which has 4 colums [x, y, z, radius].
`shift`: Vector3
Vector3(X,Y,Z) parameter moves the specimen.
`scale`: float
factor scales the given data.
`orientation`: quaternion
orientation of the imported geometry
`**kw`: (unused keyword arguments)
is passed to :yref:`yade.utils.sphere`
:Returns: list of spheres.
LSMGenGeo library allows one to create pack of spheres
with given [Rmin:Rmax] with null stress inside the specimen.
Can be useful for Mining Rock simulation.
Example: :ysrc:`examples/packs/packs.py`, usage of LSMGenGeo library in :ysrc:`examples/test/genCylLSM.py`.
* https://answers.launchpad.net/esys-particle/+faq/877
* http://www.access.edu.au/lsmgengeo_python_doc/current/pythonapi/html/GenGeo-module.html
* https://svn.esscc.uq.edu.au/svn/esys3/lsm/contrib/LSMGenGeo/"""
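# Note (added for clarity, based on the parser below): the sphere count is
# read from line 7 of the .geo file and each of the following lines is
# expected to hold 'x y z r' for one sphere.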
from yade.utils import sphere
infile = open(fileName,"r")
lines = infile.readlines()
infile.close()
numSpheres = int(lines[6].split()[0])
ret=[]
for line in lines[7:numSpheres+7]:
data = line.split()
pos = orientation*Vector3(float(data[0]),float(data[1]),float(data[2]))
ret.append(utils.sphere(shift+scale*pos,scale*float(data[3]),**kw))
return ret
def gengeo(mntable,shift=Vector3.Zero,scale=1.0,**kw):
""" Imports geometry from LSMGenGeo library and creates spheres.
Since 2012 the package is available in Debian/Ubuntu and known as python-demgengeo
http://packages.qa.debian.org/p/python-demgengeo.html
:Parameters:
`mntable`: mntable
object, which creates by LSMGenGeo library, see example
`shift`: [float,float,float]
[X,Y,Z] parameter moves the specimen.
`scale`: float
factor scales the given data.
`**kw`: (unused keyword arguments)
is passed to :yref:`yade.utils.sphere`
LSMGenGeo library allows one to create pack of spheres
with given [Rmin:Rmax] with null stress inside the specimen.
Can be useful for Mining Rock simulation.
Example: :ysrc:`examples/packs/packs.py`, usage of LSMGenGeo library in :ysrc:`examples/test/genCylLSM.py`.
* https://answers.launchpad.net/esys-particle/+faq/877
* http://www.access.edu.au/lsmgengeo_python_doc/current/pythonapi/html/GenGeo-module.html
* https://svn.esscc.uq.edu.au/svn/esys3/lsm/contrib/LSMGenGeo/"""
try:
from GenGeo import MNTable3D,Sphere
except ImportError:
from gengeo import MNTable3D,Sphere
ret=[]
sphereList=mntable.getSphereListFromGroup(0)
for i in range(0, len(sphereList)):
r=sphereList[i].Radius()
c=sphereList[i].Centre()
ret.append(utils.sphere([shift[0]+scale*float(c.X()),shift[1]+scale*float(c.Y()),shift[2]+scale*float(c.Z())],scale*float(r),**kw))
return ret
def unv(fileName,shift=(0,0,0),scale=1.0,returnConnectivityTable=False,**kw):
""" Import geometry from unv file, return list of created facets.
:param string fileName: name of unv file
:param (float,float,float)|Vector3 shift: (X,Y,Z) parameter moves the specimen.
:param float scale: factor scales the given data.
:param \*\*kw: (unused keyword arguments) is passed to :yref:`yade.utils.facet`
:param bool returnConnectivityTable: if True, apart from facets returns also nodes (list of (x,y,z) nodes coordinates) and elements (list of (id1,id2,id3) element nodes ids). If False (default), returns only facets
unv files are mainly used for FEM analyses (are used by `OOFEM <http://www.oofem.org/>`_ and `Abaqus <http://www.simulia.com/products/abaqus_fea.html>`_), but triangular elements can be imported as facets.
These files cen be created e.g. with open-source free software `Salome <http://salome-platform.org>`_.
Example: :ysrc:`examples/test/unv-read/unvRead.py`."""
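# Implementation note (added for clarity): UNV files consist of numbered
# datasets; the reader below only interprets dataset 2411 (node coordinates)
# and 2412 (elements), keeping elements of type 41, which are treated as
# triangles.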
class UNVReader:
# class used in ymport.unv function
# reads and evaluate given unv file and extracts all triangles
# can be extended to read tetrahedrons as well
def __init__(self,fileName,shift=(0,0,0),scale=1.0,returnConnectivityTable=False,**kw):
self.shift = shift
self.scale = scale
self.unvFile = open(fileName,'r')
self.flag = 0
self.line = self.unvFile.readline()
self.lineSplit = self.line.split()
self.nodes = []
self.elements = []
self.read(**kw)
def readLine(self):
self.line = self.unvFile.readline()
self.lineSplit = self.line.split()
def read(self,**kw):
while self.line:
self.evalLine()
self.line = self.unvFile.readline()
self.unvFile.close()
self.createFacets(**kw)
def evalLine(self):
self.lineSplit = self.line.split()
if len(self.lineSplit) <= 1: # eval special unv format
if self.lineSplit[0] == '-1': pass
elif self.lineSplit[0] == '2411': self.flag = 1; # nodes
elif self.lineSplit[0] == '2412': self.flag = 2; # edges (lines)
else: self.flag = 4; # volume elements or other, not interesting for us (at least yet)
elif self.flag == 1: self.evalNodes()
elif self.flag == 2: self.evalEdge()
elif self.flag == 3: self.evalFacet()
#elif self.flag == 4: self.evalGroup()
def evalNodes(self):
self.readLine()
self.nodes.append((
self.shift[0]+self.scale*float(self.lineSplit[0]),
self.shift[1]+self.scale*float(self.lineSplit[1]),
self.shift[2]+self.scale*float(self.lineSplit[2])))
def evalEdge(self):
if self.lineSplit[1]=='41':
self.flag = 3
self.evalFacet()
else:
self.readLine()
self.readLine()
def evalFacet(self):
if self.lineSplit[1]=='41': # triangle
self.readLine()
self.elements.append((
int(self.lineSplit[0])-1,
int(self.lineSplit[1])-1,
int(self.lineSplit[2])-1))
else: # is not triangle
self.readLine()
self.flag = 4
# can be added function to handle tetrahedrons
def createFacets(self,**kw):
self.facets = [utils.facet(tuple(self.nodes[i] for i in e),**kw) for e in self.elements]
#
unvReader = UNVReader(fileName,shift,scale,returnConnectivityTable,**kw)
if returnConnectivityTable:
return unvReader.facets, unvReader.nodes, unvReader.elements
return unvReader.facets
def iges(fileName,shift=(0,0,0),scale=1.0,returnConnectivityTable=False,**kw):
""" Import triangular mesh from .igs file, return list of created facets.
:param string fileName: name of iges file
:param (float,float,float)|Vector3 shift: (X,Y,Z) parameter moves the specimen.
:param float scale: factor scales the given data.
:param \*\*kw: (unused keyword arguments) is passed to :yref:`yade.utils.facet`
:param bool returnConnectivityTable: if True, apart from facets returns also nodes (list of (x,y,z) nodes coordinates) and elements (list of (id1,id2,id3) element nodes ids). If False (default), returns only facets
"""
nodes,elems = [],[]
f = open(fileName)
for line in f:
if line.startswith('134,'): # read nodes coordinates
ls = line.split(',')
v = Vector3(
float(ls[1])*scale + shift[0],
float(ls[2])*scale + shift[1],
float(ls[3])*scale + shift[2]
)
nodes.append(v)
if line.startswith('136,'): # read elements
ls = line.split(',')
i1,i2,i3 = int(ls[3])/2, int(ls[4])/2, int(ls[5])/2 # the numbering of nodes is 1,3,5,7,..., hence this int(ls[*])/2
elems.append( (i1,i2,i3) )
facets = [utils.facet( ( nodes[e[0]], nodes[e[1]], nodes[e[2]] ), **kw) for e in elems]
if returnConnectivityTable:
return facets, nodes, elems
return facets
| gpl-2.0 | 2,926,112,471,460,078,000 | 35.899497 | 216 | 0.702778 | false |
nimiq/moogle-project | magpie/response.py | 1 | 3599 | from abc import ABCMeta, abstractmethod
from utils.exceptions import ResponseError, InconsistentItemError, EntryNotToBeIndexed
class AbstractApiResponse(metaclass=ABCMeta):
"""
Response got after a query to a `Provider`.
Parameters:
response -- a `requests.models.Response` instance.
"""
def __init__(self, response):
self.response = response
self.updates_cursor = ''
self.has_more = False
self.pagination_cursor = ''
self._sanity_check()
def _sanity_check(self):
"""
Check whether the current response got is an error response.
"""
# If the HTTP status code is not 200, then it is an error.
if self.response.status_code != 200:
msg = 'HTTP Status: {}\n{}'.format(self.response.status_code, self.response.json())
raise ResponseError(msg)
def parse(self, bearertoken_id):
redis = self._init_redis_list(bearertoken_id)
self._hook_parse_entire_response(redis)
is_first_entry = True
entry = None
for entry in self._entries_to_apientries():
# `entry` is a `Api<Provider>Entry` instance.
redis.buffer(entry)
# Updates cursor: the `updated_time` of the most recent post.
if is_first_entry:
self._hook_parse_first_entry(entry)
is_first_entry = False
if entry: # if there is at least 1 `entry`.
self._hook_parse_last_entry(entry)
redis.flush_buffer()
@abstractmethod
def _init_redis_list(self, *args, **kwargs):
pass
def _hook_parse_entire_response(self, redis):
pass
def _hook_parse_first_entry(self, entry):
pass
def _hook_parse_last_entry(self, entry):
pass
@abstractmethod
def _build_pagination_cursor(self):
pass
@abstractmethod
def _build_updates_cursor(self):
pass
def _entries_to_apientries(self):
"""
Iter over all entries in the response.
Each entry in the response is converted to a `Api<Provider>Entry` instance.
"""
entries_list = self._extract_entries_list()
def _lpop():
"""
Pop from the head of the list.
Convert the item to `Api<Provider>Entry`.
"""
while True:
try:
entry = entries_list.pop(0) # Raise IndexError when completely consumed.
entry = self._init_api_provider_entry(entry)
return entry
except IndexError:
# `self.response` is empty, return None to stop the iter.
return None
except EntryNotToBeIndexed:
# The entry is probably a dir or not a textual file and we don't need to
# index it
continue
except InconsistentItemError as e:
# The entry is not consistent, like some important metadata are missing,
# we just skip it
# TODO log it anyway
continue
# The first argument of iter must be a callable, that's why we created the _lpop()
# closure. This closure will be called for each iteration and the result is returned
# until the result is None.
return iter(_lpop, None)
def _extract_entries_list(self):
return self.response.json()
@abstractmethod
def _init_api_provider_entry(self, *args, **kwargs):
pass | apache-2.0 | 3,897,381,863,946,606,000 | 30.304348 | 95 | 0.571548 | false |
pdsteele/DES-Python | rvms.py | 1 | 20759 |
# -------------------------------------------------------------------------
# * This is an ANSI C library that can be used to evaluate the probability
# * density functions (pdf's), cumulative distribution functions (cdf's), and
# * inverse distribution functions (idf's) for a variety of discrete and
# * continuous random variables.
# *
# * The following notational conventions are used
# * x : possible value of the random variable
# * u : real variable (probability) between 0.0 and 1.0
# * a, b, n, p, m, s : distribution-specific parameters
# *
# * There are pdf's, cdf's and idf's for 6 discrete random variables
# *
# * Random Variable Range (x) Mean Variance
# *
# * Bernoulli(p) 0..1 p p*(1-p)
# * Binomial(n, p) 0..n n*p n*p*(1-p)
# * Equilikely(a, b) a..b (a+b)/2 ((b-a+1)*(b-a+1)-1)/12
# * Geometric(p) 0... p/(1-p) p/((1-p)*(1-p))
# * Pascal(n, p) 0... n*p/(1-p) n*p/((1-p)*(1-p))
# * Poisson(m) 0... m m
# *
# * and for 7 continuous random variables
# *
# * Uniform(a, b) a < x < b (a+b)/2 (b-a)*(b-a)/12
# * Exponential(m) x > 0 m m*m
# * Erlang(n, b) x > 0 n*b n*b*b
# * Normal(m, s) all x m s*s
# * Lognormal(a, b) x > 0 see below
# * Chisquare(n) x > 0 n 2*n
# * Student(n) all x 0 (n > 1) n/(n-2) (n > 2)
# *
# * For the Lognormal(a, b), the mean and variance are
# *
# * mean = Exp(a + 0.5*b*b)
# *        variance = (Exp(b*b) - 1)*Exp(2*a + b*b)
# *
# * Name : rvms.c (Random Variable ModelS)
# * Author : Steve Park & Dave Geyer
# * Language : ANSI C
# * Latest Revision : 11-22-97
# Translated by : Philip Steele
# Language : Python 3.3
# Latest Revision : 3/26/14
# * -------------------------------------------------------------------------
from math import exp, log, fabs, sqrt
#from rvgs import
TINY= 1.0e-10
SQRT2PI = 2.506628274631  # sqrt(2 * pi)
# static double pdfStandard(x)
# static double cdfStandard(x)
# static double idfStandard(u)
# static double LogGamma(a)
# static double LogBeta(a, b)
# static double InGamma(a, b)
# static double InBeta(a, b, x)
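# Illustrative usage (added for clarity; expected values taken from the
# verification notes at the end of this file):
#   idfStudent(10, 0.975)  -> ~2.228
#   idfChisquare(10, 0.5)  -> ~9.342
#   idfNormal(9, 2, 0.66)  -> ~9.825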
def pdfBernoulli(p,x):
# =======================================
# * NOTE: use 0.0 < p < 1.0 and 0 <= x <= 1
# * =======================================
if(x==0):
return (1.0-p)
else:
return (p)
def cdfBernoulli(p,x):
# =======================================
# * NOTE: use 0.0 < p < 1.0 and 0 <= x <= 1
# * =======================================
if(x==0):
return (1.0-p)
else:
return (1)
def idfBernoulli(p,u):
# =========================================
# * NOTE: use 0.0 < p < 1.0 and 0.0 < u < 1.0
# * =========================================
if (u < 1.0 - p):
return(0)
else:
return(1)
def pdfEquilikely(a,b,x):
# ============================================
# * NOTE: use a <= x <= b
# * ============================================
return (1.0 / (b - a + 1.0))
def cdfEquilikely(a,b,x):
# ============================================
# * NOTE: use a <= x <= b
# * ============================================
return ((x - a + 1.0) / (b - a + 1.0))
def idfEquilikely(a,b,u):
# ============================================
# * NOTE: use a <= b and 0.0 < u < 1.0
# * ============================================
#LIKELY NEEDS TEST
return (a + int(u * (b - a + 1)))
def pdfBinomial(n,p,x):
# ============================================
# * NOTE: use 0 <= x <= n and 0.0 < p < 1.0
# * ============================================
# TEST
s = LogChoose(n, x)
t = x * log(p) + (n - x) * log(1.0 - p)
return (exp(s + t))
def cdfBinomial(n,p,x):
# ============================================
# * NOTE: use 0 <= x <= n and 0.0 < p < 1.0
# * ============================================
if (x < n):
return (1.0 - InBeta(x + 1, n - x, p))
else:
return (1.0)
def idfBinomial(n,p,u):
# =================================================
# * NOTE: use 0 <= n, 0.0 < p < 1.0 and 0.0 < u < 1.0
# * =================================================
x = int(n * p) #/* start searching at the mean */
if (cdfBinomial(n, p, x) <= u):
while (cdfBinomial(n, p, x) <= u):
x += 1
elif (cdfBinomial(n, p, 0) <= u):
while (cdfBinomial(n, p, x - 1) > u):
x -= 1
else:
x = 0
return (x)
def pdfGeometric(p,x):
# =====================================
# * NOTE: use 0.0 < p < 1.0 and x >= 0
# * =====================================
return ((1.0 - p) * exp(x * log(p)))
def cdfGeometric(p,x):
# =====================================
# * NOTE: use 0.0 < p < 1.0 and x >= 0
# * =====================================
return (1.0 - exp((x + 1) * log(p)))
def idfGeometric(p,u):
# =========================================
# * NOTE: use 0.0 < p < 1.0 and 0.0 < u < 1.0
# * =========================================
return (int(log(1.0 - u) / log(p)))
def pdfPascal(n,p,x):
# ===========================================
# * NOTE: use n >= 1, 0.0 < p < 1.0, and x >= 0
# * ===========================================
s = LogChoose(n + x - 1, x)
t = x * log(p) + n * log(1.0 - p)
return (exp(s + t))
def cdfPascal(n,p,x):
# ===========================================
# * NOTE: use n >= 1, 0.0 < p < 1.0, and x >= 0
# * ===========================================
return (1.0 - InBeta(x + 1, n, p))
def idfPascal(n,p,u):
# ==================================================
# * NOTE: use n >= 1, 0.0 < p < 1.0, and 0.0 < u < 1.0
# * ==================================================
x = int(n * p / (1.0 - p)) #/* start searching at the mean */
if (cdfPascal(n, p, x) <= u):
while (cdfPascal(n, p, x) <= u):
x += 1
elif (cdfPascal(n, p, 0) <= u):
while (cdfPascal(n, p, x - 1) > u):
x -= 1
else:
x = 0
return (x)
def pdfPoisson(m,x):
# ===================================
# * NOTE: use m > 0 and x >= 0
# * ===================================
t = - m + x * log(m) - LogFactorial(x)
return (exp(t))
def cdfPoisson(m,x):
# ===================================
# * NOTE: use m > 0 and x >= 0
# * ===================================
return (1.0 - InGamma(x + 1, m))
def idfPoisson(m,u):
# ===================================
# * NOTE: use m > 0 and 0.0 < u < 1.0
# * ===================================
x = int(m) #/* start searching at the mean */
if (cdfPoisson(m, x) <= u):
while (cdfPoisson(m, x) <= u):
x += 1
elif (cdfPoisson(m, 0) <= u):
while (cdfPoisson(m, x - 1) > u):
x -= 1
else:
x = 0
return (x)
def pdfUniform(a, b, x):
# ===============================================
# * NOTE: use a < x < b
# * ===============================================
return (1.0 / (b - a))
def cdfUniform(a, b, x):
# ===============================================
# * NOTE: use a < x < b
# * ===============================================
return ((x - a) / (b - a))
def idfUniform(a, b, u):
# ===============================================
# * NOTE: use a < b and 0.0 < u < 1.0
# * ===============================================
return (a + (b - a) * u)
def pdfExponential(m, x):
# =========================================
# * NOTE: use m > 0 and x > 0
# * =========================================
return ((1.0 / m) * exp(- x / m))
def cdfExponential(m, x):
# =========================================
# * NOTE: use m > 0 and x > 0
# * =========================================
return (1.0 - exp(- x / m))
def idfExponential(m, u):
# =========================================
# * NOTE: use m > 0 and 0.0 < u < 1.0
# * =========================================
return (- m * log(1.0 - u))
def pdfErlang(n, b, x):
# ============================================
# * NOTE: use n >= 1, b > 0, and x > 0
# * ============================================
t = (n - 1) * log(x / b) - (x / b) - log(b) - LogGamma(n)
return (exp(t))
def cdfErlang(n, b, x):
# ============================================
# * NOTE: use n >= 1, b > 0, and x > 0
# * ============================================
return (InGamma(n, x / b))
def idfErlang(n, b, u):
# ============================================
# * NOTE: use n >= 1, b > 0 and 0.0 < u < 1.0
# * ============================================
x = n*b
condition = True
while(condition == True): #/* use Newton-Raphson iteration */
t = x
x = t + (u - cdfErlang(n, b, t)) / pdfErlang(n, b, t)
if (x <= 0.0):
x = 0.5 * t
condition = (fabs(x - t) >= TINY)
return (x)
def pdfStandard(x):
# ===================================
# * NOTE: x can be any value
# * ===================================
return (exp(- 0.5 * x * x) / SQRT2PI)
def cdfStandard(x):
# ===================================
# * NOTE: x can be any value
# * ===================================
t = InGamma(0.5, 0.5 * x * x)
if (x < 0.0):
return (0.5 * (1.0 - t))
else:
return (0.5 * (1.0 + t))
def idfStandard(u):
# ===================================
# * NOTE: 0.0 < u < 1.0
# * ===================================
t = 0.0
x = 0.0 #/* initialize to the mean, then */
condition = True
while(condition == True): #/* use Newton-Raphson iteration */
t = x
x = t + (u - cdfStandard(t)) / pdfStandard(t)
condition = (fabs(x - t) >= TINY)
return (x)
def pdfNormal(m, s, x):
# ==============================================
# * NOTE: x and m can be any value, but s > 0.0
# * =============================================
t = (x - m) / s
return (pdfStandard(t) / s)
def cdfNormal(m, s, x):
# ==============================================
# * NOTE: x and m can be any value, but s > 0.0
# * ==============================================
t = (x - m) / s
return (cdfStandard(t))
def idfNormal(m, s, u):
# =======================================================
# * NOTE: m can be any value, but s > 0.0 and 0.0 < u < 1.0
# * =======================================================
return (m + s * idfStandard(u))
def pdfLognormal(a, b, x):
# ===================================================
# * NOTE: a can have any value, but b > 0.0 and x > 0.0
# * ===================================================
t = (log(x) - a) / b
return (pdfStandard(t) / (b * x))
def cdfLognormal(a, b, x):
# ===================================================
# * NOTE: a can have any value, but b > 0.0 and x > 0.0
# * ===================================================
t = (log(x) - a) / b
return (cdfStandard(t))
def idfLognormal(a, b, u):
# =========================================================
# * NOTE: a can have any value, but b > 0.0 and 0.0 < u < 1.0
# * =========================================================
t = a + b * idfStandard(u)
return (exp(t))
def pdfChisquare(n, x):
# =====================================
# * NOTE: use n >= 1 and x > 0.0
# * =====================================
s = n / 2.0
t = (s - 1.0) * log(x / 2.0) - (x / 2.0) - log(2.0) - LogGamma(s)
return (exp(t))
def cdfChisquare(n, x):
# =====================================
# * NOTE: use n >= 1 and x > 0.0
# * ====================================
return (InGamma(n / 2.0, x / 2))
def idfChisquare(n, u):
# =====================================
# * NOTE: use n >= 1 and 0.0 < u < 1.0
# * =====================================
x = n #/* initialize to the mean, then */
condition = True
while(condition == True): #/* use Newton-Raphson iteration */
t = x
x = t + (u - cdfChisquare(n, t)) / pdfChisquare(n, t)
if (x <= 0.0):
x = 0.5 * t
condition = (fabs(x - t) >= TINY)
return (x)
def pdfStudent(n, x):
# ===================================
# * NOTE: use n >= 1 and x > 0.0
# * ===================================
s = -0.5 * (n + 1) * log(1.0 + ((x * x) / float(n)))
t = -1*LogBeta(0.5, n / 2.0)
return (exp(s + t) / sqrt(float(n)))
def cdfStudent(n, x):
# ===================================
# * NOTE: use n >= 1 and x > 0.0
# * ===================================
t = (x * x) / (n + x * x)
s = InBeta(0.5, n / 2.0, t)
if (x >= 0.0):
return (0.5 * (1.0 + s))
else:
return (0.5 * (1.0 - s))
def idfStudent(n, u):
# ===================================
# * NOTE: use n >= 1 and 0.0 < u < 1.0
# * ===================================
t = 0.0
x = 0.0 #/* initialize to the mean, then */
condition = True
while(condition == True): #/* use Newton-Raphson iteration */
t = x
# print("t is set to "+ t)
x = t + (u - cdfStudent(n, t)) / pdfStudent(n, t)
# print("x is set to "+x)
# print(fabs(x-t))
condition = (fabs(x - t) >= TINY)
return (x)
# ===================================================================
# * The six functions that follow are a 'special function' mini-library
# * used to support the evaluation of pdf, cdf and idf functions.
# * ===================================================================
def LogGamma(a):
# ========================================================================
# * LogGamma returns the natural log of the gamma function.
# * NOTE: use a > 0.0
# *
# * The algorithm used to evaluate the natural log of the gamma function is
# * based on an approximation by C. Lanczos, SIAM J. Numerical Analysis, B,
# * vol 1, 1964. The constants have been selected to yield a relative error
# * which is less than 2.0e-10 for all positive values of the parameter a.
# * ========================================================================
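# For example, LogGamma(10) should be close to log(9!) = log(362880) ~ 12.802.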
s = []
s.append(76.180091729406 / a)
s.append(-86.505320327112 / (a + 1.0))
s.append(24.014098222230 / (a + 2.0))
s.append(-1.231739516140 / (a + 3.0))
s.append(0.001208580030 / (a + 4.0))
s.append(-0.000005363820 / (a + 5.0))
sum = 1.000000000178
for i in range(0,6):
sum += s[i]
temp = (a - 0.5) * log(a + 4.5) - (a + 4.5) + log(SQRT2PI * sum)
return (temp)
def LogFactorial(n):
# ==================================================================
# * LogFactorial(n) returns the natural log of n!
# * NOTE: use n >= 0
# *
# * The algorithm used to evaluate the natural log of n! is based on a
# * simple equation which relates the gamma and factorial functions.
# * ==================================================================
return (LogGamma(n + 1))
def LogBeta(a,b):
# ======================================================================
# * LogBeta returns the natural log of the beta function.
# * NOTE: use a > 0.0 and b > 0.0
# *
# * The algorithm used to evaluate the natural log of the beta function is
# * based on a simple equation which relates the gamma and beta functions.
# *
return (LogGamma(a) + LogGamma(b) - LogGamma(a + b))
def LogChoose(n,m):
# ========================================================================
# * LogChoose returns the natural log of the binomial coefficient C(n,m).
# * NOTE: use 0 <= m <= n
# *
# * The algorithm used to evaluate the natural log of a binomial coefficient
# * is based on a simple equation which relates the beta function to a
# * binomial coefficient.
# * ========================================================================
if (m > 0):
return (-LogBeta(m, n - m + 1) - log(m))
else:
return (0.0)
def InGamma(a,x):
# ========================================================================
# * Evaluates the incomplete gamma function.
# * NOTE: use a > 0.0 and x >= 0.0
# *
# * The algorithm used to evaluate the incomplete gamma function is based on
# * Algorithm AS 32, J. Applied Statistics, 1970, by G. P. Bhattacharjee.
# * See also equations 6.5.29 and 6.5.31 in the Handbook of Mathematical
# * Functions, Abramowitz and Stegum (editors). The absolute error is less
# * than 1e-10 for all non-negative values of x.
# * ========================================================================
if (x > 0.0):
factor = exp(-1*x + a*log(x) - LogGamma(a))
else:
factor = 0.0
if (x < a + 1.0): ##/* evaluate as an infinite series - */
t = a ##/* A & S equation 6.5.29 */
term = 1.0 / a
sum = term
while (term >= TINY * sum): ##/* sum until 'term' is small */
t += 1
term = term*(x / t)
sum += term
#EndWhile
return (factor * sum)
else: ##/* evaluate as a continued fraction - */
p = [0.0,1.0, -1] ##/* A & S eqn 6.5.31 with the extended */
q = [1.0,x, -1] ##/* pattern 2-a, 2, 3-a, 3, 4-a, 4,... */
##/* - see also A & S sec 3.10, eqn (3) */
f = p[1] / q[1]
n = 0
condition = True
while(condition == True): ##/* recursively generate the continued */
g = f ##/* fraction 'f' until two consecutive */
n += 1 ##/* values are small */
if ((n % 2) > 0):
c=[(((n + 1) / 2.0) - a), 1]
else:
c=[(n / 2.0),x]
p[2] = (c[1] * p[1] + c[0] * p[0])
q[2] = (c[1] * q[1] + c[0] * q[0])
if (q[2] != 0.0): ##/* rescale to avoid overflow */
p[0] = p[1] / q[2]
q[0] = q[1] / q[2]
p[1] = p[2] / q[2]
q[1] = 1.0
f = p[1]
condition = (fabs(f - g) >= TINY) or (q[1] != 1.0)
return (1.0 - factor * f)
def InBeta(a,b,x):
# =======================================================================
# * Evaluates the incomplete beta function.
# * NOTE: use a > 0.0, b > 0.0 and 0.0 <= x <= 1.0
# *
# * The algorithm used to evaluate the incomplete beta function is based on
# * equation 26.5.8 in the Handbook of Mathematical Functions, Abramowitz
# * and Stegum (editors). The absolute error is less than 1e-10 for all x
# * between 0 and 1.
# * =======================================================================
if (x > (a + 1.0) / (a + b + 1.0)): # #/* to accelerate convergence */
swap = 1 ##/* complement x and swap a & b */
x = 1.0 - x
t = a
a = b
b = t
else: ##/* do nothing */
swap = 0
if (x > 0):
factor = exp(a * log(x) + b * log(1.0 - x) - LogBeta(a,b)) / a
else:
factor = 0.0
p = [0.0,1.0, -1]
q = [1.0,1.0, -1]
f = p[1] / q[1]
n = 0
condition = True
while (condition==True): ##/* recursively generate the continued */
g = f ##/* fraction 'f' until two consecutive */
n += 1 ##/* values are small */
if ((n % 2) > 0):
t = (n - 1) / 2.0
c = -(a + t) * (a + b + t) * x / ((a + n - 1.0) * (a + n))
else:
t = n / 2.0
c = t * (b - t) * x / ((a + n - 1.0) * (a + n))
p[2] = (p[1] + c * p[0])
q[2] = (q[1] + c * q[0])
if (q[2] != 0.0): ##/* rescale to avoid overflow */
p[0] = p[1] / q[2]
q[0] = q[1] / q[2]
p[1] = p[2] / q[2]
q[1] = 1.0
f = p[1]
condition = ((fabs(f - g) >= TINY) or (q[1] != 1.0))
#endWhile
if (swap == 1):
return (1.0 - factor * f)
else:
return (factor * f)
# C output:
# IDFSTU(10,.8) is 0.879058 - PASS
# IDFStud(10,.975) is 2.228139 - PASS
# IDFStud(100,.975) is 1.983972 - PASS
# IDFchisq(10,.5) is 9.341818 - PASS
# IDFchisq(15,.8) is 19.310657 - PASS
# IDFerlang(16,4,.878) is 82.934761 - PASS
# IDFerlang(20,7,.113) is 103.476309 - PASS
# IDFpoisson(16,.878) is 21.000000 - PASS
# IDFpoisson(19,.231) is 16.000000 - PASS
# IDFNorm(9,2,.66) is 9.824926 - PASS
# IDFNorm(-19,3.4,.81) is -16.015153 - PASS
# idfPascal(23,.11,.90) is 5.000000 - PASS
# idfPascal(6,.5,.5) is 6.000000 - PASS
# idfBinomial(23,.11,.90) is 5.000000 - PASS
# idfBinomial(6,.5,.5) is 3.000000 - PASS | mit | 617,795,352,525,077,500 | 28.446809 | 86 | 0.366877 | false |
derekjamescurtis/veritranspay | tests/response_virtualaccount_charge_tests.py | 1 | 5544 | from unittest import TestCase
from veritranspay.response.response import VirtualAccountBniChargeResponse, VirtualAccountPermataChargeResponse, \
VirtualAccountBcaChargeResponse, VirtualAccountMandiriChargeResponse
class VirtualAccountPermataChargeResponseTests(TestCase):
"""
https://api-docs.midtrans.com/#permata-virtual-account
"""
def setUp(self):
# example response data from
# https://api-docs.midtrans.com/#permata-virtual-account
self.response_json = {
"status_code": "201",
"status_message": "Success, PERMATA VA transaction is successful",
"transaction_id": "6fd88567-62da-43ff-8fe6-5717e430ffc7",
"order_id": "H17550",
"gross_amount": "145000.00",
"payment_type": "bank_transfer",
"transaction_time": "2016-06-19 13:42:29",
"transaction_status": "pending",
"fraud_status": "accept",
"permata_va_number": "8562000087926752"
}
self.parsed_response = VirtualAccountPermataChargeResponse(**self.response_json)
def test_status_code(self):
self.assertEqual(201, self.parsed_response.status_code)
def test_payment_type(self):
self.assertEqual('bank_transfer', self.parsed_response.payment_type)
def test_payment_code(self):
self.assertEqual('8562000087926752', self.parsed_response.permata_va_number)
class VirtualAccountBcaChargeResponseTests(TestCase):
"""
https://api-docs.midtrans.com/#bca-virtual-account
"""
def setUp(self):
# example response data from
# https://api-docs.midtrans.com/#bca-virtual-account
self.response_json = {
"status_code": "201",
"status_message": "Success, Bank Transfer transaction is created",
"transaction_id": "9aed5972-5b6a-401e-894b-a32c91ed1a3a",
"order_id": "1466323342",
"gross_amount": "20000.00",
"payment_type": "bank_transfer",
"transaction_time": "2016-06-19 15:02:22",
"transaction_status": "pending",
"va_numbers": [
{
"bank": "bca",
"va_number": "91019021579"
}
],
"fraud_status": "accept"
}
self.parsed_response = VirtualAccountBcaChargeResponse(**self.response_json)
def test_status_code(self):
self.assertEqual(201, self.parsed_response.status_code)
def test_payment_type(self):
self.assertEqual('bank_transfer', self.parsed_response.payment_type)
def test_payment_bank(self):
self.assertEqual('bca', self.parsed_response.va_numbers[0]['bank'])
def test_payment_vanumber(self):
self.assertEqual('91019021579', self.parsed_response.va_numbers[0]['va_number'])
class VirtualAccountBniChargeResponseTests(TestCase):
def setUp(self):
# example response data from
# https://api-docs.midtrans.com/#bni-virtual-account
self.response_json = {
"status_code": "201",
"status_message": "Success, Bank Transfer transaction is created",
"transaction_id": "9aed5972-5b6a-401e-894b-a32c91ed1a3a",
"order_id": "1466323342",
"gross_amount": "20000.00",
"payment_type": "bank_transfer",
"transaction_time": "2016-06-19 15:02:22",
"transaction_status": "pending",
"va_numbers": [
{
"bank": "bni",
"va_number": "8578000000111111"
}
],
"fraud_status": "accept"
}
self.parsed_response = VirtualAccountBniChargeResponse(**self.response_json)
def test_status_code(self):
self.assertEqual(201, self.parsed_response.status_code)
def test_payment_type(self):
self.assertEqual('bank_transfer', self.parsed_response.payment_type)
def test_payment_bank(self):
self.assertEqual('Bni', self.parsed_response.bank)
def test_payment_vabank(self):
self.assertEqual('bni', self.parsed_response.va_numbers[0]['bank'])
def test_payment_vanumber(self):
self.assertEqual('8578000000111111', self.parsed_response.va_numbers[0]['va_number'])
class VirtualAccountMandiriChargeResponseTests(TestCase):
def setUp(self):
# example response data from
# https://api-docs.midtrans.com/#mandiri-bill-payment
self.response_json = {
"status_code": "201",
"status_message": "Success, Mandiri Bill transaction is successful",
"transaction_id": "883af6a4-c1b4-4d39-9bd8-b148fcebe853",
"order_id": "tes",
"gross_amount": "1000.00",
"payment_type": "echannel",
"transaction_time": "2016-06-19 14:40:19",
"transaction_status": "pending",
"fraud_status": "accept",
"bill_key": "990000000260",
"biller_code": "70012"
}
self.parsed_response = VirtualAccountMandiriChargeResponse(**self.response_json)
def test_status_code(self):
self.assertEqual(201, self.parsed_response.status_code)
def test_payment_type(self):
self.assertEqual('echannel', self.parsed_response.payment_type)
def test_payment_bank(self):
self.assertEqual('990000000260', self.parsed_response.bill_key)
def test_payment_vanumber(self):
self.assertEqual('70012', self.parsed_response.biller_code)
| bsd-3-clause | 169,590,069,608,813,860 | 36.208054 | 114 | 0.613095 | false |
NicholasColotouros/RaspiCorder | RaspiCorder/Menus.py | 1 | 2928 | #!/usr/bin/python
from time import sleep
from Adafruit_CharLCDPlate import Adafruit_CharLCDPlate
class Instrument:
drums = 1
guitar = 2
bass = 3
other = 4
@staticmethod
def instrumentName(num):
if num == 1:
return "drums"
elif num == 2:
return "guitar"
elif num == 3:
return "bass"
else:
return "other"
class ConfirmationMenu:
menuText = None
selected = None
lcd = None
def __init__(self, plcd, instrument):
self.menuText = " START REC " + instrument + "\n RESELECT instr"
self.lcd = plcd
self.selected = 0
def InstrumentConfirm(self):
lcd = self.lcd
lcd.clear()
lcd.message(self.menuText)
lcd.blink()
while True:
lcd.setCursor(0, self.selected)
if lcd.buttonPressed(lcd.UP):
self.selected = 0
elif lcd.buttonPressed(lcd.DOWN):
self.selected = 1
elif lcd.buttonPressed(lcd.SELECT):
lcd.noBlink()
if self.selected == 1:
return False
else:
return True
class InstrumentMenu:
instrumentSelection = " Drums Bass\n Guitar Other"
selected = 1
delayTime = 0.5 # The time it takes to look for another button press
def __init__(self):
self.selected = Instrument.drums
self.delayTime = 0.5
def updateCursor(self, lcd):
if self.selected == Instrument.drums:
lcd.setCursor(0,0)
elif self.selected == Instrument.guitar:
lcd.setCursor(0,1)
elif self.selected == Instrument.bass:
lcd.setCursor(10,0)
else:
lcd.setCursor(10,1)
def getInstrumentInput(self, lcd):
lcd.clear()
lcd.message(self.instrumentSelection)
lcd.blink()
while True:
self.updateCursor(lcd)
# Move left
if lcd.buttonPressed(lcd.LEFT):
if self.selected == Instrument.bass:
self.selected = Instrument.drums
sleep(self.delayTime)
elif self.selected == Instrument.other:
self.selected = Instrument.guitar
sleep(self.delayTime)
# Move right
elif lcd.buttonPressed(lcd.RIGHT):
if self.selected == Instrument.drums:
self.selected = Instrument.bass
sleep(self.delayTime)
elif self.selected == Instrument.guitar:
self.selected = Instrument.other
sleep(self.delayTime)
# Move up
elif lcd.buttonPressed(lcd.UP):
if self.selected == Instrument.guitar:
self.selected = Instrument.drums
sleep(self.delayTime)
elif self.selected == Instrument.other:
self.selected = Instrument.bass
sleep(self.delayTime)
# Move down
elif lcd.buttonPressed(lcd.DOWN):
if self.selected == Instrument.drums:
self.selected = Instrument.guitar
sleep(self.delayTime)
elif self.selected == Instrument.bass:
self.selected = Instrument.other
sleep(self.delayTime)
# Select the current entry
elif lcd.buttonPressed(lcd.SELECT):
lcd.noBlink()
return self.selected | gpl-2.0 | 9,203,748,813,401,811,000 | 22.246032 | 74 | 0.644467 | false |
ppwwyyxx/tensorpack | examples/DoReFa-Net/resnet-dorefa.py | 1 | 6148 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: resnet-dorefa.py
import argparse
import numpy as np
import os
import cv2
import tensorflow as tf
from tensorpack import *
from tensorpack.dataflow import dataset
from tensorpack.tfutils.varreplace import remap_variables
from dorefa import get_dorefa
from imagenet_utils import ImageNetModel, eval_classification, fbresnet_augmentor
"""
This script loads the pre-trained ResNet-18 model with (W,A,G) = (1,4,32)
It has 59.2% top-1 and 81.5% top-5 validation error on ILSVRC12 validation set.
To run on images:
./resnet-dorefa.py --load ResNet-18-14f.npz --run a.jpg b.jpg
To eval on ILSVRC validation set:
./resnet-dorefa.py --load ResNet-18-14f.npz --eval --data /path/to/ILSVRC
"""
BITW = 1
BITA = 4
BITG = 32
class Model(ModelDesc):
def inputs(self):
return [tf.TensorSpec([None, 224, 224, 3], tf.float32, 'input'),
tf.TensorSpec([None], tf.int32, 'label')]
def build_graph(self, image, label):
image = image / 256.0
fw, fa, fg = get_dorefa(BITW, BITA, BITG)
def new_get_variable(v):
name = v.op.name
# don't binarize first and last layer
if not name.endswith('W') or 'conv1' in name or 'fct' in name:
return v
else:
logger.info("Binarizing weight {}".format(v.op.name))
return fw(v)
def nonlin(x):
return tf.clip_by_value(x, 0.0, 1.0)
def activate(x):
return fa(nonlin(x))
def resblock(x, channel, stride):
def get_stem_full(x):
return (LinearWrap(x)
.Conv2D('c3x3a', channel, 3)
.BatchNorm('stembn')
.apply(activate)
.Conv2D('c3x3b', channel, 3)())
channel_mismatch = channel != x.get_shape().as_list()[3]
if stride != 1 or channel_mismatch or 'pool1' in x.name:
# handling pool1 is to work around an architecture bug in our model
if stride != 1 or 'pool1' in x.name:
x = AvgPooling('pool', x, stride, stride)
x = BatchNorm('bn', x)
x = activate(x)
shortcut = Conv2D('shortcut', x, channel, 1)
stem = get_stem_full(x)
else:
shortcut = x
x = BatchNorm('bn', x)
x = activate(x)
stem = get_stem_full(x)
return shortcut + stem
def group(x, name, channel, nr_block, stride):
with tf.variable_scope(name + 'blk1'):
x = resblock(x, channel, stride)
for i in range(2, nr_block + 1):
with tf.variable_scope(name + 'blk{}'.format(i)):
x = resblock(x, channel, 1)
return x
with remap_variables(new_get_variable), \
argscope(BatchNorm, decay=0.9, epsilon=1e-4), \
argscope(Conv2D, use_bias=False, nl=tf.identity):
logits = (LinearWrap(image)
# use explicit padding here, because our private training framework has
# different padding mechanisms from TensorFlow
.tf.pad([[0, 0], [3, 2], [3, 2], [0, 0]])
.Conv2D('conv1', 64, 7, stride=2, padding='VALID', use_bias=True)
.tf.pad([[0, 0], [1, 1], [1, 1], [0, 0]], 'SYMMETRIC')
.MaxPooling('pool1', 3, 2, padding='VALID')
.apply(group, 'conv2', 64, 2, 1)
.apply(group, 'conv3', 128, 2, 2)
.apply(group, 'conv4', 256, 2, 2)
.apply(group, 'conv5', 512, 2, 2)
.BatchNorm('lastbn')
.apply(nonlin)
.GlobalAvgPooling('gap')
.tf.multiply(49) # this is due to a bug in our model design
.FullyConnected('fct', 1000)())
tf.nn.softmax(logits, name='output')
ImageNetModel.compute_loss_and_error(logits, label)
def get_inference_augmentor():
return fbresnet_augmentor(False)
def run_image(model, sess_init, inputs):
pred_config = PredictConfig(
model=model,
session_init=sess_init,
input_names=['input'],
output_names=['output']
)
predict_func = OfflinePredictor(pred_config)
meta = dataset.ILSVRCMeta()
words = meta.get_synset_words_1000()
transformers = get_inference_augmentor()
for f in inputs:
assert os.path.isfile(f)
img = cv2.imread(f).astype('float32')
assert img is not None
img = transformers.augment(img)[np.newaxis, :, :, :]
o = predict_func(img)
prob = o[0][0]
ret = prob.argsort()[-10:][::-1]
names = [words[i] for i in ret]
print(f + ":")
print(list(zip(names, prob[ret])))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='the physical ids of GPUs to use')
parser.add_argument('--load', help='load a npz pretrained model')
parser.add_argument('--data', help='ILSVRC dataset dir')
parser.add_argument('--dorefa',
help='number of bits for W,A,G, separated by comma. Defaults to \'1,4,32\'',
default='1,4,32')
parser.add_argument(
'--run', help='run on a list of images with the pretrained model', nargs='*')
parser.add_argument('--eval', action='store_true')
args = parser.parse_args()
BITW, BITA, BITG = map(int, args.dorefa.split(','))
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.eval:
ds = dataset.ILSVRC12(args.data, 'val', shuffle=False)
ds = AugmentImageComponent(ds, get_inference_augmentor())
ds = BatchData(ds, 192, remainder=True)
eval_classification(Model(), SmartInit(args.load), ds)
elif args.run:
assert args.load.endswith('.npz')
run_image(Model(), SmartInit(args.load), args.run)
| apache-2.0 | 7,932,895,381,036,762,000 | 35.378698 | 100 | 0.544242 | false |
ani2404/ee6761cloud | inference.py | 1 | 1332 | # Build the model, restore the variables and run the inference
# Need to use SavedModel builder and loader instead - future work
import sys
sys.path.append('/home/ani2404/Desktop/ee6761cloud/')
import numpy as np
#Need to replace with the actual model
from code_ref.model import Model
class infer(object):
def __init__(self,session,checkpoint_dir,image_size_x,image_size_y,resolution_factor=4,batch_size=1):
#Build the model based on resolution factor
self.session = session
self.model = Model(session, checkpoint_dir=checkpoint_dir,batch_size=batch_size,
image_size_x=image_size_x,image_size_y=image_size_y,resolution_factor=resolution_factor)
self.resolution_factor = resolution_factor
# Restores the variables from the checkpoint dir
if self.model.load(checkpoint_dir):
print(" [*] Load SUCCESS")
else:
print(" [*] Load Failed")
def super_resolute(self,input_image):
# Super resolutes the input image
output_images,up_input = self.session.run([self.model.ESCNN,self.model.interpolation],
feed_dict={self.model.inputs:input_image})
output_images = np.array(output_images).astype(np.float32)
return output_images,up_input
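# Illustrative usage (added as a sketch; the session, checkpoint directory and
# image sizes below are placeholders, not values from this project):
#   sess = tf.Session()
#   engine = infer(sess, checkpoint_dir='./checkpoints', image_size_x=64, image_size_y=64)
#   sr_batch, upscaled_input = engine.super_resolute(input_batch)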
| mit | 3,867,639,695,548,613,600 | 27.340426 | 115 | 0.660661 | false |
hzlf/openbroadcast | website/apps/alibrary/migrations/0100_auto__del_field_distributor_email_main__add_field_distributor_email.py | 1 | 53909 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Distributor.email_main'
db.delete_column('alibrary_distributor', 'email_main')
# Adding field 'Distributor.email'
db.add_column('alibrary_distributor', 'email',
self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'Distributor.email_main'
db.add_column('alibrary_distributor', 'email_main',
self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True),
keep_default=False)
# Deleting field 'Distributor.email'
db.delete_column('alibrary_distributor', 'email')
models = {
'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': "orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'alibrary.apilookup': {
'Meta': {'ordering': "('created',)", 'object_name': 'APILookup'},
'api_data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'processed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'provider': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50'}),
'ressource_id': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'alibrary.artist': {
'Meta': {'ordering': "('name',)", 'object_name': 'Artist'},
'aliases': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'aliases_rel_+'", 'null': 'True', 'to': "orm['alibrary.Artist']"}),
'biography': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artists_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'disable_editing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disable_link': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disambiguation': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_folder'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'listed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_main_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['alibrary.Artist']", 'through': "orm['alibrary.ArtistMembership']", 'symmetrical': 'False'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artists_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'placeholder_1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'professions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['alibrary.Profession']", 'through': "orm['alibrary.ArtistProfessions']", 'symmetrical': 'False'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artists_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.artistmembership': {
'Meta': {'object_name': 'ArtistMembership'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'artist_child'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'artist_parent'", 'to': "orm['alibrary.Artist']"}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_membership_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"})
},
'alibrary.artistplugin': {
'Meta': {'object_name': 'ArtistPlugin', 'db_table': "'cmsplugin_artistplugin'", '_ormbases': ['cms.CMSPlugin']},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Artist']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'alibrary.artistprofessions': {
'Meta': {'object_name': 'ArtistProfessions'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Profession']"})
},
'alibrary.daypart': {
'Meta': {'ordering': "('day', 'time_start')", 'object_name': 'Daypart'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'day': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '1', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time_end': ('django.db.models.fields.TimeField', [], {}),
'time_start': ('django.db.models.fields.TimeField', [], {})
},
'alibrary.distributor': {
'Meta': {'ordering': "('name',)", 'object_name': 'Distributor'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'distributors_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'distributors_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'label_children'", 'null': 'True', 'to': "orm['alibrary.Distributor']"}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'distributors_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '12'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.format': {
'Meta': {'ordering': "('format', 'version')", 'object_name': 'Format'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default_price': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': "'base'", 'max_length': '10'})
},
'alibrary.label': {
'Meta': {'ordering': "('name',)", 'object_name': 'Label'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'labels_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disable_editing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disable_link': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_main': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'label_folder'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labelcode': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'listed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'labels_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'label_children'", 'null': 'True', 'to': "orm['alibrary.Label']"}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'labels_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '12'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.license': {
'Meta': {'ordering': "('name',)", 'object_name': 'License'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'license_children'", 'null': 'True', 'to': "orm['alibrary.License']"}),
'restricted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'405431c1-a180-4b22-8383-64392cf37ae9'", 'max_length': '36'})
},
'alibrary.licensetranslation': {
'Meta': {'ordering': "('language_code',)", 'unique_together': "(('language_code', 'master'),)", 'object_name': 'LicenseTranslation', 'db_table': "'alibrary_license_translation'"},
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '15', 'blank': 'True'}),
'license_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['alibrary.License']"}),
'name_translated': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'alibrary.media': {
'Meta': {'ordering': "('tracknumber',)", 'object_name': 'Media'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_artist'", 'null': 'True', 'to': "orm['alibrary.Artist']"}),
'base_bitrate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'base_duration': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'base_filesize': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'base_format': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'base_samplerate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'conversion_status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'echoprint_status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'extra_artists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.Artist']", 'null': 'True', 'through': "orm['alibrary.MediaExtraartists']", 'blank': 'True'}),
'folder': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isrc': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_license'", 'null': 'True', 'to': "orm['alibrary.License']"}),
'lock': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '1'}),
'master': ('django.db.models.fields.files.FileField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'master_sha1': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'mediatype': ('django.db.models.fields.CharField', [], {'default': "'track'", 'max_length': '12'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'processed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_release'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['alibrary.Release']"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'tracknumber': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.mediaextraartists': {
'Meta': {'ordering': "('profession__name', 'artist__name')", 'object_name': 'MediaExtraartists'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'extraartist_artist'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'extraartist_media'", 'to': "orm['alibrary.Media']"}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_extraartist_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"})
},
'alibrary.mediaformat': {
'Meta': {'ordering': "('name',)", 'object_name': 'Mediaformat'},
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_listing': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'alibrary.mediaplugin': {
'Meta': {'object_name': 'MediaPlugin', 'db_table': "'cmsplugin_mediaplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'headline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Media']"})
},
'alibrary.playlist': {
'Meta': {'ordering': "('-updated',)", 'object_name': 'Playlist'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'dayparts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'daypart_plalists'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['alibrary.Daypart']"}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '12', 'null': 'True'}),
'edit_mode': ('django.db.models.fields.PositiveIntegerField', [], {'default': '2'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.PlaylistItem']", 'null': 'True', 'through': "orm['alibrary.PlaylistItemPlaylist']", 'blank': 'True'}),
'main_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'seasons': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'season_plalists'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['alibrary.Season']"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'target_duration': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'other'", 'max_length': '12', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'weather': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'weather_plalists'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['alibrary.Weather']"})
},
'alibrary.playlistitem': {
'Meta': {'object_name': 'PlaylistItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.playlistitemplaylist': {
'Meta': {'object_name': 'PlaylistItemPlaylist'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'cue_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'cue_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_cross': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.PlaylistItem']"}),
'playlist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Playlist']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.playlistmedia': {
'Meta': {'object_name': 'PlaylistMedia'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'cue_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'cue_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_cross': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Media']"}),
'playlist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Playlist']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.profession': {
'Meta': {'ordering': "('name',)", 'object_name': 'Profession'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_listing': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'alibrary.relation': {
'Meta': {'ordering': "('url',)", 'object_name': 'Relation'},
'action': ('django.db.models.fields.CharField', [], {'default': "'information'", 'max_length': '50'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'service': ('django.db.models.fields.CharField', [], {'default': "'generic'", 'max_length': '50'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512'})
},
'alibrary.release': {
'Meta': {'ordering': "('-created',)", 'object_name': 'Release'},
'asin': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'catalognumber': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'cover_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_cover_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'releases_creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extra_artists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.Artist']", 'null': 'True', 'through': "orm['alibrary.ReleaseExtraartists']", 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_folder'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_label'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['alibrary.Label']"}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_license'", 'null': 'True', 'to': "orm['alibrary.License']"}),
'main_format': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Mediaformat']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_main_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'media': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'to': "orm['alibrary.Media']", 'through': "orm['alibrary.ReleaseMedia']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'releases_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'placeholder_1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'pressings': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'releases_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'release_country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'releasedate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'releasedate_approx': ('django_date_extensions.fields.ApproximateDateField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'releasestatus': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'releasetype': ('django.db.models.fields.CharField', [], {'default': "'other'", 'max_length': '12'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'totaltracks': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.releaseextraartists': {
'Meta': {'object_name': 'ReleaseExtraartists'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_extraartist_artist'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_extraartist_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_extraartist_release'", 'to': "orm['alibrary.Release']"})
},
'alibrary.releasemedia': {
'Meta': {'object_name': 'ReleaseMedia'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Media']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Release']"})
},
'alibrary.releaseplugin': {
'Meta': {'object_name': 'ReleasePlugin', 'db_table': "'cmsplugin_releaseplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Release']"})
},
'alibrary.releaserelations': {
'Meta': {'object_name': 'ReleaseRelations'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_relation_relation'", 'to': "orm['alibrary.Relation']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_relation_release'", 'to': "orm['alibrary.Release']"})
},
'alibrary.season': {
'Meta': {'ordering': "('-name',)", 'object_name': 'Season'},
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name_de': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'alibrary.weather': {
'Meta': {'ordering': "('-name',)", 'object_name': 'Weather'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name_de': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'arating.vote': {
'Meta': {'unique_together': "(('user', 'content_type', 'object_id'),)", 'object_name': 'Vote'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['alibrary'] | gpl-3.0 | -1,103,363,076,099,706,400 | 97.018182 | 240 | 0.555863 | false |
jodygarnett/qgis-geoserver-plugin | src/geoserverexplorer/gui/gsoperations.py | 1 | 6538 | from PyQt4 import QtCore
from qgis.core import *
from geoserverexplorer.qgis import layers as qgislayers
from geoserverexplorer.qgis.catalog import CatalogWrapper
from geoserverexplorer.gui.confirm import publishLayer
from geoserverexplorer.gui.dialogs.projectdialog import PublishProjectDialog
from geoserver.catalog import ConflictingDataError
from geoserverexplorer.gui.dialogs.layerdialog import PublishLayersDialog
def publishDraggedGroup(explorer, groupItem, catalog, workspace):
groupName = groupItem.element
groups = qgislayers.getGroups()
group = groups[groupName]
gslayers= [layer.name for layer in catalog.get_layers()]
missing = []
overwrite = bool(QtCore.QSettings().value("/GeoServer/Settings/GeoServer/OverwriteGroupLayers", True, bool))
for layer in group:
if layer.name() not in gslayers or overwrite:
missing.append(layer)
if missing:
explorer.setProgressMaximum(len(missing), "Publish layers")
progress = 0
cat = CatalogWrapper(catalog)
for layer in missing:
explorer.setProgress(progress)
explorer.run(cat.publishLayer,
None,
[],
layer, workspace, True)
progress += 1
explorer.setProgress(progress)
explorer.resetActivity()
names = [layer.name() for layer in group]
layergroup = catalog.create_layergroup(groupName, names, names)
explorer.run(catalog.save, "Create layer group from group '" + groupName + "'",
[], layergroup)
def publishDraggedLayer(explorer, layer, workspace):
cat = workspace.catalog
cat = CatalogWrapper(cat)
ret = explorer.run(publishLayer,
"Publish layer from layer '" + layer.name() + "'",
[],
cat, layer, workspace)
return ret
def addDraggedLayerToGroup(explorer, layer, groupItem):
group = groupItem.element
styles = group.styles
layers = group.layers
if layer.name not in layers:
layers.append(layer.name)
styles.append(layer.default_style.name)
group.dirty.update(layers = layers, styles = styles)
explorer.run(layer.catalog.save,
"Update group '" + group.name + "'",
[groupItem],
group)
def addDraggedUrisToWorkspace(explorer, uris, catalog, workspace, tree):
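    # Try to load each dragged URI as a raster layer first, falling back to an
    # OGR vector layer; publish every layer that loads successfully to the target
    # workspace and return the catalog item of the explorer tree for refreshing,
    # or an empty list if nothing could be published.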
if uris:
if len(uris) > 1:
explorer.setProgressMaximum(len(uris))
for i, uri in enumerate(uris):
if isinstance(uri, basestring):
layerName = QtCore.QFileInfo(uri).completeBaseName()
layer = QgsRasterLayer(uri, layerName)
else:
layer = QgsRasterLayer(uri.uri, uri.name)
if not layer.isValid() or layer.type() != QgsMapLayer.RasterLayer:
if isinstance(uri, basestring):
layerName = QtCore.QFileInfo(uri).completeBaseName()
layer = QgsVectorLayer(uri, layerName, "ogr")
else:
layer = QgsVectorLayer(uri.uri, uri.name, uri.providerKey)
if not layer.isValid() or layer.type() != QgsMapLayer.VectorLayer:
layer.deleteLater()
name = uri if isinstance(uri, basestring) else uri.uri
explorer.setError("Error reading file {} or it is not a valid layer file".format(name))
else:
if not publishDraggedLayer(explorer, layer, workspace):
return []
else:
if not publishDraggedLayer(explorer, layer, workspace):
return []
            explorer.setProgress(i + 1)
        explorer.resetActivity()
return [tree.findAllItems(catalog)[0]]
else:
return []
def addDraggedStyleToLayer(tree, explorer, styleItem, layerItem):
catalog = layerItem.element.catalog
catItem = tree.findFirstItem(catalog)
style = styleItem.element
layer = layerItem.element
if not hasattr(layer, "default_style") or layer.default_style is None:
# if default style is missing, make dragged style the layer's default
# without a default style, some GeoServer operations may fail
layer.default_style = style
else:
# add to layer's additional styles
styles = layer.styles
styles.append(style)
layer.styles = styles
explorer.run(catalog.save,
"Add style '" + style.name + "' to layer '" + layer.name + "'",
[catItem],
layer)
def publishProject(tree, explorer, catalog):
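    # Publish all layers of the current QGIS project to the workspace chosen in
    # the dialog, recreate the QGIS layer groups as GeoServer layer groups and,
    # if a group name was given, collect all layers into one global layer group.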
layers = qgislayers.getAllLayers()
dlg = PublishProjectDialog(catalog)
dlg.exec_()
if not dlg.ok:
return
workspace = dlg.workspace
groupName = dlg.groupName
explorer.setProgressMaximum(len(layers), "Publish layers")
progress = 0
cat = CatalogWrapper(catalog)
for layer in layers:
explorer.setProgress(progress)
explorer.run(publishLayer,
None,
[],
cat, layer, workspace)
progress += 1
explorer.setProgress(progress)
explorer.resetActivity()
groups = qgislayers.getGroups()
for group in groups:
names = [layer.name() for layer in groups[group]]
try:
layergroup = catalog.create_layergroup(group, names, names)
explorer.run(catalog.save, "Create layer group '" + group + "'",
[], layergroup)
except ConflictingDataError, e:
explorer.setWarning(str(e))
if groupName is not None:
names = [layer.name() for layer in layers]
layergroup = catalog.create_layergroup(groupName, names, names)
explorer.run(catalog.save, "Create global layer group",
[], layergroup)
tree.findAllItems(catalog)[0].refreshContent(explorer)
explorer.resetActivity()
def publishLayers(tree, explorer, catalog):
dlg = PublishLayersDialog(catalog)
dlg.exec_()
if dlg.topublish is None:
return
cat = CatalogWrapper(catalog)
progress = 0
explorer.setProgressMaximum(len(dlg.topublish), "Publish layers")
for layer, workspace, name in dlg.topublish:
explorer.run(cat.publishLayer,
None,
[],
layer, workspace, True, name)
progress += 1
explorer.setProgress(progress)
catItem = tree.findAllItems(catalog)[0]
catItem.refreshContent(explorer)
explorer.resetActivity()
| gpl-2.0 | -6,896,900,767,553,893,000 | 38.149701 | 112 | 0.620526 | false |
TeamCohen/TensorLog | tensorlog/debug.py | 1 | 8193 | # (C) William W. Cohen and Carnegie Mellon University, 2016
#
# support for debugging/visualization
#
import sys
import tkinter as TK
import tkinter.ttk
import tkinter.font
import time
from tensorlog import comline
from tensorlog import config
from tensorlog import dataset
from tensorlog import declare
from tensorlog import learn
from tensorlog import matrixdb
from tensorlog import mutil
from tensorlog import opfunutil
conf = config.Config()
conf.sortByValue = True; conf.help.sortByValue = "In displaying message values, sort entries by weight if true, by name if false."
conf.fontsize = None; conf.help.fontsize = "Size of font, eg 14"
conf.fontweight = None;   conf.help.fontweight = "Weight of font, eg 'bold'"
class Debugger(object):
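    """Interactive Tk-based viewer for a TensorLog predicate: it evaluates the
    prediction function for the given mode on the supplied dataset and displays
    the function/operator tree together with the cached outputs and, optionally,
    the gradient deltas of every node."""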
def __init__(self,initProgram,targetPred,trainData,gradient=False):
self.rendered = False
self.sortByValue = conf.sortByValue
self.prog = initProgram
self.trainData = trainData
self.targetPred = targetPred
#evaluate the function so the outputs are cached
        assert self.targetPred,'must specify targetPred'
self.mode = declare.asMode(self.targetPred)
assert self.trainData.hasMode(self.mode),"No mode '%s' in trainData" % self.mode
self.X = self.trainData.getX(self.mode)
self.Y = self.trainData.getY(self.mode)
self.fun = self.prog.getPredictFunction(self.mode)
self.pad = opfunutil.Scratchpad()
self.P = self.fun.eval(self.prog.db, [self.X], self.pad)
# find the symbols that correspond to the inputs
dd = self.prog.db.matrixAsSymbolDict(self.X)
self.xSymbols = [list(d.keys())[0] for d in list(dd.values())]
# evaluate the gradient so that's cached
if gradient:
learner = learn.OnePredFixedRateGDLearner(self.prog, tracer=learn.Tracer.silent)
self.grad = learner.crossEntropyGrad(self.mode, self.X, self.Y, pad=self.pad)
else:
self.grad = None
def render(self):
#set up a window
self.root = TK.Tk()
default_font = tkinter.font.nametofont("TkDefaultFont")
if conf.fontsize:
default_font.configure(size=conf.fontsize)
if conf.fontweight:
default_font.configure(weight=conf.fontweight)
self.root.option_add("*Font", default_font)
#labels on the top
self.treeLabel = tkinter.ttk.Label(self.root,text="Listing of %s" % str(self.mode))
self.treeLabel.grid(row=0,column=1,sticky=TK.EW)
self.msgLabel = tkinter.ttk.Label(self.root,text="Details")
self.msgLabel.grid(row=0,column=2,sticky=TK.EW)
#put a scrollbars on the left and right
#these don't work now? maybe they worked with pack?
# self.scrollbarL = ttk.Scrollbar(self.root)
# self.scrollbarL.grid(row=1,column=0)
# self.scrollbarR = ttk.Scrollbar(self.root)
# self.scrollbarR.grid(row=1,column=4)
#set up a treeview widget and tie it to the left scrollbar
self.tree = tkinter.ttk.Treeview(self.root)
self.tree.grid(row=1,column=1,sticky=TK.NSEW)
# self.tree.config(yscrollcommand=self.scrollbarL.set)
# self.scrollbarL.config(command=self.tree.yview)
#adjust the columns
self.tree["columns"]=("comment","output","delta")
self.tree.column("#0", width=300 )
self.tree.column("comment", width=300 )
self.tree.column("output", width=150)
self.tree.column("delta", width=150)
self.tree.heading("comment", text="comment")
self.tree.heading("output", text="output")
self.tree.heading("delta", text="delta")
# save the function/op deltas and outputs for each tree node,
# indexed by the tree id
self.treeOutputs = {}
self.treeDeltas = {}
#fill the tree with the function and its children
self.populateTree([self.fun],"")
# set up another treeview to display the function output/deltas,
# which will be triggered when you doubleclick
self.msg = tkinter.ttk.Treeview(self.root,height=30)
self.msg["columns"] = ("weight")
self.msg.heading("weight", text="weight")
self.msg.grid(row=1,column=2)
self.msgItems = set()
#tree will fill the msg window on doubleclick
self.tree.bind("<Button-1>", self.DisplayMsg)
# tie it to the right scrollbar
# self.tree.config(yscrollcommand=self.scrollbarR.set)
# self.scrollbarR.config(command=self.msg.yview)
def DisplayMsg(self,event):
"""display the message sent by with an op
or the output for a function."""
key = self.tree.identify_row(event.y)
# figure out where we clicked - returns #0, #1, ...
colStr = self.tree.identify_column(event.x)
colNum = int(colStr[1:])
tag = self.tree.item(key,option='text')
if colNum>=3:
m = self.treeDeltas[key]
if m==None:
self.msgLabel.config(text='Delta for %s unavailable' % tag)
else:
self.msgLabel.config(text='Delta for %s' % tag)
else:
self.msgLabel.config(text='Output for %s' % tag)
m = self.treeOutputs[key]
for it in self.msgItems:
self.msg.delete(it)
self.msgItems = set()
if m!=None:
dOfD = self.prog.db.matrixAsSymbolDict(m)
rowVector = len(list(dOfD.keys()))==1
for r in sorted(dOfD.keys()):
rowName = "Row Vector:" if rowVector else self.xSymbols[r]
rowChild = self.msg.insert("",r,text=rowName,open=True)
self.msgItems.add(rowChild)
def sortKey(k):
if self.sortByValue==True:
return -dOfD[r][k]
else:
return k
for offset,sym in enumerate(sorted(list(dOfD[r].keys()), key=sortKey)):
#why are some of these None?
if sym!=None:
w = dOfD[r][sym]
child = self.msg.insert(rowChild,offset,text=sym,values=("%.5f" % w),open=True)
def populateTree(self,funs,parent):
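        # Recursively add one tree row per function, showing its summary, comment,
        # cached output and delta; the outputs and deltas are also stored by tree
        # item id so DisplayMsg can show their full contents later.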
for offset,fun in enumerate(funs):
description = fun.pprintSummary()
comment = fun.pprintComment()
key = "iid%d" % len(list(self.treeOutputs.keys()))
funOutput = self.pad[fun.id].output
if self.grad:
#todo: clean up
if 'delta' in self.pad[fun.id].__dict__:
funDelta = self.pad[fun.id].delta
else:
funDelta = None
else:
funDelta = None
child = self.tree.insert(
parent,offset,iid=key,text=description,
values=(comment,mutil.pprintSummary(funOutput),mutil.pprintSummary(funDelta)),open=True)
self.treeOutputs[key] = funOutput
self.treeDeltas[key] = funDelta
self.populateTree(fun.children(), child)
def mainloop(self):
        if not self.rendered:
            self.render()
            self.rendered = True
self.root.mainloop()
if __name__ == "__main__":
def usage():
print('debug.py [usual tensorlog options] mode [inputs]')
optdict,args = comline.parseCommandLine(sys.argv[1:])
dset = optdict.get('trainData') or optdict.get('testData')
if dset==None and len(args)<2:
usage()
print('debug on what input? specify --trainData or give a function input')
elif len(args)<1:
usage()
elif dset and len(args)>2:
print('using --trainData not the function input given')
elif dset:
mode = declare.asMode(args[0])
Debugger(optdict['prog'],mode,dset,gradient=True).mainloop()
else:
mode = declare.asMode(args[0])
        assert optdict['prog'].db.isTypeless(),'cannot debug a database with declared types'
X = optdict['prog'].db.onehot(args[1])
dset = dataset.Dataset({mode:X},{mode:optdict['prog'].db.zeros()})
Debugger(optdict['prog'],mode,dset,gradient=False).mainloop()
| apache-2.0 | 1,307,318,208,221,430,500 | 40.588832 | 132 | 0.605517 | false |
crckyl/pixplus | tools/conf-parser.py | 1 | 1750 | import sys
import json
from xml.sax.saxutils import escape
from xml.sax.saxutils import quoteattr
format = ' <preference name="%(name)s" value=%(value)s />'
if sys.argv[1] == 'safari':
format = ''' <dict>
<key>Title</key>
<string>%(name)s</string>
<key>Key</key>
<string>%(name)s</string>
<key>DefaultValue</key>
%(value_safari)s
<key>Type</key>
<string>%(type_safari)s</string>%(more)s
</dict>'''
pass
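# The JSON configuration is read from stdin. Judging from print_conf() below,
# the expected shape is roughly the following (section/key names illustrative):
#
# [{"name": "popup",
#   "items": [{"key": "scale", "value": 1.0},
#             {"key": "mode", "value": 0,
#              "hint": [{"title": "Fit", "value": 0}]}]}]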
def print_conf(conf):
for sec in conf:
for item in sec['items']:
name = 'conf_%s_%s' % (sec['name'], item['key'])
value = item['value']
type_safari = 'TextField'
value_safari = '<string>%s</string>' % escape(str(value))
more = ''
if 'hint' in item:
type_safari = 'PopUpButton'
more = '''
<key>Titles</key>
<array>'''
for hint in item['hint']:
more += '\n <string>%s</string>' % hint['title']
pass
more += '\n </array>'
more += '''
<key>Values</key>
<array>'''
for hint in item['hint']:
more += '\n <string>%s</string>' % str(hint['value'])
pass
more += '\n </array>'
elif isinstance(value, bool):
type_safari = 'CheckBox'
if value:
value = 'true'
else:
value = 'false'
pass
value_safari = '<%s/>' % value
pass
params = {
'name': name,
'value': quoteattr(str(value)),
'type_safari': type_safari,
'value_safari': value_safari,
'more': more
}
print(format % params)
pass
pass
pass
print_conf(json.loads(sys.stdin.read()))
| mit | -6,539,849,018,826,677,000 | 25.119403 | 70 | 0.487429 | false |
mbeacom/locust | locust/test/test_wait_time.py | 1 | 2327 | import random
import time
from locust import User, TaskSet, between, constant, constant_pacing
from locust.exception import MissingWaitTimeError
from .testcases import LocustTestCase
class TestWaitTime(LocustTestCase):
def test_between(self):
class MyUser(User):
wait_time = between(3, 9)
class TaskSet1(TaskSet):
pass
class TaskSet2(TaskSet):
wait_time = between(20.0, 21.0)
u = MyUser(self.environment)
ts1 = TaskSet1(u)
ts2 = TaskSet2(u)
for i in range(100):
w = u.wait_time()
self.assertGreaterEqual(w, 3)
self.assertLessEqual(w, 9)
w = ts1.wait_time()
self.assertGreaterEqual(w, 3)
self.assertLessEqual(w, 9)
for i in range(100):
w = ts2.wait_time()
self.assertGreaterEqual(w, 20)
self.assertLessEqual(w, 21)
def test_constant(self):
class MyUser(User):
wait_time = constant(13)
class TaskSet1(TaskSet):
pass
self.assertEqual(13, MyUser(self.environment).wait_time())
self.assertEqual(13, TaskSet1(MyUser(self.environment)).wait_time())
def test_default_wait_time(self):
class MyUser(User):
pass # default is wait_time = constant(0)
class TaskSet1(TaskSet):
pass
self.assertEqual(0, MyUser(self.environment).wait_time())
self.assertEqual(0, TaskSet1(MyUser(self.environment)).wait_time())
taskset = TaskSet1(MyUser(self.environment))
start_time = time.monotonic()
taskset.wait()
self.assertLess(time.monotonic() - start_time, 0.002)
def test_constant_pacing(self):
class MyUser(User):
wait_time = constant_pacing(0.1)
class TS(TaskSet):
pass
ts = TS(MyUser(self.environment))
ts2 = TS(MyUser(self.environment))
previous_time = time.monotonic()
for i in range(7):
ts.wait()
since_last_run = time.monotonic() - previous_time
self.assertLess(abs(0.1 - since_last_run), 0.02)
previous_time = time.monotonic()
time.sleep(random.random() * 0.1)
_ = ts2.wait_time()
_ = ts2.wait_time()
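# For reference, a minimal locustfile using the same wait_time helpers might
# look like this (host and endpoint are illustrative):
#
#     from locust import HttpUser, task, between
#
#     class WebsiteUser(HttpUser):
#         wait_time = between(3, 9)
#
#         @task
#         def index(self):
#             self.client.get("/")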
| mit | -4,270,839,053,049,579,000 | 28.455696 | 76 | 0.576708 | false |
cessor/galena | src/galena/stopwords.py | 1 | 1030 | from .corpus import *
from .document import *
import itertools
class Stopwords(object):
def __init__(self, stopwords):
self._stopwords = stopwords
def remove_from(self, words):
for word in words:
if not word:
continue
if word not in self._stopwords:
yield word
class Lexicon(object):
def __init__(self, *iterables):
self._iterables = list(itertools.chain(*iterables))
def __contains__(self, item):
return item in self._iterables
class NltkStopwords(object):
def __iter__(self):
from nltk.corpus import stopwords
return stopwords.words('english').__iter__()
class StopwordsFolder(object):
def __init__(self, directory):
self._directory = directory
def __iter__(self):
for file in self._directory:
for line in Lines(file.content()):
for word in Words(line):
if word:
yield word.strip().lower()
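# Minimal usage sketch (assumes the NLTK stopword corpus is available; the
# input words are illustrative):
#
#     stopwords = Stopwords(Lexicon(NltkStopwords()))
#     kept = list(stopwords.remove_from(["the", "grain", "boundary"]))
#     # -> ["grain", "boundary"]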
| lgpl-3.0 | -3,632,805,703,669,371,400 | 20.458333 | 59 | 0.566019 | false |
deejross/python3-pywbem | lex.py | 1 | 39948 | # -----------------------------------------------------------------------------
# ply: lex.py
#
# Author: David M. Beazley ([email protected])
#
# Copyright (C) 2001-2009, David M. Beazley
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See the file COPYING for a complete copy of the LGPL.
# -----------------------------------------------------------------------------
__version__ = "3.0"
__tabversion__ = "3.0" # Version of table file used
import re, sys, types, copy, os
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# Extract the code attribute of a function. Different implementations
# are for Python 2/3 compatibility.
if sys.version_info[0] < 3:
def func_code(f):
return f.func_code
else:
def func_code(f):
return f.__code__
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self,message,s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
def __repr__(self):
return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self,f):
self.f = f
def critical(self,msg,*args,**kwargs):
self.f.write((msg % args) + "\n")
def warning(self,msg,*args,**kwargs):
self.f.write("WARNING: "+ (msg % args) + "\n")
def error(self,msg,*args,**kwargs):
self.f.write("ERROR: " + (msg % args) + "\n")
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self,name):
return self
def __call__(self,*args,**kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
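# A minimal usage sketch of the runtime API (illustrative only; it assumes the
# calling module's token rules have already been compiled with lex() below):
#
#     lexer = lex()                  # build a Lexer from the caller's rules
#     lexer.input("3 + 4 * 10")      # store the input string
#     for tok in lexer:              # same as calling lexer.token() until None
#         print(tok.type, tok.value, tok.lineno, tok.lexpos)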
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re,findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = "INITIAL" # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = "" # Ignored characters
self.lexliterals = "" # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = 0 # Optimized mode
def clone(self,object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = { }
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object,f[0].__name__),f[1]))
newre.append((cre,newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = { }
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object,ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self,tabfile,outputdir=""):
if isinstance(tabfile,types.ModuleType):
return
basetabfilename = tabfile.split(".")[-1]
filename = os.path.join(outputdir,basetabfilename)+".py"
tf = open(filename,"w")
tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
tf.write("_tabversion = %s\n" % repr(__version__))
tf.write("_lextokens = %s\n" % repr(self.lextokens))
tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
tabre = { }
# Collect all functions in the initial state
initial = self.lexstatere["INITIAL"]
initialfuncs = []
for part in initial:
for f in part[1]:
if f and f[0]:
initialfuncs.append(f)
for key, lre in self.lexstatere.items():
titem = []
for i in range(len(lre)):
titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
tabre[key] = titem
tf.write("_lexstatere = %s\n" % repr(tabre))
tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
taberr = { }
for key, ef in self.lexstateerrorf.items():
if ef:
taberr[key] = ef.__name__
else:
taberr[key] = None
tf.write("_lexstateerrorf = %s\n" % repr(taberr))
tf.close()
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self,tabfile,fdict):
if isinstance(tabfile,types.ModuleType):
lextab = tabfile
else:
if sys.version_info[0] < 3:
exec("import %s as lextab" % tabfile)
else:
env = { }
exec("import %s as lextab" % tabfile, env,env)
lextab = env['lextab']
if getattr(lextab,"_tabversion","0.0") != __version__:
raise ImportError("Inconsistent PLY version")
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = { }
self.lexstateretext = { }
for key,lre in lextab._lexstatere.items():
titem = []
txtitem = []
for i in range(len(lre)):
titem.append((re.compile(lre[i][0],lextab._lexreflags),_names_to_funcs(lre[i][1],fdict)))
txtitem.append(lre[i][0])
self.lexstatere[key] = titem
self.lexstateretext[key] = txtitem
self.lexstateerrorf = { }
for key,ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[key] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self,s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c,StringTypes):
raise ValueError("Expected a string")
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self,state):
if not state in self.lexstatere:
raise ValueError("Undefined state")
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state,"")
self.lexerrorf = self.lexstateerrorf.get(state,None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self,state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self,n):
self.lexpos += n
# ------------------------------------------------------------
# opttoken() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre,lexindexfunc in self.lexre:
m = lexre.match(lexdata,lexpos)
if not m: continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func,tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
# Every function must return a token, if nothing, we just move to next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if not newtok.type in self.lextokens:
raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func_code(func).co_filename, func_code(func).co_firstlineno,
func.__name__, newtok.type),lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = "error"
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok: continue
return newtok
self.lexpos = lexpos
raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError("No input string given with input()")
return None
# Iterator interface
def __iter__(self):
return self
def next(self):
t = self.token()
if t is None:
raise StopIteration
return t
__next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
while levels > 0:
f = f.f_back
levels -= 1
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist,namelist):
result = []
for f,name in zip(funclist,namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
if not relist: return []
regex = "|".join(relist)
try:
lexre = re.compile(regex,re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
lexindexnames = lexindexfunc[:]
for f,i in lexre.groupindex.items():
handle = ldict.get(f,None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle,toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find("ignore_") > 0:
lexindexfunc[i] = (None,None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre,lexindexfunc)],[regex],[lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0: m = 1
llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
return llist+rlist, lre+rre, lnames+rnames
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if not parts[i] in names and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self,ldict,log=None,reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = { 'INITIAL' : 'inclusive'}
self.files = {}
self.error = 0
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get("tokens",None)
if not tokens:
self.log.error("No token list is defined")
self.error = 1
return
if not isinstance(tokens,(list, tuple)):
self.log.error("tokens must be a list or tuple")
self.error = 1
return
if not tokens:
self.log.error("tokens is empty")
self.error = 1
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'",n)
self.error = 1
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get("literals","")
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c,StringTypes) or len(c) > 1:
self.log.error("Invalid literal %s. Must be a single character", repr(c))
self.error = 1
continue
except TypeError:
self.log.error("Invalid literals specification. literals must be a sequence of characters")
self.error = 1
def get_states(self):
self.states = self.ldict.get("states",None)
# Build statemap
if self.states:
if not isinstance(self.states,(tuple,list)):
self.log.error("states must be defined as a tuple or list")
self.error = 1
else:
for s in self.states:
if not isinstance(s,tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
self.error = 1
continue
name, statetype = s
if not isinstance(name,StringTypes):
self.log.error("State name %s must be a string", repr(name))
self.error = 1
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
self.error = 1
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined",name)
self.error = 1
continue
self.stateinfo[name] = statetype
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
# Now build up a list of functions and a list of strings
self.toknames = { } # Mapping of symbols to token names
self.funcsym = { } # Symbols defined as functions
self.strsym = { } # Symbols defined as strings
self.ignore = { } # Ignore strings by state
self.errorf = { } # Error functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error("No rules of the form t_rulename are defined")
self.error = 1
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f,self.stateinfo)
self.toknames[f] = tokname
if hasattr(t,"__call__"):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'ignore':
line = func_code(t).co_firstlineno
file = func_code(t).co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
self.error = 1
else:
for s in states:
self.funcsym[s].append((f,t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if "\\" in t:
self.log.warning("%s contains a literal backslash '\\'",f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = 1
else:
for s in states:
self.strsym[s].append((f,t))
else:
self.log.error("%s not defined as a function or string", f)
self.error = 1
# Sort the functions by line number
for f in self.funcsym.values():
if sys.version_info[0] < 3:
f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
else:
# Python 3.0
f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
if sys.version_info[0] < 3:
s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
else:
# Python 3.0
s.sort(key=lambda x: len(x[1]),reverse=True)
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
continue
if not f.__doc__:
self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
if c.match(""):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
if '#' in f.__doc__:
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
self.error = 1
# Validate all rules defined by strings
for name,r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = 1
continue
if not tokname in self.tokens and tokname.find("ignore_") < 0:
self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
if (c.match("")):
self.log.error("Regular expression for rule '%s' matches empty string",name)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
self.error = 1
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'",state)
self.error = 1
# Validate the error function
efunc = self.errorf.get(state,None)
if efunc:
f = efunc
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
for f in self.files:
self.validate_file(f)
# -----------------------------------------------------------------------------
# validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the given file.
# -----------------------------------------------------------------------------
def validate_file(self,filename):
import os.path
base,ext = os.path.splitext(filename)
if ext != '.py': return # No idea what the file is. Return OK
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
return # Couldn't find the file. Don't worry about it
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = { }
linen = 1
for l in lines:
m = fre.match(l)
if not m:
m = sre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
self.error = 1
linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
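# A calling module typically supplies definitions like the following
# (illustrative sketch, not part of this file):
#
#     tokens = ('NUMBER', 'PLUS')
#     t_PLUS = r'\+'
#     t_ignore = ' \t'
#     def t_NUMBER(t):
#         r'\d+'
#         t.value = int(t.value)
#         return t
#     def t_error(t):
#         t.lexer.skip(1)
#     lexer = lex()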
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
global lexer
ldict = None
stateinfo = { 'INITIAL' : 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token,input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object: module = object
if module:
_items = [(k,getattr(module,k)) for k in dir(module)]
ldict = dict(_items)
else:
ldict = get_caller_module_dict(2)
# Collect parser information from the dictionary
linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab,ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info("lex: tokens = %r", linfo.tokens)
debuglog.info("lex: literals = %r", linfo.literals)
debuglog.info("lex: states = %r", linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = { }
for n in linfo.tokens:
lexobj.lextokens[n] = 1
# Get literals specification
if isinstance(linfo.literals,(list,tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = { }
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
# Now add all of the simple rules
for name,r in linfo.strsym[state]:
regex_list.append("(?P<%s>%s)" % (name,r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i in range(len(re_text)):
debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state,stype in stateinfo.items():
if state != "INITIAL" and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere["INITIAL"]
lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
if not lexobj.lexerrorf:
errorlog.warning("No t_error rule is defined")
# Check state information for ignore and error rules
for s,stype in stateinfo.items():
if stype == 'exclusive':
if not s in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if not s in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if not s in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
if not s in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get("INITIAL","")
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
lexobj.writetab(lextab,outputdir)
return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write("Reading from standard input (type EOF to end):\n")
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while 1:
tok = _token()
if not tok: break
sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_doc(f):
if hasattr(r,"__call__"):
f.__doc__ = r.__doc__
else:
f.__doc__ = r
return f
return set_doc
# Alternative spelling of the TOKEN decorator
Token = TOKEN
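# Usage sketch for the decorator above (the identifier pattern is illustrative):
#
#     identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'
#
#     @TOKEN(identifier)
#     def t_ID(t):
#         return t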
| lgpl-2.1 | -8,469,962,769,702,211,000 | 37.118321 | 134 | 0.495519 | false |
Azure/azure-sdk-for-python | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_tag_operations.py | 1 | 86989 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class TagOperations(object):
"""TagOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.apimanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_operation(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
operation_id, # type: str
filter=None, # type: Optional[str]
top=None, # type: Optional[int]
skip=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.TagCollection"]
"""Lists all Tags associated with the Operation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param operation_id: Operation identifier within an API. Must be unique in the current API
Management service instance.
:type operation_id: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>| displayName
| filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>| name |
filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TagCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.apimanagement.models.TagCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_operation.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'operationId': self._serialize.url("operation_id", operation_id, 'str', max_length=80, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1)
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('TagCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_operation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/operations/{operationId}/tags'} # type: ignore
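# Illustrative usage sketch (client construction and all resource names are
# assumptions, not part of this generated module):
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.apimanagement import ApiManagementClient
#
#     client = ApiManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     for tag in client.tag.list_by_operation(
#             "my-resource-group", "my-apim-service", "echo-api", "create-resource"):
#         print(tag.name, tag.display_name)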
def get_entity_state_by_operation(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
operation_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> bool
"""Gets the entity state version of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param operation_id: Operation identifier within an API. Must be unique in the current API
Management service instance.
:type operation_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_entity_state_by_operation.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'operationId': self._serialize.url("operation_id", operation_id, 'str', max_length=80, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_state_by_operation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/operations/{operationId}/tags/{tagId}'} # type: ignore
def get_by_operation(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
operation_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Get tag associated with the Operation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param operation_id: Operation identifier within an API. Must be unique in the current API
Management service instance.
:type operation_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_by_operation.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'operationId': self._serialize.url("operation_id", operation_id, 'str', max_length=80, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_by_operation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/operations/{operationId}/tags/{tagId}'} # type: ignore
def assign_to_operation(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
operation_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Assign tag to the Operation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param operation_id: Operation identifier within an API. Must be unique in the current API
Management service instance.
:type operation_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.assign_to_operation.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'operationId': self._serialize.url("operation_id", operation_id, 'str', max_length=80, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TagContract', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
assign_to_operation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/operations/{operationId}/tags/{tagId}'} # type: ignore
def detach_from_operation(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
operation_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Detach the tag from the Operation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param operation_id: Operation identifier within an API. Must be unique in the current API
Management service instance.
:type operation_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.detach_from_operation.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'operationId': self._serialize.url("operation_id", operation_id, 'str', max_length=80, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
detach_from_operation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/operations/{operationId}/tags/{tagId}'} # type: ignore
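# Illustrative sketch of tagging and then untagging an operation with the two
# calls above (resource names and the tag id are assumptions):
#
#     client.tag.assign_to_operation(
#         "my-resource-group", "my-apim-service", "echo-api", "create-resource", "critical")
#     client.tag.detach_from_operation(
#         "my-resource-group", "my-apim-service", "echo-api", "create-resource", "critical")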
def list_by_api(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
filter=None, # type: Optional[str]
top=None, # type: Optional[int]
skip=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.TagCollection"]
"""Lists all Tags associated with the API.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>| displayName
| filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>| name |
filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TagCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.apimanagement.models.TagCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_api.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1)
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('TagCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_api.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/tags'} # type: ignore
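    # Illustrative usage sketch, not part of the generated client: iterating the
    # pager returned by list_by_api. It assumes the service client is constructed
    # elsewhere and that this operations group is exposed as `client.tag`; the
    # resource names below are hypothetical.
    #
    #     from azure.identity import DefaultAzureCredential
    #     from azure.mgmt.apimanagement import ApiManagementClient
    #
    #     client = ApiManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #     # The returned ItemPaged fetches pages lazily and yields TagContract
    #     # items, honouring $filter/$top/$skip when provided.
    #     for tag in client.tag.list_by_api("my-rg", "my-apim", "echo-api", top=10):
    #         print(tag.name, tag.display_name)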
def get_entity_state_by_api(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> bool
"""Gets the entity state version of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_entity_state_by_api.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_state_by_api.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/tags/{tagId}'} # type: ignore
def get_by_api(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Get tag associated with the API.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_by_api.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_by_api.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/tags/{tagId}'} # type: ignore
def assign_to_api(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Assign tag to the Api.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.assign_to_api.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if response.status_code == 201:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
assign_to_api.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/tags/{tagId}'} # type: ignore
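    # Illustrative sketch, continuing the hypothetical `client` from the sketch
    # above: assign_to_api and the detach_from_api defined just below are a bare
    # PUT/DELETE with no request body; the tag itself is typically created first
    # at the service level via create_or_update.
    #
    #     client.tag.assign_to_api("my-rg", "my-apim", "echo-api", "team-a")
    #     client.tag.detach_from_api("my-rg", "my-apim", "echo-api", "team-a")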
def detach_from_api(
self,
resource_group_name, # type: str
service_name, # type: str
api_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Detach the tag from the Api.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.detach_from_api.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
detach_from_api.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/tags/{tagId}'} # type: ignore
def list_by_product(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
filter=None, # type: Optional[str]
top=None, # type: Optional[int]
skip=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.TagCollection"]
"""Lists all Tags associated with the Product.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>| displayName
| filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>| name |
filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TagCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.apimanagement.models.TagCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_product.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1)
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('TagCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/tags'} # type: ignore
def get_entity_state_by_product(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> bool
"""Gets the entity state version of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_entity_state_by_product.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_state_by_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/tags/{tagId}'} # type: ignore
def get_by_product(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Get tag associated with the Product.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_by_product.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_by_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/tags/{tagId}'} # type: ignore
def assign_to_product(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Assign tag to the Product.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.assign_to_product.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TagContract', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
assign_to_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/tags/{tagId}'} # type: ignore
def detach_from_product(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Detach the tag from the Product.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.detach_from_product.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
detach_from_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/tags/{tagId}'} # type: ignore
def list_by_service(
self,
resource_group_name, # type: str
service_name, # type: str
filter=None, # type: Optional[str]
top=None, # type: Optional[int]
skip=None, # type: Optional[int]
scope=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.TagCollection"]
"""Lists a collection of tags defined within a service instance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>| name |
filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>|
displayName | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith
|</br>.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
        :param scope: Scope like 'apis', 'products' or 'apis/{apiId}'.
:type scope: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TagCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.apimanagement.models.TagCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_service.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1)
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
if scope is not None:
query_parameters['scope'] = self._serialize.query("scope", scope, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('TagCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_service.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags'} # type: ignore
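    # Illustrative sketch (hypothetical identifiers): list_by_service accepts an
    # optional `scope` query parameter to restrict results, for example to tags
    # used by APIs only.
    #
    #     for tag in client.tag.list_by_service("my-rg", "my-apim", scope="apis"):
    #         print(tag.id)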
def get_entity_state(
self,
resource_group_name, # type: str
service_name, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> bool
"""Gets the entity state version of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_entity_state.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_state.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags/{tagId}'} # type: ignore
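    # Illustrative sketch: get_entity_state issues a HEAD request and returns True
    # when the tag exists. Supplying a `cls` callback is one way to also capture
    # the ETag response header; as the code above shows, the callback receives
    # (pipeline_response, deserialized, response_headers).
    #
    #     def keep_etag(pipeline_response, deserialized, headers):
    #         return headers["ETag"]
    #
    #     etag = client.tag.get_entity_state("my-rg", "my-apim", "team-a",
    #                                        cls=keep_etag)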
def get(
self,
resource_group_name, # type: str
service_name, # type: str
tag_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Gets the details of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags/{tagId}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
service_name, # type: str
tag_id, # type: str
parameters, # type: "_models.TagCreateUpdateParameters"
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Creates a tag.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:param parameters: Create parameters.
:type parameters: ~azure.mgmt.apimanagement.models.TagCreateUpdateParameters
:param if_match: ETag of the Entity. Not required when creating an entity, but required when
updating an entity.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagCreateUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if response.status_code == 201:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags/{tagId}'} # type: ignore
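    # Illustrative sketch (hypothetical identifiers): create_or_update followed by
    # the usual ARM ETag pattern, where passing "*" as If-Match requests an
    # unconditional update or delete.
    #
    #     from azure.mgmt.apimanagement.models import TagCreateUpdateParameters
    #
    #     tag = client.tag.create_or_update(
    #         "my-rg", "my-apim", "team-a",
    #         TagCreateUpdateParameters(display_name="Team A"))
    #     client.tag.update(
    #         "my-rg", "my-apim", "team-a", "*",
    #         TagCreateUpdateParameters(display_name="Team A (renamed)"))
    #     client.tag.delete("my-rg", "my-apim", "team-a", "*")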
def update(
self,
resource_group_name, # type: str
service_name, # type: str
tag_id, # type: str
if_match, # type: str
parameters, # type: "_models.TagCreateUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.TagContract"
"""Updates the details of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:param parameters: Update parameters.
:type parameters: ~azure.mgmt.apimanagement.models.TagCreateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagCreateUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags/{tagId}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
service_name, # type: str
tag_id, # type: str
if_match, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes specific tag of the API Management service instance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags/{tagId}'} # type: ignore
| mit | -8,513,760,111,303,371,000 | 51.026914 | 245 | 0.621596 | false |
4Quant/tensorflow | tensorflow/python/ops/rnn.py | 1 | 21568 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
def rnn(cell, inputs, initial_state=None, dtype=None,
sequence_length=None, scope=None):
"""Creates a recurrent neural network specified by RNNCell "cell".
The simplest form of RNN network generated is:
state = cell.zero_state(...)
outputs = []
for input_ in inputs:
output, state = cell(input_, state)
outputs.append(output)
return (outputs, state)
However, a few other options are available:
An initial state can be provided.
If the sequence_length vector is provided, dynamic calculation is performed.
This method of calculation does not compute the RNN steps past the maximum
sequence length of the minibatch (thus saving computational time),
and properly propagates the state at an example's sequence length
to the final state output.
The dynamic calculation performed is, at time t for batch row b,
(output, state)(b, t) =
(t >= sequence_length(b))
? (zeros(cell.output_size), states(b, sequence_length(b) - 1))
: cell(input(b, t), state(b, t - 1))
Args:
cell: An instance of RNNCell.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, cell.input_size].
initial_state: (optional) An initial state for the RNN. This must be
a tensor of appropriate type and shape [batch_size x cell.state_size].
dtype: (optional) The data type for the initial state. Required if
initial_state is not provided.
sequence_length: Specifies the length of each sequence in inputs.
An int32 or int64 vector (tensor) size [batch_size]. Values in [0, T).
scope: VariableScope for the created subgraph; defaults to "RNN".
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input)
state is the final state
Raises:
TypeError: If "cell" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell, rnn_cell.RNNCell):
raise TypeError("cell must be an instance of RNNCell")
if not isinstance(inputs, list):
raise TypeError("inputs must be a list")
if not inputs:
raise ValueError("inputs must not be empty")
outputs = []
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "RNN") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
fixed_batch_size = inputs[0].get_shape().with_rank_at_least(1)[0]
if fixed_batch_size.value:
batch_size = fixed_batch_size.value
else:
batch_size = array_ops.shape(inputs[0])[0]
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, dtype must be.")
state = cell.zero_state(batch_size, dtype)
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
    if sequence_length is not None:  # Prepare variables
zero_output = array_ops.zeros(
array_ops.pack([batch_size, cell.output_size]), inputs[0].dtype)
zero_output.set_shape(
tensor_shape.TensorShape([fixed_batch_size.value, cell.output_size]))
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
for time, input_ in enumerate(inputs):
if time > 0: vs.get_variable_scope().reuse_variables()
# pylint: disable=cell-var-from-loop
call_cell = lambda: cell(input_, state)
# pylint: enable=cell-var-from-loop
      if sequence_length is not None:
(output, state) = _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell)
else:
(output, state) = call_cell()
outputs.append(output)
return (outputs, state)
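# Illustrative usage sketch, not part of the original module. It assumes the
# companion rnn_cell module of this TensorFlow version; shapes and names are
# made up for the example.
#
#     import tensorflow as tf
#     from tensorflow.python.ops import rnn, rnn_cell
#
#     batch_size, num_steps, input_size, hidden_size = 32, 10, 8, 64
#     cell = rnn_cell.BasicLSTMCell(hidden_size, input_size=input_size)
#     # rnn() takes a Python list of 2-D tensors, one entry per time step.
#     inputs = [tf.placeholder(tf.float32, [batch_size, input_size])
#               for _ in range(num_steps)]
#     outputs, final_state = rnn.rnn(cell, inputs, dtype=tf.float32)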
def state_saving_rnn(cell, inputs, state_saver, state_name,
sequence_length=None, scope=None):
"""RNN that accepts a state saver for time-truncated RNN calculation.
Args:
cell: An instance of RNNCell.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, cell.input_size].
state_saver: A state saver object with methods `state` and `save_state`.
state_name: The name to use with the state_saver.
sequence_length: (optional) An int32/int64 vector size [batch_size].
See the documentation for rnn() for more details about sequence_length.
scope: VariableScope for the created subgraph; defaults to "RNN".
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input)
      state is the final state
Raises:
TypeError: If "cell" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
initial_state = state_saver.state(state_name)
(outputs, state) = rnn(cell, inputs, initial_state=initial_state,
sequence_length=sequence_length, scope=scope)
save_state = state_saver.save_state(state_name, state)
with ops.control_dependencies([save_state]):
outputs[-1] = array_ops.identity(outputs[-1])
return (outputs, state)
def _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell):
"""Calculate one step of a dynamic RNN minibatch.
Returns an (output, state) pair conditioned on the sequence_lengths.
The pseudocode is something like:
if t >= max_sequence_length:
return (zero_output, state)
if t < min_sequence_length:
return call_cell()
# Selectively output zeros or output, old state or new state depending
# on if we've finished calculating each row.
new_output, new_state = call_cell()
final_output = np.vstack([
zero_output if time >= sequence_lengths[r] else new_output_r
for r, new_output_r in enumerate(new_output)
])
final_state = np.vstack([
state[r] if time >= sequence_lengths[r] else new_state_r
for r, new_state_r in enumerate(new_state)
])
return (final_output, final_state)
Args:
time: Python int, the current time step
sequence_length: int32 `Tensor` vector of size [batch_size]
min_sequence_length: int32 `Tensor` scalar, min of sequence_length
max_sequence_length: int32 `Tensor` scalar, max of sequence_length
zero_output: `Tensor` vector of shape [output_size]
state: `Tensor` matrix of shape [batch_size, state_size]
call_cell: lambda returning tuple of (new_output, new_state) where
new_output is a `Tensor` matrix of shape [batch_size, output_size]
new_state is a `Tensor` matrix of shape [batch_size, state_size]
Returns:
A tuple of (final_output, final_state) as given by the pseudocode above:
final_output is a `Tensor` matrix of shape [batch_size, output_size]
final_state is a `Tensor` matrix of shape [batch_size, state_size]
"""
# Step 1: determine whether we need to call_cell or not
empty_update = lambda: (zero_output, state)
state_shape = state.get_shape()
output, new_state = control_flow_ops.cond(
time < max_sequence_length, call_cell, empty_update)
# Step 2: determine whether we need to copy through state and/or outputs
existing_output_state = lambda: (output, new_state)
def copy_through():
# Use broadcasting select to determine which values should get
# the previous state & zero output, and which values should get
# a calculated state & output.
copy_cond = (time >= sequence_length)
return (math_ops.select(copy_cond, zero_output, output),
math_ops.select(copy_cond, state, new_state))
(output, state) = control_flow_ops.cond(
time < min_sequence_length, existing_output_state, copy_through)
output.set_shape(zero_output.get_shape())
state.set_shape(state_shape)
return (output, state)
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
lengths: A tensor of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
for input_ in input_seq:
input_.set_shape(input_.get_shape().with_rank(2))
# Join into (time, batch_size, depth)
s_joined = array_ops.pack(input_seq)
# Reverse along dimension 0
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops.unpack(s_reversed)
return result
def bidirectional_rnn(cell_fw, cell_bw, inputs,
initial_state_fw=None, initial_state_bw=None,
dtype=None, sequence_length=None, scope=None):
"""Creates a bidirectional recurrent neural network.
Similar to the unidirectional case above (rnn) but takes input and builds
independent forward and backward RNNs with the final forward and backward
outputs depth-concatenated, such that the output will have the format
[time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, cell.input_size].
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
[batch_size x cell.state_size].
initial_state_bw: (optional) Same as for initial_state_fw.
dtype: (optional) The data type for the initial state. Required if either
of the initial states are not provided.
sequence_length: (optional) An int32/int64 vector, size [batch_size],
containing the actual lengths for each of the sequences.
scope: VariableScope for the created subgraph; defaults to "BiRNN"
Returns:
A tuple (outputs, output_state_fw, output_state_bw) where:
outputs is a length T list of outputs (one for each input), which
are depth-concatenated forward and backward outputs
output_state_fw is the final state of the forward rnn
output_state_bw is the final state of the backward rnn
Raises:
TypeError: If "cell_fw" or "cell_bw" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell_fw, rnn_cell.RNNCell):
raise TypeError("cell_fw must be an instance of RNNCell")
if not isinstance(cell_bw, rnn_cell.RNNCell):
raise TypeError("cell_bw must be an instance of RNNCell")
if not isinstance(inputs, list):
raise TypeError("inputs must be a list")
if not inputs:
raise ValueError("inputs must not be empty")
name = scope or "BiRNN"
# Forward direction
with vs.variable_scope(name + "_FW") as fw_scope:
output_fw, output_state_fw = rnn(cell_fw, inputs, initial_state_fw, dtype,
sequence_length, scope=fw_scope)
# Backward direction
with vs.variable_scope(name + "_BW") as bw_scope:
tmp, output_state_bw = rnn(cell_bw, _reverse_seq(inputs, sequence_length),
initial_state_bw, dtype, sequence_length, scope=bw_scope)
output_bw = _reverse_seq(tmp, sequence_length)
# Concat each of the forward/backward outputs
outputs = [array_ops.concat(1, [fw, bw])
for fw, bw in zip(output_fw, output_bw)]
return (outputs, output_state_fw, output_state_bw)
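# Example (illustrative sketch; cell sizes, input shapes and seq_lens are
# placeholders):
#   cell_fw = rnn_cell.GRUCell(num_units=64)
#   cell_bw = rnn_cell.GRUCell(num_units=64)
#   outputs, state_fw, state_bw = bidirectional_rnn(
#       cell_fw, cell_bw, inputs, dtype=dtypes.float32,
#       sequence_length=seq_lens)
#   # each entry of `outputs` has depth 64 + 64 = 128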
def dynamic_rnn(cell, inputs, sequence_length, initial_state=None, dtype=None,
parallel_iterations=None, swap_memory=False, time_major=False,
scope=None):
"""Creates a recurrent neural network specified by RNNCell "cell".
This function is functionally identical to the function `rnn` above, but
performs fully dynamic unrolling of `inputs`.
Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`. Instead,
it is a single `Tensor` where the maximum time is either the first or second
dimension (see the parameter `time_major`). The corresponding output is
a single `Tensor` having the same number of time steps and batch size.
The parameter `sequence_length` is required and dynamic calculation is
automatically performed.
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, cell.input_size]`.
If time_major == True, this must be a tensor of shape:
`[max_time, batch_size, cell.input_size]`.
sequence_length: An int32/int64 vector (tensor) size [batch_size].
initial_state: (optional) An initial state for the RNN. This must be
a tensor of appropriate type and shape [batch_size x cell.state_size].
dtype: (optional) The data type for the initial state. Required if
initial_state is not provided.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Swap the tensors produced in forward inference but needed
for back prop from GPU to CPU.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
      Using time_major = True is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "RNN".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
state: The final state, shaped:
`[batch_size, cell.state_size]`.
Raises:
TypeError: If "cell" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell, rnn_cell.RNNCell):
raise TypeError("cell must be an instance of RNNCell")
# By default, time_major==False and inputs are batch-major: shaped
# [batch, time, depth]
# For internal calculations, we transpose to [time, batch, depth]
if not time_major:
inputs = array_ops.transpose(inputs, [1, 0, 2]) # (B,T,D) => (T,B,D)
parallel_iterations = parallel_iterations or 32
sequence_length = math_ops.to_int32(sequence_length)
sequence_length = array_ops.identity(sequence_length, name="sequence_length")
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "RNN") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
input_shape = array_ops.shape(inputs)
batch_size = input_shape[1]
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, dtype must be.")
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.pack(shape)
return logging_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
["Expected shape for Tensor %s is " % x.name,
packed_shape, " but saw shape: ", x_shape])
# Perform some shape validation
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(sequence_length, name="CheckSeqLen")
(outputs, final_state) = _dynamic_rnn_loop(
cell, inputs, state, sequence_length,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
# If we are performing batch-major calculations, transpose output back
# to shape [batch, time, depth]
if not time_major:
outputs = array_ops.transpose(outputs, [1, 0, 2]) # (T,B,D) => (B,T,D)
return (outputs, final_state)
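# Example (illustrative sketch; `inputs` is a placeholder batch-major tensor of
# shape [batch_size, max_time, input_depth] and seq_lens a length vector):
#   outputs, final_state = dynamic_rnn(
#       cell, inputs, seq_lens, dtype=dtypes.float32)
#   # outputs: [batch_size, max_time, cell.output_size]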
def _dynamic_rnn_loop(cell, inputs, initial_state, sequence_length,
parallel_iterations, swap_memory):
"""Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, depth].
initial_state: A `Tensor` of shape [batch_size, depth].
sequence_length: An `int32` `Tensor` of shape [batch_size].
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
Returns:
Tuple (final_outputs, final_state).
final_outputs:
A `Tensor` of shape [time, batch_size, depth]`.
final_state:
A `Tensor` of shape [batch_size, depth].
"""
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
# Construct an initial output
input_shape = array_ops.shape(inputs)
(time_steps, batch_size, unused_depth) = array_ops.unpack(input_shape, 3)
inputs_got_shape = inputs.get_shape().with_rank(3)
(const_time_steps, const_batch_size, const_depth) = inputs_got_shape.as_list()
# Prepare dynamic conditional copying of state & output
zero_output = array_ops.zeros(
array_ops.pack([batch_size, cell.output_size]), inputs.dtype)
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name="time")
with ops.op_scope([], "dynamic_rnn") as scope:
base_name = scope
output_ta = tensor_array_ops.TensorArray(
dtype=inputs.dtype, size=time_steps,
tensor_array_name=base_name + "output")
input_ta = tensor_array_ops.TensorArray(
dtype=inputs.dtype, size=time_steps,
tensor_array_name=base_name + "input")
input_ta = input_ta.unpack(inputs)
def _time_step(time, state, output_ta_t):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
state: Vector.
output_ta_t: `TensorArray`, the output with existing flow.
Returns:
The tuple (time + 1, new_state, output_ta_t with updated flow).
"""
input_t = input_ta.read(time)
# Restore some shape information
input_t.set_shape([const_batch_size, const_depth])
(output, new_state) = _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, lambda: cell(input_t, state))
output_ta_t = output_ta_t.write(time, output)
return (time + 1, new_state, output_ta_t)
(unused_final_time, final_state, output_final_ta) = control_flow_ops.While(
cond=lambda time, _1, _2: time < time_steps,
body=_time_step,
loop_vars=(time, state, output_ta),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
final_outputs = output_final_ta.pack()
# Restore some shape information
final_outputs.set_shape([
const_time_steps, const_batch_size, cell.output_size])
return (final_outputs, final_state)
| apache-2.0 | 5,813,275,549,810,683,000 | 39.314019 | 80 | 0.688335 | false |
sknepneklab/SAMoS | utils/make_circular_patch.py | 1 | 3349 | # ***************************************************************************
# *
# * Copyright (C) 2013-2016 University of Dundee
# * All rights reserved.
# *
# * This file is part of SAMoS (Soft Active Matter on Surfaces) program.
# *
# * SAMoS is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * SAMoS is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program. If not, see <http://www.gnu.org/licenses/>.
# *
# *****************************************************************************
# Utility code for generating initial configuration for cell simulations.
# This code places N cells in a patch of radius R keeping in mind that the
# minimum distance between two cells should be greater than a certain value.
import sys
import argparse
import numpy as np
from random import uniform
from datetime import *
import math as m
from CellList2D import *
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output", type=str, default='patch.dat', help="output file name")
parser.add_argument("-R", "--radius", type=float, default=20.0, help="patch radius")
parser.add_argument("-N", "--num", type=int, default=100, help="number of particles")
parser.add_argument("-m", "--min_dist", type=float, default=1.5, help="minium distance between particles")
parser.add_argument("-A", "--A0", type=float, default=m.pi, help="native cell area")
args = parser.parse_args()
print
print "\tSoft Actve Matter on Surfaces (SAMoS)"
print "\tGenerates a circial cell patch"
print
print "\tRastko Sknepnek"
print "\tUniversity of Dundee"
print "\t(c) 2015"
print "\t----------------------------------------------"
print
print "\tOutput files : ", args.output
print "\tPatch radius : ", args.radius
print "\tNumber of cells : ", args.num
print "\tMinimum distance between cells : ", args.min_dist
print
start = datetime.now()
R = args.radius
cl = CellList2D([2.2*R,2.2*R],2*args.min_dist)
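# Rejection sampling: draw uniform points in the bounding square, accept a
# point only if it lies inside the circle of radius R and is at least
# min_dist away from every particle stored in the neighbouring cells.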
particles = []
i = 0
while i < args.num:
x, y = uniform(-R,R), uniform(-R,R)
if (x**2 + y**2 < R**2):
cid = cl.get_cell_idx((x,y))
can_add = True
for nb in cl.cell_list[cid].neighbors:
for idx in cl.cell_list[nb].indices:
xi, yi = particles[idx]
dx, dy = x-xi, y-yi
if dx*dx + dy*dy < args.min_dist**2:
can_add = False
break
if not can_add:
break
if can_add:
print "Successfully added particle : ", i
particles.append((x,y))
cl.add_particle((x,y),i)
i += 1
out = open(args.output,'w')
out.write('keys: id x y nx ny nvx nvy nvz area\n')
for i in range(len(particles)):
x,y = particles[i]
phi = uniform(0,2*m.pi)
out.write('%4d %f %f %f %f %f %f %f %f\n' % (i,x,y, m.cos(phi),m.sin(phi), 0, 0, 1.0, args.A0))
out.close()
end = datetime.now()
total = end - start
print
print " *** Completed in ", total.total_seconds(), " seconds *** "
print | gpl-3.0 | 5,762,786,436,947,331,000 | 31.843137 | 106 | 0.622574 | false |
chrisjrn/registrasion | registrasion/tests/controller_helpers.py | 1 | 2034 | from registrasion.controllers.cart import CartController
from registrasion.controllers.credit_note import CreditNoteController
from registrasion.controllers.invoice import InvoiceController
from registrasion.models import commerce
from django.core.exceptions import ObjectDoesNotExist
class TestingCartController(CartController):
def set_quantity(self, product, quantity, batched=False):
''' Sets the _quantity_ of the given _product_ in the cart to the given
_quantity_. '''
self.set_quantities(((product, quantity),))
def add_to_cart(self, product, quantity):
''' Adds _quantity_ of the given _product_ to the cart. Raises
ValidationError if constraints are violated.'''
try:
product_item = commerce.ProductItem.objects.get(
cart=self.cart,
product=product)
old_quantity = product_item.quantity
except ObjectDoesNotExist:
old_quantity = 0
self.set_quantity(product, old_quantity + quantity)
def next_cart(self):
if self.cart.status == commerce.Cart.STATUS_ACTIVE:
self.cart.status = commerce.Cart.STATUS_PAID
self.cart.save()
class TestingInvoiceController(InvoiceController):
def pay(self, reference, amount, pre_validate=True):
        ''' Testing method for simulating an invoice payment by the given
amount. '''
if pre_validate:
# Manual payments don't pre-validate; we should test that things
# still work if we do silly things.
self.validate_allowed_to_pay()
''' Adds a payment '''
commerce.PaymentBase.objects.create(
invoice=self.invoice,
reference=reference,
amount=amount,
)
self.update_status()
class TestingCreditNoteController(CreditNoteController):
def refund(self):
commerce.CreditNoteRefund.objects.create(
parent=self.credit_note,
reference="Whoops."
)
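# Example of driving these helpers in a test (illustrative; assumes the usual
# CartController.for_user constructor from the parent class):
#   cart = TestingCartController.for_user(user)
#   cart.add_to_cart(product, 1)
#   cart.next_cart()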
| apache-2.0 | -5,205,401,486,481,097,000 | 31.285714 | 79 | 0.652901 | false |
shfengcj/pyminer | pyminer_setting.py | 1 | 1142 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 06:53:32 2015
@author: chaojun
"""
from pyminer_cos_model import lcdm
from pyminer_residual import JLAresiCal, CMBresiCal, BAOresiCal
# General setting
divMax = 15 # for romberg integral
ogh2 = 2.469e-5
JLA_DIR = '/Users/chaojun/Documents/Research/2015/grb/pycode/data/jla'
# Cosmological model
model = lcdm(divmax = divMax)
# Data setting
use_sn_data = True
use_cmb_data = True
use_bao_data = True
resobj=[]
if use_sn_data : resobj.append( JLAresiCal(cosModel = model, DATA_DIR_JLA = JLA_DIR) )
if use_cmb_data: resobj.append( CMBresiCal(cosModel = model) )
if use_bao_data: resobj.append( BAOresiCal(cosModel = model) )
# Residual function
def residual(p, resobj = resobj, fjac=None):
import numpy as np
res = np.array([])
for obj in resobj:
tmp = obj.residual(p)
res = np.append(res, tmp)
status = 0
return [status, res]
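# The [status, residuals] return value follows the mpfit convention, so the fit
# can be driven with something like (illustrative; assumes an mpfit module):
#   import mpfit
#   p0 = [0.3, 0.7]          # initial parameter guess
#   fit = mpfit.mpfit(residual, p0)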
# some other functions
def clear_env():
for key in globals().keys():
if not key.startswith("__"):
globals().pop(key)
| gpl-2.0 | -945,975,103,707,670,100 | 18.689655 | 86 | 0.622592 | false |
nishad-jobsglobal/odoo-marriot | openerp/addons/sync_mail_multi_attach/controllers/__init__.py | 1 | 1157 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
# Copyright (C) 2011-today Synconics Technologies Pvt. Ltd. (<http://www.synconics.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import main
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 | -3,098,737,131,827,244,000 | 47.25 | 91 | 0.624892 | false |
ctrezevant/doorMan | Applications/Command Line/doorctl.py | 1 | 1846 | #!/usr/bin/env python
import urllib2, json, sys
CONFIG = {
'API_KEY': ' ',
'API_HOST': 'http://<ip>',
'DOOR_TARGET': 0
}
def main():
if 'open' in sys.argv:
print 'sent open: %s' % sendCmd('/set/open')
if 'close' in sys.argv:
print 'sent close: %s' % sendCmd('/set/close')
if 'cycle' in sys.argv:
print 'sent cycle: %s' % sendCmd('/set/cycle')
if 'lockout' in sys.argv:
print 'sent lockout: %s' % sendCmd('/set/lockout')
if 'status' in sys.argv:
checkStatus()
if 'state' in sys.argv:
checkStatus()
    if any(cmd in sys.argv for cmd in ('open', 'close', 'cycle', 'lockout', 'status', 'state')):
sys.exit(0)
print "usage:\n doorctl open|close|cycle|lockout|status|help"
def checkStatus():
ul2 = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=0))
api_data = {
'method': '/get/state',
'door_id': CONFIG['DOOR_TARGET'],
'api_key': CONFIG['API_KEY']
}
postData = json.dumps(api_data)
try:
api_response = json.loads(ul2.open(CONFIG['API_HOST'], postData).read())
if(api_response['state'] == 0):
response_text = 'open'
else:
response_text = 'closed'
if(api_response['lockout']):
response_text += ' and locked'
except Exception:
response_text = "query state failed"
print response_text
def sendCmd(cmd):
ul2 = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=0))
api_data = {
'method': cmd,
'door_id': CONFIG['DOOR_TARGET'],
'api_key': CONFIG['API_KEY']
}
postData = json.dumps(api_data)
try:
api_response = json.loads(ul2.open(CONFIG['API_HOST'], postData).read())
except Exception:
api_response = {'command_sent': 'failed'}
return api_response['command_sent']
if __name__ == "__main__":
main()
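# Example invocations (illustrative; assumes CONFIG above holds a real API key,
# host and door id):
#   ./doorctl.py status
#   ./doorctl.py open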
| mit | 7,744,271,617,177,077,000 | 25.753623 | 84 | 0.58234 | false |
fengshao0907/vitess | test/queryservice_test.py | 1 | 2631 | #!/usr/bin/env python
import logging
import optparse
import traceback
import unittest
import sys
import os
import utils
import framework
from queryservice_tests import cache_tests
from queryservice_tests import nocache_tests
from queryservice_tests import stream_tests
from queryservice_tests import status_tests
from queryservice_tests import test_env
from mysql_flavor import set_mysql_flavor
from protocols_flavor import set_protocols_flavor
from topo_flavor.server import set_topo_server_flavor
def main():
parser = optparse.OptionParser(usage='usage: %prog [options] [test_names]')
parser.add_option('-m', '--memcache', action='store_true', default=False,
                    help='starts a memcached, and tests rowcache')
parser.add_option(
'-e', '--env', default='vttablet',
help='Environment that will be used. Valid options: vttablet, vtocc')
utils.add_options(parser)
(options, args) = parser.parse_args()
logging.getLogger().setLevel(logging.ERROR)
utils.set_options(options)
run_tests(options, args)
def run_tests(options, args):
suite = unittest.TestSuite()
if args:
if args[0] == 'teardown':
test_env.TestEnv(options.env).tearDown()
exit(0)
for arg in args:
if hasattr(nocache_tests.TestNocache, arg):
suite.addTest(nocache_tests.TestNocache(arg))
elif hasattr(stream_tests.TestStream, arg):
suite.addTest(stream_tests.TestStream(arg))
elif hasattr(cache_tests.TestCache, arg) and options.memcache:
suite.addTest(cache_tests.TestCache(arg))
elif hasattr(cache_tests.TestWillNotBeCached, arg) and options.memcache:
suite.addTest(cache_tests.TestWillNotBeCached(arg))
else:
raise Exception(arg, 'not found in tests')
else:
modules = [nocache_tests, stream_tests, status_tests]
if options.memcache:
modules.append(cache_tests)
for m in modules:
suite.addTests(unittest.TestLoader().loadTestsFromModule(m))
env = test_env.TestEnv(options.env)
try:
env.memcache = options.memcache
env.setUp()
print 'Starting queryservice_test.py: %s' % options.env
sys.stdout.flush()
framework.TestCase.setenv(env)
result = unittest.TextTestRunner(
verbosity=options.verbose, failfast=True).run(suite)
if not result.wasSuccessful():
raise Exception('test failures')
finally:
if not options.skip_teardown:
env.tearDown()
if options.keep_logs:
print('Leaving temporary files behind (--keep-logs), please '
'clean up before next run: ' + os.environ['VTDATAROOT'])
if __name__ == '__main__':
main()
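# Example invocations (illustrative):
#   ./queryservice_test.py --env vtocc
#   ./queryservice_test.py -m               # also run the rowcache tests
#   ./queryservice_test.py teardown         # tear down the test environment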
| bsd-3-clause | 1,468,847,150,738,949,400 | 30.698795 | 78 | 0.700494 | false |
priyaganti/rockstor-core | src/rockstor/storageadmin/views/clone_helpers.py | 1 | 2535 | """
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from storageadmin.models import (Share, Snapshot)
from storageadmin.util import handle_exception
from fs.btrfs import (add_clone, share_id, update_quota)
from rest_framework.response import Response
from storageadmin.serializers import ShareSerializer
import re
from django.conf import settings
def create_clone(share, new_name, request, logger, snapshot=None):
    # If snapshot is None, create a clone of the share itself.
    # Otherwise, create a clone of the given snapshot.
if (re.match(settings.SHARE_REGEX + '$', new_name) is None):
e_msg = ('Clone name is invalid. It must start with a letter and can'
' contain letters, digits, _, . and - characters')
handle_exception(Exception(e_msg), request)
if (Share.objects.filter(name=new_name).exists()):
e_msg = ('Another Share with name: %s already exists.' % new_name)
handle_exception(Exception(e_msg), request)
if (Snapshot.objects.filter(share=share, name=new_name).exists()):
e_msg = ('Snapshot with name: %s already exists for the '
'share: %s. Choose a different name' %
(new_name, share.name))
handle_exception(Exception(e_msg), request)
try:
share_name = share.subvol_name
snap = None
if (snapshot is not None):
snap = snapshot.real_name
add_clone(share.pool, share_name, new_name, snapshot=snap)
snap_id = share_id(share.pool, new_name)
qgroup_id = ('0/%s' % snap_id)
update_quota(share.pool, qgroup_id, share.size * 1024)
new_share = Share(pool=share.pool, qgroup=qgroup_id, name=new_name,
size=share.size, subvol_name=new_name)
new_share.save()
return Response(ShareSerializer(new_share).data)
except Exception as e:
handle_exception(e, request)
| gpl-3.0 | -311,084,012,256,732,200 | 42.706897 | 77 | 0.680473 | false |
lpredova/pybookie | server/sources/footbal_db.py | 1 | 3991 | # coding=utf-8
import json
import os
class FootballDB:
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
groups_file = BASE_DIR + '/sources/groups.json'
wc_history_file = BASE_DIR + '/sources/wc_history'
wc_team_file = BASE_DIR + '/sources/squads/'
top_teams = ['RealMadrid(ESP)', 'Barcelona(ESP)', 'Chelsea(ENG)', 'ManchesterCity(ENG)', 'ParisSaint-Germain(FRA)',
'BayernMunich(GER)', 'Internazionale(ITA)', 'Napoli(ITA)', 'ManchesterUnited(ENG)', 'Arsenal(ENG)',
'Liverpool(ENG)', 'Juventus(ITA)', 'BorussiaDortmund(GER)', 'AtléticoMadrid(ESP)']
def __init__(self):
pass
@staticmethod
def get_team_by_id(team_id):
data = json.loads(FootballDB.get_games())
result = None
for group in data:
for team in group['teams']:
if int(team['id']) == int(team_id):
result = team['team']
return result
@staticmethod
def get_ranking(team_name):
return int(FootballDB.get_wc_history(team_name, 0))
@staticmethod
def get_wc_games_played(team_name):
return int(FootballDB.get_wc_history(team_name, 2))
@staticmethod
def get_won_wc_games_played(team_name):
return int(FootballDB.get_wc_history(team_name, 3))
@staticmethod
def get_draw_wc_games_played(team_name):
return int(FootballDB.get_wc_history(team_name, 4))
@staticmethod
def get_lost_wc_games_played(team_name):
return int(FootballDB.get_wc_history(team_name, 5))
@staticmethod
def get_goal_difference_wc_games_played(team_name):
gd = FootballDB.get_wc_history(team_name, 6)
gd = gd.split(':')
goals_for = int(gd[0])
goals_against = int(gd[1])
return goals_for - goals_against
@staticmethod
def get_wc_points(team_name):
return int(FootballDB.get_wc_history(team_name, 7))
@staticmethod
def get_wc_participations(team_name):
return int(FootballDB.get_wc_history(team_name, 8))
@staticmethod
def get_wc_titles(team_name):
titles = FootballDB.get_wc_history(team_name, 9)
try:
            if titles and titles[0].isdigit():
titles = titles[0]
return int(titles)
else:
return 0
except Exception:
return 0
@staticmethod
def get_wc_history(team, result_row_index):
path = FootballDB.wc_history_file
if os.path.isfile(path):
f = open(path)
for line in f:
if line[0].isdigit():
row = line.replace('\n', '')
row = row.replace(' ', '')
row = row.split('|')
if row[1] == team.replace(' ', ''):
f.close()
try:
return row[result_row_index]
except BaseException:
return 0
@staticmethod
def get_wc_team_player_ratings(team):
path = '%s%s.txt' % (FootballDB.wc_team_file, (team.replace(' ', '-')))
path = path.lower()
team_rating = 0
if os.path.isfile(path):
f = open(path)
for line in f:
try:
row = line.split('##')
row = row[1].replace(' ', '').split(',')
team_rating += int(row[0])
team_name = row[1].replace('\n', '')
if team_name in FootballDB.top_teams:
team_rating += 10
except Exception:
pass
return team_rating
@staticmethod
def get_games():
data = None
path = FootballDB.groups_file
if os.path.isfile(path):
with open(path, 'r') as football_teams:
data = football_teams.read().replace('\n', '')
return data
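# Example usage (illustrative; team names must match the entries in the
# wc_history and squad files):
#   rating = FootballDB.get_wc_team_player_ratings('Brazil')
#   titles = FootballDB.get_wc_titles('Brazil')
#   groups_json = FootballDB.get_games()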
| apache-2.0 | -716,865,174,706,552,200 | 29.458015 | 119 | 0.525063 | false |
DiamondLightSource/diffcalc | test/diffcalc/gdasupport/minigda/test_command.py | 1 | 5417 | ###
# Copyright 2008-2011 Diamond Light Source Ltd.
# This file is part of Diffcalc.
#
# Diffcalc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diffcalc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diffcalc. If not, see <http://www.gnu.org/licenses/>.
###
import unittest
import diffcalc.gdasupport.minigda.command
from diffcalc.gdasupport.minigda.command import Pos, Scan, ScanDataPrinter
from diffcalc.gdasupport.minigda.scannable import \
MultiInputExtraFieldsDummyScannable, SingleFieldDummyScannable
class BadSingleFieldDummyScannable(SingleFieldDummyScannable):
def getPosition(self):
raise Exception("Problem")
class NoneReturningSingleFieldDummyScannable(SingleFieldDummyScannable):
def getPosition(self):
return None
class TestPos(object):
def setup_method(self):
self.dummyMainNamespace = namespace = {}
namespace['notAScannable'] = 3.124
namespace['scnA'] = SingleFieldDummyScannable('scnA')
namespace['scnB'] = SingleFieldDummyScannable('scnB')
namespace['scnC'] = SingleFieldDummyScannable('scnC')
namespace['scnD'] = SingleFieldDummyScannable('scnD')
namespace['scnNone'] = \
NoneReturningSingleFieldDummyScannable('scnNone')
namespace['scnBad'] = BadSingleFieldDummyScannable('scnBad')
diffcalc.gdasupport.minigda.command.ROOT_NAMESPACE_DICT = \
self.dummyMainNamespace
self.pos = Pos()
def testPosReturningReportWithRead(self):
scnA = self.dummyMainNamespace['scnA']
assert self.pos.posReturningReport(scnA) == 'scnA: 0.0000'
def testPosReturningReportWithMove(self):
scnA = self.dummyMainNamespace['scnA']
assert self.pos.posReturningReport(scnA, 1.123) == 'scnA: 1.1230'
def test__call__(self):
scnA = self.dummyMainNamespace['scnA']
self.pos.__call__(scnA)
self.pos.__call__(scnA, 4.321)
print "*"
self.pos.__call__()
print "*"
def testPosReturningReportWithMultiFieldScannables(self):
scn = MultiInputExtraFieldsDummyScannable('mie', ['i1', 'i2'], ['e1'])
assert (self.pos.posReturningReport(scn)
== 'mie: i1: 0.0000 i2: 0.0000 e1: 100.0000 ')
def testPosReturningReportWithBadScannable(self):
scnBad = self.dummyMainNamespace['scnBad']
assert self.pos.posReturningReport(scnBad) == "scnBad: Error: Problem"
assert (self.pos.posReturningReport(scnBad, 4.321)
== "scnBad: Error: Problem")
def testPosReturningReportWithNoneReturningScannable(self):
scnNone = self.dummyMainNamespace['scnNone']
assert self.pos.posReturningReport(scnNone) == "scnNone: ---"
assert self.pos.posReturningReport(scnNone, 4.321) == "scnNone: ---"
class TestScan(object):
def setup_method(self):
self.scan = Scan([ScanDataPrinter()])
def test__parseScanArgsIntoScannableArgGroups(self):
scnA = SingleFieldDummyScannable('scnA')
scnB = SingleFieldDummyScannable('scnB')
scnC = SingleFieldDummyScannable('scnC')
scnD = SingleFieldDummyScannable('scnD')
scanargs = (scnA, 1, 2, 3, scnB, [4, 5, 6], scnC, scnD, 1.123456)
r = self.scan._parseScanArgsIntoScannableArgGroups(scanargs)
result = [r[0].scannable, r[0].args, r[1].scannable, r[1].args,
r[2].scannable, r[2].args, r[3].scannable, r[3].args]
desired = [scnA, [1, 2, 3], scnB, [[4, 5, 6], ], scnC, list(), scnD,
[1.123456]]
assert result == desired
def test__reorderGroupsAccordingToLevel(self):
scn4 = SingleFieldDummyScannable('scn4')
scn4.setLevel(4)
scn5a = SingleFieldDummyScannable('scn5a')
scn5a.setLevel(5)
scn5b = SingleFieldDummyScannable('scn5b')
scn5b.setLevel(5)
scn6 = SingleFieldDummyScannable('scn6')
scn6.setLevel(6)
def t(scanargs):
groups = self.scan._parseScanArgsIntoScannableArgGroups(scanargs)
r = self.scan._reorderInnerGroupsAccordingToLevel(groups)
return [r[0].scannable, r[1].scannable, r[2].scannable,
r[3].scannable]
assert (t((scn5a, 1, 2, 3, scn6, 1, scn5b, scn4))
== [scn5a, scn4, scn5b, scn6])
assert (t((scn5a, 1, 3, scn6, 1, scn5b, scn4))
== [scn4, scn5a, scn5b, scn6])
def test__Frange(self):
assert self.scan._frange(1, 1.3, .1) == [1.0, 1.1, 1.2, 1.3]
def test__Call__(self):
scn4 = SingleFieldDummyScannable('scn4')
scn4.setLevel(4)
scn5a = SingleFieldDummyScannable('scn5a')
scn5a.setLevel(5)
scn5b = SingleFieldDummyScannable('scn5b')
scn5b.setLevel(5)
scn6 = SingleFieldDummyScannable('scn6')
scn6.setLevel(6)
self.scan.__call__(scn5a, 1, 3, 1, scn6, 1, scn5b, scn4)
| gpl-3.0 | -2,690,735,548,452,100,600 | 38.253623 | 80 | 0.648145 | false |
flavour/eden | modules/plugins/__init__.py | 5 | 8807 | # -*- coding: utf-8 -*-
import os
import sys
from gluon import current
from gluon.storage import Storage
from s3compat import reload
__all__ = ("PluginLoader",
)
# Name of the plugin directory in modules
PLUGINS = "plugins"
# Module names to ignore when scanning for plugins
IGNORE = ("skeleton", "__init__")
# Name of the setup function in plugins
SETUP = "setup"
# Name of the variable that contains the version info in plugins
VERSION = "__version__"
# =============================================================================
class PluginLoader(object):
"""
Simple plugin loader (experimental)
Plugins are python modules or packages in the modules/plugins
directory.
Each plugin defines a setup() function which is called during
the request cycle immediately before entering the controller.
Plugins can be added by simply placing them in the plugins
directory, without any code change required.
The plugin directory will be scanned for new or updated plugins
whenever a new session starts, or by calling explicitly:
PluginLoader.detect(reset_all=True)
NB the reloading of the plugins can only be enforced in the
current interpreter thread - while other threads may still
run the old version. Therefore, it is recommended to restart
all threads (=reloading the server) after installing or updating
a plugin.
NB failing setup() methods will not be tried again until the next
reload (new session, restart, or explicit call)
session.s3.plugins contains a dict of all current plugins, like:
{name: (version, status)}
where:
- name is the python module name of the plugin
- version is the version string provided by the plugin (or
"unknown" if not present)
- status is:
None = newly detected plugin, not set up yet
True = plugin has been set up successfully
False = plugin setup failed in the last attempt, deactivated
"""
# -------------------------------------------------------------------------
@classmethod
def setup_all(cls, reload_all=False):
"""
Setup all plugins
@param reload_all: reload all plugins and reset the registry
"""
if reload_all:
cls.detect(reset_all=True)
for name in list(cls._registry().keys()):
cls.load(name)
# -------------------------------------------------------------------------
@classmethod
def detect(cls, reset_all=False):
"""
Detect new plugins and update the registry
@param reset_all: reset all entries in the registry
"""
default = (None, None)
if reset_all:
plugin = lambda name: default
else:
registry = cls._registry()
plugin = lambda name: registry.get(name, default)
plugins = dict((name, plugin(name)) for name in cls._scan())
cls._registry(plugins)
# -------------------------------------------------------------------------
@classmethod
def load(cls, name, force=False):
"""
Run the setup method of a particular plugin
@param name: the name of the plugin
@param force: enforce the plugin to be reloaded and its
setup method to be re-run regardless of the
previous status
"""
if name[0] == "_":
return False
log = current.log
registry = cls._registry()
if name not in registry:
cls.detect()
if name not in registry:
raise NameError("plugin '%s' not found" % name)
# Get version and status info from registry
plugin_info = registry[name]
if force or not isinstance(plugin_info, tuple):
version, status = None, None
else:
version, status = plugin_info
if status is None:
new = True
if not (cls._reload(name)):
version, status = "unknown", False
else:
version, status = None, True
else:
new = False
if status is False:
# Skip plugins which have failed in previous attempts
registry[name] = (version, status)
return False
status = True
setup = None
# Import manifest
package = "%s.%s" % (PLUGINS, name)
try:
setup = getattr(__import__(package, fromlist=[SETUP]), SETUP)
except (ImportError, AttributeError):
# This may not be a plugin at all => remove from registry
if new:
log.debug("Plugin '%s' not found" % name)
registry.pop(name, None)
return False
except SyntaxError:
if new:
log.error("Skipping invalid plugin '%s'" % name)
if current.response.s3.debug:
raise
version, status = "invalid", False
if version is None:
# Update version info if plugin has been reloaded
try:
version = getattr(__import__(package, fromlist=[VERSION]), VERSION)
except (ImportError, AttributeError):
version = "unknown"
if status and not callable(setup):
# Is a module => find setup function
try:
setup = setup.setup
except AttributeError:
# No setup function found => treat as failed
if new:
log.debug("No setup function found for plugin '%s'" % name)
status = False
if status:
# Execute setup method
if new:
log.info("Setting up plugin '%s'" % name)
try:
setup()
except Exception:
log.error("Plugin '%s' setup failed" % name)
if current.response.s3.debug:
raise
status = False
# Update the registry
registry[name] = (version, status)
return status
# -------------------------------------------------------------------------
@classmethod
def _registry(cls, plugins=None):
"""
Get (or replace) the current plugin registry
@param plugins: the new registry
"""
session_s3 = current.session.s3
if plugins:
registry = session_s3.plugins = plugins
else:
registry = session_s3.plugins
if registry is None:
# New session => run detect
# - initialize registry first to prevent infinite recursion
registry = session_s3.plugins = {}
cls.detect()
return registry
# -------------------------------------------------------------------------
@staticmethod
def _scan():
"""
Iterator scanning the plugin directory for available plugins
@return: the names of the plugins
"""
folder = current.request.folder
path = os.path.join(folder, "modules", PLUGINS)
names = os.listdir(path)
for name in names:
name_, extension = os.path.splitext(name)
if name_ in IGNORE:
continue
path_ = os.path.join(path, name)
if os.path.isdir(path_) or extension == ".py":
yield(name_)
# -------------------------------------------------------------------------
@staticmethod
def _reload(name):
"""
Reload a plugin
@param name: the plugin name
@note: this works only within the current thread, other
threads may still be bound to the old version of
the plugin
"""
if name in IGNORE:
return
success = True
appname = current.request.application
plugin_name = "applications.%s.modules.%s.%s" % (appname, PLUGINS, name)
plugin = sys.modules.get(plugin_name)
if plugin is not None:
try:
reload(plugin)
except ImportError:
current.log.error("Reloading plugin '%s' failed" % name)
success = False
return success
# =============================================================================
# Do a full scan when reloading the module (=when the thread starts)
PluginLoader.detect(reset_all=True)
# =============================================================================
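# Example usage from application code (illustrative):
#   PluginLoader.detect()            # register newly dropped plugins
#   PluginLoader.load("my_plugin")   # run a single plugin's setup()
#   PluginLoader.setup_all()         # set up everything in the registry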
| mit | 2,472,458,515,809,089,000 | 29.901754 | 83 | 0.507551 | false |
amluto/libseccomp | tests/21-live-basic_allow.py | 1 | 1695 | #!/usr/bin/env python
#
# Seccomp Library test program
#
# Copyright (c) 2013 Red Hat <[email protected]>
# Author: Paul Moore <[email protected]>
#
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of version 2.1 of the GNU Lesser General Public License as
# published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, see <http://www.gnu.org/licenses>.
#
import argparse
import sys
import util
from seccomp import *
def test():
action = util.parse_action(sys.argv[1])
if not action == ALLOW:
quit(1)
util.install_trap()
f = SyscallFilter(TRAP)
# NOTE: additional syscalls required for python
f.add_rule_exactly(ALLOW, "stat")
f.add_rule_exactly(ALLOW, "fstat")
f.add_rule_exactly(ALLOW, "open")
f.add_rule_exactly(ALLOW, "mmap")
f.add_rule_exactly(ALLOW, "munmap")
f.add_rule_exactly(ALLOW, "read")
f.add_rule_exactly(ALLOW, "write")
f.add_rule_exactly(ALLOW, "close")
f.add_rule_exactly(ALLOW, "rt_sigaction")
f.add_rule_exactly(ALLOW, "rt_sigreturn")
f.add_rule_exactly(ALLOW, "exit_group")
f.load()
try:
util.write_file("/dev/null")
except OSError as ex:
quit(ex.errno)
quit(160)
test()
# kate: syntax python;
# kate: indent-mode python; space-indent on; indent-width 4; mixedindent off;
| lgpl-2.1 | 905,958,259,002,516,000 | 27.728814 | 78 | 0.692625 | false |
ewandor/home-assistant | homeassistant/components/climate/netatmo.py | 1 | 5912 | """
Support for Netatmo Smart Thermostat.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.netatmo/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.const import TEMP_CELSIUS, ATTR_TEMPERATURE
from homeassistant.components.climate import (
STATE_HEAT, STATE_IDLE, ClimateDevice, PLATFORM_SCHEMA,
SUPPORT_TARGET_TEMPERATURE, SUPPORT_OPERATION_MODE, SUPPORT_AWAY_MODE)
from homeassistant.util import Throttle
from homeassistant.loader import get_component
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['netatmo']
_LOGGER = logging.getLogger(__name__)
CONF_RELAY = 'relay'
CONF_THERMOSTAT = 'thermostat'
DEFAULT_AWAY_TEMPERATURE = 14
# # The default offset is 2 hours (when you use the thermostat itself)
DEFAULT_TIME_OFFSET = 7200
# # Return cached results if last scan was less than this time ago
# # NetAtmo Data is uploaded to server every hour
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=300)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_RELAY): cv.string,
vol.Optional(CONF_THERMOSTAT, default=[]):
vol.All(cv.ensure_list, [cv.string]),
})
SUPPORT_FLAGS = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE |
SUPPORT_AWAY_MODE)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the NetAtmo Thermostat."""
netatmo = get_component('netatmo')
device = config.get(CONF_RELAY)
import lnetatmo
try:
data = ThermostatData(netatmo.NETATMO_AUTH, device)
for module_name in data.get_module_names():
if CONF_THERMOSTAT in config:
if config[CONF_THERMOSTAT] != [] and \
module_name not in config[CONF_THERMOSTAT]:
continue
add_devices([NetatmoThermostat(data, module_name)], True)
except lnetatmo.NoDevice:
return None
class NetatmoThermostat(ClimateDevice):
"""Representation a Netatmo thermostat."""
def __init__(self, data, module_name, away_temp=None):
"""Initialize the sensor."""
self._data = data
self._state = None
self._name = module_name
self._target_temperature = None
self._away = None
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._target_temperature
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self._data.current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def current_operation(self):
"""Return the current state of the thermostat."""
state = self._data.thermostatdata.relay_cmd
if state == 0:
return STATE_IDLE
elif state == 100:
return STATE_HEAT
@property
def is_away_mode_on(self):
"""Return true if away mode is on."""
return self._away
def turn_away_mode_on(self):
"""Turn away on."""
mode = "away"
temp = None
self._data.thermostatdata.setthermpoint(mode, temp, endTimeOffset=None)
self._away = True
def turn_away_mode_off(self):
"""Turn away off."""
mode = "program"
temp = None
self._data.thermostatdata.setthermpoint(mode, temp, endTimeOffset=None)
self._away = False
def set_temperature(self, **kwargs):
"""Set new target temperature for 2 hours."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
mode = "manual"
self._data.thermostatdata.setthermpoint(
mode, temperature, DEFAULT_TIME_OFFSET)
self._target_temperature = temperature
self._away = False
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from NetAtmo API and updates the states."""
self._data.update()
self._target_temperature = self._data.thermostatdata.setpoint_temp
self._away = self._data.setpoint_mode == 'away'
class ThermostatData(object):
"""Get the latest data from Netatmo."""
def __init__(self, auth, device=None):
"""Initialize the data object."""
self.auth = auth
self.thermostatdata = None
self.module_names = []
self.device = device
self.current_temperature = None
self.target_temperature = None
self.setpoint_mode = None
def get_module_names(self):
"""Return all module available on the API as a list."""
self.update()
if not self.device:
for device in self.thermostatdata.modules:
for module in self.thermostatdata.modules[device].values():
self.module_names.append(module['module_name'])
else:
for module in self.thermostatdata.modules[self.device].values():
self.module_names.append(module['module_name'])
return self.module_names
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Call the NetAtmo API to update the data."""
import lnetatmo
self.thermostatdata = lnetatmo.ThermostatData(self.auth)
self.target_temperature = self.thermostatdata.setpoint_temp
self.setpoint_mode = self.thermostatdata.setpoint_mode
self.current_temperature = self.thermostatdata.temp
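# Example configuration.yaml entry for this platform (illustrative):
#   climate:
#     - platform: netatmo
#       relay: RELAY_MAC_OR_ID
#       thermostat:
#         - Living Room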
| apache-2.0 | 6,718,774,278,088,995,000 | 31.662983 | 79 | 0.643945 | false |
geokala/cloudify-manager | rest-service/manager_rest/blueprints_manager.py | 1 | 39060 | #########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import uuid
import traceback
from datetime import datetime
from StringIO import StringIO
from flask import g, current_app
from dsl_parser import exceptions as parser_exceptions
from dsl_parser import functions
from dsl_parser import tasks
from dsl_parser.constants import DEPLOYMENT_PLUGINS_TO_INSTALL
from manager_rest import models
from manager_rest import manager_exceptions
from manager_rest.workflow_client import workflow_client
from manager_rest.storage_manager import get_storage_manager
from manager_rest.utils import maybe_register_teardown
LIMITLESS_GLOBAL_PARALLEL_EXECUTIONS_VALUE = -1
class DslParseException(Exception):
pass
class BlueprintAlreadyExistsException(Exception):
def __init__(self, blueprint_id, *args):
Exception.__init__(self, args)
self.blueprint_id = blueprint_id
class BlueprintsManager(object):
@property
def sm(self):
return get_storage_manager()
def blueprints_list(self, include=None):
return self.sm.blueprints_list(include=include)
def deployments_list(self, include=None):
return self.sm.deployments_list(include=include)
def executions_list(self, deployment_id=None,
is_include_system_workflows=False, include=None):
executions = self.sm.executions_list(deployment_id=deployment_id,
include=include)
return [e for e in executions if
is_include_system_workflows or not e.is_system_workflow]
def get_blueprint(self, blueprint_id, include=None):
return self.sm.get_blueprint(blueprint_id, include=include)
def get_deployment(self, deployment_id, include=None):
return self.sm.get_deployment(deployment_id=deployment_id,
include=include)
def get_execution(self, execution_id, include=None):
return self.sm.get_execution(execution_id, include=include)
def update_execution_status(self, execution_id, status, error):
if self._get_transient_deployment_workers_mode_config()['enabled'] and\
status in models.Execution.END_STATES:
execution = self.get_execution(execution_id)
# currently, the create and delete deployment environment
# workflows are still not marked as system workflows, so they're
# named explicitly
if not (execution.is_system_workflow or execution.workflow_id in (
'create_deployment_environment',
'delete_deployment_environment')):
# a user workflow has reached a final state - initiating
# a workflow to stop deployment workers
deployment = self.get_deployment(execution.deployment_id,
include=['id'])
wf_id = '_stop_deployment_environment'
deployment_env_creation_task_name = \
'cloudify_system_workflows.deployment_environment.stop'
kwargs = {
'prerequisite_task_id': execution_id
}
self._execute_system_workflow(
deployment, wf_id, deployment_env_creation_task_name,
kwargs)
return self.sm.update_execution_status(execution_id, status, error)
def publish_blueprint(self, dsl_location,
resources_base_url, blueprint_id):
try:
plan = tasks.parse_dsl(dsl_location, resources_base_url)
except Exception, ex:
raise DslParseException(str(ex))
now = str(datetime.now())
new_blueprint = models.BlueprintState(plan=plan,
id=blueprint_id,
created_at=now,
updated_at=now)
self.sm.put_blueprint(new_blueprint.id, new_blueprint)
return new_blueprint
def delete_blueprint(self, blueprint_id):
blueprint_deployments = self.sm.get_blueprint_deployments(blueprint_id)
if len(blueprint_deployments) > 0:
raise manager_exceptions.DependentExistsError(
"Can't delete blueprint {0} - There exist "
"deployments for this blueprint; Deployments ids: {1}"
.format(blueprint_id,
','.join([dep.id for dep
in blueprint_deployments])))
return self.sm.delete_blueprint(blueprint_id)
def delete_deployment(self, deployment_id, ignore_live_nodes=False):
# Verify deployment exists.
self.sm.get_deployment(deployment_id)
# validate there are no running executions for this deployment
executions = self.executions_list(deployment_id=deployment_id)
if any(execution.status not in models.Execution.END_STATES for
execution in executions):
raise manager_exceptions.DependentExistsError(
"Can't delete deployment {0} - There are running "
"executions for this deployment. Running executions ids: {1}"
.format(
deployment_id,
','.join([execution.id for execution in
executions if execution.status not
in models.Execution.END_STATES])))
if not ignore_live_nodes:
node_instances = self.sm.get_node_instances(
deployment_id=deployment_id)
# validate either all nodes for this deployment are still
# uninitialized or have been deleted
if any(node.state not in ('uninitialized', 'deleted') for node in
node_instances):
raise manager_exceptions.DependentExistsError(
"Can't delete deployment {0} - There are live nodes for "
"this deployment. Live nodes ids: {1}"
.format(deployment_id,
','.join([node.id for node in node_instances
if node.state not in
('uninitialized', 'deleted')])))
self._delete_deployment_environment(deployment_id)
return self.sm.delete_deployment(deployment_id)
def execute_workflow(self, deployment_id, workflow_id,
parameters=None,
allow_custom_parameters=False, force=False):
deployment = self.get_deployment(deployment_id)
if workflow_id not in deployment.workflows:
raise manager_exceptions.NonexistentWorkflowError(
'Workflow {0} does not exist in deployment {1}'.format(
workflow_id, deployment_id))
workflow = deployment.workflows[workflow_id]
self._verify_deployment_environment_created_successfully(deployment_id)
transient_workers_config =\
self._get_transient_deployment_workers_mode_config()
is_transient_workers_enabled = transient_workers_config['enabled']
self._check_for_active_executions(deployment_id, force,
transient_workers_config)
execution_parameters = \
BlueprintsManager._merge_and_validate_execution_parameters(
workflow, workflow_id, parameters, allow_custom_parameters)
if is_transient_workers_enabled:
# in this mode, we push the user execution object to storage
# before executing the "_start_deployment_environment" system
# workflow, to prevent from other executions to start running in
# between the system workflow and the user workflow execution.
# to keep correct chronological order, the system workflow's
# "created_at" field is generated here.
start_deployment_env_created_at_time = str(datetime.now())
execution_id = str(uuid.uuid4())
new_execution = models.Execution(
id=execution_id,
status=models.Execution.PENDING,
created_at=str(datetime.now()),
blueprint_id=deployment.blueprint_id,
workflow_id=workflow_id,
deployment_id=deployment_id,
error='',
parameters=self._get_only_user_execution_parameters(
execution_parameters),
is_system_workflow=False)
self.sm.put_execution(new_execution.id, new_execution)
if is_transient_workers_enabled:
# initiating a workflow to start deployment workers
wf_id = '_start_deployment_environment'
deployment_env_start_task_name = \
'cloudify_system_workflows.deployment_environment.start'
self._execute_system_workflow(
deployment, wf_id, deployment_env_start_task_name, timeout=300,
created_at=start_deployment_env_created_at_time)
# executing the user workflow
workflow_client().execute_workflow(
workflow_id,
workflow,
blueprint_id=deployment.blueprint_id,
deployment_id=deployment_id,
execution_id=execution_id,
execution_parameters=execution_parameters)
return new_execution
def _execute_system_workflow(self, deployment, wf_id, task_mapping,
execution_parameters=None, timeout=0,
created_at=None):
"""
:param deployment: deployment for workflow execution
:param wf_id: workflow id
:param task_mapping: mapping to the system workflow
:param execution_parameters: parameters for the system workflow
:param timeout: 0 will return immediately; any positive value will
cause this method to wait for the given timeout for the task to
complete, and verify it finished successfully before returning
:param created_at: creation time for the workflow execution object.
if omitted, a value will be generated by this method.
:return: async task object
"""
execution_id = str(uuid.uuid4()) # will also serve as the task id
execution_parameters = execution_parameters or {}
# currently, deployment env creation/deletion are not set as
# system workflows
is_system_workflow = wf_id not in (
'create_deployment_environment', 'delete_deployment_environment')
execution = models.Execution(
id=execution_id,
status=models.Execution.PENDING,
created_at=created_at or str(datetime.now()),
blueprint_id=deployment.blueprint_id,
workflow_id=wf_id,
deployment_id=deployment.id,
error='',
parameters=self._get_only_user_execution_parameters(
execution_parameters),
is_system_workflow=is_system_workflow)
self.sm.put_execution(execution.id, execution)
async_task = workflow_client().execute_system_workflow(
deployment, wf_id, execution_id, task_mapping,
execution_parameters)
if timeout > 0:
try:
# wait for the workflow execution to complete
async_task.get(timeout=timeout, propagate=True)
except Exception as e:
# error message for the user
error_msg =\
'Error occurred while executing the {0} system workflow '\
'for deployment {1}: {2} - {3}'.format(
wf_id, deployment.id, type(e).__name__, str(e))
# adding traceback to the log error message
tb = StringIO()
traceback.print_exc(file=tb)
log_error_msg = '{0}; traceback: {1}'.format(
error_msg, tb.getvalue())
current_app.logger.error(log_error_msg)
raise RuntimeError(error_msg)
# verify the execution completed successfully
execution = self.sm.get_execution(async_task.id)
if execution.status != models.Execution.TERMINATED:
raise RuntimeError(
'Failed executing the {0} system workflow for deployment '
'{1}: Execution did not complete successfully before '
'timeout ({2} seconds)'.format(
wf_id, deployment.id, timeout))
return async_task
def cancel_execution(self, execution_id, force=False):
"""
Cancel an execution by its id
If force is False (default), this method will request the
executed workflow to gracefully terminate. It is up to the workflow
to follow up on that request.
If force is used, this method will request the abrupt and immediate
termination of the executed workflow. This is valid for all
workflows, regardless of whether they provide support for graceful
termination or not.
Note that in either case, the execution is not yet cancelled upon
returning from the method. Instead, it'll be in a 'cancelling' or
'force_cancelling' status (as can be seen in models.Execution). Once
the execution is truly stopped, it'll be in 'cancelled' status (unless
force was not used and the executed workflow doesn't support
graceful termination, in which case it might simply continue
regardless and end up with a 'terminated' status)
:param execution_id: The execution id
:param force: A boolean describing whether to force cancellation
:return: The updated execution object
:rtype: models.Execution
:raises manager_exceptions.IllegalActionError
"""
execution = self.get_execution(execution_id)
if execution.status not in (models.Execution.PENDING,
models.Execution.STARTED) and \
(not force or execution.status != models.Execution
.CANCELLING):
raise manager_exceptions.IllegalActionError(
"Can't {0}cancel execution {1} because it's in status {2}"
.format(
'force-' if force else '',
execution_id,
execution.status))
new_status = models.Execution.CANCELLING if not force \
else models.Execution.FORCE_CANCELLING
self.sm.update_execution_status(
execution_id, new_status, '')
return self.get_execution(execution_id)
def create_deployment(self, blueprint_id, deployment_id, inputs=None):
blueprint = self.get_blueprint(blueprint_id)
plan = blueprint.plan
try:
deployment_plan = tasks.prepare_deployment_plan(plan, inputs)
except parser_exceptions.MissingRequiredInputError, e:
raise manager_exceptions.MissingRequiredDeploymentInputError(
str(e))
except parser_exceptions.UnknownInputError, e:
raise manager_exceptions.UnknownDeploymentInputError(str(e))
now = str(datetime.now())
new_deployment = models.Deployment(
id=deployment_id,
blueprint_id=blueprint_id, created_at=now, updated_at=now,
workflows=deployment_plan['workflows'],
inputs=deployment_plan['inputs'],
policy_types=deployment_plan['policy_types'],
policy_triggers=deployment_plan['policy_triggers'],
groups=deployment_plan['groups'],
outputs=deployment_plan['outputs'])
self.sm.put_deployment(deployment_id, new_deployment)
self._create_deployment_nodes(blueprint_id,
deployment_id,
deployment_plan)
node_instances = deployment_plan['node_instances']
self._create_deployment_node_instances(deployment_id,
node_instances)
self._create_deployment_environment(new_deployment, deployment_plan)
return new_deployment
def start_deployment_modification(self,
deployment_id,
modified_nodes,
context):
# verify deployment exists
self.sm.get_deployment(deployment_id, include=['id'])
existing_modifications = self.sm.deployment_modifications_list(
deployment_id=deployment_id, include=['id', 'status'])
active_modifications = [
m.id for m in existing_modifications
if m.status == models.DeploymentModification.STARTED]
if active_modifications:
raise \
manager_exceptions.ExistingStartedDeploymentModificationError(
'Cannot start deployment modification while there are '
'existing started deployment modifications. Currently '
'started deployment modifications: {0}'
.format(active_modifications))
nodes = [node.to_dict() for node in self.sm.get_nodes(deployment_id)]
node_instances = [instance.to_dict() for instance
in self.sm.get_node_instances(deployment_id)]
node_instances_modification = tasks.modify_deployment(
nodes=nodes,
previous_node_instances=node_instances,
modified_nodes=modified_nodes)
node_instances_modification['before_modification'] = [
instance.to_dict() for instance in
self.sm.get_node_instances(deployment_id)]
now = str(datetime.now())
modification_id = str(uuid.uuid4())
modification = models.DeploymentModification(
id=modification_id,
created_at=now,
ended_at=None,
status=models.DeploymentModification.STARTED,
deployment_id=deployment_id,
modified_nodes=modified_nodes,
node_instances=node_instances_modification,
context=context)
self.sm.put_deployment_modification(modification_id, modification)
for node_id, modified_node in modified_nodes.items():
self.sm.update_node(
modification.deployment_id, node_id,
planned_number_of_instances=modified_node['instances'])
added_and_related = node_instances_modification['added_and_related']
added_node_instances = []
for node_instance in added_and_related:
if node_instance.get('modification') == 'added':
added_node_instances.append(node_instance)
else:
current = self.sm.get_node_instance(node_instance['id'])
new_relationships = current.relationships
new_relationships += node_instance['relationships']
self.sm.update_node_instance(models.DeploymentNodeInstance(
id=node_instance['id'],
relationships=new_relationships,
version=current.version,
node_id=None,
host_id=None,
deployment_id=None,
state=None,
runtime_properties=None))
self._create_deployment_node_instances(deployment_id,
added_node_instances)
return modification
def finish_deployment_modification(self, modification_id):
modification = self.sm.get_deployment_modification(modification_id)
if modification.status in models.DeploymentModification.END_STATES:
raise manager_exceptions.DeploymentModificationAlreadyEndedError(
'Cannot finish deployment modification: {0}. It is already in'
' {1} status.'.format(modification_id,
modification.status))
modified_nodes = modification.modified_nodes
for node_id, modified_node in modified_nodes.items():
self.sm.update_node(modification.deployment_id, node_id,
number_of_instances=modified_node['instances'])
node_instances = modification.node_instances
for node_instance in node_instances['removed_and_related']:
if node_instance.get('modification') == 'removed':
self.sm.delete_node_instance(node_instance['id'])
else:
removed_relationship_target_ids = set(
[rel['target_id']
for rel in node_instance['relationships']])
current = self.sm.get_node_instance(node_instance['id'])
new_relationships = [rel for rel in current.relationships
if rel['target_id']
not in removed_relationship_target_ids]
self.sm.update_node_instance(models.DeploymentNodeInstance(
id=node_instance['id'],
relationships=new_relationships,
version=current.version,
node_id=None,
host_id=None,
deployment_id=None,
state=None,
runtime_properties=None))
now = str(datetime.now())
self.sm.update_deployment_modification(
models.DeploymentModification(
id=modification_id,
status=models.DeploymentModification.FINISHED,
ended_at=now,
created_at=None,
deployment_id=None,
modified_nodes=None,
node_instances=None,
context=None))
return models.DeploymentModification(
id=modification_id,
status=models.DeploymentModification.FINISHED,
ended_at=None,
created_at=None,
deployment_id=None,
modified_nodes=None,
node_instances=None,
context=None)
def rollback_deployment_modification(self, modification_id):
modification = self.sm.get_deployment_modification(modification_id)
if modification.status in models.DeploymentModification.END_STATES:
raise manager_exceptions.DeploymentModificationAlreadyEndedError(
'Cannot rollback deployment modification: {0}. It is already '
'in {1} status.'.format(modification_id,
modification.status))
node_instances = self.sm.get_node_instances(modification.deployment_id)
modification.node_instances['before_rollback'] = [
instance.to_dict() for instance in node_instances]
for instance in node_instances:
self.sm.delete_node_instance(instance.id)
for instance in modification.node_instances['before_modification']:
self.sm.put_node_instance(
models.DeploymentNodeInstance(**instance))
nodes_num_instances = {node.id: node for node in self.sm.get_nodes(
deployment_id=modification.deployment_id,
include=['id', 'number_of_instances'])}
for node_id, modified_node in modification.modified_nodes.items():
self.sm.update_node(
modification.deployment_id, node_id,
planned_number_of_instances=nodes_num_instances[
node_id].number_of_instances)
now = str(datetime.now())
self.sm.update_deployment_modification(
models.DeploymentModification(
id=modification_id,
status=models.DeploymentModification.ROLLEDBACK,
ended_at=now,
created_at=None,
deployment_id=None,
modified_nodes=None,
node_instances=modification.node_instances,
context=None))
return models.DeploymentModification(
id=modification_id,
status=models.DeploymentModification.ROLLEDBACK,
ended_at=None,
created_at=None,
deployment_id=None,
modified_nodes=None,
node_instances=None,
context=None)
def _get_node_instance_ids(self, deployment_id):
return self.sm.get_node_instances(deployment_id, include=['id'])
def _create_deployment_node_instances(self,
deployment_id,
dsl_node_instances):
for node_instance in dsl_node_instances:
instance_id = node_instance['id']
node_id = node_instance['name']
relationships = node_instance.get('relationships', [])
host_id = node_instance.get('host_id')
instance = models.DeploymentNodeInstance(
id=instance_id,
node_id=node_id,
host_id=host_id,
relationships=relationships,
deployment_id=deployment_id,
state='uninitialized',
runtime_properties={},
version=None)
self.sm.put_node_instance(instance)
def evaluate_deployment_outputs(self, deployment_id):
deployment = self.get_deployment(
deployment_id, include=['outputs'])
def get_node_instances(node_id=None):
return self.sm.get_node_instances(deployment_id, node_id)
def get_node_instance(node_instance_id):
return self.sm.get_node_instance(node_instance_id)
def get_node(node_id):
return self.sm.get_node(deployment_id, node_id)
try:
return functions.evaluate_outputs(
outputs_def=deployment.outputs,
get_node_instances_method=get_node_instances,
get_node_instance_method=get_node_instance,
get_node_method=get_node)
except parser_exceptions.FunctionEvaluationError, e:
raise manager_exceptions.DeploymentOutputsEvaluationError(str(e))
def evaluate_functions(self, deployment_id, context, payload):
self.get_deployment(deployment_id, include=['id'])
def get_node_instances(node_id=None):
return self.sm.get_node_instances(deployment_id, node_id)
def get_node_instance(node_instance_id):
return self.sm.get_node_instance(node_instance_id)
def get_node(node_id):
return self.sm.get_node(deployment_id, node_id)
try:
return functions.evaluate_functions(
payload=payload,
context=context,
get_node_instances_method=get_node_instances,
get_node_instance_method=get_node_instance,
get_node_method=get_node)
except parser_exceptions.FunctionEvaluationError, e:
raise manager_exceptions.FunctionsEvaluationError(str(e))
def _create_deployment_nodes(self, blueprint_id, deployment_id, plan):
for raw_node in plan['nodes']:
num_instances = raw_node['instances']['deploy']
self.sm.put_node(models.DeploymentNode(
id=raw_node['name'],
deployment_id=deployment_id,
blueprint_id=blueprint_id,
type=raw_node['type'],
type_hierarchy=raw_node['type_hierarchy'],
number_of_instances=num_instances,
planned_number_of_instances=num_instances,
deploy_number_of_instances=num_instances,
host_id=raw_node['host_id'] if 'host_id' in raw_node else None,
properties=raw_node['properties'],
operations=raw_node['operations'],
plugins=raw_node['plugins'],
plugins_to_install=raw_node.get('plugins_to_install'),
relationships=self._prepare_node_relationships(raw_node)
))
@staticmethod
def _merge_and_validate_execution_parameters(
workflow, workflow_name, execution_parameters=None,
allow_custom_parameters=False):
"""
        Merge the parameters: parameters passed directly in the execution
        request override the workflow parameters from the original plan. Any
        parameter without a default value in the blueprint must appear in
        the execution request parameters.
        Custom parameters will be passed to the workflow as well if allowed;
        otherwise, an exception will be raised if such parameters are passed.
"""
merged_execution_parameters = dict()
workflow_parameters = workflow.get('parameters', dict())
execution_parameters = execution_parameters or dict()
missing_mandatory_parameters = set()
for param_name, param in workflow_parameters.iteritems():
if 'default' not in param:
# parameter without a default value - ensure one was
# provided via execution parameters
if param_name not in execution_parameters:
missing_mandatory_parameters.add(param_name)
continue
merged_execution_parameters[param_name] = \
execution_parameters[param_name]
else:
merged_execution_parameters[param_name] = \
execution_parameters[param_name] if \
param_name in execution_parameters else param['default']
if missing_mandatory_parameters:
raise \
manager_exceptions.IllegalExecutionParametersError(
'Workflow "{0}" must be provided with the following '
'parameters to execute: {1}'.format(
workflow_name, ','.join(missing_mandatory_parameters)))
custom_parameters = {k: v for k, v in execution_parameters.iteritems()
if k not in workflow_parameters}
if not allow_custom_parameters and custom_parameters:
raise \
manager_exceptions.IllegalExecutionParametersError(
'Workflow "{0}" does not have the following parameters '
'declared: {1}. Remove these parameters or use '
'the flag for allowing custom parameters'
.format(workflow_name, ','.join(custom_parameters.keys())))
merged_execution_parameters.update(custom_parameters)
return merged_execution_parameters
@staticmethod
def _prepare_node_relationships(raw_node):
if 'relationships' not in raw_node:
return []
prepared_relationships = []
for raw_relationship in raw_node['relationships']:
relationship = {
'target_id': raw_relationship['target_id'],
'type': raw_relationship['type'],
'type_hierarchy': raw_relationship['type_hierarchy'],
'properties': raw_relationship['properties'],
'source_operations': raw_relationship['source_operations'],
'target_operations': raw_relationship['target_operations'],
}
prepared_relationships.append(relationship)
return prepared_relationships
def _verify_deployment_environment_created_successfully(self,
deployment_id):
env_creation = next(
(execution for execution in
self.sm.executions_list(deployment_id=deployment_id)
if execution.workflow_id == 'create_deployment_environment'),
None)
if not env_creation:
raise RuntimeError('Failed to find "create_deployment_environment"'
' execution for deployment {0}'.format(
deployment_id))
status = env_creation.status
if status == models.Execution.TERMINATED:
return
elif status == models.Execution.PENDING:
raise manager_exceptions \
.DeploymentEnvironmentCreationPendingError(
'Deployment environment creation is still pending, '
'try again in a minute')
elif status == models.Execution.STARTED:
raise manager_exceptions\
.DeploymentEnvironmentCreationInProgressError(
'Deployment environment creation is still in progress, '
'try again in a minute')
elif status == models.Execution.FAILED:
raise RuntimeError(
"Can't launch executions since environment creation for "
"deployment {0} has failed: {1}".format(
deployment_id, env_creation.error))
elif status in (
models.Execution.CANCELLED, models.Execution.CANCELLING,
models.Execution.FORCE_CANCELLING):
raise RuntimeError(
"Can't launch executions since the environment creation for "
"deployment {0} has been cancelled [status={1}]".format(
deployment_id, status))
else:
raise RuntimeError(
'Unexpected deployment status for deployment {0} '
'[status={1}]'.format(deployment_id, status))
def _create_deployment_environment(self, deployment, deployment_plan):
wf_id = 'create_deployment_environment'
deployment_env_creation_task_name = \
'cloudify_system_workflows.deployment_environment.create'
kwargs = {
DEPLOYMENT_PLUGINS_TO_INSTALL: deployment_plan[
DEPLOYMENT_PLUGINS_TO_INSTALL],
'workflow_plugins_to_install': deployment_plan[
'workflow_plugins_to_install'],
'policy_configuration': {
'policy_types': deployment_plan['policy_types'],
'policy_triggers': deployment_plan['policy_triggers'],
'groups': deployment_plan['groups'],
},
}
self._execute_system_workflow(
deployment, wf_id, deployment_env_creation_task_name, kwargs)
def _delete_deployment_environment(self, deployment_id):
deployment = self.sm.get_deployment(deployment_id)
wf_id = 'delete_deployment_environment'
deployment_env_deletion_task_name = \
'cloudify_system_workflows.deployment_environment.delete'
self._execute_system_workflow(
deployment, wf_id, deployment_env_deletion_task_name, timeout=300)
def _check_for_active_executions(self, deployment_id, force,
transient_workers_config):
is_transient_workers_enabled = transient_workers_config['enabled']
def _get_running_executions(deployment_id=None, include_system=True):
executions = self.executions_list(
deployment_id=deployment_id,
is_include_system_workflows=include_system)
running = [
e.id for e in executions if
self.sm.get_execution(e.id).status
not in models.Execution.END_STATES]
return running
# validate no execution is currently in progress
if not force:
running = _get_running_executions(deployment_id)
if len(running) > 0:
raise manager_exceptions.ExistingRunningExecutionError(
'The following executions are currently running for this '
'deployment: {0}. To execute this workflow anyway, pass '
'"force=true" as a query parameter to this request'.format(
running))
elif is_transient_workers_enabled:
raise manager_exceptions.ExistingRunningExecutionError(
'Forcing parallel executions in a single deployment is '
'disabled in transient deployment workers mode')
if is_transient_workers_enabled:
global_parallel_executions_limit = \
transient_workers_config['global_parallel_executions_limit']
if global_parallel_executions_limit != \
LIMITLESS_GLOBAL_PARALLEL_EXECUTIONS_VALUE:
running = _get_running_executions()
if len(running) >= global_parallel_executions_limit:
raise manager_exceptions. \
GlobalParallelRunningExecutionsLimitReachedError(
                            'New workflows may not be executed at this time, '
'because global parallel running executions limit '
'has been reached ({0} running executions; '
'global limit {1}). Please try again soon'
.format(len(running),
global_parallel_executions_limit))
def _get_transient_deployment_workers_mode_config(self):
provider_context = self.sm.get_provider_context().context
transient_workers_config = provider_context['cloudify'].get(
'transient_deployment_workers_mode', {})
# setting defaults if missing
transient_workers_config['enabled'] = \
transient_workers_config.get('enabled', False)
transient_workers_config['global_parallel_executions_limit'] = \
transient_workers_config.get(
'global_parallel_executions_limit',
LIMITLESS_GLOBAL_PARALLEL_EXECUTIONS_VALUE)
return transient_workers_config
@staticmethod
def _get_only_user_execution_parameters(execution_parameters):
return {k: v for k, v in execution_parameters.iteritems()
if not k.startswith('__')}
def teardown_blueprints_manager(exception):
# print "tearing down blueprints manager!"
pass
# What we need to access this manager in Flask
def get_blueprints_manager():
"""
Get the current blueprints manager
or create one if none exists for the current app context
"""
if 'blueprints_manager' not in g:
g.blueprints_manager = BlueprintsManager()
maybe_register_teardown(current_app, teardown_blueprints_manager)
return g.blueprints_manager
| apache-2.0 | -8,425,815,242,610,895,000 | 43.691076 | 79 | 0.595341 | false |
bdeak/taskmgr | fabfile/execute/install_package.py | 1 | 2064 | from fabric.api import *
import re
import os.path
import logging
import utils.log
l = logging.getLogger()
l = utils.log.CustomLogAdapter(l, None)
@task(default=True)
def check(input_params, cluster):
""" Install a given version of a given package
Can support multiple backends
input_params parameter is a string, with the following fields:
package:version
The backend to be used for package management is autodetected.
For adapting to various systems this needs to be extended.
"""
# split up the input_params, and make sense of it
m = re.search("^([^:]+)(?::(.+))?$", input_params)
if not m:
raise AttributeError("The given input_params '%s' doesn't match the requirements!" % input_params)
package = m.group(1)
version = m.group(2) if m.group(2) else None
# auto detect the backend
try:
result = run("test -e /usr/bin/apt-get")
except:
return False
if result.failed:
raise RuntimeError("%s: Failed to execute remote command for detecting backend" % env.command)
if result.return_code == 0:
backend = "apt_get"
else:
        # check for other backends - not yet implemented
raise SystemError("%s: only backend 'apt_get' is currently supported." % env.command)
backends = { 'apt_get': install_package_apt_get }
if not backend in backends.keys():
raise ValueError("function for detected backend '%s' is not found!" % backend)
return backends[backend](package, version)
def install_package_apt_get(package, version):
""" Install the package, internal function, not exposed via @task """
if version is None:
# just install the package
command = "apt-get -qq update && apt-get -qq install -y %s" % package
else:
command = "apt-get -qq update && apt-get -qq install -y %s=%s" % (package, version)
try:
result = sudo(command)
except:
return False
if result.succeeded:
return True
else:
return False
| gpl-2.0 | -5,122,457,050,969,303,000 | 28.913043 | 106 | 0.637597 | false |
okolisny/integration_tests | scripts/post_jenkins_result.py | 1 | 2181 | #!/usr/bin/env python2
import json
import os
import os.path
from datetime import datetime
from artifactor.plugins.post_result import test_report
from cfme.utils import read_env
from cfme.utils.path import project_path
from cfme.utils.trackerbot import post_jenkins_result
job_name = os.environ['JOB_NAME']
number = int(os.environ['BUILD_NUMBER'])
date = str(datetime.now())
# reduce returns to bools for easy logic
runner_src = read_env(project_path.join('.jenkins_runner_result'))
runner_return = runner_src.get('RUNNER_RETURN', '1') == '0'
test_return = runner_src.get('TEST_RETURN', '1') == '0'
# 'stream' environ is set by jenkins for all stream test jobs
# but not in the template tester
if job_name not in ('template-tester', 'template-tester-openstack',
'template-tester-rhevm', 'template-tester-virtualcenter'):
# try to pull out the appliance template name
template_src = read_env(project_path.join('.appliance_template'))
template = template_src.get('appliance_template', 'Unknown')
stream = os.environ['stream']
else:
tester_src = read_env(project_path.join('.template_tester'))
stream = tester_src['stream']
template = tester_src['appliance_template']
if test_report.check():
with test_report.open() as f:
artifact_report = json.load(f)
else:
raise RuntimeError('Unable to post to jenkins without test report: '
'{} does not exist!'.format(test_report.strpath))
if runner_return and test_return:
build_status = 'success'
elif runner_return:
build_status = 'unstable'
else:
build_status = 'failed'
result_attrs = ('job_name', 'number', 'stream', 'date', 'template',
'build_status', 'artifact_report')
# pack the result attr values into the jenkins post
post_jenkins_result(*[eval(attr) for attr in result_attrs])
# vain output padding calculation
# get len of longest string, pad with an extra space to make the output pretty
max_len = len(max(result_attrs, key=len)) + 1
# now print all the attrs so we can see what we posted (and *that* we
# posted) in the jenkins log
for attr in result_attrs[:-1]:
print('{:>{width}}: {}'.format(attr, eval(attr), width=max_len))
| gpl-2.0 | 5,412,878,123,027,363,000 | 34.177419 | 78 | 0.703347 | false |
lukeolson/clean-latex-to-arxiv | parxiv.py | 1 | 12460 | #! /usr/bin/env python
from __future__ import print_function
import glob
import re
import errno
import os
import io
import time
import shutil
import tempfile
import subprocess
import ply.lex
# Python2 FileNotFoundError support
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
"""
usage:
python parxiv.py file.tex
this will make arxiv-somelongdatestring with
- file_strip.tex (where includegraphics paths are stripped)
- file_strip.bbl (you should have the .bbl file already)
- all figures
- the class file if custom
- the bib style if custom
- extra files listed in extra.txt
"""
def strip_comments(source):
"""
from https://gist.github.com/dzhuang/dc34cdd7efa43e5ecc1dc981cc906c85
"""
tokens = (
'PERCENT', 'BEGINCOMMENT', 'ENDCOMMENT',
'BACKSLASH', 'CHAR', 'BEGINVERBATIM',
'ENDVERBATIM', 'NEWLINE', 'ESCPCT',
'MAKEATLETTER', 'MAKEATOTHER',
)
states = (
('makeatblock', 'exclusive'),
('makeatlinecomment', 'exclusive'),
('linecomment', 'exclusive'),
('commentenv', 'exclusive'),
('verbatim', 'exclusive')
)
# Deal with escaped backslashes, so we don't
# think they're escaping %
def t_BACKSLASH(t):
r"\\\\"
return t
# Leaving all % in makeatblock
def t_MAKEATLETTER(t):
r"\\makeatletter"
t.lexer.begin("makeatblock")
return t
# One-line comments
def t_PERCENT(t):
r"\%"
t.lexer.begin("linecomment")
# Escaped percent signs
def t_ESCPCT(t):
r"\\\%"
return t
# Comment environment, as defined by verbatim package
def t_BEGINCOMMENT(t):
r"\\begin\s*{\s*comment\s*}"
t.lexer.begin("commentenv")
#Verbatim environment (different treatment of comments within)
def t_BEGINVERBATIM(t):
r"\\begin\s*{\s*verbatim\s*}"
t.lexer.begin("verbatim")
return t
#Any other character in initial state we leave alone
def t_CHAR(t):
r"."
return t
def t_NEWLINE(t):
r"\n"
return t
# End comment environment
def t_commentenv_ENDCOMMENT(t):
r"\\end\s*{\s*comment\s*}"
#Anything after \end{comment} on a line is ignored!
t.lexer.begin('linecomment')
# Ignore comments of comment environment
def t_commentenv_CHAR(t):
r"."
pass
def t_commentenv_NEWLINE(t):
r"\n"
pass
#End of verbatim environment
def t_verbatim_ENDVERBATIM(t):
r"\\end\s*{\s*verbatim\s*}"
t.lexer.begin('INITIAL')
return t
#Leave contents of verbatim environment alone
def t_verbatim_CHAR(t):
r"."
return t
def t_verbatim_NEWLINE(t):
r"\n"
return t
#End a % comment when we get to a new line
def t_linecomment_ENDCOMMENT(t):
r"\n"
t.lexer.begin("INITIAL")
        # Newline at the end of a line comment is preserved.
return t
#Ignore anything after a % on a line
def t_linecomment_CHAR(t):
r"."
pass
def t_makeatblock_MAKEATOTHER(t):
r"\\makeatother"
t.lexer.begin('INITIAL')
return t
def t_makeatblock_BACKSLASH(t):
r"\\\\"
return t
# Escaped percent signs in makeatblock
def t_makeatblock_ESCPCT(t):
r"\\\%"
return t
    # preserve % in makeatblock
def t_makeatblock_PERCENT(t):
r"\%"
t.lexer.begin("makeatlinecomment")
return t
def t_makeatlinecomment_NEWLINE(t):
r"\n"
t.lexer.begin('makeatblock')
return t
# Leave contents of makeatblock alone
def t_makeatblock_CHAR(t):
r"."
return t
def t_makeatblock_NEWLINE(t):
r"\n"
return t
# For bad characters, we just skip over it
def t_ANY_error(t):
t.lexer.skip(1)
lexer = ply.lex.lex()
lexer.input(source)
return u"".join([tok.value for tok in lexer])
def find_class(source):
"""
(unused)
look for \documentclass[review]{siamart}
then return 'siamart.cls'
"""
classname = re.search(r'\\documentclass.*{(.*)}', source)
if classname:
classname = classname.group(1) + '.cls'
return classname
def find_bibstyle(source):
"""
look for \ bibliographystyle{siamplain}
then return 'siamplain.bst'
"""
bibstylename = re.search(r'\\bibliographystyle{(.*)}', source)
if bibstylename:
bibstylename = bibstylename.group(1) + '.bst'
return bibstylename
def find_figs(source):
"""
look for \graphicspath{{subdir}} (a single subdir)
find figures in \includegraphics[something]{PATH/filename.ext}
\includegraphics{PATH/filename.ext}
    make them \includegraphics[something]{PATH_filename.ext}
                \includegraphics{PATH_filename.ext}
later: copy figures to arxivdir
"""
findgraphicspath = re.search(r'\\graphicspath{(.*)}', source)
if findgraphicspath:
graphicspaths = findgraphicspath.group(1)
graphicspaths = re.findall('{(.*?)}', graphicspaths)
else:
graphicspaths = []
# keep a list of (figname, figpath)
figlist = []
def repl(m):
figpath = ''
figname = os.path.basename(m.group(2))
figpath = os.path.dirname(m.group(2)).lstrip('./')
if figpath:
newfigname = figpath.replace(' ', '_').replace('/', '_')+'_'+figname
else:
newfigname = figname
newincludegraphics = m.group(1) + newfigname + m.group(3)
figlist.append((figname, figpath, newfigname))
return newincludegraphics
source = re.sub(r'(\\includegraphics.*?{)(.*?)(})', repl, source)
return figlist, source, graphicspaths
def flatten(source):
"""
    replace arguments of include{} and input{}
only input can be nested
include adds a clearpage
includeonly not supported
"""
def repl(m):
inputname = m.group(2)
if not os.path.isfile(inputname):
inputname = inputname + '.tex'
with io.open(inputname, encoding='utf-8') as f:
newtext = f.read()
newtext = re.sub(r'(\\input{)(.*?)(})', repl, newtext)
return newtext
def repl_include(m):
inputname = m.group(2)
if not os.path.isfile(inputname):
inputname = inputname + '.tex'
with io.open(inputname, encoding='utf-8') as f:
newtext = f.read()
newtext = '\\clearpage\n' + newtext
newtext = re.sub(r'(\\input{)(.*?)(})', repl, newtext)
newtext += '\\clearpage\n'
return newtext
dest = re.sub(r'(\\include{)(.*?)(})', repl_include, source, True)
dest = re.sub(r'(\\input{)(.*?)(})', repl, dest)
return dest
def main(fname):
print('[parxiv] reading %s' % fname)
with io.open(fname, encoding='utf-8') as f:
source = f.read()
print('[parxiv] stripping comments')
source = strip_comments(source)
print('[parxiv] flattening source')
source = flatten(source)
print('[parxiv] stripping comments again')
source = strip_comments(source)
print('[parxiv] finding figures...')
figlist, source, graphicspaths = find_figs(source)
# print('[parxiv] finding article class and bib style')
# localbibstyle = find_bibstyle(source)
print('[parxiv] making directory', end='')
dirname = 'arxiv-' + time.strftime('%c').replace(' ', '-')
dirname = dirname.replace(':', '-')
print(' %s' % dirname)
os.makedirs(dirname)
print('[parxiv] copying class/style files')
# shutil.copy2(localclass, os.path.join(dirname, localclass))
# if localbibstyle is not None:
# shutil.copy2(localbibstyle, os.path.join(dirname, localbibstyle))
for bst in glob.glob('*.bst'):
shutil.copy2(bst, os.path.join(dirname, bst))
for sty in glob.glob('*.sty'):
shutil.copy2(sty, os.path.join(dirname, sty))
for cls in glob.glob('*.cls'):
shutil.copy2(cls, os.path.join(dirname, cls))
print('[parxiv] copying figures')
for figname, figpath, newfigname in figlist:
allpaths = graphicspaths
allpaths += ['./']
_, ext = os.path.splitext(figname)
if ext == '':
figname += '.pdf'
newfigname += '.pdf'
if figpath:
allpaths = [os.path.join(p, figpath) for p in allpaths]
for p in allpaths:
#if 'quartz' in newfigname:
# print(p)
src = os.path.join(p, figname)
dest = os.path.join(dirname, os.path.basename(newfigname))
try:
shutil.copy2(src, dest)
except IOError:
# attempts multiple graphics paths
pass
# copy bbl file
print('[parxiv] copying bbl file')
bblfile = fname.replace('.tex', '.bbl')
newbblfile = fname.replace('.tex', '_strip.bbl')
bblflag = False
try:
shutil.copy2(bblfile, os.path.join(dirname, newbblfile))
bblflag = True
except FileNotFoundError:
print(' ...skipping, not found')
# copy extra files
try:
with io.open('extra.txt', encoding='utf-8') as f:
inputsource = f.read()
except IOError:
print('[parxiv] copying no extra files')
else:
print('[parxiv] copying extra file(s): ', end='')
for f in inputsource.split('\n'):
if os.path.isfile(f):
localname = os.path.basename(f)
print(' %s' % localname, end='')
shutil.copy2(f, os.path.join(dirname, localname))
print('\n')
newtexfile = fname.replace('.tex', '_strip.tex')
print('[parxiv] writing %s' % newtexfile)
with io.open(
os.path.join(dirname, newtexfile), 'w') as fout:
fout.write(source)
print('[parxiv] attempting to generate bbl file')
if not bblflag:
# attempt to generate
# with tempfile.TemporaryDirectory() as d:
# python2 support
try:
d = tempfile.mkdtemp()
try:
args = ['pdflatex',
'-interaction', 'nonstopmode',
'-recorder',
'-output-directory', d,
newtexfile]
# python2 support
try:
from subprocess import DEVNULL
except ImportError:
DEVNULL = open(os.devnull, 'wb')
p = subprocess.Popen(args,
cwd=dirname,
stdin=DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = p.communicate()
# copy .bib files
for bib in glob.glob('*.bib'):
shutil.copy2(bib, os.path.join(d, bib))
for bib in glob.glob('*.bst'):
shutil.copy2(bib, os.path.join(d, bib))
args = ['bibtex', newtexfile.replace('.tex', '.aux')]
p = subprocess.Popen(args,
cwd=d,
stdin=DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = p.communicate()
except OSError as e:
raise RuntimeError(e)
bblfile = newtexfile.replace('.tex', '.bbl')
if os.path.isfile(os.path.join(d, bblfile)):
print(' ... generated')
shutil.copy2(os.path.join(d, bblfile),
os.path.join(dirname, bblfile))
else:
print(' ... could not generate')
finally:
try:
shutil.rmtree(d)
except OSError as e:
if e.errno != errno.ENOENT:
raise
return source
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('usage: python parxiv.py <filename.tex>')
sys.exit(-1)
fname = sys.argv[1]
source = main(fname)
| mit | -5,527,309,183,598,388,000 | 27.190045 | 80 | 0.54374 | false |
sameenjalal/mavenize-beta | mavenize/apps/item/models.py | 1 | 2128 | from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.exceptions import ObjectDoesNotExist
class Item(models.Model):
item_type = models.CharField(max_length=30, default="")
four_star = models.IntegerField(default=0)
three_star = models.IntegerField(default=0)
two_star = models.IntegerField(default=0)
one_star = models.IntegerField(default=0)
reviews = models.IntegerField(default=0)
bookmarks = models.IntegerField(default=0)
def __unicode__(self):
return str(self.id)
def get_popularity(self):
"""
Returns the Popularity model for this item.
"""
if not hasattr(self, '_popularity_cache'):
try:
self._popularity_cache = Popularity.objects.get(
item__id__exact=self.id)
self._popularity_cache.item = self
except:
raise ObjectDoesNotExist
return self._popularity_cache
def get_rating(self):
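        # weighted average of the star counts; under Python 2 integer division
        # the result is truncated, and this raises ZeroDivisionError when
        # there are no votes yet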
return (self.four_star*4 + self.three_star*3 +
self.two_star*2 + self.one_star) / self.get_votes()
def get_votes(self):
return (self.four_star + self.three_star + self.two_star +
self.one_star)
class Link(models.Model):
item = models.ForeignKey(Item)
partner = models.CharField(max_length=20)
url = models.CharField(max_length=200)
def __unicode__(self):
return self.url
class Popularity(models.Model):
item = models.OneToOneField(Item, primary_key=True)
today = models.IntegerField(default=0, db_index=True)
week = models.IntegerField(default=0, db_index=True)
month = models.IntegerField(default=0, db_index=True)
alltime = models.IntegerField(default=0, db_index=True)
class Meta:
verbose_name_plural = "Popularities"
def __unicode__(self):
return "Item #%s: %s" % (self.item.id, self.alltime)
@receiver(post_save, sender=Item)
def create_item(sender, instance, created, **kwargs):
if created:
Popularity.objects.create(item=instance)
| mit | 1,467,266,724,365,762,000 | 32.25 | 67 | 0.648496 | false |
skosukhin/spack | lib/spack/spack/schema/__init__.py | 1 | 1512 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""This module contains jsonschema files for all of Spack's YAML formats.
"""
from llnl.util.lang import list_modules
# Automatically bring in all sub-modules
__all__ = []
for mod in list_modules(__path__[0]):
__import__('%s.%s' % (__name__, mod))
__all__.append(mod)
| lgpl-2.1 | 1,307,680,615,969,989,000 | 44.818182 | 78 | 0.666667 | false |
shifvb/hash_photos | _gui/main_gui.py | 1 | 9930 | import os
import time
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
from tkinter.font import Font
from _gui.get_geometry import get_center_geometry
from _tools.get_hash import get_hash
from _tools.is_valid_file import is_vaild_file
from _tools.move_file import move_file
__version__ = (1, 2, 1)
class HashPhotoApp(object):
def __init__(self):
self.root = tk.Tk()
self.select_dir_entry_var = tk.StringVar()
self.choose_hash_method_var = tk.IntVar(value=0)
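        # 0 = md5, 1 = sha1, 2 = sha256, 3 = sha512 (matches the radio buttons below)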
self.log_text_area_var = tk.StringVar()
        # file-type checkbox variables
self.jpg_check_btn_var = tk.IntVar(value=1)
self.png_check_btn_var = tk.IntVar(value=1)
self.bmp_check_btn_var = tk.IntVar(value=1)
self.gif_check_btn_var = tk.IntVar(value=1)
self.big_font = Font(size=25, )
self.mid_font = Font(size=16, )
self.init_gui()
tk.mainloop()
def init_gui(self):
self.root.geometry(get_center_geometry(self.root))
self.root.title("hash photos v{}.{}.{}".format(*__version__))
        # 0. directory chooser for the images/files to rename
select_dir_frame = tk.Frame(self.root)
select_dir_frame.grid(row=0, column=0, columnspan=2)
select_dir_entry = tk.Entry(select_dir_frame, width=59, textvariable=self.select_dir_entry_var)
select_dir_entry.configure(font=self.mid_font)
select_dir_entry.grid(row=0, column=0, padx=5)
select_dir_btn = tk.Button(select_dir_frame, text="select dir", command=self.select_dir_btn_callback)
select_dir_btn.configure(font=self.mid_font) #
select_dir_btn.grid(row=0, column=1)
        # 1. hash method selection panel
choose_hash_method_frame = tk.LabelFrame(self.root, text="choose hash method", font=self.mid_font)
choose_hash_method_frame.grid(row=1, column=0)
md5_radio_btn = tk.Radiobutton(choose_hash_method_frame, variable=self.choose_hash_method_var, value=0)
md5_radio_btn.pack(side=tk.LEFT)
md5_label = tk.Label(choose_hash_method_frame, text="md5", font=self.mid_font, )
md5_label.bind("<Button-1>", lambda *args: self.choose_hash_method_var.set(0))
md5_label.pack(side=tk.LEFT)
sha1_radio_btn = tk.Radiobutton(choose_hash_method_frame, variable=self.choose_hash_method_var, value=1)
sha1_radio_btn.pack(side=tk.LEFT)
sha1_label = tk.Label(choose_hash_method_frame, text="sha1", font=self.mid_font)
sha1_label.bind("<Button-1>", lambda *args: self.choose_hash_method_var.set(1))
sha1_label.pack(side=tk.LEFT)
sha256_radio_btn = tk.Radiobutton(choose_hash_method_frame, variable=self.choose_hash_method_var, value=2)
sha256_radio_btn.pack(side=tk.LEFT)
sha256_label = tk.Label(choose_hash_method_frame, text="sha256", font=self.mid_font)
sha256_label.bind("<Button-1>", lambda *args: self.choose_hash_method_var.set(2))
sha256_label.pack(side=tk.LEFT)
sha512_radio_btn = tk.Radiobutton(choose_hash_method_frame, variable=self.choose_hash_method_var, value=3)
sha512_radio_btn.pack(side=tk.LEFT)
sha512_label = tk.Label(choose_hash_method_frame, text="sha512", font=self.mid_font)
sha512_label.bind("<Button-1>", lambda *args: self.choose_hash_method_var.set(3))
sha512_label.pack(side=tk.LEFT)
        # 2. file type selection panel
choose_file_type_frame = tk.LabelFrame(self.root, text="choose file type", font=self.mid_font)
choose_file_type_frame.grid(row=1, column=1)
jpg_check_btn = tk.Checkbutton(choose_file_type_frame, variable=self.jpg_check_btn_var)
jpg_check_btn.pack(side=tk.LEFT)
jpg_label = tk.Label(choose_file_type_frame, text="jpg/jpeg", font=self.mid_font)
jpg_label.bind("<Button-1>", lambda *args: self.jpg_check_btn_var.set(1 - self.jpg_check_btn_var.get()))
jpg_label.pack(side=tk.LEFT)
png_check_btn = tk.Checkbutton(choose_file_type_frame, variable=self.png_check_btn_var)
png_check_btn.pack(side=tk.LEFT)
png_label = tk.Label(choose_file_type_frame, text="png", font=self.mid_font)
png_label.bind("<Button-1>", lambda *args: self.png_check_btn_var.set(1 - self.png_check_btn_var.get()))
png_label.pack(side=tk.LEFT)
bmp_check_btn = tk.Checkbutton(choose_file_type_frame, variable=self.bmp_check_btn_var)
bmp_check_btn.pack(side=tk.LEFT)
bmp_label = tk.Label(choose_file_type_frame, text="bmp", font=self.mid_font)
bmp_label.bind("<Button-1>", lambda *args: self.bmp_check_btn_var.set(1 - self.bmp_check_btn_var.get()))
bmp_label.pack(side=tk.LEFT)
gif_check_btn = tk.Checkbutton(choose_file_type_frame, variable=self.gif_check_btn_var)
gif_check_btn.pack(side=tk.LEFT)
gif_label = tk.Label(choose_file_type_frame, text="gif", font=self.mid_font)
gif_label.bind("<Button-1>", lambda *args: self.gif_check_btn_var.set(1 - self.gif_check_btn_var.get()))
gif_label.pack(side=tk.LEFT)
        # log area showing the current status
log_frame = tk.Frame(self.root)
log_frame.grid(row=2, column=0, columnspan=2, sticky=tk.NSEW)
self.log_text_area = tk.Text(log_frame, state=tk.DISABLED, width=70, height=21, font=self.mid_font)
self.log_text_area.configure(wrap='none')
self.log_text_area.grid(row=0, column=0, sticky=tk.NSEW)
log_vert_scrollbar = tk.Scrollbar(log_frame)
log_vert_scrollbar.grid(row=0, column=1, sticky=tk.NS)
log_vert_scrollbar.configure(command=self.log_text_area.yview)
self.log_text_area.configure(yscrollcommand=log_vert_scrollbar.set)
        log_hori_scrollbar = tk.Scrollbar(log_frame, orient=tk.HORIZONTAL)
        log_hori_scrollbar.grid(row=1, column=0, sticky=tk.EW)
        log_hori_scrollbar.configure(command=self.log_text_area.xview)
        self.log_text_area.configure(xscrollcommand=log_hori_scrollbar.set)
        # buttons below the log area
buttons_frame = tk.Frame(self.root)
buttons_frame.grid(row=3, column=0, sticky=tk.EW, columnspan=2)
_padx = 65
_width = 10
        # run the renaming
rename_btn = tk.Button(buttons_frame, text="run", command=self.rename_file_btn_callback)
rename_btn.config(font=self.mid_font, width=_width)
rename_btn.grid(row=0, column=0, padx=_padx, )
        # copy the log
copy_log_btn = tk.Button(buttons_frame, text="copy log", command=self.copy_log_btn_callback)
copy_log_btn.config(font=self.mid_font, width=_width)
copy_log_btn.grid(row=0, column=1, padx=_padx)
        # clear the log
clear_log_btn = tk.Button(buttons_frame, text="clear log", command=self.clear_log_btn_callback)
clear_log_btn.config(font=self.mid_font, width=_width)
clear_log_btn.grid(row=0, column=2, padx=_padx)
def select_dir_btn_callback(self):
"""选择文件夹按钮回调函数"""
self.select_dir_entry_var.set(filedialog.askdirectory())
def rename_file_btn_callback(self):
"""重命名按钮回调函数"""
_workspace = self.select_dir_entry_var.get()
        # end the callback immediately if no directory was chosen
if _workspace == "":
messagebox.showinfo(title="info", message="Please choose directory!")
return
        # show an error if the directory does not exist
if not os.path.isdir(_workspace):
messagebox.showerror(title="error", text="Directory\n{}\nnot exists!")
return
        # make the log area editable
self.log_text_area.configure(state=tk.NORMAL)
self.log_text_area.insert(tk.END, "[{}] rename started\n".format(time.asctime()))
        # build the list of files that need renaming
abs_names = [os.path.join(_workspace, _) for _ in os.listdir(_workspace)]
abs_file_names = [_ for _ in abs_names if is_vaild_file(self, _)]
        # iterate over the files
for abs_filename in abs_file_names:
            # compute the new file name from the hash
new_abs_filename = os.path.join(_workspace,
get_hash(self, abs_filename) + os.path.splitext(abs_filename)[1])
try:
                # skip when the computed name equals the current one (file already renamed)
                if abs_filename == new_abs_filename:
                    self.log_text_area.insert(tk.END, "[INFO] already renamed, skipping ({})\n".format(abs_filename))
                    continue
                # a different file already has the target name: skip this one and move it into ./backup/{time}
                if os.path.exists(new_abs_filename):
                    self.log_text_area.insert(tk.END, "[WARN] target file name already exists, skipping and moving into the backup folder ({})\n".format(abs_filename))
move_file(abs_filename)
continue
                # rename the file
                os.rename(abs_filename, new_abs_filename)
                self.log_text_area.insert(tk.END, "[INFO] renamed: {} -> {}\n".format(abs_filename, new_abs_filename))
except IOError as err:
self.log_text_area.insert(tk.END, "[ERROR] {}\n".format(err))
        # make the log area read-only again
self.log_text_area.configure(state=tk.DISABLED)
def copy_log_btn_callback(self):
"""复制日志到剪贴板"""
self.root.clipboard_clear()
self.root.clipboard_append(self.log_text_area.get("1.0", tk.END))
messagebox.showinfo(title="info", message="Log has been copied to clipboard")
def clear_log_btn_callback(self):
"""清楚日志按钮"""
if messagebox.askyesno(title="clear log", message="Are you sure?"):
self.log_text_area.config(state=tk.NORMAL)
self.log_text_area.delete("1.0", tk.END)
self.log_text_area.config(state=tk.DISABLED)
| apache-2.0 | -435,194,561,010,551,500 | 49.688172 | 114 | 0.630781 | false |
ericspod/Eidolon | tests/meshtests/billboardtest.py | 1 | 1140 | # Eidolon Biomedical Framework
# Copyright (C) 2016-8 Eric Kerfoot, King's College London, all rights reserved
#
# This file is part of Eidolon.
#
# Eidolon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eidolon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program (LICENSE.txt). If not, see <http://www.gnu.org/licenses/>
from eidolon import vec3, FT_BB_POINT, BoundBox, PyVertexBuffer
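# note: 'mgr' is not imported here; this script assumes it is run inside Eidolon,
# which provides the scene manager as 'mgr' in the script's namespace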
nodes=[vec3(0,0,0),vec3(10.0/3,0,0),vec3(20.0/3,0,0),vec3(10,0,0)]
fig=mgr.callThreadSafe(mgr.scene.createFigure,"testBB","Default",FT_BB_POINT)
vb=PyVertexBuffer(nodes)
fig.fillData(vb,None,True)
mgr.controller.setSeeAllBoundBox(BoundBox(nodes))
mgr.repaint()
| gpl-3.0 | 3,215,951,023,605,023,000 | 41.846154 | 79 | 0.742982 | false |
mancoast/CPythonPyc_test | fail/314_test_normalization.py | 1 | 3162 | from test.support import run_unittest, open_urlresource
import unittest
import sys
import os
from unicodedata import normalize, unidata_version
TESTDATAFILE = "NormalizationTest.txt"
TESTDATAURL = "http://www.unicode.org/Public/" + unidata_version + "/ucd/" + TESTDATAFILE
if os.path.exists(TESTDATAFILE):
f = open(TESTDATAFILE, encoding='utf-8')
l = f.readline()
f.close()
if not unidata_version in l:
os.unlink(TESTDATAFILE)
class RangeError(Exception):
pass
def NFC(str):
return normalize("NFC", str)
def NFKC(str):
return normalize("NFKC", str)
def NFD(str):
return normalize("NFD", str)
def NFKD(str):
return normalize("NFKD", str)
def unistr(data):
data = [int(x, 16) for x in data.split(" ")]
for x in data:
if x > sys.maxunicode:
raise RangeError
return "".join([chr(x) for x in data])
class NormalizationTest(unittest.TestCase):
def test_main(self):
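        # each data line in NormalizationTest.txt carries five columns c1..c5;
        # the checks below follow the Unicode conformance rules, e.g.
        # c2 == NFC(c1) == NFC(c2) == NFC(c3) and c5 == NFD(c4) == NFD(c5)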
part1_data = {}
# Hit the exception early
try:
open_urlresource(TESTDATAURL, encoding="utf-8")
except IOError:
self.skipTest("Could not retrieve " + TESTDATAURL)
for line in open_urlresource(TESTDATAURL, encoding="utf-8"):
if '#' in line:
line = line.split('#')[0]
line = line.strip()
if not line:
continue
if line.startswith("@Part"):
part = line.split()[0]
continue
try:
c1,c2,c3,c4,c5 = [unistr(x) for x in line.split(';')[:-1]]
except RangeError:
# Skip unsupported characters;
                # try at least adding c1 if we are in part1
if part == "@Part1":
try:
c1 = unistr(line.split(';')[0])
except RangeError:
pass
else:
part1_data[c1] = 1
continue
# Perform tests
self.assertTrue(c2 == NFC(c1) == NFC(c2) == NFC(c3), line)
self.assertTrue(c4 == NFC(c4) == NFC(c5), line)
self.assertTrue(c3 == NFD(c1) == NFD(c2) == NFD(c3), line)
self.assertTrue(c5 == NFD(c4) == NFD(c5), line)
self.assertTrue(c4 == NFKC(c1) == NFKC(c2) == \
NFKC(c3) == NFKC(c4) == NFKC(c5),
line)
self.assertTrue(c5 == NFKD(c1) == NFKD(c2) == \
NFKD(c3) == NFKD(c4) == NFKD(c5),
line)
# Record part 1 data
if part == "@Part1":
part1_data[c1] = 1
# Perform tests for all other data
for c in range(sys.maxunicode+1):
X = chr(c)
if X in part1_data:
continue
self.assertTrue(X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X), c)
def test_bug_834676(self):
# Check for bug 834676
normalize('NFC', '\ud55c\uae00')
def test_main():
run_unittest(NormalizationTest)
if __name__ == "__main__":
test_main()
| gpl-3.0 | -3,841,246,843,036,727,300 | 29.699029 | 89 | 0.504428 | false |
kaltura/server | alpha/scripts/utils/apiGrep.py | 1 | 4097 | #!/usr/bin/python
from optparse import OptionParser
import sys
import os
def isLineLogStart(curLine):
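	# a log line is assumed to start with a "YYYY-MM-DD HH:MM:SS" timestamp;
	# check the separator characters at their fixed positions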
if len(curLine) < 20:
return False
if (curLine[4] == '-' and curLine[7] == '-' and curLine[10] == ' ' and
curLine[13] == ':' and curLine[16] == ':'):
return True
return False
def parseCmdLine():
parser = OptionParser(usage='%prog [OPTION]... PATTERN [FILE]...', add_help_option=False)
parser.add_option("--help", help="display this help and exit", action="help")
parser.add_option("-h", "--no-filename",
action="store_true", dest="noFilename", default=False,
help="suppress the file name prefix on output")
parser.add_option("-H", "--with-filename",
action="store_true", dest="withFilename", default=False,
help="print the file name for each match")
parser.add_option("--label", dest="stdinLabel", default="(standard input)", metavar="LABEL",
help="use LABEL as the standard input file name prefix")
parser.add_option("-i", "--ignore-case",
action="store_true", dest="ignoreCase", default=False,
help="ignore case distinctions")
parser.add_option("--match-any",
action="store_true", dest="matchAny", default=False,
help="match the pattern against any line (default is to match only starting log lines)")
parser.add_option("-v", "--invert-match",
action="store_true", dest="invertMatch", default=False,
help="select non-matching lines")
return parser.parse_args()
def shellQuote(s):
return "'" + s.replace("'", "'\\''") + "'"
def matchCaseSensitive(pattern, block):
return pattern in block
def matchCaseInsensitive(pattern, block):
return pattern in block.lower()
def processFileMatchStart(inputFile, pattern, prefix):
output = False
for curLine in inputFile:
logStart = isLineLogStart(curLine)
if output:
if not logStart:
print prefix + curLine.rstrip()
continue
output = False
if logStart and match(pattern, curLine):
print prefix + curLine.rstrip()
output = True
def processFileMatchAny(inputFile, pattern, prefix):
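	# accumulate each log entry (a starting line plus its continuation lines)
	# into a block, capped at ~10MB, and match the pattern against the whole block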
block = ''
for curLine in inputFile:
if isLineLogStart(curLine):
if match(pattern, block):
print prefix + block.rstrip().replace('\n', '\n' + prefix)
block = curLine
elif len(block) < 10485760: # 10MB
block += curLine
if match(pattern, block):
print prefix + block.rstrip().replace('\n', '\n' + prefix)
# parse the command line
(options, args) = parseCmdLine()
if len(args) < 1:
baseName = os.path.basename(__file__)
print 'Usage: python %s [OPTION]... PATTERN [FILE]...' % baseName
print 'Try `python %s --help` for more information.' % baseName
sys.exit(1)
pattern = args[0]
fileNames = args[1:]
if len(fileNames) == 0:
fileNames = ['-']
if options.withFilename:
outputFileName = True
elif options.noFilename:
outputFileName = False
else:
outputFileName = len(fileNames) > 1
if options.matchAny:
processFile = processFileMatchAny
else:
processFile = processFileMatchStart
if options.ignoreCase:
match = matchCaseInsensitive
pattern = pattern.lower()
else:
match = matchCaseSensitive
if options.invertMatch:
originalMatch = match
match = lambda p, b: not originalMatch(p, b)
prefix = ''
for fileName in fileNames:
if fileName.endswith('.gz'):
# using zcat | python is faster than using python's gzip module
params = [__file__, '--label=' + fileName]
if outputFileName:
params.append('-H')
if options.matchAny:
params.append('--match-any')
if options.ignoreCase:
params.append('-i')
if options.invertMatch:
params.append('-v')
params.append(pattern)
params = ' '.join(map(shellQuote, params))
cmdLine = "gzip -cd %s | python %s" % (shellQuote(fileName), params)
if os.system(cmdLine) != 0:
break
continue
if fileName == '-':
inputFile = sys.stdin
else:
inputFile = file(fileName, 'r')
# get the prefix
if outputFileName:
if fileName == '-':
prefix = options.stdinLabel + ':'
else:
prefix = '%s:' % fileName
try:
processFile(inputFile, pattern, prefix)
except IOError: # broken pipe
sys.exit(1)
| agpl-3.0 | 4,614,323,749,777,093,000 | 28.056738 | 95 | 0.672687 | false |
flgiordano/netcash | +/google-cloud-sdk/lib/surface/sql/instances/patch.py | 1 | 13800 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Updates the settings of a Cloud SQL instance."""
from googlecloudsdk.api_lib.sql import errors
from googlecloudsdk.api_lib.sql import instances
from googlecloudsdk.api_lib.sql import operations
from googlecloudsdk.api_lib.sql import validate
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import resource_printer
from googlecloudsdk.core.console import console_io
from googlecloudsdk.third_party.apitools.base.py import encoding
class _BasePatch(object):
"""Updates the settings of a Cloud SQL instance."""
@classmethod
def Args(cls, parser):
"""Args is called by calliope to gather arguments for this command.
    Please add arguments in alphabetical order, except for the no-* or clear-*
    counterpart of an argument, which may immediately follow that argument.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
parser.add_argument(
'--activation-policy',
required=False,
choices=['ALWAYS', 'NEVER', 'ON_DEMAND'],
help='The activation policy for this instance. This specifies when the '
'instance should be activated and is applicable only when the '
'instance state is RUNNABLE.')
parser.add_argument(
'--assign-ip',
action='store_true',
default=None, # Tri-valued: None => don't change the setting.
help='The instance must be assigned an IP address.')
gae_apps_group = parser.add_mutually_exclusive_group()
gae_apps_group.add_argument(
'--authorized-gae-apps',
type=arg_parsers.ArgList(min_length=1),
metavar='APP',
required=False,
action=arg_parsers.FloatingListValuesCatcher(),
help='A list of App Engine app IDs that can access this instance.')
gae_apps_group.add_argument(
'--clear-gae-apps',
required=False,
action='store_true',
help=('Specified to clear the list of App Engine apps that can access '
'this instance.'))
networks_group = parser.add_mutually_exclusive_group()
networks_group.add_argument(
'--authorized-networks',
type=arg_parsers.ArgList(min_length=1),
metavar='NETWORK',
required=False,
action=arg_parsers.FloatingListValuesCatcher(),
help='The list of external networks that are allowed to connect to the '
'instance. Specified in CIDR notation, also known as \'slash\' '
'notation (e.g. 192.168.100.0/24).')
networks_group.add_argument(
'--clear-authorized-networks',
required=False,
action='store_true',
help='Clear the list of external networks that are allowed to connect '
'to the instance.')
backups_group = parser.add_mutually_exclusive_group()
backups_group.add_argument(
'--backup-start-time',
required=False,
help='The start time of daily backups, specified in the 24 hour format '
'- HH:MM, in the UTC timezone.')
backups_group.add_argument(
'--no-backup',
required=False,
action='store_true',
help='Specified if daily backup should be disabled.')
database_flags_group = parser.add_mutually_exclusive_group()
database_flags_group.add_argument(
'--database-flags',
type=arg_parsers.ArgDict(min_length=1),
metavar='FLAG=VALUE',
required=False,
action=arg_parsers.FloatingListValuesCatcher(),
help='A comma-separated list of database flags to set on the instance. '
'Use an equals sign to separate flag name and value. Flags without '
'values, like skip_grant_tables, can be written out without a value '
'after, e.g., `skip_grant_tables=`. Use on/off for '
'booleans. View the Instance Resource API for allowed flags. '
'(e.g., `--database-flags max_allowed_packet=55555,skip_grant_tables=,'
'log_output=1`)')
database_flags_group.add_argument(
'--clear-database-flags',
required=False,
action='store_true',
help='Clear the database flags set on the instance. '
'WARNING: Instance will be restarted.')
parser.add_argument(
'--enable-bin-log',
action='store_true',
default=None, # Tri-valued: None => don't change the setting.
help='Enable binary log. If backup configuration is disabled, binary '
'log should be disabled as well.')
parser.add_argument(
'--follow-gae-app',
required=False,
help='The App Engine app this instance should follow. It must be in '
'the same region as the instance. '
'WARNING: Instance may be restarted.')
parser.add_argument(
'--gce-zone',
required=False,
help='The preferred Compute Engine zone (e.g. us-central1-a, '
'us-central1-b, etc.). '
'WARNING: Instance may be restarted.')
parser.add_argument(
'instance',
completion_resource='sql.instances',
help='Cloud SQL instance ID.')
parser.add_argument(
'--pricing-plan',
'-p',
required=False,
choices=['PER_USE', 'PACKAGE'],
help='The pricing plan for this instance.')
parser.add_argument(
'--replication',
required=False,
choices=['SYNCHRONOUS', 'ASYNCHRONOUS'],
help='The type of replication this instance uses.')
parser.add_argument(
'--require-ssl',
action='store_true',
default=None, # Tri-valued: None => don't change the setting.
help='mysqld should default to \'REQUIRE X509\' for users connecting '
'over IP.')
parser.add_argument(
'--tier',
'-t',
required=False,
help='The tier of service for this instance, for example D0, D1. '
'WARNING: Instance will be restarted.')
parser.add_argument(
'--enable-database-replication',
action='store_true',
default=None, # Tri-valued: None => don't change the setting.
help='Enable database replication. Applicable only '
'for read replica instance(s). WARNING: Instance will be restarted.')
parser.add_argument(
'--async',
action='store_true',
help='Do not wait for the operation to complete.')
parser.add_argument(
'--diff',
action='store_true',
help='Show what changed as a result of the update.')
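  # Illustrative invocation sketch (instance name and flag values are made
  # up; the available flags are exactly those registered above):
  #
  #   gcloud sql instances patch my-instance \
  #       --tier D1 --backup-start-time 23:00 --enable-bin-log --diff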
def Display(self, args, result):
"""Display prints information about what just happened to stdout.
Args:
args: The same as the args in Run.
result: A dict object representing the operations resource describing the
patch operation if the patch was successful.
"""
if args.diff:
resource_printer.Print(result, 'text')
def _PrintAndConfirmWarningMessage(self, args):
"""Print and confirm warning indicating the effect of applying the patch."""
continue_msg = None
if any([args.tier, args.database_flags, args.clear_database_flags,
args.enable_database_replication is not None]):
continue_msg = ('WARNING: This patch modifies a value that requires '
'your instance to be restarted. Submitting this patch '
'will immediately restart your instance if it\'s running.'
)
else:
if any([args.follow_gae_app, args.gce_zone]):
continue_msg = ('WARNING: This patch modifies the zone your instance '
'is set to run in, which may require it to be moved. '
'Submitting this patch will restart your instance '
'if it is running in a different zone.')
if continue_msg and not console_io.PromptContinue(continue_msg):
raise exceptions.ToolException('canceled by the user.')
def _GetConfirmedClearedFields(self, args, patch_instance):
"""Clear fields according to args and confirm with user."""
cleared_fields = []
if args.clear_gae_apps:
cleared_fields.append('settings.authorizedGaeApplications')
if args.clear_authorized_networks:
cleared_fields.append('settings.ipConfiguration.authorizedNetworks')
if args.clear_database_flags:
cleared_fields.append('settings.databaseFlags')
log.status.write(
'The following message will be used for the patch API method.\n')
log.status.write(
encoding.MessageToJson(
patch_instance, include_fields=cleared_fields)+'\n')
self._PrintAndConfirmWarningMessage(args)
return cleared_fields
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Patch(_BasePatch, base.Command):
"""Updates the settings of a Cloud SQL instance."""
@errors.ReraiseHttpException
def Run(self, args):
"""Updates settings of a Cloud SQL instance using the patch api method.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
A dict object representing the operations resource describing the patch
operation if the patch was successful.
Raises:
HttpException: A http error response was received while executing api
request.
      ToolException: An error other than http error occurred while executing the
command.
"""
sql_client = self.context['sql_client']
sql_messages = self.context['sql_messages']
resources = self.context['registry']
validate.ValidateInstanceName(args.instance)
instance_ref = resources.Parse(args.instance, collection='sql.instances')
original_instance_resource = sql_client.instances.Get(
instance_ref.Request())
patch_instance = instances.InstancesV1Beta3.ConstructInstanceFromArgs(
sql_messages, args, original=original_instance_resource)
patch_instance.project = instance_ref.project
patch_instance.instance = instance_ref.instance
cleared_fields = self._GetConfirmedClearedFields(args, patch_instance)
with sql_client.IncludeFields(cleared_fields):
result = sql_client.instances.Patch(patch_instance)
operation_ref = resources.Create(
'sql.operations',
operation=result.operation,
project=instance_ref.project,
instance=instance_ref.instance,
)
if args.async:
return sql_client.operations.Get(operation_ref.Request())
operations.OperationsV1Beta3.WaitForOperation(
sql_client, operation_ref, 'Patching Cloud SQL instance')
log.UpdatedResource(instance_ref)
if args.diff:
changed_instance_resource = sql_client.instances.Get(
instance_ref.Request())
return resource_printer.ResourceDiff(
original_instance_resource, changed_instance_resource)
return sql_client.instances.Get(instance_ref.Request())
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class PatchBeta(_BasePatch, base.Command):
"""Updates the settings of a Cloud SQL instance."""
@errors.ReraiseHttpException
def Run(self, args):
"""Updates settings of a Cloud SQL instance using the patch api method.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
A dict object representing the operations resource describing the patch
operation if the patch was successful.
Raises:
HttpException: A http error response was received while executing api
request.
      ToolException: An error other than http error occurred while executing the
command.
"""
sql_client = self.context['sql_client']
sql_messages = self.context['sql_messages']
resources = self.context['registry']
validate.ValidateInstanceName(args.instance)
instance_ref = resources.Parse(args.instance, collection='sql.instances')
original_instance_resource = sql_client.instances.Get(
instance_ref.Request())
patch_instance = instances.InstancesV1Beta3.ConstructInstanceFromArgs(
sql_messages, args, original=original_instance_resource)
patch_instance.project = instance_ref.project
patch_instance.name = instance_ref.instance
cleared_fields = self._GetConfirmedClearedFields(args, patch_instance)
with sql_client.IncludeFields(cleared_fields):
result_operation = sql_client.instances.Patch(patch_instance)
operation_ref = resources.Create(
'sql.operations',
operation=result_operation.name,
project=instance_ref.project,
instance=instance_ref.instance,
)
if args.async:
return sql_client.operations.Get(operation_ref.Request())
operations.OperationsV1Beta4.WaitForOperation(
sql_client, operation_ref, 'Patching Cloud SQL instance')
log.UpdatedResource(instance_ref)
if args.diff:
changed_instance_resource = sql_client.instances.Get(
instance_ref.Request())
return resource_printer.ResourceDiff(
original_instance_resource, changed_instance_resource)
return sql_client.instances.Get(instance_ref.Request())
| bsd-3-clause | 5,171,648,414,095,974,000 | 37.547486 | 80 | 0.672681 | false |
tensorflow/estimator | tensorflow_estimator/python/estimator/canned/dnn_test_fc_v2.py | 1 | 19054 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py with feature_column_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from unittest.mock import patch
from absl.testing import parameterized
import numpy as np
import six
import tensorflow as tf
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.feature_column import feature_column_v2
from tensorflow_estimator.python.estimator.canned import dnn
from tensorflow_estimator.python.estimator.canned import dnn_testing_utils
from tensorflow_estimator.python.estimator.canned import prediction_keys
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.inputs import numpy_io
from tensorflow_estimator.python.estimator.inputs import pandas_io
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _dnn_classifier_fn(*args, **kwargs):
return dnn.DNNClassifierV2(*args, **kwargs)
class DNNModelFnV2Test(dnn_testing_utils.BaseDNNModelFnTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(
self, dnn.dnn_model_fn_v2, fc_impl=feature_column_v2)
class DNNLogitFnV2Test(dnn_testing_utils.BaseDNNLogitFnTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNLogitFnTest.__init__(
self, dnn.dnn_logit_fn_builder_v2, fc_impl=feature_column_v2)
class DNNWarmStartingV2Test(dnn_testing_utils.BaseDNNWarmStartingTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNWarmStartingTest.__init__(
self, _dnn_classifier_fn, _dnn_regressor_fn, fc_impl=feature_column_v2)
class DNNClassifierEvaluateV2Test(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn, fc_impl=feature_column_v2)
class DNNClassifierPredictV2Test(dnn_testing_utils.BaseDNNClassifierPredictTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn, fc_impl=feature_column_v2)
class DNNClassifierTrainV2Test(dnn_testing_utils.BaseDNNClassifierTrainTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn, fc_impl=feature_column_v2)
def _dnn_regressor_fn(*args, **kwargs):
return dnn.DNNRegressorV2(*args, **kwargs)
class DNNRegressorEvaluateV2Test(dnn_testing_utils.BaseDNNRegressorEvaluateTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn, fc_impl=feature_column_v2)
class DNNRegressorPredictV2Test(dnn_testing_utils.BaseDNNRegressorPredictTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn, fc_impl=feature_column_v2)
class DNNRegressorTrainV2Test(dnn_testing_utils.BaseDNNRegressorTrainTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn, fc_impl=feature_column_v2)
def _queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = tf.queue.FIFOQueue(capacity=100, dtypes=queue_dtypes)
tf.compat.v1.train.queue_runner.add_queue_runner(
tf.compat.v1.train.queue_runner.QueueRunner(
input_queue, [input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
class DNNRegressorIntegrationTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, batch_size):
feature_columns = [
tf.feature_column.numeric_column('x', shape=(input_dimension,))
]
est = dnn.DNNRegressorV2(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_saved_model(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data}, y=data, batch_size=batch_size, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data}, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': tf.io.FixedLenFeature([label_dimension], tf.dtypes.float32),
'y': tf.io.FixedLenFeature([label_dimension], tf.dtypes.float32),
}
def _train_input_fn():
feature_map = tf.compat.v1.io.parse_example(serialized_examples,
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
class DNNClassifierIntegrationTest(tf.test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, n_classes, batch_size):
feature_columns = [
tf.feature_column.numeric_column('x', shape=(input_dimension,))
]
est = dnn.DNNClassifierV2(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_saved_model(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data}, y=y_data, batch_size=batch_size, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data}, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y':
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
'y': tf.io.FixedLenFeature([1], tf.dtypes.int64),
}
def _train_input_fn():
feature_map = tf.compat.v1.io.parse_example(serialized_examples,
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
class DNNTrainingMode(tf.test.TestCase):
"""Tests that training mode propagates to feature columns correctly."""
def setUp(self):
self._model_dir = tempfile.mkdtemp()
self._label_dimension = 1
self._batch_size = 10
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _create_data(self):
data = np.linspace(
0., 2., self._batch_size * self._label_dimension, dtype=np.float32)
return data.reshape(self._batch_size, self._label_dimension)
def _get_estimator(self):
feature_columns = [
tf.feature_column.numeric_column('x', shape=(self._label_dimension,))
]
return dnn.DNNRegressorV2(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=self._label_dimension,
model_dir=self._model_dir)
def test_train_vs_eval_mode(self):
data = self._create_data()
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=self._batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data}, y=data, batch_size=self._batch_size, shuffle=False)
est = self._get_estimator()
with patch.object(
tf.compat.v2.keras.layers.DenseFeatures, 'call',
return_value=data) as mock_dense_features_call:
est.train(train_input_fn, steps=10)
est.evaluate(eval_input_fn)
train_args, eval_args = mock_dense_features_call.call_args_list
# DenseFeature should have been called with training = True in train.
_, train_training_kwarg = train_args
self.assertTrue(train_training_kwarg['training'])
# DenseFeature should have been called with training = False in eval.
_, eval_training_kwarg = eval_args
self.assertFalse(eval_training_kwarg['training'])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | -8,548,565,812,858,429,000 | 35.293333 | 80 | 0.65031 | false |
delete/estofadora | estofadora/bills/views.py | 1 | 1602 | from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.urlresolvers import reverse
from .forms import BillForm
from .models import Bill
@login_required
def new(request):
context = {}
if request.method == 'POST':
form = BillForm(request.POST)
if form.is_valid():
form.save()
messages.success(request, 'Cadastrada com sucesso!')
return redirect(reverse('bills:new'))
else:
form = BillForm()
context['form'] = form
context['section'] = 'bill_new'
return render(request, 'bills/new.html', context)
@login_required
def list(request):
context = {}
if request.method == 'POST':
bill_name = request.POST.get('name')
bills = Bill.objects.filter(
name__icontains=bill_name
).order_by('-date_to_pay')
print(bills)
else:
bills = Bill.objects.all().order_by('-date_to_pay')
context['bills'] = bills
context['section'] = 'bills'
return render(request, 'bills/list.html', context)
@login_required
def delete(request, pk):
bill = get_object_or_404(Bill, pk=pk)
bill.delete()
messages.success(request, 'Conta removida com sucesso!')
return redirect(reverse('bills:list'))
@login_required
def mark_as_paid(request, pk):
bill = get_object_or_404(Bill, pk=pk)
bill.is_paid = True
bill.save()
messages.success(request, 'Conta marcada como paga!')
return redirect(reverse('bills:list'))
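# Illustrative URLconf sketch (not part of this module; the route patterns and
# the 'bills' namespace are assumptions inferred from the reverse() calls
# above):
#
#   urlpatterns = [
#       url(r'^new/$', views.new, name='new'),
#       url(r'^$', views.list, name='list'),
#       url(r'^(?P<pk>\d+)/delete/$', views.delete, name='delete'),
#       url(r'^(?P<pk>\d+)/paid/$', views.mark_as_paid, name='mark_as_paid'),
#   ]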
| mit | 4,154,725,326,265,776,600 | 22.910448 | 64 | 0.644195 | false |
theepicsnail/SuperBot2 | Core.py | 1 | 5362 | from PluginManager import PluginManager
from PluginDispatcher import PluginDispatcher
from Configuration import ConfigFile
from Util import call
from re import match
from sys import path
from os import getcwd
from Util import dictJoin
from Logging import LogFile
path.append(getcwd())
log = LogFile("Core")
class Core:
_PluginManager = None
_PluginDispatcher = None
_ResponseObject = None
_Connector = None
_Config = None
def _LoadConnector(self, ConName):
try:
con = __import__("%s.Connector" % ConName,
globals(), locals(), "Connector")
log.debug("Got connector:", con)
cls = getattr(con, "Connector", None)
        except:
log.exception("Exception while loading connector")
cls = None
log.debug("Connectors class", cls)
if cls:
c = cls()
log.debug("Connector constructed")
return c
log.critical("No connector")
return cls
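    # Provider layout sketch (an inference from the dynamic import above and
    # the calls made on the connector elsewhere in this class, not a
    # documented contract): a provider named Foo ships Foo/Connector.py
    # defining a Connector class that exposes SetEventHandler,
    # GetResponseObject, HandleResponse, Start and Stop.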
def HandleEvent(self, event):
log.dict(event,"HandleEvent")
pm = self._PluginManager
if not pm:
log.warning("No plugin manager")
return
pd = self._PluginDispatcher
if not pd:
log.warning("No plugin dispatcher")
return
ro = self._ResponseObject
if not ro:
log.warning("no response object")
pass
matches = pm.GetMatchingFunctions(event)
log.debug("Matched %i hook(s)." % len(matches))
for inst, func, args, servs in matches:
newEvent = dictJoin(event, dictJoin(args,
{"self": inst, "response": ro}))
log.debug("Services found for plugin:", servs)
if servs:
log.debug("Event before processing:", newEvent)
servDict={}
servDict["event"]=newEvent
servDict["pm"]=self._PluginManager
servDict["pd"]=self._PluginDispatcher
servDict["ro"]=self._ResponseObject
servDict["c"]=self._Connector
servDict["core"]=self
servDict["config"]=self._Config
for servName in servs:
serv = pm.GetService(servName)
log.debug("Processing service",servName,serv)
call(serv.onEvent,servDict)
if servs:
log.dict(newEvent,"Event after processing:")
#issue 5 fix goes here
newEvent.update(servDict)
pd.Enqueue((func, newEvent))
def __init__(self):
self._Config = ConfigFile("Core")
if not self._Config:
log.critical("No log file loaded!")
return
ConName = self._Config["Core", "Provider"]
if ConName == None:
log.critical("No Core:Provider in Core.cfg")
del self._Connector
return
self._Connector=self._LoadConnector(ConName)
if self._Connector:
self._PluginManager = PluginManager(ConName)
self._PluginDispatcher = PluginDispatcher()
self._Connector.SetEventHandler(self.HandleEvent)
self._ResponseObject = self._Connector.GetResponseObject()
self._PluginDispatcher.SetResponseHandler(
self._Connector.HandleResponse)
def Start(self):
if not self._Connector:
log.warning("Could not start, no connector.")
return
log.debug("Starting")
log.debug("Auto loading plugins")
self.AutoLoad()
log.debug("Auto load complete")
if self._Connector:
log.debug("Connector starting")
self._Connector.Start()
#else log error?
def Stop(self):
log.debug("Stopping")
if self._PluginDispatcher:
self._PluginDispatcher.Stop()
if self._PluginManager:
self._PluginManager.Stop()
if self._Connector:
self._Connector.Stop()
def AutoLoad(self):
if not self._PluginManager:
return
pm = self._PluginManager
log.note("Starting autoload", "Root:" + pm.root)
cf = ConfigFile(pm.root, "Autoload")
lines = ["Configuration:"]
for i in cf:
lines.append(i)
for j in cf[i]:
lines.append(" %s=%s"%(j,cf[i,j]))
log.debug(*lines)
if cf:
log.debug("Autoloading plugins.")
names = cf["Plugins", "Names"]
log.debug("Autoloading plugins", names)
if names:
for name in names.split():
pm.LoadPlugin(name)
log.debug("Autoloading finished.")
pd=self._PluginDispatcher
handler = pd.GetResponseHandler()
log.debug("Updating dedicated thread pool",self._ResponseObject,handler)
pd.EnsureDedicated(pm.GetDedicated(),self._ResponseObject,handler)
else:
log.note("No Autoload configuration file")
if __name__ == "__main__":
try:
c = Core()
try:
c.Start()
except:
log.exception("Exception while starting.")
c.Stop()
except:
log.exception("Exception while stopping.")
log.debug("End of core")
| mit | -4,258,690,189,499,016,000 | 29.99422 | 88 | 0.55166 | false |
ryanpstauffer/market-vis | marketvis/quotes.py | 1 | 5030 | # -*- coding: utf-8 -*-
"""
[Python 2.7 (Mayavi is not yet compatible with Python 3+)]
Created on Wed Dec 16 22:44:15 2015
@author: Ryan Stauffer
https://github.com/ryanpstauffer/market-vis
[This module referenced http://www.theodor.io/scraping-google-finance-data-using-pandas/]
Market Visualization Prototype
Quotes Module
"""
from datetime import datetime, date
import pandas as pd
import json
import urllib
import urllib2
import os
def getIntradayData(ticker, interval_seconds=61, num_days=10):
# Specify URL string based on function inputs.
urlString = 'http://www.google.com/finance/getprices?q={0}'.format(ticker.upper())
urlString += "&i={0}&p={1}d&f=d,c".format(interval_seconds,num_days)
# Request the text, and split by each line
r = urllib2.urlopen(urllib2.Request(urlString)).read()
r = r.splitlines()
# Split each line by a comma, starting at the 8th line
r = [line.split(',') for line in r[7:]]
# Save data in Pandas DataFrame
df = pd.DataFrame(r, columns=['Datetime',ticker])
# Convert UNIX to Datetime format
df['Datetime'] = df['Datetime'].apply(lambda x: datetime.fromtimestamp(int(x[1:])))
df.index = df['Datetime']
return df[ticker]
def getDailyData(ticker, startDate, endDate=date.today()):
''' Daily quotes from Google Finance API. Date format='yyyy-mm-dd' '''
ticker = ticker.upper()
urlString = "http://www.google.com/finance/historical?q={0}".format(ticker)
urlString += "&startdate={0}&enddate={1}&output=csv".format(
startDate.strftime('%b %d, %Y'),endDate.strftime('%b %d, %Y'))
#Convert URL output to dataframe
df = pd.read_csv(urllib.urlopen(urlString))
# Convert strings to Datetime format
df[df.columns[0]] = df[df.columns[0]].apply(lambda x: datetime.strptime(x, '%d-%b-%y'))
#Index by date
df.index = df[df.columns[0]]
df.drop(df.columns[0], axis=1, inplace=True)
return df
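# Illustrative usage sketch (ticker and dates are arbitrary; the Google
# Finance endpoints used above have since been retired, so live calls may
# fail):
#
#   start = datetime.strptime('20150101', '%Y%m%d')
#   end = datetime.strptime('20150630', '%Y%m%d')
#   daily = getDailyData('AAPL', start, end)
#   intraday = getIntradayData('AAPL', interval_seconds=300, num_days=5)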
def getLastPrice(ticker):
'''Returns last price and date time of a given ticker (from Google Finance API)'''
# Specify URL string based on function inputs.
urlString = 'http://www.google.com/finance/info?client=ig&q={0}'.format(ticker.upper())
# Request the text, and split by each line
r = urllib2.urlopen(urllib2.Request(urlString)).read()
obj = json.loads(r[3:])
print(obj)
price = float(obj[0]['l'])
return price
def buildDailyPriceData(tickerList, startDate, endDate):
print('Pulling Market Data for S&P 500 from {0} to {1}'.format(startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d')))
#Build SP500 daily price data (for saving)
firstTicker = tickerList[0]
print(firstTicker)
firstTickerData = getDailyData(firstTicker, startDate, endDate)
firstTickerData.rename(columns={'Close' : firstTicker}, inplace = True)
df = firstTickerData[firstTicker]
for ticker in tickerList[1:]:
print(ticker)
newTicker = getDailyData(ticker, startDate, endDate)
if not newTicker.empty:
newTicker.rename(columns={'Close' : ticker}, inplace = True)
df = pd.concat([df, newTicker[ticker]], axis=1, join='outer')
#Google returns data w/ most recent at the top, this puts data in chrono order
stockPrices = df.sort_index()
print('Pulled data for {0} stocks from {1} to {2}'.format(len(stockPrices.columns), startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d')))
return stockPrices
def buildDummyData():
'''Builds Daily Price Data from a backup .csv file
Used for offline testing purposes
'''
#Select Dates
startDate = datetime.strptime('20120101', '%Y%m%d')
endDate = datetime.strptime('20130101', '%Y%m%d')
#Load dataset from .csv
print("Pulling Market Data from .csv")
dataLoc = os.path.join(os.path.dirname(__file__),"Resources/SP500_daily_price_data.csv")
df = pd.read_csv(dataLoc)
#Convert strings to Datetime format
df[df.columns[0]] = df[df.columns[0]].apply(lambda x: datetime.strptime(x, '%Y-%m-%d'))
df.index = df[df.columns[0]]
df.drop(df.columns[0], axis=1, inplace=True)
#Build Price Table
stockPrices = df[startDate:endDate]
print('Pulled data for {0} stocks from {1} to {2}'.format(len(stockPrices.columns), startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d')))
return stockPrices
def createIndexedPricing(stockPrices, startingIndexValue):
'''Takes a stock prices tables and converts to indexed pricing
(i.e. all prices are relative based on a common starting index value)
Inputs:
stockPrices => a panda DataFrame
startingIndexValue => the value that all prices will start at
'''
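    # Worked example (illustrative): with startingIndexValue=100, a price
    # series [10.0, 11.0, 9.9] has daily returns [NaN, +10%, -10%], so the
    # indexed series becomes [100, 110, 99].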
#Build Returns Table
stockReturns = stockPrices.pct_change(1)
#Build Indexed Price Table (indexed to 100)
indexedPrices = stockReturns + 1
indexedPrices.iloc[0] = startingIndexValue
indexedPrices = indexedPrices.cumprod(axis=0)
return indexedPrices | mit | -1,957,792,777,954,780,700 | 35.456522 | 146 | 0.669384 | false |
caspartse/QQ-Groups-Spider | vendor/pyexcel/constants.py | 1 | 3090 | """
pyexcel.constants
~~~~~~~~~~~~~~~~~~~
Constants appeared in pyexcel
:copyright: (c) 2015-2017 by Onni Software Ltd.
:license: New BSD License
"""
# flake8: noqa
DEFAULT_NA = ''
DEFAULT_NAME = 'pyexcel sheet'
DEFAULT_SHEET_NAME = 'pyexcel_sheet1'
MESSAGE_WARNING = "We do not overwrite files"
MESSAGE_WRITE_ERROR = "Cannot write sheet"
MESSAGE_ERROR_02 = "No valid parameters found!"
MESSAGE_DATA_ERROR_NO_SERIES = "No column names or row names found"
MESSAGE_DATA_ERROR_EMPTY_COLUMN_LIST = "Column list is empty. Do not waste resource"
MESSAGE_DATA_ERROR_COLUMN_LIST_INTEGER_TYPE = "Column list should be a list of integers"
MESSAGE_DATA_ERROR_COLUMN_LIST_STRING_TYPE = "Column list should be a list of strings"
MESSAGE_INDEX_OUT_OF_RANGE = "Index out of range"
MESSAGE_DATA_ERROR_EMPTY_CONTENT = "Nothing to be pasted!"
MESSAGE_DATA_ERROR_DATA_TYPE_MISMATCH = "Data type mismatch"
MESSAGE_DATA_ERROR_ORDEREDDICT_IS_EXPECTED = "Please give a ordered list"
MESSAGE_DEPRECATED_ROW_COLUMN = "Deprecated usage. Please use [row, column]"
MESSAGE_DEPRECATED_OUT_FILE = "Deprecated usage of 'out_file'. Please use dest_file_name"
MESSAGE_DEPRECATED_CONTENT = "Deprecated usage of 'content'. Please use file_content"
MESSAGE_NOT_IMPLEMENTED_01 = "Please use attribute row or column to extend sheet"
MESSAGE_NOT_IMPLEMENTED_02 = "Confused! What do you want to put as column names"
MESSAGE_READONLY = "This attribute is readonly"
MESSAGE_ERROR_NO_HANDLER = "No suitable plugins imported or installed"
MESSAGE_UNKNOWN_IO_OPERATION = "Internal error: an illegal source action"
MESSAGE_UPGRADE = "Please upgrade the plugin '%s' according to \
plugin compatibility table."
_IMPLEMENTATION_REMOVED = "Deprecated since 0.3.0! Implementation removed"
IO_FILE_TYPE_DOC_STRING = """
Get/Set data in/from {0} format
You could obtain content in {0} format by dot notation::
{1}.{0}
And you could as well set content by dot notation::
{1}.{0} = the_io_stream_in_{0}_format
if you need to pass on more parameters, you could use::
{1}.get_{0}(**keywords)
{1}.set_{0}(the_io_stream_in_{0}_format, **keywords)
"""
OUT_FILE_TYPE_DOC_STRING = """
Get data in {0} format
You could obtain content in {0} format by dot notation::
{1}.{0}
if you need to pass on more parameters, you could use::
{1}.get_{0}(**keywords)
"""
IN_FILE_TYPE_DOC_STRING = """
Set data in {0} format
You could set content in {0} format by dot notation::
{1}.{0}
if you need to pass on more parameters, you could use::
{1}.set_{0}(the_io_stream_in_{0}_format, **keywords)
"""
VALID_SHEET_PARAMETERS = ['name_columns_by_row',
'name_rows_by_column',
'colnames',
'rownames',
'transpose_before',
'transpose_after']
# for sources
# targets
SOURCE = 'source'
SHEET = 'sheet'
BOOK = 'book'
# actions
READ_ACTION = 'read'
WRITE_ACTION = 'write'
RW_ACTION = 'read-write'
FILE_TYPE_NOT_SUPPORTED_FMT = "File type '%s' is not supported for %s."
| mit | 4,937,172,543,752,419,000 | 31.1875 | 90 | 0.680583 | false |
rarcotvmw/capirca | lib/pcap.py | 1 | 15928 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pcap filter generator.
This generate a pcap packet filter expression that either:
1) Matches (i.e., captures), the packets that match the ACCEPT clauses
specified in a given policy, or
2) Matches the packets that match opposite of that, i.e., the DENY or REJECT
clauses.
Supports TCP flag matching and ICMP types, including IPv6/ICMPv6, but not much
else past the standard address, port, and protocol conditions.
Note that this is still alpha and will likely require more testing prior to
having more confidence in it.
Stolen liberally from packetfilter.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
from lib import aclgenerator
from lib import nacaddr
import logging
class Error(Exception):
"""Base error class."""
class UnsupportedActionError(Error):
"""Raised when we see an unsupported action."""
class UnsupportedTargetOption(Error):
"""Raised when we see an unsupported option."""
class Term(aclgenerator.Term):
"""Generate pcap filter to match a policy term."""
_PLATFORM = 'pcap'
_ACTION_TABLE = {
'accept': '',
'deny': '',
'reject': '',
'next': '',
}
_TCP_FLAGS_TABLE = {
'syn': 'tcp-syn',
'ack': 'tcp-ack',
'fin': 'tcp-fin',
'rst': 'tcp-rst',
'urg': 'tcp-urg',
'psh': 'tcp-push',
'all': '(tcp-syn|tcp-ack|tcp-fin|tcp-rst|tcp-urg|tcp-push)',
'none': '(tcp-syn&tcp-ack&tcp-fin&tcp-rst&tcp-urg&tcp-push)',
}
_PROTO_TABLE = {
'ah': 'proto \\ah',
'esp': 'proto \\esp',
'icmp': 'proto \\icmp',
'icmpv6': 'icmp6',
'ip': 'proto \\ip',
'ip6': 'ip6',
'igmp': 'proto \\igmp',
'igrp': 'igrp',
'pim': 'proto \\pim',
'tcp': 'proto \\tcp',
'udp': 'proto \\udp',
      # bpf supports "\vrrp", but some winpcap versions don't recognize it,
# so use the IANA protocol number for it:
'vrrp': 'proto 112',
'hopopt': 'ip6 protochain 0',
}
def __init__(self, term, filter_name, af='inet', direction=''):
"""Setup a new term.
Args:
term: A policy.Term object to represent in packetfilter.
      filter_name: The name of the filter chain to attach the term to.
af: Which address family ('inet' or 'inet6') to apply the term to.
direction: Direction of the flow.
Raises:
aclgenerator.UnsupportedFilterError: Filter is not supported.
"""
super(Term, self).__init__(term)
self.term = term # term object
self.filter = filter_name # actual name of filter
self.options = []
self.default_action = 'deny'
self.af = af
self.direction = direction
def __str__(self):
"""Render config output from this term object."""
# Verify platform specific terms. Skip whole term if platform does not
# match.
if self.term.platform:
if self._PLATFORM not in self.term.platform:
return ''
if self.term.platform_exclude:
if self._PLATFORM in self.term.platform_exclude:
return ''
conditions = []
# if terms does not specify action, use filter default action
if not self.term.action:
self.term.action[0].value = self.default_action
if str(self.term.action[0]) not in self._ACTION_TABLE:
raise aclgenerator.UnsupportedFilterError('%s %s %s %s' % (
'\n', self.term.name, self.term.action[0],
'action not currently supported.'))
# source address
term_saddrs = self._CheckAddressAf(self.term.source_address)
if not term_saddrs:
logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name,
direction='source',
af=self.af))
return ''
conditions.append(self._GenerateAddrStatement(
term_saddrs, self.term.source_address_exclude))
# destination address
term_daddrs = self._CheckAddressAf(self.term.destination_address)
if not term_daddrs:
logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name,
direction='destination',
af=self.af))
return ''
conditions.append(self._GenerateAddrStatement(
term_daddrs, self.term.destination_address_exclude))
# protocol
if self.term.protocol_except:
raise aclgenerator.UnsupportedFilterError('%s %s %s' % (
'\n', self.term.name,
'protocol_except logic not currently supported.'))
conditions.append(self._GenerateProtoStatement(self.term.protocol))
conditions.append(self._GeneratePortStatement(
self.term.source_port, 'src'))
conditions.append(self._GeneratePortStatement(
self.term.destination_port, 'dst'))
# icmp-type
icmp_types = ['']
if self.term.icmp_type:
if self.af != 'mixed':
af = self.af
elif self.term.protocol == ['icmp']:
af = 'inet'
elif self.term.protocol == ['icmp6']:
af = 'inet6'
else:
raise aclgenerator.UnsupportedFilterError('%s %s %s' % (
'\n', self.term.name,
'icmp protocol is not defined or not supported.'))
icmp_types = self.NormalizeIcmpTypes(
self.term.icmp_type, self.term.protocol, af)
if 'icmp' in self.term.protocol:
conditions.append(self._GenerateIcmpType(icmp_types,
self.term.icmp_code))
# tcp options
if 'tcp' in self.term.protocol:
conditions.append(self._GenerateTcpOptions(self.term.option))
cond = Term.JoinConditionals(conditions, 'and')
# Note that directionally-based pcap filter requires post-processing to
# replace 'localhost' with whatever the IP(s) of the local machine happen
# to be. This bit of logic ensure there's a placeholder with the
# appropriate booleans around it. We also have to check that there exists
# some form of condition already, else we'll end up with something overly
# broad like 'dst net localhost' (e.g., 'default-deny').
if cond and self.direction == 'in':
cond = Term.JoinConditionals(['dst net localhost', cond], 'and')
elif cond and self.direction == 'out':
cond = Term.JoinConditionals(['src net localhost', cond], 'and')
return cond + '\n'
def _CheckAddressAf(self, addrs):
"""Verify that the requested address-family matches the address's family."""
if not addrs:
return ['any']
if self.af == 'mixed':
return addrs
af_addrs = []
af = self.NormalizeAddressFamily(self.af)
for addr in addrs:
if addr.version == af:
af_addrs.append(addr)
return af_addrs
@staticmethod
def JoinConditionals(condition_list, operator):
"""Join conditionals using the specified operator.
Filters out empty elements and blank strings.
Args:
condition_list: a list of str()-able items to join.
operator: the join string.
Returns:
A string consisting of the joined elements. If all elements are False
or whitespace-only, the empty string.
"""
condition_list = filter(None, condition_list)
condition_list = [str(x).strip(' ') for x in condition_list
if str(x).strip()]
if not condition_list:
return ''
op = ' %s ' % (operator)
res = '(%s)' % (op.join(condition_list))
return res
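  # Example (traced from the implementation above): blank and falsy items are
  # dropped before joining, so
  #   Term.JoinConditionals(['dst port 80', '', 'dst port 443'], 'or')
  # returns '(dst port 80 or dst port 443)'.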
def _GenerateAddrStatement(self, addrs, exclude_addrs):
addrlist = []
for d in nacaddr.CollapseAddrListRecursive(addrs):
if d != 'any' and str(d) != '::/0':
addrlist.append('dst net %s' % (d))
excludes = []
if exclude_addrs:
for d in nacaddr.CollapseAddrListRecursive(exclude_addrs):
if d != 'any' and str(d) != '::/0':
excludes.append('not dst net %s' % (d))
else:
# excluding 'any' doesn't really make sense ...
return ''
if excludes:
return Term.JoinConditionals(
[Term.JoinConditionals(addrlist, 'or'),
Term.JoinConditionals(excludes, 'or')], 'and not')
else:
return Term.JoinConditionals(addrlist, 'or')
def _GenerateProtoStatement(self, protocols):
return Term.JoinConditionals(
[self._PROTO_TABLE[p] for p in protocols], 'or')
def _GeneratePortStatement(self, ports, direction):
conditions = []
# term.destination_port is a list of tuples containing the start and end
# ports of the port range. In the event it is a single port, the start
# and end ports are the same.
for port_tuple in ports:
if port_tuple[0] == port_tuple[1]:
conditions.append('%s port %s' % (direction, port_tuple[0]))
else:
conditions.append('%s portrange %s-%s' % (
direction, port_tuple[0], port_tuple[1]))
return Term.JoinConditionals(conditions, 'or')
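  # Example (illustrative): ports [(80, 80), (8000, 8080)] with direction
  # 'dst' produce '(dst port 80 or dst portrange 8000-8080)'.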
def _GenerateTcpOptions(self, options):
opts = [str(x) for x in options]
tcp_flags_set = []
tcp_flags_check = []
for next_opt in opts:
if next_opt == 'tcp-established':
tcp_flags_set.append(self._TCP_FLAGS_TABLE['ack'])
tcp_flags_check.extend([self._TCP_FLAGS_TABLE['ack']])
else:
# Iterate through flags table, and create list of tcp-flags to append
for next_flag in self._TCP_FLAGS_TABLE:
if next_opt.find(next_flag) == 0:
tcp_flags_check.append(self._TCP_FLAGS_TABLE.get(next_flag))
tcp_flags_set.append(self._TCP_FLAGS_TABLE.get(next_flag))
if tcp_flags_check:
return '(tcp[tcpflags] & (%s) == (%s))' % ('|'.join(tcp_flags_check),
'|'.join(tcp_flags_set))
return ''
def _GenerateIcmpType(self, icmp_types, icmp_code):
rtr_str = ''
if icmp_types:
code_strings = ['']
if icmp_code:
code_strings = [' and icmp[icmpcode] == %d' % code for
code in icmp_code]
rtr_str = Term.JoinConditionals(
['icmp[icmptype] == %d%s' % (x, y) for y in code_strings for
x in icmp_types], 'or')
return rtr_str
class PcapFilter(aclgenerator.ACLGenerator):
"""Generates filters and terms from provided policy object.
Note that since pcap isn't actually a firewall grammar, this generates a
  filter that only matches that which would be accepted by the
specified policy.
"""
_PLATFORM = 'pcap'
_DEFAULT_PROTOCOL = 'all'
SUFFIX = '.pcap'
_TERM = Term
def __init__(self, *args, **kwargs):
"""Initialize a PcapFilter generator.
Takes standard ACLGenerator arguments, as well as an 'invert' kwarg. If
this argument is true, the pcap filter will be reversed, such that it
matches all those packets that would be denied by the specified policy.
"""
self._invert = False
if 'invert' in kwargs:
self._invert = kwargs['invert']
del kwargs['invert']
super(PcapFilter, self).__init__(*args, **kwargs)
def _BuildTokens(self):
"""Build supported tokens for platform.
Returns:
tuple containing both supported tokens and sub tokens
"""
supported_tokens, supported_sub_tokens = super(
PcapFilter, self)._BuildTokens()
supported_tokens |= {'logging', 'icmp_code'}
supported_tokens -= {'verbatim'}
supported_sub_tokens.update(
{'action': {'accept', 'deny', 'reject', 'next'},
'option': {
'tcp-established',
'established',
'syn',
'ack',
'fin',
'rst',
'urg',
'psh',
'all',
'none'},
})
return supported_tokens, supported_sub_tokens
def _TranslatePolicy(self, pol, exp_info):
self.pcap_policies = []
current_date = datetime.datetime.utcnow().date()
exp_info_date = current_date + datetime.timedelta(weeks=exp_info)
good_afs = ['inet', 'inet6', 'mixed']
good_options = ['in', 'out']
direction = ''
for header, terms in pol.filters:
filter_type = None
if self._PLATFORM not in header.platforms:
continue
filter_options = header.FilterOptions(self._PLATFORM)[1:]
filter_name = header.FilterName(self._PLATFORM)
# ensure all options after the filter name are expected
for opt in filter_options:
if opt not in good_afs + good_options:
raise UnsupportedTargetOption('%s %s %s %s' % (
'\nUnsupported option found in', self._PLATFORM,
'target definition:', opt))
if 'in' in filter_options:
direction = 'in'
elif 'out' in filter_options:
direction = 'out'
# Check for matching af
for address_family in good_afs:
if address_family in filter_options:
# should not specify more than one AF in options
if filter_type is not None:
raise aclgenerator.UnsupportedFilterError('%s %s %s %s' % (
'\nMay only specify one of', good_afs, 'in filter options:',
filter_options))
filter_type = address_family
if filter_type is None:
filter_type = 'mixed'
# add the terms
accept_terms = []
deny_terms = []
term_names = set()
for term in terms:
if term.name in term_names:
raise aclgenerator.DuplicateTermError(
'You have a duplicate term: %s' % term.name)
if term.expiration:
if term.expiration <= exp_info_date:
logging.info('INFO: Term %s in policy %s expires '
'in less than two weeks.', term.name, filter_name)
if term.expiration <= current_date:
logging.warn('WARNING: Term %s in policy %s is expired and '
'will not be rendered.', term.name, filter_name)
continue
if not term:
continue
if term.action[0] == 'accept':
accept_terms.append(self._TERM(term, filter_name, filter_type,
direction))
elif term.action[0] == 'deny' or term.action[0] == 'reject':
deny_terms.append(self._TERM(term, filter_name, filter_type,
direction))
self.pcap_policies.append((header, filter_name, filter_type, accept_terms,
deny_terms))
def __str__(self):
"""Render the output of the PF policy into config."""
target = []
for (unused_header, unused_filter_name, unused_filter_type, accept_terms,
deny_terms) in self.pcap_policies:
accept = []
for term in accept_terms:
term_str = str(term)
if term_str:
accept.append(str(term))
accept_clause = Term.JoinConditionals(accept, 'and')
deny = []
for term in deny_terms:
term_str = str(term)
if term_str:
deny.append(str(term))
deny_clause = Term.JoinConditionals(deny, 'and')
if self._invert:
target.append(
Term.JoinConditionals([deny_clause, accept_clause], 'and not'))
else:
target.append(
Term.JoinConditionals([accept_clause, deny_clause], 'and not'))
return '\nor\n'.join(target) + '\n'
| apache-2.0 | 6,208,498,579,205,639,000 | 32.674419 | 80 | 0.604847 | false |
clubcapra/Ibex | src/seagoatvision_ros/scripts/CapraVision/server/recording/image.py | 1 | 1268 | #! /usr/bin/env python
# Copyright (C) 2012 Club Capra - capra.etsmtl.ca
#
# This file is part of CapraVision.
#
# CapraVision is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cv2
import os
class ImageRecorder:
def __init__(self, savepath, filtre):
self.savepath = savepath
self.filtre = filtre
self.index = 0
def next_filename(self):
return os.path.join(
self.savepath,
self.filtre.__class__.__name__,
str(self.index).zfill(10) + '.png')
def save(self, image):
        cv2.imwrite(self.next_filename(), image)
        # Advance the counter so the next save() writes a new sequential file.
        self.index += 1
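# Illustrative usage sketch (paths and the filter object are hypothetical;
# only the filter's class name is used to pick the sub-folder):
#
#   recorder = ImageRecorder('/tmp/capture', my_filter)
#   recorder.save(frame)   # -> /tmp/capture/<FilterClassName>/0000000000.png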
| gpl-3.0 | 1,795,421,447,140,961,800 | 31.538462 | 74 | 0.638013 | false |
gmr/infoblox | infoblox/record.py | 1 | 15975 | """
Base Record Object
"""
import logging
from infoblox import exceptions
from infoblox import mapping
LOGGER = logging.getLogger(__name__)
class Record(mapping.Mapping):
"""This object is extended by specific Infoblox record types and implements
the core API behavior of a record class. Attributes that map to other
infoblox records will be instances of those record types.
:param infoblox.Session session: The infoblox session object
:param str reference_id: The infoblox _ref value for the record
    :param dict kwargs: Key-value pairs; when a key matches an attribute of
        the record, the value will be assigned.
"""
view = 'default'
_ref = None
_repr_keys = ['_ref']
_return_ignore = ['view']
_save_ignore = []
_search_by = []
_session = None
_supports = []
_wapi_type = 'record'
def __init__(self, session, reference_id=None, **kwargs):
"""Create a new instance of the Record passing in the Infoblox
session object and the reference id for the record.
"""
super(Record, self).__init__(**kwargs)
self._session = session
self._ref = reference_id
self._search_values = self._build_search_values(kwargs)
if self._ref or self._search_values:
self.fetch()
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
' '.join(['%s=%s' % (key, getattr(self, key))
for key in self._repr_keys]))
def delete(self):
"""Remove the item from the infoblox server.
:rtype: bool
:raises: AssertionError
:raises: ValueError
:raises: infoblox.exceptions.ProtocolError
"""
if not self._ref:
raise ValueError('Object has no reference id for deletion')
if 'save' not in self._supports:
raise AssertionError('Can not save this object type')
response = self._session.delete(self._path)
if response.status_code == 200:
self._ref = None
self.clear()
return True
try:
error = response.json()
raise exceptions.ProtocolError(error['text'])
except ValueError:
raise exceptions.ProtocolError(response.content)
def fetch(self):
"""Attempt to fetch the object from the Infoblox device. If successful
the object will be updated and the method will return True.
:rtype: bool
:raises: infoblox.exceptions.ProtocolError
"""
LOGGER.debug('Fetching %s, %s', self._path, self._search_values)
response = self._session.get(self._path, self._search_values,
{'_return_fields': self._return_fields})
if response.status_code == 200:
values = response.json()
self._assign(values)
return bool(values)
elif response.status_code >= 400:
try:
error = response.json()
raise exceptions.ProtocolError(error['text'])
except ValueError:
raise exceptions.ProtocolError(response.content)
return False
def reference_id(self):
"""Return a read-only handle for the reference_id of this object.
"""
return str(self._ref)
def save(self):
"""Update the infoblox with new values for the specified object, or add
        the values if it's a new object altogether.
:raises: AssertionError
:raises: infoblox.exceptions.ProtocolError
"""
if 'save' not in self._supports:
raise AssertionError('Can not save this object type')
values = {}
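        # Collect only the populated attributes (anything listed in _save_ignore
        # is skipped); child record objects inside lists are flattened to the
        # dict from their _save_as() or to their _ref string, and the resulting
        # payload is POSTed for a new object or PUT when a _ref already exists.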
for key in [key for key in self.keys() if key not in self._save_ignore]:
if not getattr(self, key) and getattr(self, key) != False:
continue
if isinstance(getattr(self, key, None), list):
value = list()
for item in getattr(self, key):
if isinstance(item, dict):
value.append(item)
elif hasattr(item, '_save_as'):
value.append(item._save_as())
elif hasattr(item, '_ref') and getattr(item, '_ref'):
value.append(getattr(item, '_ref'))
else:
LOGGER.warning('Cant assign %r', item)
values[key] = value
elif getattr(self, key, None):
values[key] = getattr(self, key)
if not self._ref:
response = self._session.post(self._path, values)
else:
values['_ref'] = self._ref
response = self._session.put(self._path, values)
LOGGER.debug('Response: %r, %r', response.status_code, response.content)
if 200 <= response.status_code <= 201:
self.fetch()
return True
else:
try:
error = response.json()
raise exceptions.ProtocolError(error['text'])
except ValueError:
raise exceptions.ProtocolError(response.content)
def _assign(self, values):
"""Assign the values passed as either a dict or list to the object if
the key for each value matches an available attribute on the object.
:param dict values: The values to assign
"""
LOGGER.debug('Assigning values: %r', values)
if not values:
return
keys = self.keys()
if not self._ref:
keys.append('_ref')
if isinstance(values, dict):
for key in keys:
if values.get(key):
if isinstance(values.get(key), list):
items = list()
for item in values[key]:
if isinstance(item, dict):
if '_ref' in item:
obj_class = get_class(item['_ref'])
if obj_class:
items.append(obj_class(self._session,
**item))
else:
items.append(item)
setattr(self, key, items)
else:
setattr(self, key, values[key])
elif isinstance(values, list):
self._assign(values[0])
else:
LOGGER.critical('Unhandled return type: %r', values)
def _build_search_values(self, kwargs):
"""Build the search criteria dictionary. It will first try and build
the values from already set attributes on the object, falling back
to the passed in kwargs.
:param dict kwargs: Values to build the dict from
:rtype: dict
"""
criteria = {}
for key in self._search_by:
if getattr(self, key, None):
criteria[key] = getattr(self, key)
elif key in kwargs and kwargs.get(key):
criteria[key] = kwargs.get(key)
return criteria
@property
def _path(self):
return self._ref if self._ref else self._wapi_type
@property
def _return_fields(self):
return ','.join([key for key in self.keys()
if key not in self._return_ignore])
class Host(Record):
"""Implements the host record type.
Example::
session = infoblox.Session(infoblox_host,
infoblox_user,
infoblox_password)
host = infoblox.Host(session, name='foo.bar.net')
"""
aliases = []
comment = None
configure_for_dns = True
disable = False
dns_aliases = []
dns_name = None
extattrs = None
ipv4addrs = []
ipv6addrs = []
name = None
rrset_order = 'cyclic'
ttl = None
use_ttl = False
zone = None
_repr_keys = ['name', 'ipv4addrs', 'ipv6addrs']
_save_ignore = ['dns_name', 'host', 'zone']
_search_by = ['name', 'ipv4addr', 'ipv6addr', 'mac']
_supports = ['delete', 'save']
_wapi_type = 'record:host'
def __init__(self, session, reference_id=None, name=None, **kwargs):
"""Create a new instance of a Host object. If a reference_id or valid
search criteria are passed in, the object will attempt to load the
values for the host from the Infoblox device.
When creating a new host or adding an ip address, use the
Host.add_ipv4_address and Host.add_ipv6_address methods::
host.add_ipv4addr('1.2.3.4')
Valid search criteria: name, ipv4addr, ipv6addr, mac
        :param infoblox.Session session: The established session object
        :param str reference_id: The Infoblox reference id for the host
        :param str name: The host's FQDN
:param dict kwargs: Optional keyword arguments
"""
self.name = name
super(Host, self).__init__(session, reference_id, **kwargs)
def add_ipv4addr(self, ipv4addr):
"""Add an IPv4 address to the host.
:param str ipv4addr: The IP address to add.
:raises: ValueError
"""
for addr in self.ipv4addrs:
if ((isinstance(addr, dict) and addr['ipv4addr'] == ipv4addr) or
(isinstance(addr, HostIPv4) and addr.ipv4addr == ipv4addr)):
raise ValueError('Already exists')
self.ipv4addrs.append({'ipv4addr': ipv4addr})
def remove_ipv4addr(self, ipv4addr):
"""Remove an IPv4 address from the host.
:param str ipv4addr: The IP address to remove
"""
for addr in self.ipv4addrs:
if ((isinstance(addr, dict) and addr['ipv4addr'] == ipv4addr) or
(isinstance(addr, HostIPv4) and addr.ipv4addr == ipv4addr)):
self.ipv4addrs.remove(addr)
break
def add_ipv6addr(self, ipv6addr):
"""Add an IPv6 address to the host.
:param str ipv6addr: The IP address to add.
:raises: ValueError
"""
for addr in self.ipv6addrs:
if ((isinstance(addr, dict) and addr['ipv6addr'] == ipv6addr) or
                    (isinstance(addr, HostIPv6) and addr.ipv6addr == ipv6addr)):
raise ValueError('Already exists')
self.ipv6addrs.append({'ipv6addr': ipv6addr})
def remove_ipv6addr(self, ipv6addr):
"""Remove an IPv6 address from the host.
:param str ipv6addr: The IP address to remove
"""
for addr in self.ipv6addrs:
if ((isinstance(addr, dict) and addr['ipv6addr'] == ipv6addr) or
                    (isinstance(addr, HostIPv6) and addr.ipv6addr == ipv6addr)):
self.ipv6addrs.remove(addr)
break
class HostIPv4(Record):
"""Implements the host_ipv4addr record type.
"""
bootfile = None
bootserver = None
configure_for_dhcp = None
deny_bootp = None
discovered_data = None
enable_pxe_lease_time = None
host = None
ignore_client_requested_options = None
ipv4addr = None
last_queried = None
mac = None
match_client = None
network = None
nextserver = None
options = None
pxe_lease_time = None
use_bootfile = None
use_bootserver = None
use_deny_bootp = None
use_for_ea_inheritance = None
use_ignore_client_requested_options = None
use_nextserver = None
use_options = None
use_pxe_lease_time = None
_repr_keys = ['ipv4addr']
_search_by = ['ipv4addr']
_wapi_type = 'record:host_ipv4addr'
def __init__(self, session, reference_id=None, ipv4addr=None, **kwargs):
"""Create a new instance of a HostIPv4 object. If a reference_id or
valid search criteria are passed in, the object will attempt to load
the values for the host_ipv4addr from the Infoblox device.
Valid search criteria: ipv4addr
        :param infoblox.Session session: The established session object
:param str reference_id: The Infoblox reference id for the host
:param str ipv4addr: The ipv4 address
:param dict kwargs: Optional keyword arguments
"""
self.ipv4addr = str(ipv4addr)
super(HostIPv4, self).__init__(session, reference_id, **kwargs)
def _save_as(self):
return {'ipv4addr': self.ipv4addr}
class HostIPv6(Record):
"""Implements the host_ipv6addr record type.
"""
address_type = None
configure_for_dhcp = True
discovered_data = None
domain_name = None
domain_name_servers = []
duid = None
host = None
ipv6addr = None
ipv6bits = None
ipv6prefix_bits = None
match_client = None
options = None
preferred_lifetime = 27000
use_domain_name = False
use_domain_name_servers = False
use_for_ea_inheritance = False
use_options = False
use_valid_lifetime = False
valid_lifetime = 43200
_repr_keys = ['ipv6addr', 'ipv6bits', 'ipv6prefix_bits']
_save_ignore = ['host']
_search_by = ['ipv6addr']
_wapi_type = 'record:host_ipv6addr'
def __init__(self, session, reference_id=None, ipv6addr=None,
ipv6bits=None, ipv6prefix_bits=None, **kwargs):
"""Create a new instance of a HostIPv6 object. If a reference_id or
valid search criteria are passed in, the object will attempt to load
the values for the host_ipv6addr from the Infoblox device.
Valid search criteria: ipv6addr
        :param infoblox.Session session: The established session object
:param str reference_id: The Infoblox reference id for the host
:param str ipv6addr: The ipv6 address
:param str ipv6bits: The ipv6 address bit count
:param str ipv6prefix_bits: The ipv6 address prefix bit count
:param dict kwargs: Optional keyword arguments
"""
self.ipv6addr = str(ipv6addr)
self.ipv6bits = str(ipv6bits)
self.ipv6prefix_bits = str(ipv6prefix_bits)
super(HostIPv6, self).__init__(session, reference_id, **kwargs)
def _save_as(self):
return {'ipv6addr': self.ipv6addr,
'ipv6bits': self.ipv6bits,
'ipv6prefix_bits': self.ipv6prefix_bits}
class IPv4Address(Record):
"""Implements the ipv4address record type.
"""
dhcp_client_identifier = None
extattrs = None
fingerprint = None
ip_address = None
is_conflict = None
lease_state = None
mac_address = None
names = None
network = None
network_view = None
objects = None
status = None
types = None
usage = None
username = None
_repr_keys = ['ip_address']
_search_by = ['ip_address']
_supports = ['fetch', 'put']
    _wapi_type = 'ipv4address'
    def __init__(self, session, reference_id=None, ipv4addr=None, **kwargs):
        """Create a new instance of an IPv4Address object. If a reference_id or
        valid search criteria are passed in, the object will attempt to load
        the values for the ipv4address from the Infoblox device.
Valid search criteria: ipv4addr
        :param infoblox.Session session: The established session object
:param str reference_id: The Infoblox reference id for the host
:param str ipv4addr: The ipv4 address
:param dict kwargs: Optional keyword arguments
"""
self.ipv4addr = str(ipv4addr)
super(IPv4Address, self).__init__(session, reference_id, **kwargs)
def get_class(reference):
class_name = reference.split('/')[0].split(':')[1]
LOGGER.debug('Class: %s', class_name)
return CLASS_MAP.get(class_name)
CLASS_MAP = {'host': Host,
'host_ipv4addr': HostIPv4,
'host_ipv6addr': HostIPv6,
'ipv4address': IPv4Address}
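# get_class() maps the type token of a WAPI reference (e.g. the 'host' part of a
# reference beginning with 'record:host/...') to one of the classes above so that
# nested structures returned by the API can be rehydrated into Record instances;
# unknown types simply yield None.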
| bsd-3-clause | -1,247,889,201,822,977,500 | 32.420502 | 80 | 0.57759 | false |
aleju/self-driving-truck | lib/plotting.py | 1 | 13772 | """Classes to handle plotting during the training."""
from __future__ import print_function, division
import math
import cPickle as pickle
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
import time
GROWTH_BY = 500
class History(object):
def __init__(self):
self.line_groups = OrderedDict()
@staticmethod
def from_string(s):
return pickle.loads(s)
def to_string(self):
return pickle.dumps(self, protocol=-1)
@staticmethod
def load_from_filepath(fp):
#return json.loads(open(, "r").read())
with open(fp, "r") as f:
history = pickle.load(f)
return history
def save_to_filepath(self, fp):
with open(fp, "w") as f:
pickle.dump(self, f, protocol=-1)
def add_group(self, group_name, line_names, increasing=True):
self.line_groups[group_name] = LineGroup(group_name, line_names, increasing=increasing)
def add_value(self, group_name, line_name, x, y, average=False):
self.line_groups[group_name].lines[line_name].append(x, y, average=average)
def get_group_names(self):
return list(self.line_groups.iterkeys())
def get_groups_increasing(self):
return [group.increasing for group in self.line_groups.itervalues()]
def get_max_x(self):
return max([group.get_max_x() for group in self.line_groups.itervalues()])
def get_recent_average(self, group_name, line_name, nb_points):
ys = self.line_groups[group_name].lines[line_name].ys[-nb_points:]
return np.average(ys)
class LineGroup(object):
def __init__(self, group_name, line_names, increasing=True):
self.group_name = group_name
self.lines = OrderedDict([(name, Line()) for name in line_names])
self.increasing = increasing
self.xlim = (None, None)
def get_line_names(self):
return list(self.lines.iterkeys())
def get_line_xs(self):
#return [line.xs for line in self.lines.itervalues()]
"""
for key, line in self.lines.items():
if not hasattr(line, "last_index"):
print(self.group_name, key, "no last index")
else:
print(self.group_name, key, "OK")
print(type(line.xs), type(line.ys), type(line.counts), type(line.datetimes))
"""
return [line.get_xs() for line in self.lines.itervalues()]
def get_line_ys(self):
#return [line.ys for line in self.lines.itervalues()]
return [line.get_ys() for line in self.lines.itervalues()]
def get_max_x(self):
#return max([max(line.xs) if len(line.xs) > 0 else 0 for line in self.lines.itervalues()])
        return max([np.max(line.get_xs()) if line.last_index > -1 else 0 for line in self.lines.itervalues()])
"""
class Line(object):
def __init__(self, xs=None, ys=None, counts=None, datetimes=None):
self.xs = xs if xs is not None else []
self.ys = ys if ys is not None else []
self.counts = counts if counts is not None else []
self.datetimes = datetimes if datetimes is not None else []
self.last_index = -1
def append(self, x, y, average=False):
# legacy (for loading from pickle)
#if not hasattr(self, "counts"):
# self.counts = [1] * len(self.xs)
# ---
if not average or len(self.xs) == 0 or self.xs[-1] != x:
self.xs.append(x)
self.ys.append(float(y)) # float to get rid of numpy
self.counts.append(1)
self.datetimes.append(time.time())
else:
count = self.counts[-1]
self.ys[-1] = ((self.ys[-1] * count) + y) / (count+1)
self.counts[-1] += 1
self.datetimes[-1] = time.time()
"""
class Line(object):
def __init__(self, xs=None, ys=None, counts=None, datetimes=None):
zeros = np.tile(np.array([0], dtype=np.int32), GROWTH_BY)
self.xs = xs if xs is not None else np.copy(zeros)
self.ys = ys if ys is not None else zeros.astype(np.float32)
self.counts = counts if counts is not None else zeros.astype(np.uint16)
self.datetimes = datetimes if datetimes is not None else zeros.astype(np.uint64)
self.last_index = -1
# for legacy as functions, replace with properties
def get_xs(self):
# legacy
if isinstance(self.xs, list):
self._legacy_convert_from_list_to_np()
return self.xs[0:self.last_index+1]
def get_ys(self):
return self.ys[0:self.last_index+1]
def get_counts(self):
return self.counts[0:self.last_index+1]
def get_datetimes(self):
return self.datetimes[0:self.last_index+1]
def _legacy_convert_from_list_to_np(self):
#print("is list!")
print("[plotting] Converting from list to numpy...")
self.last_index = len(self.xs) - 1
self.xs = np.array(self.xs, dtype=np.int32)
self.ys = np.array(self.ys, dtype=np.float32)
self.counts = np.array(self.counts, dtype=np.uint16)
self.datetimes = np.array([int(dt*1000) for dt in self.datetimes], dtype=np.uint64)
def append(self, x, y, average=False):
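        # Append the point (x, y). With average=True, a value arriving for the
        # same x as the last stored point is folded into a running mean (tracked
        # via self.counts) instead of creating a new entry; the backing numpy
        # arrays are grown in chunks of GROWTH_BY as needed.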
# legacy (for loading from pickle)
#if not hasattr(self, "counts"):
# self.counts = [1] * len(self.xs)
# ---
#legacy
if isinstance(self.xs, list):
self._legacy_convert_from_list_to_np()
if (self.last_index+1) == self.xs.shape[0]:
#print("growing from %d by %d..." % (self.xs.shape[0], GROWTH_BY), self.xs.shape, self.ys.shape, self.counts.shape, self.datetimes.shape)
zeros = np.tile(np.array([0], dtype=np.int32), GROWTH_BY)
self.xs = np.append(self.xs, np.copy(zeros))
self.ys = np.append(self.ys, zeros.astype(np.float32))
self.counts = np.append(self.counts, zeros.astype(np.uint16))
self.datetimes = np.append(self.datetimes, zeros.astype(np.uint64))
#print("growing done", self.xs.shape, self.ys.shape, self.counts.shape, self.datetimes.shape)
first_entry = (self.last_index == -1)
if not average or first_entry or self.xs[self.last_index] != x:
idx = self.last_index + 1
self.xs[idx] = x
self.ys[idx] = y
self.counts[idx] = 1
self.datetimes[idx] = int(time.time()*1000)
self.last_index = idx
else:
idx = self.last_index
count = self.counts[idx]
self.ys[idx] = ((self.ys[idx] * count) + y) / (count+1)
self.counts[idx] = count + 1
self.datetimes[idx] = int(time.time()*1000)
#print("added", x, y, average)
#print(self.xs[self.last_index-10:self.last_index+10+1])
#print(self.ys[self.last_index-10:self.last_index+10+1])
#print(self.counts[self.last_index-10:self.last_index+10+1])
#print(self.datetimes[self.last_index-10:self.last_index+10+1])
class LossPlotter(object):
def __init__(self, titles, increasing, save_to_fp):
assert len(titles) == len(increasing)
n_plots = len(titles)
self.titles = titles
self.increasing = dict([(title, incr) for title, incr in zip(titles, increasing)])
self.xlim = dict([(title, (None, None)) for title in titles])
self.colors = ["red", "blue", "cyan", "magenta", "orange", "black"]
self.nb_points_max = 500
self.save_to_fp = save_to_fp
self.start_batch_idx = 0
self.autolimit_y = False
self.autolimit_y_multiplier = 5
#self.fig, self.axes = plt.subplots(nrows=2, ncols=2, figsize=(20, 20))
nrows = max(1, int(math.sqrt(n_plots)))
ncols = int(math.ceil(n_plots / nrows))
width = ncols * 10
height = nrows * 10
self.fig, self.axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(width, height))
if nrows == 1 and ncols == 1:
self.axes = [self.axes]
else:
self.axes = self.axes.flat
title_to_ax = dict()
for idx, (title, ax) in enumerate(zip(self.titles, self.axes)):
title_to_ax[title] = ax
self.title_to_ax = title_to_ax
self.fig.tight_layout()
self.fig.subplots_adjust(left=0.05)
def plot(self, history):
for plot_idx, title in enumerate(self.titles):
ax = self.title_to_ax[title]
group_name = title
group_increasing = self.increasing[title]
group = history.line_groups[title]
line_names = group.get_line_names()
#print("getting line x/y...", time.time())
line_xs = group.get_line_xs()
line_ys = group.get_line_ys()
#print("getting line x/y FIN", time.time())
"""
print("title", title)
print("line_names", line_names)
for i, xx in enumerate(line_xs):
print("line_xs i: ", xx)
for i, yy in enumerate(line_ys):
print("line_ys i: ", yy)
"""
if any([len(xx) > 0 for xx in line_xs]):
xs_min = min([min(xx) for xx in line_xs if len(xx) > 0])
xs_max = max([max(xx) for xx in line_xs if len(xx) > 0])
xlim = self.xlim[title]
xlim = [
max(xs_min, self.start_batch_idx) if xlim[0] is None else min(xlim[0], xs_max-1),
xs_max+1 if xlim[1] is None else xlim[1]
]
if xlim[0] < 0:
xlim[0] = max(xs_max - abs(xlim[0]), 0)
if xlim[1] < 0:
xlim[1] = max(xs_max - abs(xlim[1]), 1)
else:
# none of the lines has any value, so just use dummy values
# to avoid min/max of empty sequence errors
xlim = [
0 if self.xlim[title][0] is None else self.xlim[title][0],
1 if self.xlim[title][1] is None else self.xlim[title][1]
]
self._plot_group(ax, group_name, group_increasing, line_names, line_xs, line_ys, xlim)
self.fig.savefig(self.save_to_fp)
# this seems to be slow sometimes
def _line_to_xy(self, line_x, line_y, xlim, limit_y_min=None, limit_y_max=None):
def _add_point(points_x, points_y, curr_sum, counter):
points_x.append(batch_idx)
y = curr_sum / counter
if limit_y_min is not None and limit_y_max is not None:
y = np.clip(y, limit_y_min, limit_y_max)
elif limit_y_min is not None:
y = max(y, limit_y_min)
elif limit_y_max is not None:
y = min(y, limit_y_max)
points_y.append(y)
nb_points = 0
for i in range(len(line_x)):
batch_idx = line_x[i]
if xlim[0] <= batch_idx < xlim[1]:
nb_points += 1
point_every = max(1, int(nb_points / self.nb_points_max))
points_x = []
points_y = []
curr_sum = 0
counter = 0
for i in range(len(line_x)):
batch_idx = line_x[i]
if xlim[0] <= batch_idx < xlim[1]:
curr_sum += line_y[i]
counter += 1
if counter >= point_every:
_add_point(points_x, points_y, curr_sum, counter)
counter = 0
curr_sum = 0
if counter > 0:
_add_point(points_x, points_y, curr_sum, counter)
return points_x, points_y
def _plot_group(self, ax, group_name, group_increasing, line_names, line_xs, line_ys, xlim):
ax.cla()
ax.grid()
if self.autolimit_y and any([len(line_xs) > 0 for line_xs in line_xs]):
min_x = min([np.min(line_x) for line_x in line_xs])
max_x = max([np.max(line_x) for line_x in line_xs])
min_y = min([np.min(line_y) for line_y in line_ys])
max_y = max([np.max(line_y) for line_y in line_ys])
if group_increasing:
if max_y > 0:
limit_y_max = None
limit_y_min = max_y / self.autolimit_y_multiplier
if min_y > limit_y_min:
limit_y_min = None
else:
if min_y > 0:
limit_y_max = min_y * self.autolimit_y_multiplier
limit_y_min = None
if max_y < limit_y_max:
limit_y_max = None
if limit_y_min is not None:
ax.plot((min_x, max_x), (limit_y_min, limit_y_min), c="purple")
if limit_y_max is not None:
ax.plot((min_x, max_x), (limit_y_max, limit_y_max), c="purple")
            # limit the y-axis range
yaxmin = min_y if limit_y_min is None else limit_y_min
yaxmax = max_y if limit_y_max is None else limit_y_max
yrange = yaxmax - yaxmin
yaxmin = yaxmin - (0.05 * yrange)
yaxmax = yaxmax + (0.05 * yrange)
ax.set_ylim([yaxmin, yaxmax])
else:
limit_y_min = None
limit_y_max = None
for line_name, line_x, line_y, line_col in zip(line_names, line_xs, line_ys, self.colors):
#print("line to xy...", time.time())
x, y = self._line_to_xy(line_x, line_y, xlim, limit_y_min=limit_y_min, limit_y_max=limit_y_max)
#print("line to xy FIN", time.time())
#print("plotting ax...", time.time())
ax.plot(x, y, color=line_col, linewidth=1.0)
#print("plotting ax FIN", time.time())
ax.set_title(group_name)
| mit | 3,102,420,107,428,294,000 | 38.348571 | 149 | 0.548141 | false |
waile23/todo | models/pduser.py | 1 | 2906 | # -*- coding: utf-8 -*-
from basemodel import *
import md5
import math
import sys
class PDuser(BaseModel):
'''model autocreate by createModel'''
table_name = 'pd_user'
#db_name = 'todo_local'
db_name = web.config.write_db_name
def _format_user(self, row):
if hasattr(row, 'u_logo'):
if not row.u_logo:
row.u_logo = "/static/img/default_logo.png"
return row
def load_by_id(self, id, iscache=True, isformat=True):
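		# Cache-aside lookup: try memcache first with a key derived from the
		# primary key, fall back to the reader DB on a miss, then store the
		# (optionally formatted) row back into memcache before returning it.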
mkey = self.create_pri_cache_key(u_id=id)
ret = BaseModel.memget(mkey)
if not iscache or not ret:
rows = self.reader().select(self.table_name, where="u_id=$uid", vars={"uid":id})
for row in rows:
if isformat:
ret = self._format_user(row)
else:
ret = row
break
BaseModel.memset(mkey, ret)
return ret
def check_name(self, name,loginid=0):
ret = self.reader().select(self.table_name, where="u_name=$name and u_id not in ($loginid)", vars={"name":name,"loginid":loginid})
for v in ret:
return True
return False
def check_name_count(self, name):
ret = self.reader().select(self.table_name,what="count(1) as count", where="u_name=$name", vars={"name":name})
for v in ret:
return v.count
return 0
def check_email(self, email,loginid=0):
ret = self.reader().select(self.table_name, where="u_email=$email and u_id not in ($loginid)", vars={"email":email,"loginid":loginid})
for v in ret:
return True
return False
def user_list(self,page=0,size=15,iscache=True,isformat=True):
mkey=md5.new(self.__class__.__name__+"."+sys._getframe().f_code.co_name+"_page_"+str(page)+"_size_"+str(size)).hexdigest()
ret=BaseModel.memget(mkey)
if not iscache or not ret:
ret=[]
ret_i = self.reader().select(self.table_name,order="u_create_time desc",limit=size,offset=page*size)
for row in ret_i:
if isformat:
ret.append(self._format_user(row))
else:
ret.append(row)
BaseModel.memset(mkey,ret)
return ret
def loaduser_by_email(self, email):
rows = self.reader().select(self.table_name, where="u_email=$email", vars={"email":email})
ret = None
for row in rows:
ret = row
break
return ret
def loaduser_by_social(self, fr, auth):
rows = self.reader().select(self.table_name, where="u_from='" + fr + "' and u_auth='" + auth + "'")
ret = None
for row in rows:
ret = row
break
return ret
def insert_by_list(self, rows):
ret = self.writer().multiple_insert(self.table_name, rows)
for i in ret:
self.memdel(self.create_pri_cache_key(u_id=i))
return ret
def update_by_insert(self, row):
sql = ["update"]
sql.append(self.table_name)
sql.append("set")
tmp = []
for k in row:
tmp.append(k + "=$" + k)
sql.append(",".join(tmp))
sql.append("where u_id=$u_id")
sqlstr = " ".join(sql)
self.writer().query(sqlstr, row)
self.memdel(self.create_pri_cache_key(u_id=row.u_id))
pduser = PDuser() #public instance
| mit | -6,926,692,520,643,417,000 | 26.415094 | 136 | 0.646249 | false |
Michal-Fularz/codingame_solutions | codingame_solutions/medium/medium_The_Paranoid_Android.py | 1 | 3099 | __author__ = 'Amin'
# COMPLETED
# PYTHON 3.x
import sys
import math
class Floor:
def __init__(self, width, contains_exit=False, exit_position=-1):
self.width = width
self.__contains_elevator = False
self.__elevator_position = -1
self.__contains_exit = contains_exit
self.__exit_position = exit_position
def add_exit(self, exit_position):
self.__contains_exit = True
self.__exit_position = exit_position
def add_elevator(self, elevator_position):
self.__contains_elevator = True
self.__elevator_position = elevator_position
def should_be_blocked(self, position, direction):
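        # Block the leading clone when it walks away from the goal on its floor:
        # away from the elevator if this floor has one, otherwise away from the
        # exit if this floor holds the exit.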
flag_should_be_blocked = False
if self.__contains_elevator:
if position > self.__elevator_position and direction == "RIGHT" or \
position < self.__elevator_position and direction == "LEFT":
flag_should_be_blocked = True
elif self.__contains_exit:
if position > self.__exit_position and direction == "RIGHT" or \
position < self.__exit_position and direction == "LEFT":
flag_should_be_blocked = True
return flag_should_be_blocked
class Drive:
def __init__(self):
self.floors = []
self.load_from_input()
def load_from_input(self):
# nb_floors: number of floors
# width: width of the area
# nb_rounds: maximum number of rounds
# exit_floor: floor on which the exit is found
# exit_pos: position of the exit on its floor
# nb_total_clones: number of generated clones
# nb_additional_elevators: ignore (always zero)
# nb_elevators: number of elevators
nb_floors, width, nb_rounds, exit_floor, exit_pos, nb_total_clones, nb_additional_elevators, nb_elevators = [int(i) for i in input().split()]
for i in range(nb_floors):
self.floors.append(Floor(width))
self.floors[exit_floor].add_exit(exit_pos)
for i in range(nb_elevators):
# elevator_floor: floor on which this elevator is found
# elevator_pos: position of the elevator on its floor
elevator_floor, elevator_pos = [int(j) for j in input().split()]
self.floors[elevator_floor].add_elevator(elevator_pos)
if __name__ == '__main__':
drive = Drive()
flag_do_the_blocking = False
# game loop
while 1:
# clone_floor: floor of the leading clone
# clone_pos: position of the leading clone on its floor
# direction: direction of the leading clone: LEFT or RIGHT
clone_floor, clone_pos, direction = input().split()
clone_floor = int(clone_floor)
clone_pos = int(clone_pos)
flag_do_the_blocking = drive.floors[clone_floor].should_be_blocked(clone_pos, direction)
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
# action: WAIT or BLOCK
if flag_do_the_blocking:
print("BLOCK")
else:
print("WAIT")
| mit | 7,778,575,852,018,126,000 | 32.322581 | 149 | 0.603743 | false |
garthylou/Libreosteo | libreosteoweb/api/file_integrator.py | 1 | 19791 | # This file is part of LibreOsteo.
#
# LibreOsteo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibreOsteo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LibreOsteo. If not, see <http://www.gnu.org/licenses/>.
import logging
import csv
from django.utils.translation import ugettext_lazy as _
import random
from libreosteoweb.models import Patient, ExaminationType, ExaminationStatus
from datetime import date, datetime
from .utils import enum, Singleton, _unicode
logger = logging.getLogger(__name__)
_CSV_BUFFER_SIZE = 1024 * 1024 * 10
class Extractor(object):
def extract(self, instance):
"""
        Return a dict with keys 'patient' and 'examination'; each value is an
        extract of the content, a dict mapping a line number to that line's row.
"""
result = {}
extract_patient = self.extract_file(instance.file_patient)
extract_examination = self.extract_file(instance.file_examination)
result['patient'] = extract_patient
result['examination'] = extract_examination
return result
def analyze(self, instance):
"""
        Return a dict with keys 'patient' and 'examination'; each value indicates:
        - whether the expected file has the correct type,
        - whether the file is valid,
        - whether the file is empty,
        - the list of errors, if any were found.
"""
logger.info("* Analyze the instance")
result = {}
(type_file, is_valid, is_empty,
errors) = self.analyze_file(instance.file_patient)
result['patient'] = (type_file, is_valid, is_empty, errors)
(type_file, is_valid, is_empty,
errors) = self.analyze_file(instance.file_examination)
result['examination'] = (type_file, is_valid, is_empty, errors)
return result
def analyze_file(self, internal_file):
if not bool(internal_file):
return ('', False, True, [])
try:
handler = AnalyzerHandler()
report = handler.analyze(internal_file)
except:
logger.exception('Analyze failed.')
return ('', False, True, [_('Analyze failed on this file')])
if report.type == FileCsvType.PATIENT:
return ('patient', report.is_valid, report.is_empty, [])
if report.type == FileCsvType.EXAMINATION:
return ('examination', report.is_valid, report.is_empty, [])
else:
return ('patient', False, True,
[_('Cannot recognize the patient file')])
def extract_file(self, internal_file):
if not bool(internal_file):
return {}
result = {}
try:
content = FileContentProxy().get_content(internal_file,
line_filter=filter)
nb_row = content['nb_row'] - 1
if nb_row > 0:
idx = sorted(
random.sample(range(1, nb_row + 1), min(5, nb_row)))
logger.info("indexes = %s " % idx)
for i in idx:
result['%s' % (i + 1)] = content['content'][i - 1]
except:
logger.exception('Extractor failed.')
logger.info("result is %s" % result)
return result
def get_content(self, internal_file):
return FileContentProxy().get_content(internal_file,
line_filter=filter)
def unproxy(self, internal_file):
FileContentProxy().unproxy(internal_file, line_filter=filter)
def filter(line):
logger.debug("filtering ...")
if not hasattr(line, 'decode'):
logger.debug("no decode available")
return line
result_line = None
try:
logger.debug("Try to decode against utf-8")
result_line = line.decode('utf-8')
except:
logger.debug("Fail to decode against utf-8")
pass
if result_line is None:
try:
logger.debug("Try to decode against iso-8859-1")
result_line = line.decode('iso-8859-1')
except:
logger.info("Fail to decode against iso-8859-1")
result_line = _(
'Cannot read the content file. Check the encoding.')
return result_line
FileCsvType = enum('FileCsvType', 'PATIENT', 'EXAMINATION')
class AnalyzeReport(object):
def __init__(self, is_empty, is_valid, internal_type):
self.is_empty = is_empty
self.is_valid = is_valid
self.type = internal_type
def is_empty(self):
return self.is_empty
def is_valid(self):
return self.is_valid
def type(self):
return self.type
class Analyzer(object):
"""
    Performs the analysis on the content.
    It is meant to be subclassed.
"""
identifier = None
type = None
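    # Subclasses are expected to provide three class attributes: `identifier`
    # (a keyword looked up, case-insensitively, in the CSV header), `type`
    # (a FileCsvType value) and `field_number` (the expected column count used
    # by get_report() to decide whether the file is valid).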
def __init__(self, content=None):
self.content = content
def is_instance(self):
if self.content is not None:
try:
self._parse_header(self.content['header'])
return True
except ValueError:
return False
return False
def _parse_header(self, header):
_unicode(header[:]).lower().index(self.__class__.identifier)
def get_report(self):
is_empty = self.content.nb_row <= 1
# is_valid should check the number of columns
is_valid = len(self.content.header) == self.__class__.field_number
return AnalyzeReport(is_empty, is_valid, self.__class__.type)
class AnalyzerPatientFile(Analyzer):
identifier = 'nom de famille'
type = FileCsvType.PATIENT
field_number = 24
def __init__(self, content=None):
super(self.__class__, self).__init__(content=content)
class AnalyzerExaminationFile(Analyzer):
identifier = 'conclusion'
type = FileCsvType.EXAMINATION
field_number = 14
def __init__(self, content=None):
super(self.__class__, self).__init__(content=content)
class FileContentAdapter(dict):
def __init__(self, ourfile, line_filter=None):
self.file = ourfile
self['content'] = None
self.filter = line_filter
if self.filter is None:
self.filter = self.passthrough
def __getattr__(self, attr):
return self[attr]
def get_content(self):
if self['content'] is None:
reader = self._get_reader()
rownum = 0
header = None
content = []
for row in reader:
# Save header row.
if rownum == 0:
header = [self.filter(c) for c in row]
else:
content.append([self.filter(c) for c in row])
rownum += 1
self.file.close()
self['content'] = content
self['nb_row'] = rownum
self['header'] = header
return self
def _get_reader(self):
if not bool(self.file):
return None
self.file.open(mode='r')
logger.info("* Try to guess the dialect on csv")
csv_buffer = self.file.read(_CSV_BUFFER_SIZE)
# Compatibility with python2 and python3
dialect = csv.Sniffer().sniff(csv_buffer)
self.file.seek(0)
reader = csv.reader(self.file, dialect)
return reader
def passthrough(self, line):
return line
class DecodeCsvReader(object):
def __init__(self, underlying_instance, decode_filter):
self.reader_instance = underlying_instance
self.filter = decode_filter
def __next__(self):
return self.filter(next(self.reader_instance))
def __iter__(self):
return self
class FileContentKey(object):
def __init__(self, ourfile, line_filter):
self.file = ourfile
self.line_filter = line_filter
def __hash__(self):
return hash((self.file, self.line_filter))
def __eq__(self, other):
return (self.file, self.line_filter) == (other.file, other.line_filter)
def __ne__(self, other):
# Not strictly necessary, but to avoid having both x==y and x!=y
# True at the same time
return not (self == other)
class FileContentProxy(object):
__metaclass__ = Singleton
file_content = {}
def get_content(self, ourfile, line_filter=None):
key = FileContentKey(ourfile, line_filter)
try:
return self.file_content[key]
except KeyError:
self.file_content[key] = FileContentAdapter(
ourfile, line_filter).get_content()
return self.file_content[key]
def unproxy(self, ourfile, line_filter=None):
key = FileContentKey(ourfile, line_filter)
try:
self.file_content[key] = None
except:
pass
class AnalyzerHandler(object):
analyzers = [AnalyzerPatientFile, AnalyzerExaminationFile]
def analyze(self, ourfile):
if not bool(ourfile):
return AnalyzeReport(False, False, None)
content = self.get_content(ourfile)
for analyzer in self.analyzers:
instance = analyzer(content)
if instance.is_instance():
return instance.get_report()
logger.warn("No Analyzer found")
return AnalyzeReport(False, False, None)
def get_content(self, ourfile):
return FileContentProxy().get_content(ourfile, line_filter=filter)
def filter(self, line):
result_line = None
try:
result_line = line.decode('utf-8')
except:
pass
if result_line is None:
try:
result_line = line.decode('iso-8859-1')
except:
result_line = _(
'Cannot read the content file. Check the encoding.')
return result_line
class InvalidIntegrationFile(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class IntegratorHandler(object):
def integrate(self, file, file_additional=None, user=None):
integrator = IntegratorFactory().get_instance(file)
if integrator is None:
raise InvalidIntegrationFile(
"This file %s is not valid to be integrated." % (file))
result = integrator.integrate(file,
file_additional=file_additional,
user=user)
return result
def post_processing(self, files):
extractor = Extractor()
for f in files:
extractor.unproxy(f)
class IntegratorFactory(object):
def __init__(self, serializer_class=None):
self.extractor = Extractor()
self.serializer_class = serializer_class
def get_instance(self, file):
result = self.extractor.analyze_file(file)
if not result[1]:
return None
if result[0] == 'patient':
from .serializers import PatientSerializer
return IntegratorPatient(serializer_class=PatientSerializer)
elif result[0] == 'examination':
from .serializers import ExaminationSerializer
return IntegratorExamination(
serializer_class=ExaminationSerializer)
class FilePatientFactory(object):
def __init__(self):
from .serializers import PatientSerializer
self.serializer_class = PatientSerializer
def get_serializer(self, row):
try:
data = {
'family_name': row[1],
'original_name': row[2],
'first_name': row[3],
'birth_date': self.get_date(row[4]),
'sex': self.get_sex_value(row[5]),
'address_street': row[6],
'address_complement': row[7],
'address_zipcode': row[8],
'address_city': row[9],
'email': row[10],
'phone': row[11],
'mobile_phone': row[12],
'job': row[13],
'hobbies': row[14],
'smoker': self.get_boolean_value(row[15]),
'laterality': self.get_laterality_value(row[16]),
'important_info': row[17],
'current_treatment': row[18],
'surgical_history': row[19],
'medical_history': row[20],
'family_history': row[21],
'trauma_history': row[22],
'medical_reports': row[23],
'creation_date': self.get_default_date(),
'consent_check': False
}
serializer = self.serializer_class(data=data)
except ValueError as e:
logger.exception("Exception when creating examination.")
serializer = {'errors': ["%s" % e]}
except:
logger.exception("Exception when creating examination.")
return serializer
def get_sex_value(self, value):
if value.upper() == 'F':
return 'F'
else:
return 'M'
def get_laterality_value(self, value):
if value.upper() == 'G' or value.upper() == 'L':
return 'L'
else:
return 'R'
def get_boolean_value(self, value):
if value.lower() == 'o' or value.lower() == 'oui' or value.lower(
) == 'true' or value.lower() == 't':
return True
else:
return False
def get_default_date(self):
return date(2011, 1, 1)
def get_date(self, value):
f = "%d/%m/%Y"
return datetime.strptime(value, f).date()
class AbstractIntegrator(object):
def integrate(self, file, file_additional=None, user=None):
pass
class IntegratorPatient(AbstractIntegrator):
def __init__(self, serializer_class=None):
self.extractor = Extractor()
self.serializer_class = serializer_class
def integrate(self, file, file_additional=None, user=None):
content = self.extractor.get_content(file)
nb_line = 0
errors = []
factory = FilePatientFactory()
for idx, r in enumerate(content['content']):
serializer = factory.get_serializer(r)
try:
serializer['errors']
errors.append((idx + 2, serializer['errors']))
except KeyError:
if serializer.is_valid():
serializer.save()
nb_line += 1
else:
                    # idx + 2 because we have a header and the index starts at 0,
                    # so to get the line number we have to add 2 to the index.
errors.append((idx + 2, serializer.errors))
logger.info("errors detected, data is = %s " %
serializer.initial_data)
return (nb_line, errors)
class IntegratorExamination(AbstractIntegrator):
def __init__(self, serializer_class=None):
self.extractor = Extractor()
self.serializer_class = serializer_class
self.patient_table = None
def integrate(self, file, file_additional=None, user=None):
if file_additional is None:
return (0, [_('Missing patient file to integrate it.')])
content = self.extractor.get_content(file)
nb_line = 0
errors = []
for idx, r in enumerate(content['content']):
logger.info("* Load line from content")
try:
patient = self.get_patient(int(r[0]), file_additional)
data = {
'date': self.get_date(r[1], with_time=True),
'reason': r[2],
'reason_description': r[3],
'orl': r[4],
'visceral': r[5],
'pulmo': r[6],
'uro_gyneco': r[7],
'periphery': r[8],
'general_state': r[9],
'medical_examination': r[10],
'diagnosis': r[11],
'treatments': r[12],
'conclusion': r[13],
'patient': patient.id,
'therapeut': user.id,
'type': ExaminationType.NORMAL,
'status': ExaminationStatus.NOT_INVOICED,
'status_reason': u'%s' % _('Imported examination'),
}
serializer = self.serializer_class(data=data)
if serializer.is_valid():
serializer.save()
nb_line += 1
else:
                    # idx + 2 because we have a header and the index starts at 0,
                    # so to get the line number we have to add 2 to the index.
errors.append((idx + 2, serializer.errors))
logger.info("errors detected, data is = %s, errors = %s " %
(data, serializer.errors))
except ValueError as e:
logger.exception("Exception when creating examination.")
errors.append((idx + 2, {
'general_problem':
_('There is a problem when reading this line :') +
_unicode(e)
}))
except:
logger.exception("Exception when creating examination.")
errors.append((idx + 2, {
'general_problem':
_('There is a problem when reading this line.')
}))
return (nb_line, errors)
def get_date(self, value, with_time=False):
f = "%d/%m/%Y"
if with_time:
return datetime.strptime(value, f)
return datetime.strptime(value, f).date()
def get_patient(self, numero, file_patient):
if not bool(file_patient):
return None
if self.patient_table is None:
self._build_patient_table(file_patient)
return self.patient_table[numero]
def _build_patient_table(self, file_patient):
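        # Build a lookup from the numeric id in the first CSV column to an
        # existing Patient record, matched on family name, first name and birth
        # date as parsed by FilePatientFactory.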
content = self.extractor.get_content(file_patient)
self.patient_table = {}
factory = FilePatientFactory()
for c in content['content']:
serializer = factory.get_serializer(c)
# remove validators to get a validated data through filters
serializer.validators = []
serializer.is_valid()
self.patient_table[int(c[0])] = Patient.objects.filter(
family_name=serializer.validated_data['family_name'],
first_name=serializer.validated_data['first_name'],
birth_date=serializer.validated_data['birth_date']).first()
logger.info("found patient %s " % self.patient_table[int(c[0])])
| gpl-3.0 | 5,506,196,519,202,931,000 | 33.090426 | 95 | 0.539235 | false |
spiceqa/virt-test | libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_create_as.py | 1 | 18414 | import re
import os
import commands
import logging
from autotest.client.shared import error
from virttest import virsh, utils_misc, xml_utils, libvirt_xml
from virttest.libvirt_xml import vm_xml, xcepts
def xml_recover(vmxml):
"""
Recover older xml config with backup vmxml.
:params: vmxml: VMXML object
"""
try:
options = "--snapshots-metadata"
vmxml.undefine(options)
vmxml.define()
return True
except xcepts.LibvirtXMLError, detail:
logging.error("Recover older xml failed:%s.", detail)
return False
def check_snap_in_image(vm_name, snap_name):
"""
check the snapshot info in image
:params: vm_name: VM name
:params: snap_name: Snapshot name
"""
domxml = virsh.dumpxml(vm_name).stdout.strip()
xtf_dom = xml_utils.XMLTreeFile(domxml)
cmd = "qemu-img info " + xtf_dom.find("devices/disk/source").get("file")
img_info = commands.getoutput(cmd).strip()
if re.search(snap_name, img_info):
logging.info("Find snapshot info in image")
return True
else:
return False
def compose_disk_options(test, params, opt_names):
"""
Compose the {disk,mem}spec options
    The file named in a diskspec needs to be prefixed with a suitable directory;
    its name is configured individually, and the 'value' after 'file=' is a
    parameter that also has to be read from the cfg.
:params: test & params: system parameters
:params: opt_names: params get from cfg of {disk,mem}spec options
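    Example (hypothetical values): with test.virtdir=/tmp/virt and
    opt_names='vda,snapshot=external,file=disk1.img' the returned string is
    'vda,snapshot=external,file=/tmp/virt/disk1.img'.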
"""
if opt_names.find("file=") >= 0:
opt_disk = opt_names.split("file=")
opt_list = opt_disk[1].split(",")
if len(opt_list) > 1:
left_opt = opt_list[1]
else:
left_opt = ""
if params.get("bad_disk") is not None or \
params.get("external_disk") is not None:
spec_disk = os.path.join(test.virtdir, params.get(opt_list[0]))
else:
spec_disk = os.path.join(test.virtdir, opt_list[0])
return opt_disk[0] + "file=" + spec_disk + left_opt
def check_snapslist(vm_name, options, option_dict, output,
snaps_before, snaps_list):
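    # Verify the snapshot that snapshot-create-as just produced: presence (or
    # absence, with --no-metadata) in the snapshot list, its record in qemu-img,
    # the halted domain state for --halt, and that name/description/diskspec/
    # memspec in the generated XML match what was requested.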
no_metadata = options.find("--no-metadata")
fdisks = "disks"
# command with print-xml will not really create snapshot
if options.find("print-xml") >= 0:
xtf = xml_utils.XMLTreeFile(output)
# With --print-xml there isn't new snapshot created
if len(snaps_before) != len(snaps_list):
raise error.TestFail("--print-xml create new snapshot")
else:
# The following does not check with print-xml
get_sname = output.split()[2]
# check domain/snapshot xml depends on if have metadata
if no_metadata < 0:
output_dump = virsh.snapshot_dumpxml(vm_name,
get_sname).stdout.strip()
else:
output_dump = virsh.dumpxml(vm_name).stdout.strip()
fdisks = "devices"
xtf = xml_utils.XMLTreeFile(output_dump)
find = 0
for snap in snaps_list:
if snap == get_sname:
find = 1
break
# Should find snap in snaplist without --no-metadata
if (find == 0 and no_metadata < 0):
raise error.TestFail("Can not find snapshot %s!"
% get_sname)
# Should not find snap in list without metadata
elif (find == 1 and no_metadata >= 0):
raise error.TestFail("Can find snapshot metadata even "
"if have --no-metadata")
elif (find == 0 and no_metadata >= 0):
logging.info("Can not find snapshot %s as no-metadata "
"is given" % get_sname)
# Check snapshot only in qemu-img
if (options.find("--disk-only") < 0 and
options.find("--memspec") < 0):
ret = check_snap_in_image(vm_name, get_sname)
if ret is False:
raise error.TestFail("No snap info in image")
else:
logging.info("Find snapshot %s in snapshot list."
% get_sname)
# Check if the disk file exist when disk-only is given
if options.find("disk-only") >= 0:
for disk in xtf.find(fdisks).findall('disk'):
diskpath = disk.find('source').get('file')
if os.path.isfile(diskpath):
logging.info("disk file %s exist" % diskpath)
os.remove(diskpath)
else:
# Didn't find <source file="path to disk"/>
# in output - this could leave a file around
# wherever the main OS image file is found
logging.debug("output_dump=%s", output_dump)
raise error.TestFail("Can not find disk %s"
% diskpath)
# Check if the guest is halted when 'halt' is given
if options.find("halt") >= 0:
domstate = virsh.domstate(vm_name)
if re.match("shut off", domstate.stdout):
logging.info("Domain is halted after create "
"snapshot")
else:
raise error.TestFail("Domain is not halted after "
"snapshot created")
# Check the snapshot xml regardless of having print-xml or not
if (options.find("name") >= 0 and no_metadata < 0):
if xtf.findtext('name') == option_dict["name"]:
logging.info("get snapshot name same as set")
else:
raise error.TestFail("Get wrong snapshot name %s" %
xtf.findtext('name'))
if (options.find("description") >= 0 and no_metadata < 0):
desc = xtf.findtext('description')
if desc == option_dict["description"]:
logging.info("get snapshot description same as set")
else:
raise error.TestFail("Get wrong description on xml")
if options.find("diskspec") >= 0:
if isinstance(option_dict['diskspec'], list):
index = len(option_dict['diskspec'])
else:
index = 1
disks = xtf.find(fdisks).findall('disk')
for num in range(index):
if isinstance(option_dict['diskspec'], list):
option_disk = option_dict['diskspec'][num]
else:
option_disk = option_dict['diskspec']
option_disk = "name=" + option_disk
disk_dict = utils_misc.valued_option_dict(option_disk,
",", 0, "=")
logging.debug("disk_dict is %s", disk_dict)
# For no metadata snapshot do not check name and
# snapshot
if no_metadata < 0:
dname = disks[num].get('name')
logging.debug("dname is %s", dname)
if dname == disk_dict['name']:
logging.info("get disk%d name same as set in "
"diskspec", num)
else:
raise error.TestFail("Get wrong disk%d name %s"
                                             % (num, dname))
if option_disk.find('snapshot=') >= 0:
dsnap = disks[num].get('snapshot')
logging.debug("dsnap is %s", dsnap)
if dsnap == disk_dict['snapshot']:
logging.info("get disk%d snapshot type same"
" as set in diskspec", num)
else:
raise error.TestFail("Get wrong disk%d "
"snapshot type %s" %
                                                 (num, dsnap))
if option_disk.find('driver=') >= 0:
dtype = disks[num].find('driver').get('type')
if dtype == disk_dict['driver']:
logging.info("get disk%d driver type same as "
"set in diskspec", num)
else:
                            raise error.TestFail("Get wrong disk%d driver "
                                                 "type %s" % (num, dtype))
if option_disk.find('file=') >= 0:
sfile = disks[num].find('source').get('file')
if sfile == disk_dict['file']:
logging.info("get disk%d source file same as "
"set in diskspec", num)
else:
                            raise error.TestFail("Get wrong disk%d source "
                                                 "file %s" % (num, sfile))
# For memspec check if the xml is same as setting
# Also check if the mem file exists
if options.find("memspec") >= 0:
memspec = option_dict['memspec']
if re.search('file=', option_dict['memspec']) < 0:
memspec = 'file=' + option_dict['memspec']
mem_dict = utils_misc.valued_option_dict(memspec, ",", 0,
"=")
logging.debug("mem_dict is %s", mem_dict)
if no_metadata < 0:
if memspec.find('snapshot=') >= 0:
snap = xtf.find('memory').get('snapshot')
if snap == mem_dict['snapshot']:
logging.info("get memory snapshot type same as"
" set in diskspec")
else:
raise error.TestFail("Get wrong memory snapshot"
" type on print xml")
memfile = xtf.find('memory').get('file')
if memfile == mem_dict['file']:
logging.info("get memory file same as set in "
"diskspec")
else:
raise error.TestFail("Get wrong memory file on "
"print xml %s", memfile)
if options.find("print-xml") < 0:
if os.path.isfile(mem_dict['file']):
logging.info("memory file generated")
os.remove(mem_dict['file'])
else:
raise error.TestFail("Fail to generate memory file"
" %s", mem_dict['file'])
def run_virsh_snapshot_create_as(test, params, env):
"""
    Test the snapshot-create-as command.
    Make sure that a clean repo can be used, because qemu-guest-agent needs to
    be installed in the guest.
    The command creates a snapshot (disk and RAM) from arguments, covering
    the following points:
* virsh snapshot-create-as --print-xml --diskspec --name --description
* virsh snapshot-create-as --print-xml with multi --diskspec
* virsh snapshot-create-as --print-xml --memspec
* virsh snapshot-create-as --description
* virsh snapshot-create-as --no-metadata
* virsh snapshot-create-as --no-metadata --print-xml (negative test)
* virsh snapshot-create-as --atomic --disk-only
* virsh snapshot-create-as --quiesce --disk-only (positive and negative)
* virsh snapshot-create-as --reuse-external
* virsh snapshot-create-as --disk-only --diskspec
* virsh snapshot-create-as --memspec --reuse-external --atomic(negative)
* virsh snapshot-create-as --disk-only and --memspec (negative)
* Create multi snapshots with snapshot-create-as
* Create snapshot with name a--a a--a--snap1
"""
if not virsh.has_help_command('snapshot-create-as'):
raise error.TestNAError("This version of libvirt does not support "
"the snapshot-create-as test")
vm_name = params.get("main_vm")
status_error = params.get("status_error", "no")
options = params.get("snap_createas_opts")
multi_num = params.get("multi_num", "1")
diskspec_num = params.get("diskspec_num", "1")
bad_disk = params.get("bad_disk")
external_disk = params.get("external_disk")
start_ga = params.get("start_ga", "yes")
domain_state = params.get("domain_state")
memspec_opts = params.get("memspec_opts")
diskspec_opts = params.get("diskspec_opts")
opt_names = locals()
if memspec_opts is not None:
mem_options = compose_disk_options(test, params, memspec_opts)
# if the parameters have the disk without "file=" then we only need to
# add testdir for it.
if mem_options is None:
mem_options = os.path.join(test.virtdir, memspec_opts)
options += " --memspec " + mem_options
tag_diskspec = 0
dnum = int(diskspec_num)
if diskspec_opts is not None:
tag_diskspec = 1
opt_names['diskopts_1'] = diskspec_opts
# diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
if dnum > 1:
tag_diskspec = 1
for i in range(1, dnum + 1):
opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)
if tag_diskspec == 1:
for i in range(1, dnum + 1):
disk_options = compose_disk_options(test, params,
opt_names["diskopts_%s" % i])
options += " --diskspec " + disk_options
logging.debug("options are %s", options)
vm = env.get_vm(vm_name)
option_dict = {}
option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
logging.debug("option_dict is %s", option_dict)
# A backup of original vm
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
logging.debug("original xml is %s", vmxml_backup)
# Generate empty image for negative test
if bad_disk is not None:
bad_disk = os.path.join(test.virtdir, bad_disk)
os.open(bad_disk, os.O_RDWR | os.O_CREAT)
# Generate external disk
if external_disk is not None:
external_disk = os.path.join(test.virtdir, external_disk)
commands.getoutput("qemu-img create -f qcow2 %s 1G" % external_disk)
try:
# Start qemu-ga on guest if have --quiesce
if options.find("quiesce") >= 0:
if vm.is_alive():
vm.destroy()
virt_xml_obj = libvirt_xml.VMXML(virsh_instance=virsh)
virt_xml_obj.set_agent_channel(vm_name)
vm.start()
if start_ga == "yes":
session = vm.wait_for_login()
# Check if qemu-ga already started automatically
cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
stat_install = session.cmd_status(cmd, 300)
if stat_install != 0:
                    raise error.TestFail("Fail to install qemu-guest-agent, make "
                                         "sure that you have a usable repo in guest")
# Check if qemu-ga already started
stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
if stat_ps != 0:
session.cmd("qemu-ga -d")
# Check if the qemu-ga really started
stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
if stat_ps != 0:
raise error.TestFail("Fail to run qemu-ga in guest")
if domain_state == "paused":
virsh.suspend(vm_name)
# Record the previous snapshot-list
snaps_before = virsh.snapshot_list(vm_name)
# Run virsh command
# May create several snapshots, according to configuration
for count in range(int(multi_num)):
cmd_result = virsh.snapshot_create_as(vm_name, options,
ignore_status=True, debug=True)
output = cmd_result.stdout.strip()
status = cmd_result.exit_status
# check status_error
if status_error == "yes":
if status == 0:
raise error.TestFail("Run successfully with wrong command!")
else:
# Check memspec file should be removed if failed
if (options.find("memspec") >= 0
and options.find("atomic") >= 0):
if os.path.isfile(option_dict['memspec']):
os.remove(option_dict['memspec'])
                            raise error.TestFail("Run failed but file %s exists"
% option_dict['memspec'])
else:
                            logging.info("Run failed as expected and memspec file"
                                         " has already been removed")
else:
logging.info("Run failed as expected")
elif status_error == "no":
if status != 0:
raise error.TestFail("Run failed with right command: %s"
% output)
else:
# Check the special options
snaps_list = virsh.snapshot_list(vm_name)
logging.debug("snaps_list is %s", snaps_list)
check_snapslist(vm_name, options, option_dict, output,
snaps_before, snaps_list)
finally:
# Environment clean
if options.find("quiesce") >= 0 and start_ga == "yes":
session.cmd("rpm -e qemu-guest-agent")
# recover domain xml
xml_recover(vmxml_backup)
path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
if os.path.isfile(path):
raise error.TestFail("Still can find snapshot metadata")
# rm bad disks
if bad_disk is not None:
os.remove(bad_disk)
| gpl-2.0 | -9,063,266,437,046,036,000 | 40.472973 | 83 | 0.507386 | false |
valsson/MD-MC-Codes-2016 | HarmonicOscillator-MD/HarmonicOscillator-MD-Verlet.py | 1 | 4262 | #! /usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from DataTools import writeDataToFile
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--time-step',dest='time_step',required=False)
parser.add_argument('--output-file',dest='fn_out',required=False)
args = parser.parse_args()
# Parameters of potential
m = 1.0
k = (2.0*np.pi)**2
angular_freq = np.sqrt(k/m)
freq = angular_freq/(2.0*np.pi)
period = 1.0/freq
# MD Parameters
if(args.time_step):
time_step = np.float64(args.time_step)
else:
time_step = 0.01*period
if(args.fn_out):
fn_out = args.fn_out
else:
fn_out = 'results.data'
showPlots = False
#num_periods = 20
#num_steps = np.int(np.rint( (num_periods*period)/time_step ))
num_steps = 10000
# initial postion and velocity at t=0
initial_position = 2.0
initial_velocity = 0.0
def getPotentialEnergy(x):
potential_ener = 0.5*k*x**2
return potential_ener
#-------------------------------
def getForce(x):
force = -k*x
return force
#-------------------------------
def getAccleration(x):
return getForce(x)/m
#-------------------------------
def getPotentialAndForce(x):
return ( getPotentialEnergy(x), getForce(x) )
#-------------------------------
def getKineticEnergy(v):
kinetic_ener = 0.5*m*v**2
return kinetic_ener
#-------------------------------
def getTotalEnergy(x,v):
return getPotentialEnergy(x)+getKineticEnergy(v)
#-------------------------------
# analytical solution:
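# x(t) = A*cos(w*t + phi), v(t) = -A*w*sin(w*t + phi); phi and the amplitude A
# below are chosen so that x(0) and v(0) match the initial position and velocity.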
phi = np.arctan(-initial_velocity/(initial_position*angular_freq))
amplitude = initial_position/np.cos(phi)
conserved_energy = getPotentialEnergy(amplitude)
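# Closed-form solution of the oscillator, used for comparison further below:
#   x(t) = A*cos(omega*t + phi),   v(t) = -A*omega*sin(omega*t + phi)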
# ----------------------
times = []
positions = []
velocites = []
pot_energies = []
kin_energies = []
tot_energies = []
time = 0.0
curr_position = initial_position
prev_position = curr_position-initial_velocity*time_step + 0.5*getAccleration(curr_position)*time_step**2
curr_velocity = initial_velocity
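# Position-Verlet recurrence used in the loop below:
#   x(t+dt) = 2*x(t) - x(t-dt) + a(t)*dt**2,   v(t) = (x(t+dt) - x(t-dt)) / (2*dt)
# where dt = time_step; prev_position is bootstrapped from a backward Taylor step at t=0.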
for i in range(num_steps):
if (i+1) % (num_steps/10) == 0:
print 'MD step {0:6d} of {1:6d}'.format(i+1,num_steps)
# get force at t
accleration = getAccleration(curr_position)
# get new position at t+dt
new_position = 2.0*curr_position - prev_position + accleration*time_step**2
# get velocity at t
curr_velocity = (new_position - prev_position) / (2.0*time_step)
# get energies at t
curr_pot_ener = getPotentialEnergy(curr_position)
curr_kin_ener = getKineticEnergy(curr_velocity)
curr_tot_ener = curr_pot_ener + curr_kin_ener
#
times.append( time )
positions.append( curr_position )
velocites.append( curr_velocity )
pot_energies.append( curr_pot_ener )
kin_energies.append( curr_kin_ener )
tot_energies.append( curr_tot_ener )
#
prev_position = curr_position
curr_position = new_position
time += time_step
#
#----------------------------------------
times = np.array(times)
positions = np.array(positions)
velocites = np.array(velocites)
pot_energies = np.array(pot_energies)
kin_energies = np.array(kin_energies)
tot_energies = np.array(tot_energies)
positions_analytical = amplitude*np.cos(angular_freq*times+phi)
velocites_analytical = -angular_freq*amplitude*np.sin(angular_freq*times+phi)
writeDataToFile(fn_out,
[times,positions,velocites,pot_energies,kin_energies,tot_energies,positions_analytical,velocites_analytical],
['time','pos','vel','pot_ene','kin_ene','tot_ene','pos_an','vel_an'],
constantsNames=['time_step','period','amplitude','k','m','phi','conserved_energy'],
constantsValues=[time_step,period,amplitude,k,m,phi,conserved_energy],
dataFormat='%15.8f')
if showPlots:
plt.figure(1)
plt.plot(times,tot_energies)
plt.plot(times,pot_energies)
plt.plot(times,kin_energies)
plt.show()
plt.figure(2)
plt.plot(times,pot_energies)
plt.show()
plt.figure(3)
plt.plot(times,kin_energies)
plt.show()
plt.figure(4)
plt.plot(times,velocites)
plt.show()
plt.figure(5)
plt.plot(times,positions)
plt.plot(times,positions_analytical)
plt.show()
plt.figure(6)
plt.plot(times,positions-positions_analytical)
plt.show()
#
| mit | 8,504,365,888,325,456,000 | 26.320513 | 125 | 0.638667 | false |
OCA/business-requirement | business_requirement_sale/models/business_requirement.py | 1 | 1458 | # Copyright 2019 Tecnativa Victor M.M. Torres>
# Copyright 2019 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class BusinessRequirement(models.Model):
_inherit = 'business.requirement'
sale_order_ids = fields.One2many(
comodel_name='sale.order',
inverse_name='business_requirement_id',
string='Sales Orders',
)
sale_order_count = fields.Integer(
string='Sales Orders Count',
compute='_compute_sale_order_count',
)
@api.multi
@api.depends('sale_order_ids')
def _compute_sale_order_count(self):
groups = self.env['sale.order'].read_group(
domain=[('business_requirement_id', 'in', self.ids)],
fields=['business_requirement_id'],
groupby=['business_requirement_id'],
)
data = {
x['business_requirement_id'][0]: x['business_requirement_id_count']
for x in groups
}
for rec in self:
rec.sale_order_count = data.get(rec.id, 0)
@api.multi
def open_orders(self):
action = self.env.ref('sale.action_quotations').read()[0]
if len(self) == 1:
action['context'] = {
'search_default_business_requirement_id': self.id,
}
else:
            action['domain'] = [('business_requirement_id', 'in', self.ids)]
return action
| agpl-3.0 | -5,606,639,854,425,939,000 | 31.4 | 79 | 0.584362 | false |
deepmind/open_spiel | open_spiel/python/algorithms/external_sampling_mccfr_test.py | 1 | 4567 | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.cfr."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import external_sampling_mccfr
import pyspiel
SEED = 39823987
class ExternalSamplingMCCFRTest(absltest.TestCase):
def test_external_sampling_leduc_2p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("leduc_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Leduc2P, conv = {}".format(conv))
self.assertLess(conv, 5)
# ensure that to_tabular() works on the returned policy and
# the tabular policy is equivalent
tabular_policy = es_solver.average_policy().to_tabular()
conv2 = exploitability.nash_conv(game, tabular_policy)
self.assertEqual(conv, conv2)
def test_external_sampling_leduc_2p_full(self):
np.random.seed(SEED)
game = pyspiel.load_game("leduc_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.FULL)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Leduc2P, conv = {}".format(conv))
self.assertLess(conv, 5)
def test_external_sampling_kuhn_2p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn2P, conv = {}".format(conv))
self.assertLess(conv, 1)
def test_external_sampling_kuhn_2p_full(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.FULL)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn2P, conv = {}".format(conv))
self.assertLess(conv, 1)
# Liar's dice takes too long, so disable this test. Leave code for reference.
# pylint: disable=g-unreachable-test-method
def disabled_test_external_sampling_liars_dice_2p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("liars_dice")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(1):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Liar's dice, conv = {}".format(conv))
self.assertLess(conv, 2)
def test_external_sampling_kuhn_3p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker", {"players": 3})
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn3P, conv = {}".format(conv))
self.assertLess(conv, 2)
def test_external_sampling_kuhn_3p_full(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker", {"players": 3})
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.FULL)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn3P, conv = {}".format(conv))
self.assertLess(conv, 2)
if __name__ == "__main__":
absltest.main()
| apache-2.0 | 8,920,101,350,981,107,000 | 37.70339 | 79 | 0.708123 | false |
wbushey/santaclaus | tests/test_santa_v0.py | 1 | 1789 | # -*- coding: utf8 -*-
from __future__ import absolute_import, unicode_literals
import json
from santaclaus import db
from santaclaus.models import Person
from santa_test_helper import SantaClausTestCase
class SantaClausV0Test(SantaClausTestCase):
def test_request_with_a_name(self):
r = self.app.get('/?name=%s' % self.fake.name().replace(" ", "%20"))
self.assertEqual(r.status_code, 200)
self.assert_json_response(r)
data_js = json.loads(r.data)
self.assertTrue('name' in data_js)
self.assertTrue('status' in data_js)
def test_request_without_a_name(self):
r = self.app.get('/')
self.assertEqual(r.status_code, 400)
self.assert_json_response(r)
data_js = json.loads(r.data)
self.assertTrue('error' in data_js)
def test_valid_list_requests(self):
persons = {
'Naughty': [],
'Nice': []
}
for i in range(10):
p = Person(self.fake.name())
persons[p.status].append(p.name)
db.session.add(p)
db.session.commit()
naught_r = self.app.get('/lists/naughty')
self.assert_json_response(naught_r)
self.assertEqual(naught_r.status_code, 200)
nice_r = self.app.get('/lists/nice')
self.assert_json_response(nice_r)
self.assertEqual(nice_r.status_code, 200)
naughty_list = json.loads(naught_r.data)['list']
nice_list = json.loads(nice_r.data)['list']
naughty_list_complete = all(
name in naughty_list for name in persons['Naughty'])
nice_list_complete = all(
name in nice_list for name in persons['Nice'])
self.assertTrue(naughty_list_complete)
self.assertTrue(nice_list_complete)
| gpl-3.0 | -474,736,959,304,978,000 | 31.527273 | 76 | 0.604807 | false |
SGenheden/Scripts | Mol/parse_optq.py | 1 | 2134 | # Author: Samuel Genheden [email protected]
"""
Program to parse RESP charges and make Gromacs residue template file (.rtp)
Atoms in the PDB file need to be in the same order as in the charge file
The atom types file needs to have an atomtype definition on each line
NAME1 TYPE1
NAME2 TYPE2
...
Used in membrane engineering project
Examples
--------
parse_optq.py -f model0_1.pdb -q qout -o model0.rtp -t atypes.txt
Make an rtp file based on model0_1 and qout
"""
import argparse
import parmed
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description="Script to parse optimal charges")
argparser.add_argument('-f','--file',help="the PDB file")
argparser.add_argument('-q','--qout',help="the output charges",default="qout")
argparser.add_argument('-o','--out',help="the output RTP file")
argparser.add_argument('-t','--types',help="a file with atom types")
args = argparser.parse_args()
struct = parmed.load_file(args.file)
qline = ""
with open(args.qout, "r") as f :
line = f.readline()
while line :
qline += line.strip() + " "
line = f.readline()
charges = map(float,qline.strip().split())
for atom, charge in zip(struct.atoms, charges) :
print "%4s%10.6f"%(atom.name, charge)
if args.out is not None :
atype = {}
with open(args.types, "r") as f :
for line in f.readlines() :
a, t = line.strip().split()
atype[a] = t
with open(args.out, "w") as f :
f.write("[ bondedtypes ]\n")
f.write("1 5 9 2 1 3 1 0\n\n")
f.write("[ UNK ]\n\n")
f.write("[ atoms ]\n")
for i, (atom, charge) in enumerate(zip(struct.atoms, charges)) :
f.write("%5s %6s %10.6f %3d\n"%(atom.name,
atype[atom.name], charge, i))
f.write("\n[ bonds ]\n")
for bond in struct.bonds :
f.write("%5s %5s\n"%(bond.atom1.name, bond.atom2.name))
f.write("\n")
| mit | 8,400,268,021,729,467,000 | 32.34375 | 90 | 0.559044 | false |
joliva/wiki-appengine | main.py | 1 | 12161 | #!/usr/bin/env python
import cgi, re, os, logging, string
import hmac, random
from datetime import datetime
import webapp2, jinja2
from google.appengine.ext import db
from google.appengine.api import memcache
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape=False)
UNAME_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
UPASS_RE = re.compile(r"^.{3,20}$")
UEMAIL_RE = re.compile(r"^[\S]+@[\S]+\.[\S]+$")
COOKIE_SALT = 'KISSMYGRITS'
def valid_username(username):
return UNAME_RE.match(username)
def valid_password(password):
return UPASS_RE.match(password)
def valid_email(email):
return email == "" or UEMAIL_RE.match(email)
def make_salt():
# salt will be a random six character string
return ''.join([chr(random.randint(97,122)) for idx in xrange(6)])
def make_password_hash(password):
if password:
salt = make_salt()
return hmac.new(salt, password).hexdigest() + ('|%s' % salt)
else:
return None
class WikiUsers(db.Model):
username = db.StringProperty(required = True)
password_hash = db.StringProperty(required = True)
email = db.StringProperty()
created = db.DateTimeProperty(auto_now_add = True)
@staticmethod
def get_user(username):
user = None
if username:
qry = "SELECT * FROM WikiUsers WHERE username = '%s'" % username
#logging.info('query = %s', qry)
user = db.GqlQuery(qry).get()
return user
@staticmethod
def create_user(user):
# assumes properties of user were previously validated
if user:
user = WikiUsers(**user)
key = user.put()
class WikiEntry(db.Model):
name = db.StringProperty(required = True, indexed = True)
content = db.TextProperty(required = True)
created = db.DateTimeProperty(auto_now_add = True, indexed = True)
class Handler(webapp2.RequestHandler):
def update_cache(self, name, value):
# store in cache
logging.info('insert %s into cache', name)
memcache.set(name, {'cached_time':datetime.now(), 'content':value})
def store(self, name, content):
# insert new wiki entry into datastore
p = WikiEntry(name = name, content=content)
key = p.put()
# update cache
self.update_cache(name, content)
def retrieve(self, name, id=None):
if id != None and id != '':
value = WikiEntry.get_by_id(int(id)).content
return {'cached_time':datetime.now(), 'content':value}
else:
# attempt first to get page from cache
value = memcache.get(name)
if value:
return value
else:
logging.info('%s is not in the cache', name)
# attempt to retrieve from database
query = "SELECT * FROM WikiEntry WHERE name='%s' ORDER BY created DESC LIMIT 1" % name
entry = db.GqlQuery(query).get()
if entry:
self.update_cache(name, entry.content)
value = memcache.get(name)
return value
else:
logging.info('%s is not in the DB', name)
return None
def retrieve_all(self, name):
# attempt to retrieve from database
query = "SELECT * FROM WikiEntry WHERE name='%s' ORDER BY created DESC" % name
entries = db.GqlQuery(query).fetch(100)
return entries
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def create_cookie(self, value):
# cookie format: value|salted hash
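        # e.g. create_cookie("alice") -> "alice|" + hex HMAC of "alice" keyed with COOKIE_SALT (illustrative)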
if value:
return '%s|' % value + hmac.new(COOKIE_SALT, value).hexdigest()
else:
return None
def store_cookie(self, key, value):
if key and value:
self.response.set_cookie(key, value=self.create_cookie(value), path='/')
def remove_cookie(self, key):
if key:
self.response.set_cookie(key, value='', path='/')
#self.response.delete_cookie(key)
def get_cookie(self, key):
# cookie format: value|salted hash
if key:
hashed_value = self.request.cookies.get(key)
if hashed_value:
value, salted_hash = hashed_value.split('|')
if hashed_value == ('%s|' % value) + hmac.new(COOKIE_SALT, value).hexdigest():
return value
return None
class Signup(Handler):
def get(self):
self.render('signup.html')
def post(self):
username = self.request.get("username")
password = self.request.get("password")
verify = self.request.get("verify")
email = self.request.get("email")
err_name=""
err_pass=""
err_vpass=""
err_email=""
err = False
if not valid_username(username):
err_name = "That's not a valid username."
err = True
if WikiUsers.get_user(username) != None:
err_name = "That user already exists"
err = True
if not valid_password(password):
password=""
verify=""
err_pass = "That's not a valid password."
err = True
elif verify != password:
password=""
verify=""
err_vpass = "Your passwords didn't match."
err = True
if not valid_email(email):
err_email = "That's not a valid email."
err = True
if err == True:
args = {"username":username, "password":password, "verify":verify, "email":email, "err_name":err_name, "err_pass":err_pass, "err_vpass":err_vpass, "err_email":err_email}
self.render('signup.html', **args)
else:
# save new user into DB
user = {}
user['username'] = username
user['password_hash'] = make_password_hash(password)
user['email'] = email
WikiUsers.create_user(user)
# save login session cookie
self.store_cookie('username', username)
self.redirect(FRONT_URL)
class Login(Handler):
def get(self):
self.render('login.html')
def post(self):
username = self.request.get("username")
password = self.request.get("password")
err = False
if username and password:
# validate login credentials
user = WikiUsers.get_user(username)
if user:
# password hash: hmac.new(salt, password).hexdigest() + '|' + salt
password_hash = user.password_hash.encode('ascii')
logging.info('password_hash = %s', password_hash)
hashval, salt = password_hash.split('|')
logging.info('hashval = %s salt=%s', hashval, salt)
if hashval == hmac.new(salt, password).hexdigest():
# save login session cookie
self.store_cookie('username', username)
self.redirect(FRONT_URL)
return
args = {"username":username, "password":password, "error":'Invalid Login'}
self.render('login.html', **args)
class Logout(Handler):
def get(self):
self.remove_cookie('username')
self.redirect(FRONT_URL)
class WikiPage(Handler):
def get(self, name):
if name == '': name = '_front'
logging.info('name=%s', name)
id = self.request.get('id')
# attempt to retrieve page from DB
value = self.retrieve(name, id)
if value == None:
# redirect to an edit page to create the new entry
logging.info('redirect to page to add new wiki topic: %s', BASE_EDIT + name)
self.redirect(BASE_EDIT + name)
else:
# display the page
now = datetime.now()
delta_secs = (now - value['cached_time']).seconds
if self.request.get('cause') == 'logoff':
self.remove_cookie('username')
self.redirect(BASE_URL + name) # reload page
# determine if user logged in to set header
username = self.get_cookie('username')
if username:
edit_link=BASE_EDIT + name
edit_status='edit'
edit_user_sep=' | '
hist_link=BASE_HIST + name
hist_status='history'
wiki_user='<%s>' % username
login_link=BASE_URL + name + '?cause=logoff'
login_status='logout'
login_signup_sep=''
signup_link=''
signup_status=''
else:
edit_link=BASE_URL + name
edit_status=''
edit_user_sep=''
hist_link=BASE_HIST + name
hist_status='history'
wiki_user=''
login_link=BASE_URL + '/login'
login_status='login'
login_signup_sep=' | '
signup_link=BASE_URL + '/signup'
signup_status='signup'
args = dict(topic=name,
content=value['content'],
cache_time=delta_secs,
edit_link=edit_link,
edit_status=edit_status,
edit_user_sep=edit_user_sep,
hist_link=hist_link,
hist_status=hist_status,
wiki_user=wiki_user,
login_link=login_link,
login_status=login_status,
login_signup_sep=login_signup_sep,
signup_link=signup_link,
signup_status=signup_status)
self.render('entry.html', **args)
class HistPage(Handler):
def get(self, name):
if self.request.get('cause') == 'logoff':
self.remove_cookie('username')
self.redirect(BASE_HIST + name) # reload page
# determine if user logged in to set header
username = self.get_cookie('username')
if username:
edit_link=BASE_EDIT + name
edit_status='edit'
edit_user_sep=''
wiki_user='<%s>' % username
login_link=BASE_HIST + name + '?cause=logoff'
login_status='logout'
login_signup_sep=''
signup_link=''
signup_status=''
else:
edit_link=BASE_URL + name
edit_status='view'
edit_user_sep=''
wiki_user=''
login_link=BASE_URL + '/login'
login_status='login'
login_signup_sep=' | '
signup_link=BASE_URL + '/signup'
signup_status='signup'
entries = self.retrieve_all(name)
args = dict(topic=name,
edit_link=edit_link,
edit_status=edit_status,
edit_user_sep=edit_user_sep,
wiki_user=wiki_user,
login_link=login_link,
login_status=login_status,
login_signup_sep=login_signup_sep,
signup_link=signup_link,
signup_status=signup_status,
entries=entries)
self.render('history.html', **args)
class EditPage(Handler):
def get(self, name):
if self.request.get('cause') == 'logoff':
self.remove_cookie('username')
self.redirect(BASE_URL + name) # reload page
# determine if user logged in to set header
username = self.get_cookie('username')
if username:
edit_link=BASE_URL + name
edit_status='view'
edit_user_sep=''
wiki_user='<%s>' % username
login_link=BASE_URL + name + '?cause=logoff'
login_status='logout'
login_signup_sep=''
signup_link=''
signup_status=''
id = self.request.get('id')
# attempt to retrieve page from DB
value = self.retrieve(name, id)
if value:
content = value['content']
else:
content = ''
args = dict(topic=name,
content=content,
edit_link=edit_link,
edit_status=edit_status,
edit_user_sep=edit_user_sep,
wiki_user=wiki_user,
login_link=login_link,
login_status=login_status,
login_signup_sep=login_signup_sep,
signup_link=signup_link,
signup_status=signup_status)
self.render('editentry.html', **args)
else:
edit_link=''
edit_status=''
edit_user_sep=''
wiki_user=''
login_link=BASE_URL + '/login'
login_status='login'
login_signup_sep=' | '
signup_link=BASE_URL + '/signup'
signup_status='signup'
args = dict(topic=name,
msg='Not Authorized to create topic if not logged in.',
edit_link=edit_link,
edit_status=edit_status,
edit_user_sep=edit_user_sep,
wiki_user=wiki_user,
login_link=login_link,
login_status=login_status,
login_signup_sep=login_signup_sep,
signup_link=signup_link,
signup_status=signup_status)
self.response.set_status(401)
self.render('unauthorized.html', **args)
def post(self, name):
# validate field
content = self.request.get('content')
# save into datastore and cache
self.store(name, content)
# redirect to entry permalink
self.redirect(BASE_URL + name)
class Flush(Handler):
def get(self):
memcache.flush_all()
BASE_URL = '/wiki'
FRONT_URL = BASE_URL + '/'
BASE_EDIT = BASE_URL + '/_edit'
BASE_HIST = BASE_URL + '/_history'
PAGE_RE = r'(/(?:[a-zA-Z0-9_-]+/?)*)'
routes = [
(BASE_URL + '/signup/?', Signup),
(BASE_URL + '/login/?', Login),
(BASE_URL + '/logout/?', Logout),
(BASE_URL + '/flush/?', Flush),
(BASE_EDIT + PAGE_RE + '/', EditPage),
(BASE_EDIT + PAGE_RE, EditPage),
(BASE_HIST + PAGE_RE + '/', HistPage),
(BASE_HIST + PAGE_RE, HistPage),
(BASE_URL + PAGE_RE + '/', WikiPage),
(BASE_URL + PAGE_RE, WikiPage)
]
app = webapp2.WSGIApplication(routes, debug=True)
| bsd-3-clause | 1,276,170,337,536,588,000 | 25.904867 | 172 | 0.649864 | false |
psychopy/psychopy | psychopy/hardware/forp.py | 1 | 6704 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""fORP fibre optic (MR-compatible) response devices by CurrentDesigns:
http://www.curdes.com/
This class is only useful when the fORP is connected via the serial port.
If you're connecting via USB, just treat it like a standard keyboard.
E.g., use a Keyboard component, and typically listen for Allowed keys
``'1', '2', '3', '4', '5'``. Or use ``event.getKeys()``.
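A minimal sketch of the USB/keyboard case (illustrative only)::

    from psychopy import event
    keys = event.getKeys(keyList=['1', '2', '3', '4', '5'])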
"""
# Jeremy Gray and Dan Grupe developed the asKeys and baud parameters
from __future__ import absolute_import, print_function
from builtins import object
from psychopy import logging, event
import sys
from collections import defaultdict
try:
import serial
except ImportError:
serial = False
BUTTON_BLUE = 1
BUTTON_YELLOW = 2
BUTTON_GREEN = 3
BUTTON_RED = 4
BUTTON_TRIGGER = 5
# Maps bit patterns to character codes
BUTTON_MAP = [
(0x01, BUTTON_BLUE),
(0x02, BUTTON_YELLOW),
(0x04, BUTTON_GREEN),
(0x08, BUTTON_RED),
(0x10, BUTTON_TRIGGER)]
class ButtonBox(object):
"""Serial line interface to the fORP MRI response box.
To use this object class, select the box use setting `serialPort`,
and connect the serial line. To emulate key presses with a serial
connection, use `getEvents(asKeys=True)` (e.g., to be able to use
a RatingScale object during scanning). Alternatively connect the USB
cable and use fORP to emulate a keyboard.
fORP sends characters at 800Hz, so you should check the buffer
    frequently. Also note that the trigger event from the fORP is
typically extremely short (occurs for a single 800Hz epoch).
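    A minimal usage sketch (illustrative only; assumes the box is on the
    first serial port)::

        bbox = ButtonBox(serialPort=1)
        while True:
            events = bbox.getEvents(asKeys=False)
            if BUTTON_TRIGGER in events:
                break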
"""
def __init__(self, serialPort=1, baudrate=19200):
"""
:Parameters:
`serialPort` :
should be a number (where 1=COM1, ...)
`baud` :
the communication rate (baud), eg, 57600
"""
super(ButtonBox, self).__init__()
if not serial:
raise ImportError("The module serial is needed to connect to "
"fORP. On most systems this can be installed "
"with\n\t easy_install pyserial")
self.port = serial.Serial(serialPort - 1, baudrate=baudrate,
bytesize=8, parity='N', stopbits=1,
timeout=0.001)
if not self.port.isOpen():
self.port.open()
self.buttonStatus = defaultdict(bool) # Defaults to False
self.rawEvts = []
self.pressEvents = []
def clearBuffer(self):
"""Empty the input buffer of all characters"""
self.port.flushInput()
def clearStatus(self):
""" Resets the pressed statuses, so getEvents will return pressed
buttons, even if they were already pressed in the last call.
"""
for k in self.buttonStatus:
self.buttonStatus[k] = False
def getEvents(self, returnRaw=False, asKeys=False, allowRepeats=False):
"""Returns a list of unique events (one event per button pressed)
and also stores a copy of the full list of events since last
getEvents() (stored as ForpBox.rawEvts)
`returnRaw` :
return (not just store) the full event list
`asKeys` :
If True, will also emulate pyglet keyboard events, so that
button 1 will register as a keyboard event with value "1",
and as such will be detectable using `event.getKeys()`
`allowRepeats` :
If True, this will return pressed buttons even if they were held
down between calls to getEvents(). If the fORP is on the "Eprime"
setting, you will get a stream of button presses while a button is
held down. On the "Bitwise" setting, you will get a set of all
currently pressed buttons every time a button is pressed or
released.
This option might be useful if you think your participant may be
holding the button down before you start checking for presses.
"""
nToGet = self.port.inWaiting()
evtStr = self.port.read(nToGet)
self.rawEvts = []
self.pressEvents = []
if allowRepeats:
self.clearStatus()
        # for each character convert to an ordinal int value (from the ascii
# chr)
for thisChr in evtStr:
pressCode = ord(thisChr)
self.rawEvts.append(pressCode)
decodedEvents = self._generateEvents(pressCode)
self.pressEvents += decodedEvents
if asKeys:
for code in decodedEvents:
event._onPygletKey(symbol=code, modifiers=0)
# better as: emulated='fORP_bbox_asKey', but need to
# adjust event._onPygletKey and the symbol conversion
# pyglet.window.key.symbol_string(symbol).lower()
# return the abbreviated list if necessary
if returnRaw:
return self.rawEvts
else:
return self.getUniqueEvents()
def _generateEvents(self, pressCode):
"""For a given button press, returns a list buttons that went from
unpressed to pressed.
Also flags any unpressed buttons as unpressed.
`pressCode` :
a number with a bit set for every button currently pressed.
"""
curStatuses = self.__class__._decodePress(pressCode)
pressEvents = []
for button, pressed in curStatuses:
if pressed and not self.buttonStatus[button]:
# We're transitioning to pressed...
pressEvents.append(button)
self.buttonStatus[button] = True
if not pressed:
self.buttonStatus[button] = False
return pressEvents
@classmethod
def _decodePress(kls, pressCode):
"""Returns a list of buttons and whether they're pressed, given a
character code.
`pressCode` :
A number with a bit set for every button currently pressed. Will
be between 0 and 31.
"""
return [(mapping[1], bool(mapping[0] & pressCode))
for mapping in BUTTON_MAP]
def getUniqueEvents(self, fullEvts=False):
"""Returns a Python set of the unique (unordered) events of either
        a given list or the current rawEvts buffer
"""
if fullEvts:
return set(self.rawEvts)
return set(self.pressEvents)
| gpl-3.0 | -6,914,705,715,617,184,000 | 35.835165 | 79 | 0.616945 | false |
Thingee/cinder | cinder/common/config.py | 1 | 8272 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 NTT corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command-line flag library.
Emulates gflags by wrapping cfg.ConfigOpts.
The idea is to move fully to cfg eventually, and this wrapper is a
stepping stone.
"""
import socket
from oslo.config import cfg
from cinder.openstack.common.gettextutils import _
CONF = cfg.CONF
def _get_my_ip():
"""
Returns the actual ip of the local machine.
This code figures out what source address would be used if some traffic
were to be sent out to some well known address on the Internet. In this
case, a Google DNS server is used, but the specific address does not
matter much. No traffic is actually sent.
"""
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return "127.0.0.1"
core_opts = [
cfg.StrOpt('api_paste_config',
default="api-paste.ini",
help='File name for the paste.deploy config for cinder-api'),
cfg.StrOpt('state_path',
default='/var/lib/cinder',
deprecated_name='pybasedir',
help="Top-level directory for maintaining cinder's state"), ]
debug_opts = [
]
CONF.register_cli_opts(core_opts)
CONF.register_cli_opts(debug_opts)
global_opts = [
cfg.StrOpt('my_ip',
default=_get_my_ip(),
help='IP address of this host'),
cfg.StrOpt('glance_host',
default='$my_ip',
help='Default glance host name or IP'),
cfg.IntOpt('glance_port',
default=9292,
help='Default glance port'),
cfg.ListOpt('glance_api_servers',
default=['$glance_host:$glance_port'],
help='A list of the glance API servers available to cinder '
'([hostname|ip]:port)'),
cfg.IntOpt('glance_api_version',
default=1,
help='Version of the glance API to use'),
cfg.IntOpt('glance_num_retries',
default=0,
help='Number retries when downloading an image from glance'),
cfg.BoolOpt('glance_api_insecure',
default=False,
help='Allow to perform insecure SSL (https) requests to '
'glance'),
cfg.BoolOpt('glance_api_ssl_compression',
default=False,
help='Enables or disables negotiation of SSL layer '
'compression. In some cases disabling compression '
'can improve data throughput, such as when high '
'network bandwidth is available and you use '
'compressed image formats like qcow2.'),
cfg.IntOpt('glance_request_timeout',
default=None,
help='http/https timeout value for glance operations. If no '
'value (None) is supplied here, the glanceclient default '
'value is used.'),
cfg.StrOpt('scheduler_topic',
default='cinder-scheduler',
help='The topic that scheduler nodes listen on'),
cfg.StrOpt('volume_topic',
default='cinder-volume',
help='The topic that volume nodes listen on'),
cfg.StrOpt('backup_topic',
default='cinder-backup',
help='The topic that volume backup nodes listen on'),
cfg.BoolOpt('enable_v1_api',
default=True,
help=_("Deploy v1 of the Cinder API.")),
cfg.BoolOpt('enable_v2_api',
default=True,
help=_("Deploy v2 of the Cinder API.")),
cfg.BoolOpt('api_rate_limit',
default=True,
help='Enables or disables rate limit of the API.'),
cfg.ListOpt('osapi_volume_ext_list',
default=[],
help='Specify list of extensions to load when using osapi_'
'volume_extension option with cinder.api.contrib.'
'select_extensions'),
cfg.MultiStrOpt('osapi_volume_extension',
default=['cinder.api.contrib.standard_extensions'],
help='osapi volume extension to load'),
cfg.StrOpt('volume_manager',
default='cinder.volume.manager.VolumeManager',
help='Full class name for the Manager for volume'),
cfg.StrOpt('backup_manager',
default='cinder.backup.manager.BackupManager',
help='Full class name for the Manager for volume backup'),
cfg.StrOpt('scheduler_manager',
default='cinder.scheduler.manager.SchedulerManager',
help='Full class name for the Manager for scheduler'),
cfg.StrOpt('host',
default=socket.gethostname(),
help='Name of this node. This can be an opaque identifier. '
'It is not necessarily a host name, FQDN, or IP address.'),
# NOTE(vish): default to nova for compatibility with nova installs
cfg.StrOpt('storage_availability_zone',
default='nova',
help='Availability zone of this node'),
cfg.StrOpt('default_availability_zone',
default=None,
help='Default availability zone for new volumes. If not set, '
'the storage_availability_zone option value is used as '
'the default for new volumes.'),
cfg.StrOpt('default_volume_type',
default=None,
help='Default volume type to use'),
cfg.StrOpt('volume_usage_audit_period',
help='Time period for which to generate volume usages. '
'The options are hour, day, month, or year.'),
cfg.StrOpt('rootwrap_config',
default='/etc/cinder/rootwrap.conf',
help='Path to the rootwrap configuration file to use for '
'running commands as root'),
cfg.BoolOpt('monkey_patch',
default=False,
help='Enable monkey patching'),
cfg.ListOpt('monkey_patch_modules',
default=[],
help='List of modules/decorators to monkey patch'),
cfg.IntOpt('service_down_time',
default=60,
help='Maximum time since last check-in for a service to be '
'considered up'),
cfg.StrOpt('volume_api_class',
default='cinder.volume.api.API',
help='The full class name of the volume API class to use'),
cfg.StrOpt('backup_api_class',
default='cinder.backup.api.API',
help='The full class name of the volume backup API class'),
cfg.StrOpt('auth_strategy',
default='noauth',
help='The strategy to use for auth. Supports noauth, keystone, '
'and deprecated.'),
cfg.ListOpt('enabled_backends',
default=None,
help='A list of backend names to use. These backend names '
'should be backed by a unique [CONFIG] group '
'with its options'),
cfg.BoolOpt('no_snapshot_gb_quota',
default=False,
help='Whether snapshots count against GigaByte quota'),
cfg.StrOpt('transfer_api_class',
default='cinder.transfer.api.API',
help='The full class name of the volume transfer API class'), ]
CONF.register_opts(global_opts)
| apache-2.0 | -4,396,400,754,648,289,000 | 40.777778 | 79 | 0.588975 | false |
nuchi/httpserver | httpserver.py | 1 | 1065 | #!/usr/bin/env python
import socket
from http_handler import Handler_thread
MAX_CONNECTIONS = 5
class HTTPserver(object):
def __init__(self, localOnly=False, port=80, max_connections=MAX_CONNECTIONS):
self.port = port
self.max_connections = max_connections
if localOnly:
self.hostname = '127.0.0.1'
else:
self.hostname = socket.gethostname()
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def serve(self):
self.server.bind((self.hostname, self.port))
self.server.listen(self.max_connections)
while True:
client_socket, address = self.server.accept()
ht = Handler_thread()
ht.daemon = True
ht.run(client_socket)
def close(self):
self.server.close()
def create_and_run_server(localOnly=True, port=8000):
new_server = HTTPserver(localOnly=localOnly, port=port)
try:
new_server.serve()
except KeyboardInterrupt:
print('\nClosing server.')
pass
finally:
new_server.close()
if __name__ == '__main__':
create_and_run_server() | mit | -7,448,993,895,145,337,000 | 24.380952 | 79 | 0.712676 | false |
elaeon/dsignature | creacion_firma/forms.py | 1 | 3487 | # -*- coding: utf-8 -*-
from django import forms
from django.forms import ModelForm
from creacion_firma.models import FirmarCertificado, NominaSubida, User
import datetime
class UserForm(forms.Form):
nombre = forms.CharField(max_length=150, widget=forms.TextInput(attrs={"style": "width: 400px"}))
correo_electronico = forms.EmailField(max_length=100)
password = forms.CharField(widget=forms.PasswordInput)
class FirmarCertificadoForm(ModelForm):
user = forms.ModelChoiceField(
queryset=User.objects.all().order_by("username"),
required=True)
class Meta:
model = FirmarCertificado
exclude = ('certificado',)
class SubirNominaForm(forms.Form):
anteriores = forms.ModelChoiceField(
queryset=NominaSubida.objects.filter(visible=True),
required=False)
nombre = forms.CharField(
max_length=50,
widget=forms.TextInput(attrs={"style": "width: 150px"}),
help_text="QNA, Reyes, etc",
required=False)
numero = forms.IntegerField(required=False)
year = forms.IntegerField(label=u"Año", required=False)
tipo = forms.ChoiceField(choices=(("ord", "Ordinaria"), ("ext", "Extraordinaria")), required=False)
pdf = forms.FileField()
xml = forms.FileField()
def clean(self):
cleaned_data = super(SubirNominaForm, self).clean()
anteriores_nomina = cleaned_data.get("anteriores")
nomina = cleaned_data.get("nombre")
if not (anteriores_nomina or nomina):
msg = "Elija un nombre o escriba uno"
self.add_error('anteriores', msg)
self.add_error('nombre', msg)
class SubirNominaXMLForm(forms.Form):
anteriores = forms.ModelChoiceField(
queryset=NominaSubida.objects.filter(visible=True),
required=False)
nombre = forms.CharField(
max_length=50,
widget=forms.TextInput(attrs={"style": "width: 150px"}),
help_text="QNA, Reyes, etc",
required=False)
numero = forms.IntegerField(required=False)
year = forms.IntegerField(label=u"Año", required=False)
tipo = forms.ChoiceField(choices=(("ord", "Ordinaria"), ("ext", "Extraordinaria")), required=False)
xml = forms.FileField()
def clean(self):
cleaned_data = super(SubirNominaXMLForm, self).clean()
anteriores_nomina = cleaned_data.get("anteriores")
nomina = cleaned_data.get("nombre")
if not (anteriores_nomina or nomina):
msg = "Elija un nombre o escriba uno"
self.add_error('anteriores', msg)
self.add_error('nombre', msg)
class LoginForm(forms.Form):
usuario = forms.CharField(max_length=150)
password = forms.CharField(max_length=32, widget=forms.PasswordInput)
class SelectYearForm(forms.Form):
year = forms.ChoiceField(label="Año", choices=((y, y) for y in xrange(2015, 2020)))
class FirmaOSinForm(forms.Form):
tipo = forms.ChoiceField(label="Tipo", choices=(("f", "firmado"), ("nf", "no firmado")))
class NominasFilterYear(forms.Form):
def __init__(self, *args, **kwargs):
if "year" in kwargs:
self.year = kwargs["year"]
del kwargs["year"]
else:
self.year = datetime.date.today().year
super(NominasFilterYear, self).__init__(*args, **kwargs)
self.fields['nomina'] = forms.ModelChoiceField(
queryset=NominaSubida.objects.filter(year=self.year).order_by("-numero", "nombre", "tipo")
)
| gpl-3.0 | -8,077,302,436,457,668,000 | 34.917526 | 103 | 0.650689 | false |
mdinacci/rtw | demos/proto2/src/proto2.py | 1 | 15023 | # -*- coding: utf-8-*-
"""
Author: Marco Dinacci <[email protected]>
Copyright © 2008-2009
"""
from pandac.PandaModules import *
loadPrcFile("../res/Config.prc")
#loadPrcFileData("", "want-directtools 1")
#loadPrcFileData("", "want-tk 1")
import direct.directbase.DirectStart
from direct.gui.OnscreenText import OnscreenText
from direct.directtools.DirectGeometry import LineNodePath
from direct.showbase.DirectObject import DirectObject
from pandac.PandaModules import *
from direct.task.Task import Task
from mdlib.panda.entity import *
from mdlib.panda.core import AbstractScene, AbstractLogic, AbstractApplication
from mdlib.panda.data import GOM
from mdlib.panda.input import *
from mdlib.panda.utils import *
from mdlib.types import Types
import sys, math
#base.wireframeOn()
class Camera(object):
ZOOM = 30
TARGET_DISTANCE = 10
def __init__(self):
base.disableMouse()
base.camera.setPos(0,0,0)
def followTarget(self, target):
self.target = target
self.update()
def getPos(self):
return base.camera.getPos()
def zoomOut(self):
base.camera.setY(base.camera, - self.ZOOM)
def zoomIn(self):
base.camera.setY(base.camera, self.ZOOM)
def update(self):
base.camera.setPos(self.target.nodepath.getPos() - \
self.target.forward * self.TARGET_DISTANCE)
z = self.target.jumpZ
base.camera.setZ(self.target.nodepath.getZ() -z + 1)
pos = self.target.nodepath.getPos()
pos.setZ(pos.getZ() -z)
base.camera.lookAt(pos)
base.camera.setZ(self.target.nodepath.getZ() -z + 3)
HEIGHT_TRACK = 0.5
class GameLogic(AbstractLogic):
DUMMY_VALUE = -999
# the view is not really the view but just the scene for now.
def __init__(self, view):
super(GameLogic, self).__init__(view)
self.env = GOM.createEntity(environment_params)
self.view.addEntity(self.env)
self.track = GOM.createEntity(new_track_params)
self.track.nodepath.setCollideMask(BitMask32(1))
self.view.addEntity(self.track)
self.ball = GOM.createEntity(ball_params)
self.ball.nodepath.showTightBounds()
collSphere = self.ball.nodepath.find("**/ball")
collSphere.node().setIntoCollideMask(BitMask32(2))
collSphere.node().setFromCollideMask(BitMask32.allOff())
self.view.addEntity(self.ball)
self.player = GOM.createEntity(player_params)
self.player.nodepath.setPos(self.ball.nodepath.getPos())
self.player.nodepath.setQuat(self.track.nodepath,Quat(1,0,0,0))
self.ball.forward = Vec3(0,1,0)
self.view.addEntity(self.player)
# normally the view should create it
self.cam = Camera()
self.cam.followTarget(self.ball)
self.camGroundZ = -999
self.view.cam = self.cam
# HACK
self.view.player = self.player
self.view.ball = self.ball
self.view.track = self.track
self.lastTile = ""
self.tileType = "neutral"
self.lastTileType = "neutral"
self._setupCollisionDetection()
def update(self, task):
self.inputMgr.update()
return task.cont
def updatePhysics(self, task):
dt = globalClock.getDt()
if dt > .2: return task.cont
self.camGroundZ = self.DUMMY_VALUE
ballIsCollidingWithGround = False
# keep the collision node perpendicular to the track, this is necessary
# since the ball rolls all the time
self.ballCollNodeNp.setQuat(self.track.nodepath,Quat(1,0,0,0))
# check track collisions
# TODO must optimise this, no need to check the whole track,
# but only the current segment
self.picker.traverse(self.track.nodepath)
if self.pq.getNumEntries() > 0:
self.pq.sortEntries()
firstGroundContact = self.DUMMY_VALUE
firstTile = None
for i in range(self.pq.getNumEntries()):
entry = self.pq.getEntry(i)
z = entry.getSurfacePoint(render).getZ()
# check camera collision. There can be more than one
if entry.getFromNodePath() == self.cameraCollNodeNp:
if z > firstGroundContact:
firstGroundContact = z
firstTile = entry.getIntoNodePath()
# check ball's ray collision with ground
elif entry.getFromNodePath() == self.ballCollNodeNp:
np = entry.getIntoNodePath()
#print np
self.tileType = np.findAllTextures().getTexture(0).getName()
self.ball.RayGroundZ = z
ballIsCollidingWithGround = True
if entry != self.lastTile:
self.lastTile = entry
self.camGroundZ = firstGroundContact
if ballIsCollidingWithGround == False:
if self.ball.isJumping():
print "no ball-ground contact but jumping"
pass
else:
print "no ball-ground contact, losing"
self.ball.getLost()
self.view.gameIsAlive = False
return task.done # automatically stop the task
# check for rays colliding with the ball
self.picker.traverse(self.ball.nodepath)
if self.pq.getNumEntries() > 0:
self.pq.sortEntries()
if self.pq.getNumEntries() == 1:
entry = self.pq.getEntry(0)
if entry.getFromNodePath() == self.cameraCollNodeNp:
self.camBallZ = entry.getSurfacePoint(render).getZ()
else:
raise AssertionError("must always be 1")
#if self.camGroundZ > self.camBallZ:
# ground collision happened before ball collision, this means
# that the ball is descending a slope
# Get the row colliding with the cam's ray, get two rows after,
# set all of them transparent
# TODO store the rows in a list, as I have to set the transparency
# back to 0 after the ball has passed
#pass
#row = firstTile.getParent()
#row.setSa(0.8)
#row.setTransparency(TransparencyAttrib.MAlpha)
forward = self.view._rootNode.getRelativeVector(self.player.nodepath,
Vec3(0,1,0))
forward.setZ(0)
forward.normalize()
speedVec = forward * dt * self.ball.speed
self.ball.forward = forward
self.ball.speedVec = speedVec
self.player.nodepath.setPos(self.player.nodepath.getPos() + speedVec)
self.player.nodepath.setZ(self.ball.RayGroundZ + self.ball.jumpZ + \
self.ball.physics.radius + HEIGHT_TRACK)
# rotate the ball
self.ball.nodepath.setP(self.ball.nodepath.getP() -1 * dt * \
self.ball.speed * self.ball.spinningFactor)
# set the ball to the position of the controller node
self.ball.nodepath.setPos(self.player.nodepath.getPos())
# rotate the controller to follow the direction of the ball
self.player.nodepath.setH(self.ball.nodepath.getH())
return task.cont
def resetGame(self):
self.player.nodepath.setPos(Point3(12,7,.13))
self.ball.nodepath.setPos(Point3(12,7,.13))
self.ball.nodepath.setQuat(Quat(1,0,0,0))
self.view.gameIsAlive = True
def updateLogic(self, task):
# steer
if self.keyMap["right"] == True:
right = self.view._rootNode.getRelativeVector(self.player.nodepath,
Vec3(1,0,0))
if self.ball.speed > 0:
self.ball.turnRight()
if self.keyMap["left"] == True:
if self.ball.speed > 0:
self.ball.turnLeft()
if self.keyMap["forward"] == True:
self.ball.accelerate()
else:
self.ball.decelerate()
if self.keyMap["backward"] == True:
self.ball.brake()
if self.keyMap["jump"] == True:
self.ball.jump()
self.keyMap["jump"] = False
# special actions
if self.tileType == "neutral":
self.ball.neutral()
elif self.tileType == "jump":
if self.lastTileType != "jump":
self.ball.jump()
elif self.tileType == "accelerate":
self.ball.sprint()
elif self.tileType == "slow":
self.ball.slowDown()
self.lastTileType = self.tileType
if self.ball.speed < 0:
self.ball.speed = 0
return task.cont
def setKey(self, key, value):
self.keyMap[key] = value
def debugPosition(self):
for text in aspect2d.findAllMatches("**/text").asList():
text.getParent().removeNode()
OnscreenText(text="Camera's Ray-Ball: %s" % self.camBallZ,
style=1, fg=(1,1,1,1),
pos=(-0.9,-0.45), scale = .07)
OnscreenText(text="Camera's Ray-Ground : %s" % self.camGroundZ,
style=1, fg=(1,1,1,1),
pos=(-0.9,-0.55), scale = .07)
OnscreenText(text="Camera: %s" % base.camera.getZ(),
style=1, fg=(1,1,1,1),
pos=(-0.9,-0.65), scale = .07)
OnscreenText(text="Ball ray-plane: %s" % self.ball.RayGroundZ,
style=1, fg=(1,1,1,1),
pos=(-0.9,-0.75), scale = .07)
def _setupCollisionDetection(self):
self.pq = CollisionHandlerQueue();
# ball-ground collision setup
self.ballCollNodeNp = self.ball.nodepath.attachCollisionRay("ball-ground",
0,0,10, # origin
0,0,-1, # direction
BitMask32(1),BitMask32.allOff())
self.ballCollNodeNp.setQuat(self.track.nodepath, Quat(1,0,0,0))
self.ballCollNodeNp.show()
# camera-ball collision setup
bmFrom = BitMask32(1); bmFrom.setBit(1)
self.cameraCollNodeNp = base.camera.attachCollisionRay("camera-ball",
0,0,0,
0,1,0,
bmFrom,BitMask32.allOff())
self.cameraCollNodeNp.setQuat(base.camera.getQuat() + Quat(.1,0,0,0))
self.cameraCollNodeNp.show()
self.picker = CollisionTraverser()
self.picker.setRespectPrevTransform(True)
self.picker.addCollider(self.ballCollNodeNp, self.pq)
self.picker.addCollider(self.cameraCollNodeNp, self.pq)
def _subscribeToEvents(self):
self.keyMap = {"left":False, "right":False, "forward":False, \
"backward":False, "jump": False}
self.inputMgr = InputManager(base)
self.inputMgr.createSchemeAndSwitch("game")
self.inputMgr.bindCallback("arrow_left", self.setKey, ["left",True], scheme="game")
self.inputMgr.bindCallback("arrow_right", self.setKey, ["right",True])
self.inputMgr.bindCallback("arrow_up", self.setKey, ["forward",True])
self.inputMgr.bindCallback("arrow_left-up", self.setKey, ["left",False])
self.inputMgr.bindCallback("arrow_right-up", self.setKey, ["right",False])
self.inputMgr.bindCallback("arrow_up-up", self.setKey, ["forward",False])
self.inputMgr.bindCallback("arrow_down", self.setKey, ["backward",True])
self.inputMgr.bindCallback("arrow_down-up", self.setKey, ["backward",False])
self.inputMgr.bindCallback("space", self.setKey, ["jump",True])
self.inputMgr.bindCallback("c", self.view.switchCamera)
self.inputMgr.bindCallback("d", self.debugPosition)
class World(AbstractScene):
def __init__(self):
super(World, self).__init__()
self.lines = render.attachNewNode("lines")
loader.loadModelCopy("models/misc/xyzAxis").reparentTo(render)
self.setSceneGraphNode(render)
#self._setupCollisionDetection()
self._setupLights()
self.gameIsAlive = True
def update(self, task):
#dt = globalClock.getDt()
#if dt > .2: return task.cont
if self.gameIsAlive:
self.cam.update()
self.lines.removeNode()
self.lines = render.attachNewNode("lines")
return task.cont
def switchCamera(self):
base.oobe()
def _setupLights(self):
lAttrib = LightAttrib.makeAllOff()
ambientLight = AmbientLight( "ambientLight" )
ambientLight.setColor( Vec4(.55, .55, .55, 1) )
lAttrib = lAttrib.addLight( ambientLight )
directionalLight = DirectionalLight( "directionalLight" )
directionalLight.setDirection( Vec3( 0, 0, -1 ) )
directionalLight.setColor( Vec4( 0.375, 0.375, 0.375, 1 ) )
directionalLight.setSpecularColor(Vec4(1,1,1,1))
lAttrib = lAttrib.addLight( directionalLight )
class GameApplication(AbstractApplication):
def _subscribeToEvents(self):
base.accept("escape", self.shutdown)
base.accept("r", self.restartGame)
def _createLogicAndView(self):
self.scene = World()
self.logic = GameLogic(self.scene)
def restartGame(self):
taskMgr.remove("update-input")
taskMgr.remove("update-logic")
taskMgr.remove("update-physics")
taskMgr.remove("update-scene")
self.logic.resetGame()
self.start()
def start(self):
taskMgr.add(self.logic.update, "update-input")
taskMgr.add(self.logic.updateLogic, "update-logic")
taskMgr.add(self.logic.updatePhysics, "update-physics")
taskMgr.add(self.scene.update, "update-scene")
def shutdown(self):
sys.exit()
# set a fixed frame rate
from pandac.PandaModules import ClockObject
FPS = 40
globalClock = ClockObject.getGlobalClock()
#globalClock.setMode(ClockObject.MLimited)
#globalClock.setFrameRate(FPS)
if __name__ == '__main__':
GameApplication().start()
run()
| mit | 6,073,786,822,308,098,000 | 35.28744 | 91 | 0.563174 | false |
dokterbob/django-shopkit | shopkit/price/advanced/__init__.py | 1 | 1338 | # Copyright (C) 2010-2011 Mathijs de Bruin <[email protected]>
#
# This file is part of django-shopkit.
#
# django-shopkit is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
The model structure in this extension is very preliminary. Ideally, one would
want all of one's prices to reside in a single table.
One way to approach this would be using a private function `_get_valid` for
`PriceBase` subclasses and then implementing a `get_valid` in `PriceBase` which
calls the `_get_valid` functions for direct parent classes that inherit from
`PriceBase`. This could then be collapsed into a single QuerySet using Q objects.
But perhaps this is too complicated. Any comments welcomed.
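A very rough sketch of that idea (hypothetical names, untested; assumes
Django's `Q` objects)::

    from django.db import models
    from django.db.models import Q

    class PriceBase(models.Model):
        class Meta:
            abstract = True

        @classmethod
        def get_valid(cls, **kwargs):
            # Collapse the _get_valid() of every direct parent class that
            # is itself a PriceBase subclass into a single queryset.
            q = Q()
            for parent in cls.__bases__:
                if parent is not PriceBase and issubclass(parent, PriceBase):
                    q &= parent._get_valid(**kwargs)
            return cls.objects.filter(q)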
""" | agpl-3.0 | -4,863,587,485,629,527,000 | 46.821429 | 81 | 0.773543 | false |
hlzz/dotfiles | graphics/cgal/Documentation/conversion_tools/markup_replacement.py | 1 | 1846 | #!/usr/bin/python2
#replace markup #, ##, ### by \section, \subsection, \subsubsection.
#anchor names are preserved when present, otherwise generated from the section name
#The script is not perfect and might miss some specific cases
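# For instance (illustrative, assuming the input file sits in a directory
# named "Kernel", from which the generated anchor prefix is taken):
#   "# Introduction #{#secIntro}"  ->  "\section secIntro Introduction"
#   "## Basic Usage ##"            ->  "\subsection KernelBasicUsage Basic Usage"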
from sys import argv
from os import path
import string
import re
anchors={}
def generate_anchor(chapter,text):
pattern = re.compile('[\W_]+')
words=text.split()
i=1;
res=chapter+pattern.sub('',words[0])
while len(res)<40 and i<len(words):
word=pattern.sub('',words[i])
res+=word
i+=1
if anchors.has_key(res):
anchors[res]+=1
res+="_"+str(anchors[res])
else:
anchors[res]=0
return res
f=file(argv[1])
regexp_line=re.compile('^\s*#')
#~ regexp_section=re.compile('^\s*#\s*([ a-b().,]+)\s*#(.*)')
regexp_section=re.compile('^\s*(#+)\s*([0-9a-zA-Z (),.:?%-`\']+[0-9a-zA-Z.?`)])\s*#+(.*)')
regexp_anchor=re.compile('^\s*{#([0-9a-zA-Z_]+)}')
result=""
diff=False
chapter=path.abspath(argv[1]).split('/')[-2]
for line in f.readlines():
if regexp_line.match(line):
m=regexp_section.search(line)
if m:
values=m.groups()
anchor=''
if len(values)==2:
anchor=generate_anchor(chapter,values[1])
else:
anchor=regexp_anchor.match(values[2])
if anchor:
anchor=anchor.group(1)
else:
anchor=generate_anchor(chapter,values[1])
if len(values[0])==1:
result+="\section "+anchor+" "+values[1]+"\n"
elif len(values[0])==2:
result+="\subsection "+anchor+" "+values[1]+"\n"
elif len(values[0])==3:
result+="\subsubsection "+anchor+" "+values[1]+"\n"
else:
print "Error while processing "+argv[1]
assert False
diff=True
else:
result+=line
else:
result+=line
f.close()
if diff:
f=file(argv[1],'w')
f.write(result)
f.close()
| bsd-3-clause | -7,124,903,640,389,768,000 | 24.638889 | 90 | 0.591008 | false |
crosslinks/XlinkAnalyzer | pytests/XlaGuiTests.py | 1 | 2262 | import chimera
import unittest
from os import path
import xlinkanalyzer
from xlinkanalyzer import gui
RUNME = False
description = "Base classes for testing gui"
class XlaBaseTest(unittest.TestCase):
def setUp(self, mPaths, cPath):
mPath = xlinkanalyzer.__path__[0]
xlaTestPath = path.join(path.split(mPath)[0], 'pytests/test_data')
self.xlaTestMPaths = [path.join(xlaTestPath, _path) for _path in mPaths]
self.xlaTestCPath = path.join(xlaTestPath, cPath)
[chimera.openModels.open(_path) for _path in self.xlaTestMPaths]
self.models = chimera.openModels.list()
gui.show_dialog()
guiWin = xlinkanalyzer.get_gui()
guiWin.configFrame.resMngr.loadAssembly(guiWin, self.xlaTestCPath)
guiWin.configFrame.clear()
guiWin.configFrame.update()
guiWin.configFrame.mainWindow.setTitle(guiWin.configFrame.config.file)
guiWin.configFrame.config.state = "unchanged"
self.config = guiWin.configFrame.config
class TestLoadFromStructure(unittest.TestCase):
def setUp(self, mPaths):
mPath = xlinkanalyzer.__path__[0]
xlaTestPath = path.join(path.split(mPath)[0], 'pytests/test_data')
self.xlaTestMPaths = [path.join(xlaTestPath, _path) for _path in mPaths]
[chimera.openModels.open(_path) for _path in self.xlaTestMPaths]
self.models = chimera.openModels.list()
gui.show_dialog()
guiWin = xlinkanalyzer.get_gui()
guiWin.configFrame.resMngr.config.loadFromStructure(self.models[-1])
guiWin.configFrame.clear()
guiWin.configFrame.update()
guiWin.configFrame.config.state = "changed"
self.config = guiWin.configFrame.config
class XlaJustOpenXlaTest(unittest.TestCase):
def setUp(self, mPaths, cPath):
mPath = xlinkanalyzer.__path__[0]
xlaTestPath = path.join(path.split(mPath)[0], 'pytests/test_data')
self.xlaTestMPaths = [path.join(xlaTestPath, _path) for _path in mPaths]
self.xlaTestCPath = path.join(xlaTestPath, cPath)
[chimera.openModels.open(_path) for _path in self.xlaTestMPaths]
self.models = chimera.openModels.list()
gui.show_dialog()
guiWin = xlinkanalyzer.get_gui() | gpl-2.0 | -6,684,249,693,731,408,000 | 32.279412 | 80 | 0.679487 | false |