code stringlengths 3-1.05M | repo_name stringlengths 5-104 | path stringlengths 4-251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3-1.05M
---|---|---|---|---|---|
# -*- coding: utf-8 -*-
##############################################################################
#
# Mandate module for openERP
# Copyright (C) 2014 Compassion CH (http://www.compassion.ch)
# @author: Cyril Sester <[email protected]>,
# Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
class payment_line(orm.Model):
_inherit = 'payment.line'
_columns = {
'account_invoice_id': fields.many2one(
'account.invoice',
copy=False),
'invoice_amount': fields.related(
'move_line_id', 'amount_residual', type='float', string='Total Amount',
readonly=True,
copy=False),
'partner_ref': fields.related(
            'partner_id', 'ref', type='char', string='partner ref',
readonly=True),
}
| noemis-fr/custom | account_banking_natixis_direct_debit/models/payment_line.py | Python | gpl-3.0 | 1,722 |
"""Geo sub-package of Mobile Security & Privacy Simulator.
This sub-package contains all geo-related code.""" | bhenne/MoSP | mosp/geo/__init__.py | Python | gpl-3.0 | 110 |
#!/usr/bin/python
#
# Compares vmstate information stored in JSON format, obtained from
# the -dump-vmstate QEMU command.
#
# Copyright 2014 Amit Shah <[email protected]>
# Copyright 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
import argparse
import json
import sys
# Count the number of errors found
taint = 0
def bump_taint():
global taint
# Ensure we don't wrap around or reset to 0 -- the shell only has
# an 8-bit return value.
if taint < 255:
taint = taint + 1
def check_fields_match(name, s_field, d_field):
if s_field == d_field:
return True
# Some fields changed names between qemu versions. This list
# is used to whitelist such changes in each section / description.
changed_names = {
'e1000': ['dev', 'parent_obj'],
'ehci': ['dev', 'pcidev'],
'I440FX': ['dev', 'parent_obj'],
'ich9_ahci': ['card', 'parent_obj'],
'ioh-3240-express-root-port': ['port.br.dev',
'parent_obj.parent_obj.parent_obj',
'port.br.dev.exp.aer_log',
'parent_obj.parent_obj.parent_obj.exp.aer_log'],
'mch': ['d', 'parent_obj'],
'pci_bridge': ['bridge.dev', 'parent_obj', 'bridge.dev.shpc', 'shpc'],
'pcnet': ['pci_dev', 'parent_obj'],
'PIIX3': ['pci_irq_levels', 'pci_irq_levels_vmstate'],
'piix4_pm': ['dev', 'parent_obj', 'pci0_status',
'acpi_pci_hotplug.acpi_pcihp_pci_status[0x0]'],
'rtl8139': ['dev', 'parent_obj'],
'qxl': ['num_surfaces', 'ssd.num_surfaces'],
'usb-host': ['dev', 'parent_obj'],
'usb-mouse': ['usb-ptr-queue', 'HIDPointerEventQueue'],
'usb-tablet': ['usb-ptr-queue', 'HIDPointerEventQueue'],
'xhci': ['pci_dev', 'parent_obj'],
'xio3130-express-downstream-port': ['port.br.dev',
'parent_obj.parent_obj.parent_obj',
'port.br.dev.exp.aer_log',
'parent_obj.parent_obj.parent_obj.exp.aer_log'],
'xio3130-express-upstream-port': ['br.dev', 'parent_obj.parent_obj',
'br.dev.exp.aer_log',
'parent_obj.parent_obj.exp.aer_log'],
}
if not name in changed_names:
return False
if s_field in changed_names[name] and d_field in changed_names[name]:
return True
return False
def get_changed_sec_name(sec):
# Section names can change -- see commit 292b1634 for an example.
changes = {
"ICH9 LPC": "ICH9-LPC",
}
for item in changes:
if item == sec:
return changes[item]
if changes[item] == sec:
return item
return ""
def exists_in_substruct(fields, item):
# Some QEMU versions moved a few fields inside a substruct. This
# kept the on-wire format the same. This function checks if
    # something got shifted inside a substruct. See, for example, the
    # change in commit 1f42d22233b4f3d1a2933ff30e8d6a6d9ee2d08f.
if not "Description" in fields:
return False
if not "Fields" in fields["Description"]:
return False
substruct_fields = fields["Description"]["Fields"]
if substruct_fields == []:
return False
return check_fields_match(fields["Description"]["name"],
substruct_fields[0]["field"], item)
def check_fields(src_fields, dest_fields, desc, sec):
# This function checks for all the fields in a section. If some
# fields got embedded into a substruct, this function will also
# attempt to check inside the substruct.
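    # For example (hypothetical, abbreviated vmstate JSON), a source entry
    #   {"field": "timer", ...}
    # may appear in dest wrapped inside a new substruct:
    #   {"field": "parent_obj",
    #    "Description": {"name": "some-device",
    #                    "Fields": [{"field": "timer", ...}, ...]}}
    # exists_in_substruct() detects this case, and the walk below descends
    # into the wrapper's "Fields" list so the comparison continues field by
    # field.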
d_iter = iter(dest_fields)
s_iter = iter(src_fields)
# Using these lists as stacks to store previous value of s_iter
# and d_iter, so that when time comes to exit out of a substruct,
# we can go back one level up and continue from where we left off.
s_iter_list = []
d_iter_list = []
advance_src = True
advance_dest = True
while True:
if advance_src:
try:
s_item = s_iter.next()
except StopIteration:
if s_iter_list == []:
break
s_iter = s_iter_list.pop()
continue
else:
# We want to avoid advancing just once -- when entering a
# dest substruct, or when exiting one.
advance_src = True
if advance_dest:
try:
d_item = d_iter.next()
except StopIteration:
if d_iter_list == []:
# We were not in a substruct
print "Section \"" + sec + "\",",
print "Description " + "\"" + desc + "\":",
print "expected field \"" + s_item["field"] + "\",",
print "while dest has no further fields"
bump_taint()
break
d_iter = d_iter_list.pop()
advance_src = False
continue
else:
advance_dest = True
if not check_fields_match(desc, s_item["field"], d_item["field"]):
# Some fields were put in substructs, keeping the
# on-wire format the same, but breaking static tools
# like this one.
# First, check if dest has a new substruct.
if exists_in_substruct(d_item, s_item["field"]):
# listiterators don't have a prev() function, so we
# have to store our current location, descend into the
# substruct, and ensure we come out as if nothing
# happened when the substruct is over.
#
# Essentially we're opening the substructs that got
# added which didn't change the wire format.
d_iter_list.append(d_iter)
substruct_fields = d_item["Description"]["Fields"]
d_iter = iter(substruct_fields)
advance_src = False
continue
# Next, check if src has substruct that dest removed
# (can happen in backward migration: 2.0 -> 1.5)
if exists_in_substruct(s_item, d_item["field"]):
s_iter_list.append(s_iter)
substruct_fields = s_item["Description"]["Fields"]
s_iter = iter(substruct_fields)
advance_dest = False
continue
print "Section \"" + sec + "\",",
print "Description \"" + desc + "\":",
print "expected field \"" + s_item["field"] + "\",",
print "got \"" + d_item["field"] + "\"; skipping rest"
bump_taint()
break
check_version(s_item, d_item, sec, desc)
if not "Description" in s_item:
# Check size of this field only if it's not a VMSTRUCT entry
check_size(s_item, d_item, sec, desc, s_item["field"])
check_description_in_list(s_item, d_item, sec, desc)
def check_subsections(src_sub, dest_sub, desc, sec):
for s_item in src_sub:
found = False
for d_item in dest_sub:
if s_item["name"] != d_item["name"]:
continue
found = True
check_descriptions(s_item, d_item, sec)
if not found:
print "Section \"" + sec + "\", Description \"" + desc + "\":",
print "Subsection \"" + s_item["name"] + "\" not found"
bump_taint()
def check_description_in_list(s_item, d_item, sec, desc):
if not "Description" in s_item:
return
if not "Description" in d_item:
print "Section \"" + sec + "\", Description \"" + desc + "\",",
print "Field \"" + s_item["field"] + "\": missing description"
bump_taint()
return
check_descriptions(s_item["Description"], d_item["Description"], sec)
def check_descriptions(src_desc, dest_desc, sec):
check_version(src_desc, dest_desc, sec, src_desc["name"])
if not check_fields_match(sec, src_desc["name"], dest_desc["name"]):
print "Section \"" + sec + "\":",
print "Description \"" + src_desc["name"] + "\"",
print "missing, got \"" + dest_desc["name"] + "\" instead; skipping"
bump_taint()
return
for f in src_desc:
if not f in dest_desc:
print "Section \"" + sec + "\"",
print "Description \"" + src_desc["name"] + "\":",
print "Entry \"" + f + "\" missing"
bump_taint()
continue
if f == 'Fields':
check_fields(src_desc[f], dest_desc[f], src_desc["name"], sec)
if f == 'Subsections':
check_subsections(src_desc[f], dest_desc[f], src_desc["name"], sec)
def check_version(s, d, sec, desc=None):
if s["version_id"] > d["version_id"]:
print "Section \"" + sec + "\"",
if desc:
print "Description \"" + desc + "\":",
print "version error:", s["version_id"], ">", d["version_id"]
bump_taint()
if not "minimum_version_id" in d:
return
if s["version_id"] < d["minimum_version_id"]:
print "Section \"" + sec + "\"",
if desc:
print "Description \"" + desc + "\":",
print "minimum version error:", s["version_id"], "<",
print d["minimum_version_id"]
bump_taint()
def check_size(s, d, sec, desc=None, field=None):
if s["size"] != d["size"]:
print "Section \"" + sec + "\"",
if desc:
print "Description \"" + desc + "\"",
if field:
print "Field \"" + field + "\"",
print "size mismatch:", s["size"], ",", d["size"]
bump_taint()
def check_machine_type(s, d):
if s["Name"] != d["Name"]:
print "Warning: checking incompatible machine types:",
print "\"" + s["Name"] + "\", \"" + d["Name"] + "\""
return
def main():
help_text = "Parse JSON-formatted vmstate dumps from QEMU in files SRC and DEST. Checks whether migration from SRC to DEST QEMU versions would break based on the VMSTATE information contained within the JSON outputs. The JSON output is created from a QEMU invocation with the -dump-vmstate parameter and a filename argument to it. Other parameters to QEMU do not matter, except the -M (machine type) parameter."
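    # A hypothetical end-to-end run (binary name, machine type and file names
    # are examples only):
    #   qemu-system-x86_64 -M pc -dump-vmstate src.json      (older QEMU)
    #   qemu-system-x86_64 -M pc -dump-vmstate dest.json     (newer QEMU)
    #   ./vmstate-static-checker.py -s src.json -d dest.json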
parser = argparse.ArgumentParser(description=help_text)
parser.add_argument('-s', '--src', type=file, required=True,
help='json dump from src qemu')
parser.add_argument('-d', '--dest', type=file, required=True,
help='json dump from dest qemu')
parser.add_argument('--reverse', required=False, default=False,
action='store_true',
help='reverse the direction')
args = parser.parse_args()
src_data = json.load(args.src)
dest_data = json.load(args.dest)
args.src.close()
args.dest.close()
if args.reverse:
temp = src_data
src_data = dest_data
dest_data = temp
for sec in src_data:
dest_sec = sec
if not dest_sec in dest_data:
# Either the section name got changed, or the section
# doesn't exist in dest.
dest_sec = get_changed_sec_name(sec)
if not dest_sec in dest_data:
print "Section \"" + sec + "\" does not exist in dest"
bump_taint()
continue
s = src_data[sec]
d = dest_data[dest_sec]
if sec == "vmschkmachine":
check_machine_type(s, d)
continue
check_version(s, d, sec)
for entry in s:
if not entry in d:
print "Section \"" + sec + "\": Entry \"" + entry + "\"",
print "missing"
bump_taint()
continue
if entry == "Description":
check_descriptions(s[entry], d[entry], sec)
return taint
if __name__ == '__main__':
sys.exit(main())
| mwhudson/qemu | scripts/vmstate-static-checker.py | Python | gpl-2.0 | 12,817 |
import pytest
from pages.treeherder import Treeherder
RESULTS = ['testfailed', 'busted', 'exception']
@pytest.fixture
def test_jobs(eleven_job_blobs, create_jobs):
for i, status in enumerate(RESULTS):
eleven_job_blobs[i]['job']['result'] = status
return create_jobs(eleven_job_blobs[0:len(RESULTS)])
@pytest.mark.parametrize('result', RESULTS)
def test_filter_jobs_by_failure_result(base_url, selenium, test_jobs, result):
page = Treeherder(selenium, base_url).open()
page.wait.until(lambda _: len(page.all_jobs) == len(test_jobs))
assert len(page.all_jobs) == len(RESULTS)
with page.filters_menu() as filters:
        for result_type in RESULTS:
            getattr(filters, 'toggle_{}_jobs'.format(result_type))()
assert len(page.all_jobs) == 0
with page.filters_menu() as filters:
getattr(filters, 'toggle_{}_jobs'.format(result))()
assert len(page.all_jobs) == 1
page.all_jobs[0].click()
assert page.details_panel.job_details.result == result
| edmorley/treeherder | tests/selenium/test_filter_jobs_by_failure_result.py | Python | mpl-2.0 | 1,003 |
# -*- coding: utf-8 -*-
from distutils.core import setup
from pyrobotics.BB import __version__
setup(name='pyRobotics',
version=__version__,
author='Adrián Revuelta Cuauhtli',
author_email='[email protected]',
url='http://bioroboticsunam.github.io/pyRobotics',
license='LICENSE.txt',
data_files=[('', ['README', 'LICENSE.txt'])],
      description="A Python API to create modules that connect to our message-passing and shared variables hub 'BlackBoard'.",
packages=['pyrobotics'])
| BioRoboticsUNAM/pyRobotics | setup.py | Python | mit | 513 |
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from PyInstaller.utils.hooks import exec_statement
mpl_data_dir = exec_statement(
"import matplotlib; print(matplotlib._get_data_path())")
datas = [
(mpl_data_dir, ""),
]
| ijat/Hotspot-PUTRA-Auto-login | PyInstaller-3.2/PyInstaller/hooks/hook-matplotlib.py | Python | gpl-3.0 | 591 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'maxim'
class BaseRunner(object):
"""
The runner represents a connecting layer between the solver and the machine learning model.
  It is responsible for communicating with the model for each data batch: prepare, train, evaluate.
"""
def build_model(self):
"""
Builds and prepares a model. Method is not expected to return anything.
"""
raise NotImplementedError()
def init(self, **kwargs):
"""
Runs the model initializer.
"""
raise NotImplementedError()
def run_batch(self, batch_x, batch_y):
"""
Runs the training iteration for a batch of data. Method is not expected to return anything.
"""
raise NotImplementedError()
def evaluate(self, batch_x, batch_y):
"""
Evaluates the test result for a batch of data. Method should return the dictionary that contains
one (or all) of the following:
- batch accuracy (key 'accuracy')
- associated loss (key 'loss')
- any other computed data (key 'data')
"""
raise NotImplementedError()
def model_size(self):
"""
Returns the model size.
"""
raise NotImplementedError()
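
# A minimal, hypothetical sketch (not part of the public API) showing how the
# interface above could be implemented. The "model" here is a toy that only
# tracks a running mean of the targets; all names below are illustrative.
class _ToyMeanRunner(BaseRunner):
  def build_model(self):
    # Toy "model": a running sum and count of the observed targets.
    self._sum = 0.0
    self._count = 0

  def init(self, **kwargs):
    # Nothing to initialize for this toy model.
    pass

  def run_batch(self, batch_x, batch_y):
    # "Training" just accumulates the targets from the batch.
    self._sum += float(sum(batch_y))
    self._count += len(batch_y)

  def evaluate(self, batch_x, batch_y):
    # Report the mean squared error of predicting the running mean.
    mean = self._sum / self._count if self._count else 0.0
    loss = sum((y - mean) ** 2 for y in batch_y) / max(len(batch_y), 1)
    return {'loss': loss, 'data': {'mean': mean}}

  def model_size(self):
    # The toy model has a single learned quantity: the mean.
    return 1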
| maxim5/hyper-engine | hyperengine/model/base_runner.py | Python | apache-2.0 | 1,183 |
import unittest
import os
import sys
import shutil
sys.path.append('../lib/')
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import alchemy
from alchemy.schema import *
class TestAlchemy(unittest.TestCase):
def setUp(self):
# this basically resets our testing database
path = config.get('sqlite').get('path')
shutil.copyfile('{0}/alchemy.raw'.format(path), '{0}/test.db'.format(path))
def tearDown(self):
# we keep this to tidy up our database if it fails
session.close()
def test_raw_clean(self):
# add a Clean record to mark something against
asg0 = session.query(RawAssignee).limit(10)
asg1 = session.query(RawAssignee).limit(10).offset(10)
alchemy.match(asg0, session)
alchemy.match(asg1, session)
alchemy.match([asg0[0], asg1[0].assignee], session)
def test_match_all(self):
alchemy.match(session.query(RawAssignee), session)
def test_set_default(self):
# create two items
loc = session.query(RawLocation)
alchemy.match(loc, session)
alchemy.match(loc[0], session, {"city": u"Frisco", "state": u"Cali", "country": u"US", "longitude": 10.0, "latitude": 10.0})
self.assertEqual("Frisco, Cali, US", loc[0].location.address)
alchemy.match(loc[0], session, keepexisting=True)
self.assertEqual("Frisco, Cali, US", loc[0].location.address)
self.assertEqual(10.0, loc[0].location.latitude)
self.assertEqual(10.0, loc[0].location.longitude)
alchemy.match(loc[0], session)
self.assertEqual("Hong Kong, OH, US", loc[0].location.address)
self.assertEqual(10.0, loc[0].location.latitude)
self.assertEqual(10.0, loc[0].location.longitude)
alchemy.match(loc[0], session, {"city": u"Frisco"}, keepexisting=True)
self.assertEqual("Frisco, OH, US", loc[0].location.address)
self.assertEqual(10.0, loc[0].location.latitude)
self.assertEqual(10.0, loc[0].location.longitude)
def test_unmatch_asgloc(self):
loc = session.query(RawLocation).limit(20)
asg = session.query(RawAssignee).limit(20)
alchemy.match(asg, session)
alchemy.match(loc[0:5], session)
alchemy.match(loc[5:10], session)
alchemy.match(loc[10:15], session)
alchemy.match(loc[15:20], session)
clean = asg[0].assignee
alchemy.unmatch(asg[0], session)
self.assertEqual(None, asg[0].assignee)
self.assertEqual(19, len(clean.rawassignees))
self.assertEqual(19, len(clean.patents))
self.assertEqual(4, session.query(Location).count())
self.assertEqual(4, session.query(locationassignee).count())
clean = loc[0].location
self.assertEqual(5, len(clean.rawlocations))
alchemy.unmatch(loc[0], session)
self.assertEqual(4, len(clean.rawlocations))
alchemy.unmatch(loc[1], session)
self.assertEqual(3, len(clean.rawlocations))
alchemy.unmatch(loc[2:5], session)
self.assertEqual(None, loc[0].location)
self.assertEqual(3, session.query(Location).count())
self.assertEqual(3, session.query(locationassignee).count())
alchemy.unmatch(loc[5].location, session)
self.assertEqual(2, session.query(Location).count())
self.assertEqual(2, session.query(locationassignee).count())
alchemy.unmatch(asg[3:20], session)
alchemy.unmatch(loc[10].location, session)
self.assertEqual(1, session.query(Location).count())
self.assertEqual(0, session.query(locationassignee).count())
def test_unmatch_invloc(self):
loc = session.query(RawLocation).limit(20)
inv = session.query(RawInventor).limit(20)
alchemy.match(inv, session)
alchemy.match(loc[0:5], session)
alchemy.match(loc[5:10], session)
alchemy.match(loc[10:15], session)
alchemy.match(loc[15:20], session)
clean = inv[0].inventor
alchemy.unmatch(inv[0], session)
self.assertEqual(None, inv[0].inventor)
self.assertEqual(19, len(clean.rawinventors))
self.assertEqual(10, len(clean.patents))
self.assertEqual(4, session.query(Location).count())
self.assertEqual(4, session.query(locationinventor).count())
clean = loc[0].location
self.assertEqual(5, len(clean.rawlocations))
alchemy.unmatch(loc[0], session)
self.assertEqual(4, len(clean.rawlocations))
alchemy.unmatch(loc[1], session)
self.assertEqual(3, len(clean.rawlocations))
alchemy.unmatch(loc[2:5], session)
self.assertEqual(None, loc[0].location)
self.assertEqual(3, session.query(Location).count())
self.assertEqual(3, session.query(locationinventor).count())
clean = inv[5].inventor
alchemy.unmatch(inv[1], session)
self.assertEqual(None, inv[1].inventor)
self.assertEqual(18, len(clean.rawinventors))
# this patent is repeated
self.assertEqual(10, len(clean.patents))
alchemy.unmatch(inv[2], session)
self.assertEqual(None, inv[2].inventor)
self.assertEqual(17, len(clean.rawinventors))
self.assertEqual(9, len(clean.patents))
alchemy.unmatch(loc[5].location, session)
self.assertEqual(2, session.query(Location).count())
self.assertEqual(2, session.query(locationinventor).count())
alchemy.unmatch(inv[3:20], session)
alchemy.unmatch(loc[10].location, session)
self.assertEqual(1, session.query(Location).count())
self.assertEqual(0, session.query(locationinventor).count())
def test_unmatch_lawyer(self):
law = session.query(RawLawyer).limit(20)
alchemy.match(law, session)
alchemy.unmatch(law[0], session)
self.assertEqual(None, law[0].lawyer)
self.assertEqual(19, len(law[1].lawyer.rawlawyers))
self.assertEqual(14, len(law[1].lawyer.patents))
def test_assigneematch(self):
# blindly assume first 10 are the same
asg0 = session.query(RawAssignee).limit(10)
asg1 = session.query(RawAssignee).limit(10).offset(10)
asgs = session.query(Assignee)
alchemy.match(asg0, session)
alchemy.match(asg1, session)
# create two items
self.assertEqual(10, len(asg0[0].assignee.rawassignees))
self.assertEqual(10, len(asg1[0].assignee.rawassignees))
self.assertEqual(10, len(asg0[0].assignee.patents))
self.assertEqual(2, asgs.count())
self.assertEqual("CAFEPRESS.COM", asg0[0].assignee.organization)
# merge the assignees together
alchemy.match([asg0[0], asg1[0]], session)
self.assertEqual(20, len(asg0[0].assignee.rawassignees))
self.assertEqual(20, len(asg1[0].assignee.rawassignees))
self.assertEqual(20, len(asg0[0].assignee.patents))
self.assertEqual(1, asgs.count())
# override the default values provided
alchemy.match(asg0[0], session, {"organization": u"Kevin"})
self.assertEqual("Kevin", asg0[0].assignee.organization)
# determine the most common organization name
alchemy.match(session.query(RawAssignee).limit(40).all(), session)
self.assertEqual(40, len(asg1[0].assignee.rawassignees))
self.assertEqual("The Procter & Gamble Company", asg0[0].assignee.organization)
def test_inventormatch(self):
# blindly assume first 10 are the same
inv0 = session.query(RawInventor).limit(10)
inv1 = session.query(RawInventor).limit(10).offset(10)
invs = session.query(Inventor)
alchemy.match(inv0, session)
alchemy.match(inv1, session)
# create two items
self.assertEqual(10, len(inv0[0].inventor.rawinventors))
self.assertEqual(10, len(inv1[0].inventor.rawinventors))
self.assertEqual(2, invs.count())
self.assertEqual(6, len(inv0[0].inventor.patents))
self.assertEqual(5, len(inv1[0].inventor.patents))
self.assertEqual("David C. Mattison", inv0[0].inventor.name_full)
# merge the assignees together
alchemy.match([inv0[0], inv1[0]], session)
self.assertEqual(20, len(inv0[0].inventor.rawinventors))
self.assertEqual(20, len(inv1[0].inventor.rawinventors))
self.assertEqual(11, len(inv0[0].inventor.patents))
self.assertEqual(1, invs.count())
# override the default values provided
alchemy.match(inv0[0], session, {"name_first": u"Kevin", "name_last": u"Yu"})
self.assertEqual("Kevin Yu", inv0[0].inventor.name_full)
# determine the most common organization name
alchemy.match(session.query(RawInventor).all(), session)
self.assertEqual(137, len(inv1[0].inventor.rawinventors))
self.assertEqual("Robert Wang", inv0[0].inventor.name_full)
def test_lawyermatch(self):
# blindly assume first 10 are the same
law0 = session.query(RawLawyer).limit(10)
law1 = session.query(RawLawyer).limit(10).offset(10)
laws = session.query(Lawyer)
alchemy.match(law0, session)
alchemy.match(law1, session)
# create two items
self.assertEqual(10, len(law0[0].lawyer.rawlawyers))
self.assertEqual(10, len(law1[0].lawyer.rawlawyers))
self.assertEqual(2, laws.count())
self.assertEqual(7, len(law0[0].lawyer.patents))
self.assertEqual(9, len(law1[0].lawyer.patents))
self.assertEqual("Warner Norcross & Judd LLP", law0[0].lawyer.organization)
# merge the assignees together
alchemy.match([law0[0], law1[0]], session)
self.assertEqual(20, len(law0[0].lawyer.rawlawyers))
self.assertEqual(20, len(law1[0].lawyer.rawlawyers))
self.assertEqual(15, len(law0[0].lawyer.patents))
self.assertEqual(1, laws.count())
# override the default values provided
alchemy.match(law0[0], session, {"name_first": u"Devin", "name_last": u"Ko"})
self.assertEqual("Devin Ko", law0[0].lawyer.name_full)
# determine the most common organization name
alchemy.match(session.query(RawLawyer).all(), session)
self.assertEqual(57, len(law1[0].lawyer.rawlawyers))
self.assertEqual("Robert Robert Chuey", law0[0].lawyer.name_full)
def test_locationmatch(self):
# blindly assume first 10 are the same
loc0 = session.query(RawLocation).limit(10)
loc1 = session.query(RawLocation).limit(10).offset(10)
locs = session.query(Location)
alchemy.match(loc0, session)
alchemy.match(loc1, session)
# create two items
self.assertEqual(10, len(loc0[0].location.rawlocations))
self.assertEqual(10, len(loc1[0].location.rawlocations))
self.assertEqual(0, len(loc0[0].location.assignees))
self.assertEqual(0, len(loc0[0].location.inventors))
self.assertEqual(2, locs.count())
self.assertEqual("Hong Kong, MN, NL", loc0[0].location.address)
# merge the assignees together
alchemy.match([loc0[0], loc1[0]], session)
self.assertEqual(20, len(loc0[0].location.rawlocations))
self.assertEqual(20, len(loc1[0].location.rawlocations))
self.assertEqual(0, len(loc0[0].location.assignees))
self.assertEqual(0, len(loc0[0].location.inventors))
self.assertEqual(1, locs.count())
self.assertEqual("Hong Kong, MN, US", loc0[0].location.address)
self.assertEqual(None, loc0[0].location.latitude)
self.assertEqual(None, loc0[0].location.longitude)
# override the default values provided
alchemy.match(loc0[0], session, {"city": u"Frisco", "state": u"Cali", "country": u"US", "longitude": 10.0, "latitude": 10.0})
self.assertEqual("Frisco, Cali, US", loc0[0].location.address)
self.assertEqual(10.0, loc0[0].location.latitude)
self.assertEqual(10.0, loc0[0].location.longitude)
def test_assignee_location(self):
# insert an assignee first.
# then location. make sure links ok
asg = session.query(RawAssignee).limit(20)
loc = session.query(RawLocation).limit(40)
alchemy.match(asg[0:5], session)
alchemy.match(asg[5:10], session)
alchemy.match(asg[10:15], session)
alchemy.match(asg[15:20], session)
alchemy.match(loc[0:20], session)
alchemy.match(loc[20:40], session)
self.assertEqual(2, len(loc[19].location.assignees))
self.assertEqual(1, len(asg[4].assignee.locations))
self.assertEqual(2, len(asg[5].assignee.locations))
def test_inventor_location(self):
# insert an assignee first.
# then location. make sure links ok
inv = session.query(RawInventor).limit(20)
loc = session.query(RawLocation).limit(40)
alchemy.match(inv[0:5], session)
alchemy.match(inv[5:10], session)
alchemy.match(inv[10:15], session)
alchemy.match(inv[15:20], session)
alchemy.match(loc[0:20], session)
alchemy.match(loc[20:40], session)
self.assertEqual(1, len(inv[14].inventor.locations))
self.assertEqual(2, len(inv[15].inventor.locations))
self.assertEqual(4, len(loc[19].location.inventors))
self.assertEqual(1, len(loc[20].location.inventors))
def test_location_assignee(self):
asg = session.query(RawAssignee).limit(20)
loc = session.query(RawLocation).limit(40)
alchemy.match(loc[0:20], session)
alchemy.match(loc[20:40], session)
alchemy.match(asg[0:5], session)
alchemy.match(asg[5:10], session)
alchemy.match(asg[10:15], session)
alchemy.match(asg[15:20], session)
self.assertEqual(2, len(loc[19].location.assignees))
self.assertEqual(1, len(asg[4].assignee.locations))
self.assertEqual(2, len(asg[5].assignee.locations))
def test_location_inventor(self):
# insert an assignee first.
# then location. make sure links ok
inv = session.query(RawInventor).limit(20)
loc = session.query(RawLocation).limit(40)
alchemy.match(loc[0:20], session)
alchemy.match(loc[20:40], session)
alchemy.match(inv[0:5], session)
alchemy.match(inv[5:10], session)
alchemy.match(inv[10:15], session)
alchemy.match(inv[15:20], session)
self.assertEqual(1, len(inv[14].inventor.locations))
self.assertEqual(2, len(inv[15].inventor.locations))
self.assertEqual(4, len(loc[19].location.inventors))
self.assertEqual(1, len(loc[20].location.inventors))
if __name__ == '__main__':
config = alchemy.get_config()
session = alchemy.session
unittest.main()
| yngcan/patentprocessor | test/test_alchemy.py | Python | bsd-2-clause | 14,826 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
extentSelector.py
---------------------
Date : December 2010
Copyright : (C) 2010 by Giuseppe Sucameli
Email : brush dot tyler at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Giuseppe Sucameli'
__date__ = 'December 2010'
__copyright__ = '(C) 2010, Giuseppe Sucameli'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import SIGNAL
from PyQt4.QtGui import QWidget, QColor
from qgis.core import QgsPoint, QgsRectangle, QGis
from qgis.gui import QgsMapTool, QgsMapToolEmitPoint, QgsRubberBand
from ui_extentSelector import Ui_GdalToolsExtentSelector as Ui_ExtentSelector
class GdalToolsExtentSelector(QWidget, Ui_ExtentSelector):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.canvas = None
self.tool = None
self.previousMapTool = None
self.isStarted = False
self.setupUi(self)
self.connect(self.x1CoordEdit, SIGNAL("textChanged(const QString &)"), self.coordsChanged)
self.connect(self.x2CoordEdit, SIGNAL("textChanged(const QString &)"), self.coordsChanged)
self.connect(self.y1CoordEdit, SIGNAL("textChanged(const QString &)"), self.coordsChanged)
self.connect(self.y2CoordEdit, SIGNAL("textChanged(const QString &)"), self.coordsChanged)
self.connect(self.btnEnable, SIGNAL("clicked()"), self.start)
def setCanvas(self, canvas):
self.canvas = canvas
self.tool = RectangleMapTool(self.canvas)
self.previousMapTool = self.canvas.mapTool()
self.connect(self.tool, SIGNAL("rectangleCreated()"), self.fillCoords)
self.connect(self.tool, SIGNAL("deactivated()"), self.pause)
def stop(self):
if not self.isStarted:
return
self.isStarted = False
self.btnEnable.setVisible(False)
self.tool.reset()
self.canvas.unsetMapTool(self.tool)
if self.previousMapTool != self.tool:
self.canvas.setMapTool(self.previousMapTool)
#self.coordsChanged()
self.emit(SIGNAL("selectionStopped()"))
def start(self):
prevMapTool = self.canvas.mapTool()
if prevMapTool != self.tool:
self.previousMapTool = prevMapTool
self.canvas.setMapTool(self.tool)
self.isStarted = True
self.btnEnable.setVisible(False)
self.coordsChanged()
self.emit(SIGNAL("selectionStarted()"))
def pause(self):
if not self.isStarted:
return
self.btnEnable.setVisible(True)
self.emit(SIGNAL("selectionPaused()"))
def setExtent(self, rect):
if self.tool.setRectangle(rect):
self.emit(SIGNAL("newExtentDefined()"))
def getExtent(self):
return self.tool.rectangle()
def isCoordsValid(self):
try:
QgsPoint(float(self.x1CoordEdit.text()), float(self.y1CoordEdit.text()))
QgsPoint(float(self.x2CoordEdit.text()), float(self.y2CoordEdit.text()))
except ValueError:
return False
return True
def coordsChanged(self):
rect = None
if self.isCoordsValid():
point1 = QgsPoint(float(self.x1CoordEdit.text()), float(self.y1CoordEdit.text()))
point2 = QgsPoint(float(self.x2CoordEdit.text()), float(self.y2CoordEdit.text()))
rect = QgsRectangle(point1, point2)
self.setExtent(rect)
def fillCoords(self):
rect = self.getExtent()
self.blockSignals(True)
if rect is not None:
self.x1CoordEdit.setText(unicode(rect.xMinimum()))
self.x2CoordEdit.setText(unicode(rect.xMaximum()))
self.y1CoordEdit.setText(unicode(rect.yMaximum()))
self.y2CoordEdit.setText(unicode(rect.yMinimum()))
else:
self.x1CoordEdit.clear()
self.x2CoordEdit.clear()
self.y1CoordEdit.clear()
self.y2CoordEdit.clear()
self.blockSignals(False)
self.emit(SIGNAL("newExtentDefined()"))
class RectangleMapTool(QgsMapToolEmitPoint):
def __init__(self, canvas):
self.canvas = canvas
QgsMapToolEmitPoint.__init__(self, self.canvas)
self.rubberBand = QgsRubberBand(self.canvas, QGis.Polygon)
self.rubberBand.setColor(QColor(255, 0, 0, 100))
self.rubberBand.setWidth(2)
self.reset()
def reset(self):
self.startPoint = self.endPoint = None
self.isEmittingPoint = False
self.rubberBand.reset(QGis.Polygon)
def canvasPressEvent(self, e):
self.startPoint = self.toMapCoordinates(e.pos())
self.endPoint = self.startPoint
self.isEmittingPoint = True
self.showRect(self.startPoint, self.endPoint)
def canvasReleaseEvent(self, e):
self.isEmittingPoint = False
#if self.rectangle() != None:
# self.emit( SIGNAL("rectangleCreated()") )
self.emit(SIGNAL("rectangleCreated()"))
def canvasMoveEvent(self, e):
if not self.isEmittingPoint:
return
self.endPoint = self.toMapCoordinates(e.pos())
self.showRect(self.startPoint, self.endPoint)
def showRect(self, startPoint, endPoint):
self.rubberBand.reset(QGis.Polygon)
if startPoint.x() == endPoint.x() or startPoint.y() == endPoint.y():
return
point1 = QgsPoint(startPoint.x(), startPoint.y())
point2 = QgsPoint(startPoint.x(), endPoint.y())
point3 = QgsPoint(endPoint.x(), endPoint.y())
point4 = QgsPoint(endPoint.x(), startPoint.y())
self.rubberBand.addPoint(point1, False)
self.rubberBand.addPoint(point2, False)
self.rubberBand.addPoint(point3, False)
self.rubberBand.addPoint(point4, True) # true to update canvas
self.rubberBand.show()
def rectangle(self):
if self.startPoint is None or self.endPoint is None:
return None
elif self.startPoint.x() == self.endPoint.x() or self.startPoint.y() == self.endPoint.y():
return None
return QgsRectangle(self.startPoint, self.endPoint)
def setRectangle(self, rect):
if rect == self.rectangle():
return False
if rect is None:
self.reset()
else:
self.startPoint = QgsPoint(rect.xMaximum(), rect.yMaximum())
self.endPoint = QgsPoint(rect.xMinimum(), rect.yMinimum())
self.showRect(self.startPoint, self.endPoint)
return True
def deactivate(self):
QgsMapTool.deactivate(self)
self.emit(SIGNAL("deactivated()"))
| sebastic/QGIS | python/plugins/GdalTools/tools/extentSelector.py | Python | gpl-2.0 | 7,376 |
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, func, Index, Integer, select, String, text
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import column_property, relationship
from radar.auth.passwords import check_password_hash, generate_password_hash, get_password_hash_method
from radar.database import db
from radar.models.common import CreatedDateMixin, ModifiedDateMixin
from radar.models.logs import Log, log_changes
class UserCreatedUserMixin(object):
@declared_attr
def created_user_id(cls):
# Nullable as it is a self-reference
return Column(Integer, ForeignKey('users.id'), nullable=True)
@declared_attr
def created_user(cls):
return relationship(
'User',
primaryjoin="User.id == %s.created_user_id" % cls.__name__,
remote_side='User.id', post_update=True)
class UserModifiedUserMixin(object):
@declared_attr
def modified_user_id(cls):
# Nullable as it is a self-reference
return Column(Integer, ForeignKey('users.id'), nullable=True)
@declared_attr
def modified_user(cls):
return relationship(
'User',
primaryjoin="User.id == %s.modified_user_id" % cls.__name__,
remote_side='User.id', post_update=True)
@log_changes
class User(db.Model, UserCreatedUserMixin, UserModifiedUserMixin, CreatedDateMixin, ModifiedDateMixin):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
_username = Column('username', String, nullable=False)
_password = Column('password', String)
_email = Column('email', String)
first_name = Column(String)
last_name = Column(String)
telephone_number = Column(String)
is_admin = Column(Boolean, default=False, nullable=False, server_default=text('false'))
is_bot = Column(Boolean, default=False, nullable=False, server_default=text('false'))
is_enabled = Column(Boolean, default=True, nullable=False, server_default=text('true'))
reset_password_token = Column(String)
reset_password_date = Column(DateTime)
force_password_change = Column(Boolean, default=False, nullable=False, server_default=text('false'))
last_login_date = column_property(
select([func.max(Log.date)]).where(Log.user_id == id).where(Log.type == 'LOGIN')
)
last_active_date = column_property(
select([func.max(Log.date)]).where(Log.user_id == id)
)
@hybrid_property
def username(self):
return self._username
@username.setter
def username(self, username):
if username is not None:
username = username.lower()
self._username = username
@hybrid_property
def email(self):
return self._email
@email.setter
def email(self, email):
if email is not None:
email = email.lower()
self._email = email
@property
def groups(self):
return [x.group for x in self.group_users]
def password(self, value):
self.password_hash = generate_password_hash(value)
self.reset_password_token = None
password = property(None, password)
@property
def password_hash(self):
return self._password
@password_hash.setter
def password_hash(self, value):
self._password = value
def check_password(self, password):
return (
self.password_hash is not None and
check_password_hash(self.password_hash, password)
)
@property
def needs_password_rehash(self):
new_hash_method = get_password_hash_method()
if self.password_hash is None:
r = False
else:
current_hash_method = self.password_hash.split('$')[0]
r = current_hash_method != new_hash_method
return r
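    # A hypothetical caller-side sketch (not part of this model): on login,
    # verify the password and transparently upgrade the stored hash when the
    # configured hash method has changed since the password was last set.
    #
    #   if user.check_password(submitted_password):
    #       if user.needs_password_rehash:
    #           user.password = submitted_password  # re-hash with new method
    #           db.session.commit()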
@property
def name(self):
if self.first_name and self.last_name:
return '{} {}'.format(self.first_name, self.last_name)
elif self.first_name:
return self.first_name
elif self.last_name:
return self.last_name
return None
@classmethod
def is_authenticated(cls):
return True
# Ensure usernames are unique
Index('users_username_idx', func.lower(User.username), unique=True)
class AnonymousUser(object):
@classmethod
def is_authenticated(cls):
return False
| renalreg/radar | radar/models/users.py | Python | agpl-3.0 | 4,448 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import imp
import os.path
import sys
from mojom import fileutil
from mojom.error import Error
fileutil.AddLocalRepoThirdPartyDirToModulePath()
from ply.lex import TOKEN
class LexError(Error):
"""Class for errors from the lexer."""
def __init__(self, filename, message, lineno):
Error.__init__(self, filename, message, lineno=lineno)
# We have methods which look like they could be functions:
# pylint: disable=R0201
class Lexer(object):
def __init__(self, filename):
self.filename = filename
######################-- PRIVATE --######################
##
## Internal auxiliary methods
##
def _error(self, msg, token):
raise LexError(self.filename, msg, token.lineno)
##
## Reserved keywords
##
keywords = (
'HANDLE',
'IMPORT',
'MODULE',
'STRUCT',
'UNION',
'INTERFACE',
'ENUM',
'CONST',
'TRUE',
'FALSE',
'DEFAULT',
'ARRAY',
'MAP',
'ASSOCIATED',
'PENDING_REMOTE',
'PENDING_RECEIVER',
'PENDING_ASSOCIATED_REMOTE',
'PENDING_ASSOCIATED_RECEIVER',
)
keyword_map = {}
for keyword in keywords:
keyword_map[keyword.lower()] = keyword
##
## All the tokens recognized by the lexer
##
tokens = keywords + (
# Identifiers
'NAME',
# Constants
'ORDINAL',
'INT_CONST_DEC',
'INT_CONST_HEX',
'FLOAT_CONST',
# String literals
'STRING_LITERAL',
# Operators
'MINUS',
'PLUS',
'QSTN',
# Assignment
'EQUALS',
# Request / response
'RESPONSE',
# Delimiters
'LPAREN',
'RPAREN', # ( )
'LBRACKET',
'RBRACKET', # [ ]
'LBRACE',
'RBRACE', # { }
'LANGLE',
'RANGLE', # < >
'SEMI', # ;
'COMMA',
'DOT' # , .
)
##
## Regexes for use in tokens
##
# valid C identifiers (K&R2: A.2.3)
identifier = r'[a-zA-Z_][0-9a-zA-Z_]*'
hex_prefix = '0[xX]'
hex_digits = '[0-9a-fA-F]+'
# integer constants (K&R2: A.2.5.1)
decimal_constant = '0|([1-9][0-9]*)'
hex_constant = hex_prefix + hex_digits
# Don't allow octal constants (even invalid octal).
octal_constant_disallowed = '0[0-9]+'
# character constants (K&R2: A.2.5.2)
# Note: a-zA-Z and '.-~^_!=&;,' are allowed as escape chars to support #line
# directives with Windows paths as filenames (..\..\dir\file)
# For the same reason, decimal_escape allows all digit sequences. We want to
# parse all correct code, even if it means to sometimes parse incorrect
# code.
#
simple_escape = r"""([a-zA-Z._~!=&\^\-\\?'"])"""
decimal_escape = r"""(\d+)"""
hex_escape = r"""(x[0-9a-fA-F]+)"""
bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-7])"""
escape_sequence = \
r"""(\\("""+simple_escape+'|'+decimal_escape+'|'+hex_escape+'))'
# string literals (K&R2: A.2.6)
string_char = r"""([^"\\\n]|""" + escape_sequence + ')'
string_literal = '"' + string_char + '*"'
bad_string_literal = '"' + string_char + '*' + bad_escape + string_char + '*"'
# floating constants (K&R2: A.2.5.3)
exponent_part = r"""([eE][-+]?[0-9]+)"""
fractional_constant = r"""([0-9]*\.[0-9]+)|([0-9]+\.)"""
floating_constant = \
'(((('+fractional_constant+')'+ \
exponent_part+'?)|([0-9]+'+exponent_part+')))'
# Ordinals
ordinal = r'@[0-9]+'
missing_ordinal_value = r'@'
# Don't allow ordinal values in octal (even invalid octal, like 09) or
# hexadecimal.
octal_or_hex_ordinal_disallowed = (
r'@((0[0-9]+)|(' + hex_prefix + hex_digits + '))')
##
## Rules for the normal state
##
t_ignore = ' \t\r'
# Newlines
def t_NEWLINE(self, t):
r'\n+'
t.lexer.lineno += len(t.value)
# Operators
t_MINUS = r'-'
t_PLUS = r'\+'
t_QSTN = r'\?'
# =
t_EQUALS = r'='
# =>
t_RESPONSE = r'=>'
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_LANGLE = r'<'
t_RANGLE = r'>'
t_COMMA = r','
t_DOT = r'\.'
t_SEMI = r';'
t_STRING_LITERAL = string_literal
# The following floating and integer constants are defined as
# functions to impose a strict order (otherwise, decimal
# is placed before the others because its regex is longer,
# and this is bad)
#
@TOKEN(floating_constant)
def t_FLOAT_CONST(self, t):
return t
@TOKEN(hex_constant)
def t_INT_CONST_HEX(self, t):
return t
@TOKEN(octal_constant_disallowed)
def t_OCTAL_CONSTANT_DISALLOWED(self, t):
msg = "Octal values not allowed"
self._error(msg, t)
@TOKEN(decimal_constant)
def t_INT_CONST_DEC(self, t):
return t
# unmatched string literals are caught by the preprocessor
@TOKEN(bad_string_literal)
def t_BAD_STRING_LITERAL(self, t):
msg = "String contains invalid escape code"
self._error(msg, t)
# Handle ordinal-related tokens in the right order:
@TOKEN(octal_or_hex_ordinal_disallowed)
def t_OCTAL_OR_HEX_ORDINAL_DISALLOWED(self, t):
msg = "Octal and hexadecimal ordinal values not allowed"
self._error(msg, t)
@TOKEN(ordinal)
def t_ORDINAL(self, t):
return t
@TOKEN(missing_ordinal_value)
def t_BAD_ORDINAL(self, t):
msg = "Missing ordinal value"
self._error(msg, t)
@TOKEN(identifier)
def t_NAME(self, t):
t.type = self.keyword_map.get(t.value, "NAME")
return t
# Ignore C and C++ style comments
def t_COMMENT(self, t):
r'(/\*(.|\n)*?\*/)|(//.*(\n[ \t]*//.*)*)'
t.lexer.lineno += t.value.count("\n")
def t_error(self, t):
msg = "Illegal character %s" % repr(t.value[0])
self._error(msg, t)
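
# A hypothetical usage sketch (not part of this module): build the PLY lexer
# from an instance of the class above and iterate over the produced tokens.
#
#   import ply.lex
#
#   mojom_lexer = Lexer('example.mojom')
#   lexer = ply.lex.lex(object=mojom_lexer)
#   lexer.input('module sample.mojom; struct Foo { int32 bar; };')
#   for tok in iter(lexer.token, None):
#     print(tok.type, tok.value)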
| chromium/chromium | mojo/public/tools/mojom/mojom/parse/lexer.py | Python | bsd-3-clause | 5,808 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "greengov2015.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| jthidalgojr/greengov2015-TeamAqua | manage.py | Python | mit | 255 |
"""
progressbar.py
A Python module with a ProgressBar class which can be used to represent a
task's progress in the form of a progress bar and it can be formatted in a
basic way.
Here is some basic usage with the default options:
>>> from progressbar import ProgressBar
>>> p = ProgressBar()
>>> print p
[>............] 0%
>>> p + 1
>>> print p
[=>...........] 10%
>>> p + 9
>>> print p
[============>] 100%
And here another example with different options:
>>> from progressbar import ProgressBar
>>> custom_options = {
... 'end': 100,
... 'width': 20,
... 'fill': '#',
... 'format': '%(progress)s%% [%(fill)s%(blank)s]'
... }
>>> p = ProgressBar(**custom_options)
>>> print p
0% [....................]
>>> p + 5
>>> print p
5% [#...................]
>>> p + 95
>>> print p
100% [####################]
"""
import sys
import time
class ProgressBar(object):
"""ProgressBar class holds the options of the progress bar.
The options are:
    start        State from which the progress starts. For example, if start is
                 5 and end is 10, the progress of this state is 50%.
    end          State at which the progress is complete.
    width        Number of characters used to draw the bar.
    fill         String used to represent the filled (completed) part of the bar.
    blank        String used to represent the remaining (unfilled) space.
    format       Format string; may reference %(fill)s, %(blank)s and %(progress)s.
    incremental  Stored on the instance; not used by this base class.
"""
def __init__(self, start=0, end=10, width=12, fill='=', blank='.', format='[%(fill)s>%(blank)s] %(progress)s%%', incremental=True):
super(ProgressBar, self).__init__()
self.start = start
self.end = end
self.width = width
self.fill = fill
self.blank = blank
self.format = format
self.incremental = incremental
self.reset()
def __add__(self, increment):
if self.end > self.progress + increment:
self.progress += increment
else:
self.progress = float(self.end)
return self
def __sub__(self, decrement):
if self.start < self.progress - decrement:
self.progress -= decrement
else:
self.progress = float(self.start)
return self
def __str__(self):
cur_width = int(self.progress / self.end * self.width)
fill = cur_width * self.fill
blank = (self.width - cur_width) * self.blank
percentage = int(self.progress / self.end * 100)
return self.format % {'fill': fill, 'blank': blank, 'progress': percentage}
__repr__ = __str__
def reset(self):
"""Resets the current progress to the start point"""
self.progress = float(self.start)
return self
class AnimatedProgressBar(ProgressBar):
"""Extends ProgressBar to allow you to use it straighforward on a script.
    Accepts an extra keyword argument named `stdout` (by default sys.stdout),
    which may be any file-like object to which the progress status is sent.
"""
def __init__(self, *args, **kwargs):
        # Pop 'stdout' before delegating, as ProgressBar does not accept it.
        self.stdout = kwargs.pop('stdout', sys.stdout)
        super(AnimatedProgressBar, self).__init__(*args, **kwargs)
def show_progress(self):
if hasattr(self.stdout, 'isatty') and self.stdout.isatty():
self.stdout.write('\r')
else:
self.stdout.write('\n')
self.stdout.write(str(self))
self.stdout.flush()
if __name__ == '__main__':
p = AnimatedProgressBar(end=100, width=80)
while True:
p + 5
p.show_progress()
time.sleep(0.1)
if p.progress == 100:
break
print #new line
| RohanArora13/Hook | progressbar.py | Python | mit | 3,815 |
#
# Copyright (c) 2016 Juniper Networks, Inc. All rights reserved.
#
"""
This file contains the implementation of the data model for the Mesos manager.
"""
import json
from cfgm_common.vnc_db import DBBase
from bitstring import BitArray
from vnc_api.vnc_api import (KeyValuePair)
from mesos_manager.vnc.vnc_mesos_config import VncMesosConfig as vnc_mesos_config
from mesos_manager.sandesh.mesos_introspect import ttypes as introspect
class DBBaseMM(DBBase):
obj_type = __name__
# Infra annotations that will be added on objects with custom annotations.
ann_fq_name_infra_key = ["project", "cluster", "owner"]
def __init__(self, uuid, obj_dict=None):
# By default there are no annotations added on an object.
self.ann_fq_name = None
@staticmethod
def get_infra_annotations():
"""Get infra annotations."""
annotations = {}
annotations['owner'] = vnc_mesos_config.cluster_owner()
annotations['cluster'] = vnc_mesos_config.cluster_name()
return annotations
@classmethod
def _get_annotations(cls, vnc_caller, name, mesos_type,
**custom_ann_kwargs):
"""Get all annotations.
Annotations are aggregated from multiple sources like infra info,
input params and custom annotations. This method is meant to be an
aggregator of all possible annotations.
"""
# Get annotations declared on the caller.
annotations = dict(vnc_caller.get_annotations())
# Update annotations with infra specific annotations.
infra_anns = cls.get_infra_annotations()
infra_anns['project'] = vnc_mesos_config.cluster_project_name()
annotations.update(infra_anns)
        # Update annotations based on explicit input params.
input_anns = {}
input_anns['name'] = name
if mesos_type:
input_anns['kind'] = mesos_type
annotations.update(input_anns)
# Append other custom annotations.
annotations.update(custom_ann_kwargs)
return annotations
@classmethod
def add_annotations(cls, vnc_caller, obj, name, mesos_type=None,
**custom_ann_kwargs):
"""Add annotations on the input object.
        Given an object, this method will add all required and specified
        annotations on that object.
"""
# Construct annotations to be added on the object.
annotations = cls._get_annotations(vnc_caller, name,
mesos_type, **custom_ann_kwargs)
# Validate that annotations have all the info to construct
# the annotations-based-fq-name as required by the object's db.
if hasattr(cls, 'ann_fq_name_key'):
if not set(cls.ann_fq_name_key).issubset(annotations):
err_msg = "Annotations required to contruct mesos_fq_name for"+\
" object (%s:%s) was not found in input keyword args." %\
(name)
raise Exception(err_msg)
# Annotate the object.
for ann_key, ann_value in annotations.iteritems():
obj.add_annotations(KeyValuePair(key=ann_key, value=ann_value))
@classmethod
def _update_fq_name_to_uuid(cls, uuid, obj_dict):
cls._fq_name_to_uuid[tuple(obj_dict['fq_name'])] = uuid
@classmethod
def get_fq_name_to_uuid(cls, fq_name):
return cls._fq_name_to_uuid.get(tuple(fq_name))
@classmethod
def _get_ann_fq_name_from_obj(cls, obj_dict):
"""Get the annotated fully qualified name from the object.
        Annotated-fq-names are constructed from annotations found on the
        object. The format of the fq-name is specified in the object's db
        class. This method will construct the annotated-fq-name of the input
        object.
"""
fq_name = None
if hasattr(cls, 'ann_fq_name_key'):
fq_name = []
fq_name_key = cls.ann_fq_name_infra_key + cls.ann_fq_name_key
if obj_dict.get('annotations') and\
obj_dict['annotations'].get('key_value_pair'):
kvps = obj_dict['annotations']['key_value_pair']
for elem in fq_name_key:
for kvp in kvps:
if kvp.get("key") != elem:
continue
fq_name.append(kvp.get("value"))
break
return fq_name
@classmethod
def _get_ann_fq_name_from_params(cls, **kwargs):
"""Construct annotated fully qualified name using input params."""
fq_name = []
fq_name_key = cls.ann_fq_name_infra_key + cls.ann_fq_name_key
for elem in fq_name_key:
for key, value in kwargs.iteritems():
if key != elem:
continue
fq_name.append(value)
break
return fq_name
@classmethod
def get_ann_fq_name_to_uuid(cls, vnc_caller, name,
mesos_type=None, **kwargs):
"""Get vnc object uuid corresponding to an annotated-fq-name.
The annotated-fq-name is constructed from the input params given
by the caller.
"""
# Construct annotations based on input params.
annotations = cls._get_annotations(vnc_caller, name,
mesos_type, **kwargs)
        # Validate that annotations have all the info required for construction
        # of the annotated-fq-name.
if hasattr(cls, 'ann_fq_name_key'):
if not set(cls.ann_fq_name_key).issubset(annotations):
err_msg = "Annotations required to contruct mesos_fq_name for"+\
" object (%s:%s) was not found in input keyword args." %\
(name)
raise Exception(err_msg)
        # Look up the annotated-fq-name in the annotated-fq-name to uuid table.
return cls._ann_fq_name_to_uuid.get(
tuple(cls._get_ann_fq_name_from_params(**annotations)))
@classmethod
def _update_ann_fq_name_to_uuid(cls, uuid, ann_fq_name):
cls._ann_fq_name_to_uuid[tuple(ann_fq_name)] = uuid
def build_fq_name_to_uuid(self, uuid, obj_dict):
"""Populate uuid in all tables tracking uuid."""
if not obj_dict:
return
# Update annotated-fq-name to uuid table.
self.ann_fq_name = self._get_ann_fq_name_from_obj(obj_dict)
if self.ann_fq_name:
self._update_ann_fq_name_to_uuid(uuid, self.ann_fq_name)
# Update vnc fq-name to uuid table.
self._update_fq_name_to_uuid(uuid, obj_dict)
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
if obj.ann_fq_name:
if tuple(obj.ann_fq_name) in cls._ann_fq_name_to_uuid:
del cls._ann_fq_name_to_uuid[tuple(obj.ann_fq_name)]
if tuple(obj.fq_name) in cls._fq_name_to_uuid:
del cls._fq_name_to_uuid[tuple(obj.fq_name)]
def evaluate(self):
# Implement in the derived class
pass
@classmethod
def objects(cls):
# Get all vnc objects of this class.
return cls._dict.values()
@staticmethod
def _build_annotation_dict(annotation_dict):
return {str(annot['key']): str(annot['value'])
for annot
in annotation_dict['key_value_pair']} \
if annotation_dict and annotation_dict.get('key_value_pair') \
else {}
@staticmethod
def _build_string_dict(src_dict):
dst_dict = {}
if src_dict:
for key, value in src_dict.iteritems():
dst_dict[str(key)] = str(value)
return dst_dict
@staticmethod
def _build_cls_uuid_list(cls, collection):
return [cls(str(list(collection)[i]))
for i in xrange(len(collection))] \
if collection else []
class VirtualMachineMM(DBBaseMM):
_dict = {}
obj_type = 'virtual_machine'
_ann_fq_name_to_uuid = {}
ann_fq_name_key = ["kind", "name"]
_fq_name_to_uuid = {}
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.owner = None
self.cluster = None
self.virtual_router = None
self.virtual_machine_interfaces = set()
self.pod_labels = None
self.pod_node = None
self.node_ip = None
super(VirtualMachineMM, self).__init__(uuid, obj_dict)
obj_dict = self.update(obj_dict)
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
if not obj:
return
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.annotations = obj.get('annotations', None)
self.build_fq_name_to_uuid(self.uuid, obj)
if self.annotations:
for kvp in self.annotations['key_value_pair'] or []:
if kvp['key'] == 'owner':
self.owner = kvp['value']
elif kvp['key'] == 'cluster':
self.cluster = kvp['value']
elif kvp['key'] == 'labels':
self.pod_labels = json.loads(kvp['value'])
self.update_single_ref('virtual_router', obj)
self.update_multiple_refs('virtual_machine_interface', obj)
return obj
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_single_ref('virtual_router', {})
obj.update_multiple_refs('virtual_machine_interface', {})
super(VirtualMachineMM, cls).delete(uuid)
del cls._dict[uuid]
@classmethod
def sandesh_handle_db_list_request(cls, req):
""" Reply to Virtual Machine DB lookup/introspect request. """
vm_resp = introspect.VirtualMachineDatabaseListResp(vms=[])
# Iterate through all elements of Virtual Machine DB.
for vm in VirtualMachineMM.objects():
# If the request is for a specific entry, then locate the entry.
if req.vm_uuid and req.vm_uuid != vm.uuid:
continue
vm_annotations = cls._build_annotation_dict(vm.annotations)
vmis = cls._build_cls_uuid_list(
introspect.VMIUuid, vm.virtual_machine_interfaces)
vr = introspect.VRUuid(vr_uuid=str(vm.virtual_router)) \
if vm.virtual_router else None
# Construct response for an element.
vm_instance = introspect.VirtualMachineInstance(
uuid=vm.uuid,
name=vm.name,
cluster=vm.cluster,
annotations=vm_annotations,
owner=vm.owner,
node_ip=str(vm.node_ip),
pod_node=vm.pod_node,
pod_labels=vm.pod_labels,
vm_interfaces=vmis,
vrouter_uuid=vr)
# Append the constructed element info to the response.
vm_resp.vms.append(vm_instance)
# Send the reply out.
vm_resp.response(req.context())
class VirtualRouterMM(DBBaseMM):
_dict = {}
obj_type = 'virtual_router'
_ann_fq_name_to_uuid = {}
_fq_name_to_uuid = {}
_ip_addr_to_uuid = {}
def __init__(self, uuid, obj_dict=None):
super(VirtualRouterMM, self).__init__(uuid, obj_dict)
self.uuid = uuid
self.virtual_machines = set()
self.update(obj_dict)
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.annotations = obj.get('annotations', None)
self.build_fq_name_to_uuid(self.uuid, obj)
self.update_multiple_refs('virtual_machine', obj)
self.virtual_router_ip_address = obj.get('virtual_router_ip_address')
if self.virtual_router_ip_address:
self.build_ip_addr_to_uuid(
self.uuid, self.virtual_router_ip_address)
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('virtual_machine', {})
del cls._dict[uuid]
@classmethod
def build_ip_addr_to_uuid(cls, uuid, ip_addr):
cls._ip_addr_to_uuid[tuple(ip_addr)] = uuid
@classmethod
def get_ip_addr_to_uuid(cls, ip_addr):
return cls._ip_addr_to_uuid.get(tuple(ip_addr))
@classmethod
def sandesh_handle_db_list_request(cls, req):
""" Reply to Virtual Router DB lookup/introspect request. """
vr_resp = introspect.VirtualRouterDatabaseListResp(vrs=[])
# Iterate through all elements of Virtual Router DB.
for vr in VirtualRouterMM.objects():
# If the request is for a specific entry, then locate the entry.
if req.vr_uuid and req.vr_uuid != vr.uuid:
continue
vr_annotations = cls._build_annotation_dict(vr.annotations)
vms = cls._build_cls_uuid_list(
introspect.VMUuid, vr.virtual_machines)
# Construct response for an element.
vr_instance = introspect.VirtualRouterInstance(
uuid=vr.uuid,
name=vr.fq_name[-1],
fq_name=vr.fq_name,
annotations=vr_annotations,
virtual_machines=vms)
# Append the constructed element info to the response.
vr_resp.vrs.append(vr_instance)
# Send the reply out.
vr_resp.response(req.context())
class VirtualMachineInterfaceMM(DBBaseMM):
_dict = {}
obj_type = 'virtual_machine_interface'
_ann_fq_name_to_uuid = {}
ann_fq_name_key = ["kind", "name"]
_fq_name_to_uuid = {}
def __init__(self, uuid, obj_dict=None):
super(VirtualMachineInterfaceMM, self).__init__(uuid, obj_dict)
self.uuid = uuid
self.host_id = None
self.virtual_network = None
self.virtual_machine = None
self.instance_ips = set()
self.floating_ips = set()
self.virtual_machine_interfaces = set()
self.security_groups = set()
obj_dict = self.update(obj_dict)
self.add_to_parent(obj_dict)
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.annotations = obj.get('annotations', None)
self.build_fq_name_to_uuid(self.uuid, obj)
# Cache bindings on this VMI.
if obj.get('virtual_machine_interface_bindings', None):
bindings = obj['virtual_machine_interface_bindings']
kvps = bindings.get('key_value_pair', None)
for kvp in kvps or []:
if kvp['key'] == 'host_id':
self.host_id = kvp['value']
self.update_multiple_refs('instance_ip', obj)
self.update_multiple_refs('floating_ip', obj)
self.update_single_ref('virtual_network', obj)
self.update_single_ref('virtual_machine', obj)
self.update_multiple_refs('security_group', obj)
self.update_multiple_refs('virtual_machine_interface', obj)
return obj
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('instance_ip', {})
obj.update_multiple_refs('floating_ip', {})
obj.update_single_ref('virtual_network', {})
obj.update_single_ref('virtual_machine', {})
obj.update_multiple_refs('security_group', {})
obj.update_multiple_refs('virtual_machine_interface', {})
obj.remove_from_parent()
del cls._dict[uuid]
@classmethod
def sandesh_handle_db_list_request(cls, req):
""" Reply to Virtual Machine Interface DB lookup/introspect request. """
vmi_resp = introspect.VirtualMachineInterfaceDatabaseListResp(vmis=[])
        # Iterate through all elements of Virtual Machine Interface DB.
for vmi in VirtualMachineInterfaceMM.objects():
# If the request is for a specific entry, then locate the entry.
if req.vmi_uuid and req.vmi_uuid != vmi.uuid:
continue
vmi_annotations = cls._build_annotation_dict(vmi.annotations)
fips = cls._build_cls_uuid_list(
introspect.FIPUuid, vmi.floating_ips)
sgs = cls._build_cls_uuid_list(
introspect.SGUuid, vmi.security_groups)
vmis = cls._build_cls_uuid_list(
introspect.VMIUuid, vmi.virtual_machine_interfaces)
# Construct response for an element.
vmi_instance = introspect.VirtualMachineInterfaceInstance(
uuid=vmi.uuid,
name=vmi.fq_name[-1],
fq_name=vmi.fq_name,
annotations=vmi_annotations,
floating_ips=fips,
host_id=vmi.host_id,
security_groups=sgs,
virtual_machine=str(vmi.virtual_machine),
virtual_machine_interfaces=vmis,
virtual_network=str(vmi.virtual_network))
# Append the constructed element info to the response.
vmi_resp.vmis.append(vmi_instance)
# Send the reply out.
vmi_resp.response(req.context())
class VirtualNetworkMM(DBBaseMM):
_dict = {}
obj_type = 'virtual_network'
_ann_fq_name_to_uuid = {}
_fq_name_to_uuid = {}
ann_fq_name_key = ["kind", "name"]
def __init__(self, uuid, obj_dict=None):
super(VirtualNetworkMM, self).__init__(uuid, obj_dict)
self.uuid = uuid
self.virtual_machine_interfaces = set()
self.instance_ips = set()
self.network_ipams = set()
self.network_ipam_subnets = {}
self.annotations = None
obj_dict = self.update(obj_dict)
self.add_to_parent(obj_dict)
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.build_fq_name_to_uuid(self.uuid, obj)
# Cache ipam-subnet-uuid to ipam-fq-name mapping.
# This is useful when we would like to locate an ipam in a VN,
# from which we would like to request ip allocation.
self.network_ipam_subnets = {}
# Iterate through ipam's on this VN.
for ipam in obj.get('network_ipam_refs', []):
# Get the ipam's attributes.
ipam_attr = ipam.get('attr', None)
# Get the ipam fq-name.
ipam_fq_name = ipam['to']
if ipam_attr:
# Iterate through ipam subnets to cache uuid - fqname mapping.
for subnet in ipam_attr.get('ipam_subnets', []):
subnet_uuid = subnet.get('subnet_uuid', None)
if subnet_uuid:
self.network_ipam_subnets[subnet_uuid] = ipam_fq_name
# Get annotations on this virtual network.
self.annotations = obj.get('annotations', {})
self.update_multiple_refs('virtual_machine_interface', obj)
self.update_multiple_refs('instance_ip', obj)
self.update_multiple_refs('network_ipam', obj)
return obj
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('virtual_machine_interface', {})
obj.update_multiple_refs('instance_ip', {})
obj.update_multiple_refs('network_ipam', {})
obj.remove_from_parent()
del cls._dict[uuid]
# Given an ipam-fq-name, return its subnet uuid on this VN.
def get_ipam_subnet_uuid(self, ipam_fq_name):
for subnet_uuid, fq_name in self.network_ipam_subnets.iteritems():
if fq_name == ipam_fq_name:
return subnet_uuid
return None
@classmethod
def sandesh_handle_db_list_request(cls, req):
""" Reply to Virtual Network DB lookup/introspect request. """
vn_resp = introspect.VirtualNetworkDatabaseListResp(vns=[])
# Iterate through all elements of Virtual Network DB.
for vn in VirtualNetworkMM.objects():
# If the request is for a specific entry, then locate the entry.
if req.vn_uuid and req.vn_uuid != vn.uuid:
continue
vn_annotations = cls._build_annotation_dict(vn.annotations)
ipam_subnets = [introspect.NetworkIpamSubnetInstance(
uuid=sub[0], fq_name=sub[1])
for sub
in vn.network_ipam_subnets.iteritems()]
vmis = cls._build_cls_uuid_list(
introspect.VMIUuid, vn.virtual_machine_interfaces)
iips = cls._build_cls_uuid_list(
introspect.IIPUuid, vn.instance_ips)
nipams = cls._build_cls_uuid_list(
introspect.NIPAMUuid, vn.network_ipams)
# Construct response for an element.
vn_instance = introspect.VirtualNetworkInstance(
uuid=vn.uuid,
name=vn.fq_name[-1],
fq_name=vn.fq_name,
annotations=vn_annotations,
virtual_machine_interfaces=vmis,
instance_ips=iips,
network_ipams=nipams,
network_ipam_subnets=ipam_subnets)
# Append the constructed element info to the response.
vn_resp.vns.append(vn_instance)
# Send the reply out.
vn_resp.response(req.context())
class InstanceIpMM(DBBaseMM):
_dict = {}
obj_type = 'instance_ip'
_ann_fq_name_to_uuid = {}
ann_fq_name_key = ["kind", "name"]
_fq_name_to_uuid = {}
def __init__(self, uuid, obj_dict=None):
super(InstanceIpMM, self).__init__(uuid, obj_dict)
self.uuid = uuid
self.address = None
self.family = None
self.virtual_machine_interfaces = set()
self.virtual_networks = set()
self.floating_ips = set()
self.update(obj_dict)
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.family = obj.get('instance_ip_family', 'v4')
self.address = obj.get('instance_ip_address', None)
self.update_multiple_refs('virtual_machine_interface', obj)
self.update_multiple_refs('virtual_network', obj)
self.floating_ips = set([fip['uuid']
for fip in obj.get('floating_ips', [])])
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('virtual_machine_interface', {})
obj.update_multiple_refs('virtual_network', {})
del cls._dict[uuid]
@classmethod
def get_object(cls, ip, vn_fq_name):
items = cls._dict.items()
for uuid, iip_obj in items:
if ip == iip_obj.address:
vn_uuid = VirtualNetworkMM.get_fq_name_to_uuid(vn_fq_name)
if vn_uuid and vn_uuid in iip_obj.virtual_networks:
return iip_obj
return None
@classmethod
def sandesh_handle_db_list_request(cls, req):
""" Reply to InstanceIp DB lookup/introspect request. """
iip_resp = introspect.InstanceIpDatabaseListResp(iips=[])
# Iterate through all elements of InstanceIp DB.
for iip in InstanceIpMM.objects():
# If the request is for a specific entry, then locate the entry.
if req.iip_uuid and req.iip_uuid != iip.uuid:
continue
vmis = cls._build_cls_uuid_list(
introspect.VMIUuid, iip.virtual_machine_interfaces)
vns = cls._build_cls_uuid_list(
introspect.VNUuid, iip.virtual_networks)
fips = cls._build_cls_uuid_list(
introspect.FIPUuid, iip.floating_ips)
# Construct response for an element.
iip_instance = introspect.InstanceIpInstance(
uuid=iip.uuid,
name=iip.fq_name[-1],
fq_name=iip.fq_name,
address=str(iip.address),
family=iip.family,
vm_interfaces=vmis,
virtual_networks=vns,
floating_ips=fips)
# Append the constructed element info to the response.
iip_resp.iips.append(iip_instance)
# Send the reply out.
iip_resp.response(req.context())
# end class InstanceIpMM
class ProjectMM(DBBaseMM):
_dict = {}
obj_type = 'project'
_ann_fq_name_to_uuid = {}
_fq_name_to_uuid = {}
def __init__(self, uuid, obj_dict=None):
super(ProjectMM, self).__init__(uuid, obj_dict)
self.uuid = uuid
self.ns_labels = {}
self.virtual_networks = set()
self.annotations = None
self.security_groups = set()
obj_dict = self.update(obj_dict)
self.set_children('virtual_network', obj_dict)
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.build_fq_name_to_uuid(self.uuid, obj)
# Update SecurityGroup info.
sg_list = obj.get('security_groups', [])
for sg in sg_list:
self.security_groups.add(sg['uuid'])
self.annotations = obj.get('annotations', {})
return obj
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
del cls._dict[uuid]
def get_security_groups(self):
return set(self.security_groups)
def add_security_group(self, sg_uuid):
self.security_groups.add(sg_uuid)
def remove_security_group(self, sg_uuid):
self.security_groups.discard(sg_uuid)
@classmethod
def sandesh_handle_db_list_request(cls, req):
""" Reply to Project DB lookup/introspect request. """
project_resp = introspect.ProjectDatabaseListResp(projects=[])
# Iterate through all elements of Project DB.
for project in ProjectMM.objects():
# If the request is for a specific entry, then locate the entry.
if req.project_uuid and req.project_uuid != project.uuid:
continue
project_annotations = cls._build_annotation_dict(
project.annotations)
ns_labels = cls._build_string_dict(project.ns_labels)
sgs = cls._build_cls_uuid_list(
introspect.SGUuid, project.security_groups)
vns = cls._build_cls_uuid_list(
introspect.VNUuid, project.virtual_networks)
# Construct response for an element.
project_instance = introspect.ProjectInstance(
uuid=project.uuid,
name=project.fq_name[-1],
fq_name=project.fq_name,
annotations=project_annotations,
ns_labels=ns_labels,
security_groups=sgs,
virtual_networks=vns)
# Append the constructed element info to the response.
project_resp.projects.append(project_instance)
# Send the reply out.
project_resp.response(req.context())
class DomainMM(DBBaseMM):
_dict = {}
obj_type = 'domain'
_ann_fq_name_to_uuid = {}
_fq_name_to_uuid = {}
def __init__(self, uuid, obj_dict=None):
super(DomainMM, self).__init__(uuid, obj_dict)
self.uuid = uuid
self.update(obj_dict)
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.fq_name = obj['fq_name']
self.annotations = obj.get('annotations', None)
self.build_fq_name_to_uuid(self.uuid, obj)
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
del cls._dict[uuid]
@classmethod
def sandesh_handle_db_list_request(cls, req):
""" Reply to Domain DB lookup/introspect request. """
domain_resp = introspect.DomainDatabaseListResp(domains=[])
# Iterate through all elements of Domain DB.
for domain in DomainMM.objects():
# If the request is for a specific entry, then locate the entry.
if req.domain_uuid and req.domain_uuid != domain.uuid:
continue
domain_annotations = cls._build_annotation_dict(
domain.annotations)
# Construct response for an element.
domain_instance = introspect.DomainInstance(
uuid=domain.uuid,
name=domain.fq_name[-1],
fq_name=domain.fq_name,
annotations=domain_annotations)
# Append the constructed element info to the response.
domain_resp.domains.append(domain_instance)
# Send the reply out.
domain_resp.response(req.context())
class NetworkIpamMM(DBBaseMM):
_dict = {}
obj_type = 'network_ipam'
_ann_fq_name_to_uuid = {}
_fq_name_to_uuid = {}
def __init__(self, uuid, obj_dict=None):
super(NetworkIpamMM, self).__init__(uuid, obj_dict)
self.uuid = uuid
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.annotations = obj.get('annotations', None)
self.build_fq_name_to_uuid(self.uuid, obj)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
del cls._dict[uuid]
@classmethod
def sandesh_handle_db_list_request(cls, req):
""" Reply to NetworkIpam DB lookup/introspect request. """
network_ipam_resp = introspect.NetworkIpamDatabaseListResp(
network_ipams=[])
# Iterate through all elements of NetworkIpam DB.
for network_ipam in NetworkIpamMM.objects():
# If the request is for a specific entry, then locate the entry.
if req.network_ipam_uuid \
and req.network_ipam_uuid != network_ipam.uuid:
continue
network_ipam_annotations = cls._build_annotation_dict(
network_ipam.annotations)
# Construct response for an element.
network_ipam_instance = introspect.NetworkIpamInstance(
uuid=network_ipam.uuid,
name=network_ipam.fq_name[-1],
fq_name=network_ipam.fq_name,
annotations=network_ipam_annotations)
# Append the constructed element info to the response.
network_ipam_resp.network_ipams.append(network_ipam_instance)
# Send the reply out.
network_ipam_resp.response(req.context())
# end class NetworkIpamMM
class NetworkPolicyMM(DBBaseMM):
_dict = {}
obj_type = 'network_policy'
_ann_fq_name_to_uuid = {}
_fq_name_to_uuid = {}
def __init__(self, uuid, obj_dict=None):
super(NetworkPolicyMM, self).__init__(uuid, obj_dict)
self.uuid = uuid
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.annotations = obj.get('annotations', None)
self.build_fq_name_to_uuid(self.uuid, obj)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
del cls._dict[uuid]
# end class NetworkPolicyMM
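# --- Editorial sketch (not part of the original module) ---
# The *MM classes above share one caching pattern: objects live in cls._dict
# keyed by uuid, update() refreshes an entry from the API server, delete()
# drops it along with its cross-references, and the sandesh handlers expose the
# caches for introspection. The helper below only illustrates walking those
# caches; the fq-names are hypothetical and the caches are assumed to be
# already populated by the mesos-manager code that owns this module.
def _example_walk_caches():
    # Pair every cached virtual machine with its virtual router, if any.
    vm_to_vr = {vm.uuid: vm.virtual_router for vm in VirtualMachineMM.objects()}
    # Resolve a (hypothetical) cluster network and one of its ipam subnets.
    vn_uuid = VirtualNetworkMM.get_fq_name_to_uuid(
        ['default-domain', 'default-project', 'cluster-network'])
    subnet_uuid = None
    for vn in VirtualNetworkMM.objects():
        if vn.uuid == vn_uuid:
            subnet_uuid = vn.get_ipam_subnet_uuid(
                ['default-domain', 'default-project', 'pod-ipam'])
    return vm_to_vr, subnet_uuid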
| rombie/contrail-controller | src/container/mesos-manager/mesos_manager/vnc/config_db.py | Python | apache-2.0 | 32,110 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class MetricBaselineOperations(object):
"""MetricBaselineOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2017-11-01-preview".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-11-01-preview"
self.config = config
def get(
self, resource_uri, metric_name, timespan=None, interval=None, aggregation=None, sensitivities=None, result_type=None, custom_headers=None, raw=False, **operation_config):
"""**Gets the baseline values for a specific metric**.
:param resource_uri: The identifier of the resource. It has the
following structure:
subscriptions/{subscriptionName}/resourceGroups/{resourceGroupName}/providers/{providerName}/{resourceName}.
For example:
subscriptions/b368ca2f-e298-46b7-b0ab-012281956afa/resourceGroups/vms/providers/Microsoft.Compute/virtualMachines/vm1
:type resource_uri: str
:param metric_name: The name of the metric to retrieve the baseline
for.
:type metric_name: str
:param timespan: The timespan of the query. It is a string with the
following format 'startDateTime_ISO/endDateTime_ISO'.
:type timespan: str
:param interval: The interval (i.e. timegrain) of the query.
:type interval: timedelta
:param aggregation: The aggregation type of the metric to retrieve the
baseline for.
:type aggregation: str
:param sensitivities: The list of sensitivities (comma separated) to
retrieve.
:type sensitivities: str
:param result_type: Allows retrieving only metadata of the baseline.
On data request all information is retrieved. Possible values include:
'Data', 'Metadata'
:type result_type: str or ~azure.mgmt.monitor.models.ResultType
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: BaselineResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.monitor.models.BaselineResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
'metricName': self._serialize.url("metric_name", metric_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if timespan is not None:
query_parameters['timespan'] = self._serialize.query("timespan", timespan, 'str')
if interval is not None:
query_parameters['interval'] = self._serialize.query("interval", interval, 'duration')
if aggregation is not None:
query_parameters['aggregation'] = self._serialize.query("aggregation", aggregation, 'str')
if sensitivities is not None:
query_parameters['sensitivities'] = self._serialize.query("sensitivities", sensitivities, 'str')
if result_type is not None:
query_parameters['resultType'] = self._serialize.query("result_type", result_type, 'ResultType')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BaselineResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/{resourceUri}/providers/microsoft.insights/baseline/{metricName}'}
def calculate_baseline(
self, resource_uri, time_series_information, custom_headers=None, raw=False, **operation_config):
"""**Lists the baseline values for a resource**.
:param resource_uri: The identifier of the resource. It has the
following structure:
subscriptions/{subscriptionName}/resourceGroups/{resourceGroupName}/providers/{providerName}/{resourceName}.
For example:
subscriptions/b368ca2f-e298-46b7-b0ab-012281956afa/resourceGroups/vms/providers/Microsoft.Compute/virtualMachines/vm1
:type resource_uri: str
:param time_series_information: Information that need to be specified
to calculate a baseline on a time series.
:type time_series_information:
~azure.mgmt.monitor.models.TimeSeriesInformation
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: CalculateBaselineResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.monitor.models.CalculateBaselineResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>`
"""
# Construct URL
url = self.calculate_baseline.metadata['url']
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(time_series_information, 'TimeSeriesInformation')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CalculateBaselineResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
calculate_baseline.metadata = {'url': '/{resourceUri}/providers/microsoft.insights/calculatebaseline'}
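# --- Editorial sketch (not part of the generated client) ---
# Minimal illustration of calling the operations defined above. `ops` is
# assumed to be an already constructed MetricBaselineOperations instance
# (normally obtained from the monitor management client rather than built by
# hand); the resource URI, metric name and timespan below are hypothetical.
def _example_get_baseline(ops):
    return ops.get(
        resource_uri=('subscriptions/xxxx/resourceGroups/vms/providers/'
                      'Microsoft.Compute/virtualMachines/vm1'),
        metric_name='Percentage CPU',
        timespan='2017-04-14T02:20:00Z/2017-04-14T04:20:00Z',
        aggregation='Average',
        sensitivities='Low,Medium')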
| lmazuel/azure-sdk-for-python | azure-mgmt-monitor/azure/mgmt/monitor/operations/metric_baseline_operations.py | Python | mit | 9,160 |
from distutils.core import setup
setup(name='ePowerSwitch',
version='1.0',
py_modules=['ePowerSwitch'],
)
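# Editorial note: with this distutils setup script the module is installed or
# packaged with the standard commands, e.g.
#   python setup.py install
#   python setup.py sdist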
| bajo/ePowerSwitch | setup.py | Python | gpl-2.0 | 118 |
# Wrapper module for waagent
#
# waagent is not written as a module. This wrapper module is created
# to use the waagent code as a module.
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imp
import os
import os.path
#
# The following code will search and load waagent code and expose
# it as a submodule of current module
#
def searchWAAgent():
agentPath = os.path.join(os.getcwd(), "main/WaagentLib.py")
if(os.path.isfile(agentPath)):
return agentPath
user_paths = os.environ['PYTHONPATH'].split(os.pathsep)
for user_path in user_paths:
agentPath = os.path.join(user_path, 'waagent')
if(os.path.isfile(agentPath)):
return agentPath
return None
def searchWAAgentOld():
agentPath = '/usr/sbin/waagent'
if(os.path.isfile(agentPath)):
return agentPath
user_paths = os.environ['PYTHONPATH'].split(os.pathsep)
for user_path in user_paths:
agentPath = os.path.join(user_path, 'waagent')
if(os.path.isfile(agentPath)):
return agentPath
return None
pathUsed = 1
try:
agentPath = searchWAAgent()
if(agentPath):
waagent = imp.load_source('waagent', agentPath)
else:
raise Exception("Can't load new waagent.")
except Exception as e:
pathUsed = 0
agentPath = searchWAAgentOld()
if(agentPath):
waagent = imp.load_source('waagent', agentPath)
else:
raise Exception("Can't load old waagent.")
if not hasattr(waagent, "AddExtensionEvent"):
"""
If AddExtensionEvent is not defined, provide a dummy impl.
"""
def _AddExtensionEvent(*args, **kwargs):
pass
waagent.AddExtensionEvent = _AddExtensionEvent
if not hasattr(waagent, "WALAEventOperation"):
class _WALAEventOperation:
HeartBeat = "HeartBeat"
Provision = "Provision"
Install = "Install"
UnIsntall = "UnInstall"
Disable = "Disable"
Enable = "Enable"
Download = "Download"
Upgrade = "Upgrade"
Update = "Update"
waagent.WALAEventOperation = _WALAEventOperation
__ExtensionName__ = None
def InitExtensionEventLog(name):
    # Remember the extension name at module scope so AddExtensionEvent can
    # default to it (without the global statement the assignment would only
    # create a local variable and the registered name would be lost).
    global __ExtensionName__
    __ExtensionName__ = name
def AddExtensionEvent(name=None,
                      op=waagent.WALAEventOperation.Enable,
                      isSuccess=False,
                      message=None):
    # Fall back to the name registered via InitExtensionEventLog; if no name
    # is known at all, silently skip reporting the event.
    if name is None:
        name = __ExtensionName__
    if name is not None:
        waagent.AddExtensionEvent(name=name,
                                  op=op,
                                  isSuccess=isSuccess,
                                  message=message)
def GetPathUsed():
return pathUsed
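# --- Editorial sketch (not part of the original helper) ---
# Typical use from an extension handler, assuming this module is importable as
# Utils.WAAgentUtil (the exact import path depends on the extension layout):
#
#   from Utils.WAAgentUtil import waagent
#   waagent.AddExtensionEvent(name="SomeExtensionName",
#                             op=waagent.WALAEventOperation.Enable,
#                             isSuccess=True,
#                             message="enable succeeded")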
| andyliuliming/azure-linux-extensions | VMBackup/main/Utils/WAAgentUtil.py | Python | apache-2.0 | 3,173 |
import numpy as np
from pycuda import driver, compiler, gpuarray, tools
# -- initialize the device
import pycuda.autoinit
kernel_code_template = """
__global__ void MatrixMulKernel(float *a, float *b, float *c)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
float Pvalue = 0;
for (int k = 0; k < %(MATRIX_SIZE)s; ++k) {
float Aelement = a[ty * %(MATRIX_SIZE)s + k];
float Belement = b[k * %(MATRIX_SIZE)s + tx];
Pvalue += Aelement * Belement;
}
c[ty * %(MATRIX_SIZE)s + tx] = Pvalue;
}
"""
# size of the (square) matrices
MATRIX_SIZE = 5
# create two random matrices on the host and compute the reference
# product on the CPU for later comparison
a_cpu = np.random.randn(MATRIX_SIZE, MATRIX_SIZE).astype(np.float32)
b_cpu = np.random.randn(MATRIX_SIZE, MATRIX_SIZE).astype(np.float32)
c_cpu = np.dot(a_cpu, b_cpu)
# transfer the inputs to the device and allocate space for the result
a_gpu = gpuarray.to_gpu(a_cpu)
b_gpu = gpuarray.to_gpu(b_cpu)
c_gpu = gpuarray.empty((MATRIX_SIZE, MATRIX_SIZE), np.float32)
# substitute the matrix size into the kernel source
kernel_code = kernel_code_template % {
    'MATRIX_SIZE': MATRIX_SIZE
}
# compile the kernel and get a handle to the compiled function
mod = compiler.SourceModule(kernel_code)
matrixmul = mod.get_function("MatrixMulKernel")
# launch the kernel on a single block of MATRIX_SIZE x MATRIX_SIZE threads
matrixmul(
    a_gpu, b_gpu,
    c_gpu,
    block = (MATRIX_SIZE, MATRIX_SIZE, 1),
    )
# print the results
print "-" * 80
print "Matrix A (GPU):"
print a_gpu.get()
print "-" * 80
print "Matrix B (GPU):"
print b_gpu.get()
print "-" * 80
print "Matrix C (GPU):"
print c_gpu.get()
print "-" * 80
print "CPU-GPU difference:"
print c_cpu - c_gpu.get()
print "results within tolerance:", np.allclose(c_cpu, c_gpu.get())
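# Editorial note (sketch): the kernel above uses a single thread block, so it
# only scales while MATRIX_SIZE * MATRIX_SIZE stays within the per-block thread
# limit (1024 threads on current CUDA hardware, i.e. MATRIX_SIZE <= 32 here).
# Larger matrices would need a tiled kernel plus an explicit grid, along the
# (hypothetical) lines of:
#
#   matrixmul(a_gpu, b_gpu, c_gpu,
#             block=(TILE, TILE, 1),
#             grid=(MATRIX_SIZE // TILE, MATRIX_SIZE // TILE))
#
# where TILE is the tile width and the kernel indexing is adjusted accordingly.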
| IdiosyncraticDragon/Reading-Notes | Python Parallel Programming Cookbook_Code/Chapter 6/PyCUDA/PyCudaMatrixManipulation.py | Python | apache-2.0 | 1,480 |
# python imports
from os import path
import simplejson as json
# library imports
from werkzeug import Response
from werkzeug.exceptions import Forbidden
from mako.lookup import TemplateLookup
from formencode import Invalid
from formencode.variabledecode import variable_decode
# local imports
from ..utils.utils import session, ROOT_DIR, multidict_to_dict, config
from ..utils.mail import email
from ..model import User, CaseStatus, Case, Task, TaskStatus, Evidence, has_permissions, ForemanOptions, UserCaseRoles
from ..model import TaskUpload, EvidencePhotoUpload, Team, Department, CaseHistory, UserTaskRoles, TaskHistory
from ..model import EvidenceHistory, EvidenceStatus, SpecialText, CaseUpload, UserRoles
lookup = TemplateLookup(directories=[path.join(ROOT_DIR, 'templates')], output_encoding='utf-8', input_encoding='utf-8')
def jsonify(func):
    """ Decorator that JSON-encodes the wrapped function's return value in a
    Werkzeug Response; if the function already returns a Response, it is
    passed through unchanged. """
def _wrapper(*args, **kwds):
r = func(*args, **kwds)
if isinstance(r, Response):
return r
else:
return Response(json.dumps(r), mimetype='application/json')
return _wrapper
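# Editorial sketch: typical use of the decorator above on a controller action
# (the handler name and payload are hypothetical); plain dicts are serialised
# to JSON, while Response objects are passed through untouched:
#
#   @jsonify
#   def case_summary(request):
#       return {'open_cases': 3, 'closed_cases': 7}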
class BaseController:
def __init__(self, request, urls):
self.request = request
self.urls = urls
self.form_error = {}
self.form_result = {}
self.user_posted = {}
self._create_breadcrumbs()
def _create_breadcrumbs(self):
self.breadcrumbs = [{'title': 'Home', 'path': self.urls.build('general.index')}]
def return_404(self, **variables):
variables.update(**self._get_base_variables())
template = lookup.get_template(path.join('base', '404.html'))
html = template.render(urls=self.urls, **variables)
return Response(html, mimetype='text/html', status=404)
def return_500(self):
template = lookup.get_template(path.join('base', '500.html'))
html = template.render(urls=self.urls, **self._get_base_variables())
return Response(html, mimetype='text/html', status=500)
def return_403(self):
template = lookup.get_template(path.join('base', '403.html'))
html = template.render(urls=self.urls, **self._get_base_variables())
return Response(html, mimetype='text/html', status=403)
def return_response(self, *location, **variables):
""" Return the rendered template with variables """
variables.update(**self._get_base_variables())
template = lookup.get_template(path.join(*location))
html = template.render(urls=self.urls, breadcrumbs=self.breadcrumbs, **variables)
return Response(html, mimetype='text/html', status=variables.get('_status', 200))
def validate_form(self, schema):
""" Validates a form post against schema. If no form was posted, return False.
If form was posted and it is invalid, return False and set self.form_error.
If form validated correctly, return True and set self.form_result """
if self.request.method != 'POST':
return False
try:
# Convert fields with more than one value into lists
form_vars = multidict_to_dict(self.request.form)
self.user_posted = form_vars
form_vars.update(multidict_to_dict(self.request.files))
self.form_result = schema.to_python(variable_decode(form_vars))
return True
except Invalid, e:
self.form_error = e.unpack_errors(encode_variables=True)
return False
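    # Editorial sketch: a controller action typically pairs validate_form with a
    # formencode schema (names below are hypothetical), e.g.
    #
    #   if self.validate_form(AddCaseForm()):
    #       case_name = self.form_result['case_name']
    #   else:
    #       errors = self.form_error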
def _get_current_user(self):
""" Load the current user from the database. If no user is logged in, return None."""
if 'userid' in self.request.session:
return User.get(self.request.session['userid'])
else:
return None
current_user = property(_get_current_user)
def _get_base_variables(self):
""" Variables needed on every template page. Automatically added """
base_vars = dict()
base_vars['current_user'] = self.current_user
base_vars['check_perms'] = self.check_view_permissions
base_vars['check_perms_user'] = self.check_permissions
base_vars['error_message_website_wide'] = []
base_vars['help_message_website_wide'] = []
base_vars['admin_help_message_website_wide'] = []
base_vars['form_result'] = self.user_posted
base_vars['case_special_text'] = SpecialText.get_text('case').text if SpecialText.get_text(
'case') is not None else ""
base_vars['task_special_text'] = SpecialText.get_text('task').text if SpecialText.get_text(
'task') is not None else ""
base_vars['evidence_special_text'] = SpecialText.get_text('evidence').text if SpecialText.get_text(
'evidence') is not None else ""
if self.current_user:
base_vars['user_qa_cases'] = Case.get_cases(CaseStatus.OPEN, self.current_user, worker=True, QA=True)
base_vars['user_cases'] = Case.get_cases(CaseStatus.OPEN, self.current_user, worker=True)
base_vars['open_cases'] = len(
Case.get_cases(CaseStatus.OPEN, self.current_user, case_perm_checker=self.check_permissions))
base_vars['created_cases'] = len(
Case.get_cases(CaseStatus.CREATED, self.current_user, case_perm_checker=self.check_permissions))
base_vars['created_cases_no_manager'] = len(
Case.get_cases(CaseStatus.CREATED, self.current_user, case_perm_checker=self.check_permissions,
case_man=True))
            if self.current_user.is_case_manager():
base_vars['my_cases'] = len(Case.get_current_cases(self.current_user, self.check_permissions,
self.current_user))
else:
base_vars['my_cases'] = 0
if self.current_user.is_admin():
overload = ForemanOptions.run_out_of_names()
if overload[0]:
base_vars['error_message_website_wide'].append(
{'title': "Task name issue",
'text': """Foreman has run out of names from your uploaded task names list.
Please ask your administrator to add more.
More details can be found in the admin control panel."""
}
)
if overload[1]:
base_vars['error_message_website_wide'].append(
{'title': "Case name issue",
'text': """Foreman has run out of names from your uploaded case names list.
Please ask your administrator to add more.
More details can be found in the admin control panel."""
}
)
if User.get_amount() == 1:
base_vars['admin_help_message_website_wide'].append(
{'title': "Add more users",
                     'text': "You are currently the only user of Foreman. <a href='" +
self.urls.build('user.add') + "'>Add more users here.</a>"
}
)
if self.current_user.id == 1 and User.check_password(self.current_user.username, "changeme"):
base_vars['error_message_website_wide'].append(
{'title': "Change your default password",
'text': """You are currently using the default admin password which is published publicly.
You are advised to change this immediately. """
}
)
num_invalid = User.get_number_unvalidated()
if num_invalid >= 1:
plural = "s" if num_invalid > 1 else ""
base_vars['help_message_website_wide'].append(
{'title': "Validate Users",
'text': "You currently have {} user{} waiting to be validated. "
"<a href='{}'>Validate them here</a>".format(num_invalid, plural,
self.urls.build("general.admin",
dict(active_tab=5)))
}
)
if self.current_user and self.current_user.is_requester():
auths = len(UserRoles.get_authorisers(self.current_user.department))
if auths == 0:
base_vars['help_message_website_wide'].append(
{'title': "You have no authorisers",
'text': """You are currently a requester with no authorisers for your department.
You will not be able to add any new cases unless authorisers are added."""})
base_vars['invRoles'] = TaskStatus.invRoles
base_vars['qaRoles'] = TaskStatus.qaRoles
base_vars['unassigned_tasks'] = len(Task.get_queued_tasks())
base_vars['task_statuses'] = {'created': TaskStatus.CREATED, 'start': TaskStatus.ALLOCATED,
'progress': TaskStatus.PROGRESS, 'deliver': TaskStatus.DELIVERY,
'queued': TaskStatus.QUEUED, 'complete': TaskStatus.COMPLETE, 'qa': TaskStatus.QA,
'closed': TaskStatus.CLOSED}
base_vars['case_statuses'] = {'created': CaseStatus.CREATED, 'archived': CaseStatus.ARCHIVED,
'closed': CaseStatus.CLOSED, 'open': CaseStatus.OPEN,
'rejected': CaseStatus.REJECTED, 'pending': CaseStatus.PENDING}
if self.current_user and self.current_user.is_requester():
base_vars['requester_created_cases'] = Case.get_cases_requested(self.current_user, self.check_permissions,
self.current_user, [CaseStatus.CREATED])
base_vars['requester_opened_cases'] = Case.get_cases_requested(self.current_user, self.check_permissions,
self.current_user, [CaseStatus.OPEN])
base_vars['requester_closed_cases'] = Case.get_cases_requested(self.current_user, self.check_permissions,
self.current_user, [CaseStatus.CLOSED])
base_vars['requester_archived_cases'] = Case.get_cases_requested(self.current_user, self.check_permissions,
self.current_user, [CaseStatus.ARCHIVED])
base_vars['requester_rejected_cases'] = Case.get_cases_requested(self.current_user, self.check_permissions,
self.current_user, [CaseStatus.REJECTED])
base_vars['requester_pending_cases'] = Case.get_cases_requested(self.current_user, self.check_permissions,
self.current_user, [CaseStatus.PENDING])
if self.current_user and self.current_user.is_case_manager():
base_vars['caseman_rejected_cases'] = Case.get_cases_requested_case_manager(self.current_user,
self.check_permissions,
self.current_user,
[CaseStatus.REJECTED])
base_vars['caseman_pending_cases'] = Case.get_cases_requested_case_manager(self.current_user,
self.check_permissions,
self.current_user,
[CaseStatus.PENDING])
if self.current_user and self.current_user.is_authoriser():
base_vars['authoriser_to_authorise'] = Case.get_cases_authorised(self.current_user, self.check_permissions,
self.current_user, [CaseStatus.PENDING])
base_vars['authoriser_rejected'] = Case.get_cases_authorised(self.current_user, self.check_permissions,
self.current_user, [CaseStatus.REJECTED])
base_vars['authoriser_authorised'] = Case.get_cases_authorised(self.current_user, self.check_permissions,
self.current_user,
CaseStatus.approved_statuses)
return base_vars
@staticmethod
def send_email_alert(to_users, title, message):
email_addr = [user.email for user in to_users if user is not None]
subject = "auto notification: {}".format(title)
email(email_addr, subject, """
Hello,
{}
Thanks,
Foreman
{}""".format(message, config.get('admin', 'website_domain')), config.get('email', 'from_address'))
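    # Editorial sketch: callers pass a list of User objects plus a short title
    # and message body, e.g. (hypothetical values):
    #
    #   BaseController.send_email_alert([requester_user], "Case opened",
    #                                    "Case FOR-001 has been opened.")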
@staticmethod
def _validate_task(case_id, task_id):
try:
int(case_id)
except ValueError:
return None
case = Case.get(case_id)
if case is not None:
try:
int(task_id)
except ValueError:
return None
task = Task.get(task_id)
if task.case.id == case.id:
return task
else:
return None
else:
return None
@staticmethod
def _validate_task_upload(case_id, task_id, upload_id):
task = BaseController._validate_task(case_id, task_id)
if task is not None:
try:
int(upload_id)
except ValueError:
return None
upload = TaskUpload.get_filter_by(task_id=task.id, id=upload_id).first()
if upload is not None and upload.deleted is False:
return upload
else:
return None
else:
return None
@staticmethod
def _validate_case_upload(case_id, upload_id):
case = BaseController._validate_case(case_id)
if case is not None:
try:
int(upload_id)
except ValueError:
return None
upload = CaseUpload.get_filter_by(case_id=case.id, id=upload_id).first()
if upload is not None and upload.deleted is False:
return upload
else:
return None
else:
return None
@staticmethod
def _validate_evidence_photo(evidence_id, upload_id):
evidence = BaseController._validate_evidence(evidence_id)
if evidence is not None:
try:
int(upload_id)
except ValueError:
return None
upload = EvidencePhotoUpload.get_filter_by(evidence_id=evidence.id, id=upload_id).first()
if upload is not None and upload.deleted is False:
return upload
else:
return None
else:
return None
@staticmethod
def _validate_case(case_id):
try:
int(case_id)
except ValueError:
return None
case = Case.get(case_id)
return case
@staticmethod
def _validate_user(user_id):
try:
int(user_id)
except ValueError:
return None
user = User.get_filter_by(id=user_id).first()
return user
@staticmethod
def _validate_evidence(evidence_id, case_id=None):
try:
int(evidence_id)
except ValueError:
return None
if case_id:
case = Case.get(case_id)
if case is not None:
evidence = Evidence.get_filter_by(case_id=case.id, id=evidence_id).first()
return evidence
else:
return None
else:
evidence = Evidence.get_filter_by(id=evidence_id).first()
return evidence
@staticmethod
def _validate_team(team_id):
try:
int(team_id)
except ValueError:
return None
team = Team.get_filter_by(id=team_id).first()
return team
@staticmethod
def _validate_department(dep_id):
try:
int(dep_id)
except ValueError:
return None
team = Department.get_filter_by(id=dep_id).first()
return team
@staticmethod
def check_permissions(user, obj, action):
allowed = has_permissions(user, obj, action)
if not allowed:
raise Forbidden
def check_view_permissions(self, obj, action):
return has_permissions(self.current_user, obj, action)
@staticmethod
def _get_case_history_changes(case):
history = CaseHistory.get_changes(case)
statuses = CaseStatus.get_changes(case)
uploads = CaseUpload.get_changes(case)
results = history + statuses + uploads
results.sort(key=lambda d: d['date_time'])
return results
@staticmethod
def _get_case_manager_history_changes(case):
primary = UserCaseRoles.get_history(case, UserCaseRoles.PRINCIPLE_CASE_MANAGER)
secondary = UserCaseRoles.get_history(case, UserCaseRoles.SECONDARY_CASE_MANAGER)
results = primary + secondary
results.sort(key=lambda d: d['date_time'])
return results
@staticmethod
def _get_all_user_history_changes(case):
case_managers = BaseController._get_case_manager_history_changes(case)
requester = UserCaseRoles.get_history(case, UserCaseRoles.REQUESTER)
authoriser = UserCaseRoles.get_history(case, UserCaseRoles.AUTHORISER)
requester = [] if requester is None else requester
authoriser = [] if authoriser is None else authoriser
results = case_managers + requester + authoriser
results.sort(key=lambda d: d['date_time'])
return results
@staticmethod
def _get_all_task_user_history_changes(task):
primary = UserTaskRoles.get_history(task, UserTaskRoles.PRINCIPLE_INVESTIGATOR)
secondary = UserTaskRoles.get_history(task, UserTaskRoles.SECONDARY_INVESTIGATOR)
primary_qa = UserTaskRoles.get_history(task, UserTaskRoles.PRINCIPLE_QA)
secondary_qa = UserTaskRoles.get_history(task, UserTaskRoles.SECONDARY_QA)
results = primary + secondary + primary_qa + secondary_qa
results.sort(key=lambda d: d['date_time'])
return results
@staticmethod
def _get_tasks_history_changes(case):
history = []
for task in case.tasks:
history += BaseController._get_task_history_changes(task)
history.sort(key=lambda d: d['date_time'])
return history
@staticmethod
def _get_task_history_changes(task):
history = []
history += TaskHistory.get_changes(task)
history += TaskStatus.get_changes(task)
history.sort(key=lambda d: d['date_time'])
return history
@staticmethod
def _get_evidence_history_changes(evidence):
history = []
history += EvidenceHistory.get_changes(evidence)
history += EvidenceStatus.get_changes(evidence)
history.sort(key=lambda d: d['date_time'])
return history
def _create_new_user_role(self, role, obj, form_result, role_obj="case"):
if role_obj == "case":
role_class = UserCaseRoles
user_role = role_class.get_filter_by(role=role, case=obj).first()
else:
role_class = UserTaskRoles
user_role = role_class.get_filter_by(role=role, task=obj).first()
if form_result is None:
if user_role is None:
# no change, empty role stays empty
pass
else:
# person being removed
user_role.add_change(self.current_user, True)
session.flush()
else:
if user_role is None:
# empty role getting a person added
new_role = role_class(form_result, obj, role)
session.add(new_role)
session.flush()
new_role.add_change(self.current_user)
else:
# person being replaced
user_role.add_change(self.current_user, form_result)
session.flush()
| ubunteroz/foreman | foreman/controllers/baseController.py | Python | gpl-3.0 | 21,061 |
"""Unit tests for the driving_license module."""
import datetime
import typing
import unittest
from bob_emploi.frontend.api import geo_pb2
from bob_emploi.frontend.api import boolean_pb2
from bob_emploi.frontend.server.test import scoring_test
class DrivingLicenseHelpScoringModelTestCase(scoring_test.ScoringModelTestBase):
"""Unit tests for the "Get help for getting your driving license" advice."""
model_id = 'advice-driving-license-low-income'
def _create_scoreable_persona(self, rome_id: str = 'A1234', departement: str = '69') \
-> 'scoring_test._Persona':
"""Assumes user does not have CAR driving license,
is old enough and has been searching for some time.
"""
self.now = datetime.datetime(2018, 2, 2)
self.database.local_diagnosis.insert_one({
'_id': f'{departement}:{rome_id}',
'salary': {
'medianSalary': 18000,
},
})
persona = self._random_persona().clone()
persona.user_profile.year_of_birth = 1990
persona.project.ClearField('job_search_has_not_started')
persona.project.target_job.job_group.rome_id = rome_id
persona.project.city.departement_id = departement
persona.project.job_search_started_at.FromDatetime(datetime.datetime(2017, 5, 1))
persona.user_profile.has_car_driving_license = boolean_pb2.FALSE
return persona
def test_already_has_license(self) -> None:
"""User already has driving license."""
persona = self._random_persona().clone()
persona.user_profile.has_car_driving_license = boolean_pb2.TRUE
score = self._score_persona(persona)
self.assertEqual(score, 0, msg=f'Failed for "{persona.name}"')
def test_license_status_unknown(self) -> None:
"""We don't know whether user has driving license."""
persona = self._random_persona().clone()
persona.user_profile.ClearField('has_car_driving_license')
score = self._score_persona(persona)
self.assertEqual(score, 0, msg=f'Failed for "{persona.name}"')
def test_is_too_young(self) -> None:
"""User is younger than required age for driving license."""
self.now = datetime.datetime(2018, 2, 2)
persona = self._random_persona().clone()
persona.user_profile.year_of_birth = 1995
score = self._score_persona(persona)
self.assertEqual(score, 0, msg=f'Failed for "{persona.name}"')
def test_not_searched_enough(self) -> None:
"""User hasn't been searching for long."""
self.now = datetime.datetime(2018, 2, 2)
persona = self._random_persona().clone()
persona.project.job_search_started_at.FromDatetime(datetime.datetime(2017, 11, 1))
score = self._score_persona(persona)
self.assertEqual(score, 0, msg=f'Failed for "{persona.name}"')
def test_too_rich(self) -> None:
"""User has probably too much in indemnities."""
self.database.local_diagnosis.insert_one({
'_id': '69:A1234',
'salary': {
'medianSalary': 25000,
},
})
persona = self._random_persona().clone()
persona.project.target_job.job_group.rome_id = 'A1234'
persona.project.city.departement_id = '69'
score = self._score_persona(persona)
self.assertEqual(score, 0, msg=f'Failed for "{persona.name}"')
def test_big_city_not_required(self) -> None:
"""User lives in a large enough city with good public transportation,
and job doesn't need a car."""
self.database.job_group_info.insert_one({
'_id': 'A1234',
'requirements': {
'drivingLicenses': [],
},
})
persona = self._random_persona().clone()
persona.project.city.urban_score = 7
persona.project.city.public_transportation_score = 8
persona.project.target_job.job_group.rome_id = 'A1234'
score = self._score_persona(persona)
self.assertEqual(score, 0, msg=f'Failed for "{persona.name}"')
def test_required_by_job(self) -> None:
"""Job group expects people to have a car."""
self.database.job_group_info.insert_one({
'_id': 'A1234',
'requirements': {
'drivingLicenses': [{
'drivingLicense': 'CAR',
'percentRequired': 90,
}],
},
})
persona = self._create_scoreable_persona(rome_id='A1234')
persona.project.city.urban_score = 7
score = self._score_persona(persona)
self.assertEqual(score, 3, msg=f'Failed for "{persona.name}"')
def test_small_city(self) -> None:
"""Small town people need cars more often."""
persona = self._create_scoreable_persona()
persona.project.city.urban_score = 5
persona.project.city.public_transportation_score = 7
score = self._score_persona(persona)
self.assertGreaterEqual(score, 1, msg=f'Failed for "{persona.name}"')
def test_bad_transport_city(self) -> None:
"""City with bad public transportations forces people to use cars."""
persona = self._create_scoreable_persona()
persona.project.city.urban_score = 7
persona.project.city.public_transportation_score = 3.2
score = self._score_persona(persona)
self.assertGreaterEqual(score, 1, msg=f'Failed for "{persona.name}"')
def test_expanded_card_data(self) -> None:
"""city coordinates are given as expanded card data."""
self.database.cities.insert_one({
'_id': '69383',
'latitude': 45.5,
'longitude': 4.5,
})
persona = self._create_scoreable_persona()
persona.project.city.city_id = '69383'
project = persona.scoring_project(self.database)
result = typing.cast(geo_pb2.FrenchCity, self.model.get_expanded_card_data(project))
self.assertEqual(result.latitude, 45.5, msg=f'Failed for "{persona.name}"')
self.assertEqual(result.longitude, 4.5, msg=f'Failed for "{persona.name}"')
class DrivingLicenseOneEuroScoringModelTestCase(scoring_test.ScoringModelTestBase):
"""Unit tests for the "Driving License at 1 euro / day" advice."""
model_id = 'advice-driving-license-euro'
def _create_scoreable_persona(self) -> 'scoring_test._Persona':
"""Assumes user does not have CAR driving license,
is old enough and has been searching for some time.
"""
self.now = datetime.datetime(2018, 2, 2)
persona = self._random_persona().clone()
persona.user_profile.year_of_birth = 2000
persona.user_profile.has_car_driving_license = boolean_pb2.FALSE
return persona
def test_already_has_license(self) -> None:
"""User already has driving license."""
persona = self._random_persona().clone()
persona.user_profile.has_car_driving_license = boolean_pb2.TRUE
score = self._score_persona(persona)
self.assertEqual(score, 0, msg=f'Failed for "{persona.name}"')
def test_license_status_unknown(self) -> None:
"""We don't know whether user has driving license."""
persona = self._random_persona().clone()
persona.user_profile.ClearField('has_car_driving_license')
score = self._score_persona(persona)
self.assertEqual(score, 0, msg=f'Failed for "{persona.name}"')
def test_is_too_young(self) -> None:
"""User is younger than required age for 1 euro driving license program."""
self.now = datetime.datetime(2018, 2, 2)
persona = self._random_persona().clone()
persona.user_profile.year_of_birth = 2004
score = self._score_persona(persona)
self.assertEqual(score, 0, msg=f'Failed for "{persona.name}"')
def test_is_too_old(self) -> None:
"""User is older than required age for 1 euro driving license program."""
self.now = datetime.datetime(2018, 2, 2)
persona = self._random_persona().clone()
persona.user_profile.year_of_birth = 1987
score = self._score_persona(persona)
self.assertEqual(score, 0, msg=f'Failed for "{persona.name}"')
def test_big_city_not_required(self) -> None:
"""User lives in a large enough city, and job doesn't need a car."""
self.database.job_group_info.insert_one({
'_id': 'A1234',
'requirements': {
'drivingLicenses': [],
},
})
persona = self._random_persona().clone()
persona.project.city.urban_score = 7
persona.project.city.public_transportation_score = 7
persona.project.target_job.job_group.rome_id = 'A1234'
score = self._score_persona(persona)
self.assertEqual(score, 0, msg=f'Failed for "{persona.name}"')
def test_required_by_job(self) -> None:
"""Job group expects people to have a car."""
self.database.job_group_info.insert_one({
'_id': 'A1234',
'requirements': {
'drivingLicenses': [{
'drivingLicense': 'CAR',
'percentRequired': 90,
}],
},
})
persona = self._create_scoreable_persona()
persona.project.target_job.job_group.rome_id = 'A1234'
persona.project.city.urban_score = 7
score = self._score_persona(persona)
self.assertEqual(score, 3, msg=f'Failed for "{persona.name}"')
def test_small_city(self) -> None:
"""Small town people need cars more often."""
persona = self._create_scoreable_persona()
persona.project.city.urban_score = 5
persona.project.city.public_transportation_score = 7
score = self._score_persona(persona)
self.assertGreaterEqual(score, 1, msg=f'Failed for "{persona.name}"')
def test_bad_transport_city(self) -> None:
"""City with bad public transportations forces people to use cars."""
persona = self._create_scoreable_persona()
persona.project.city.urban_score = 7
persona.project.city.public_transportation_score = 3.2
score = self._score_persona(persona)
self.assertGreaterEqual(score, 1, msg=f'Failed for "{persona.name}"')
class DrivingLicenseWrittenScoringModelTestCase(scoring_test.ScoringModelTestBase):
"""Unit tests for the "Prepare your driving license written exam" advice."""
model_id = 'advice-driving-license-written'
def _create_scoreable_persona(self) -> 'scoring_test._Persona':
"""Assumes user does not have CAR driving license,
is old enough and has been searching for some time.
"""
self.now = datetime.datetime(2018, 2, 2)
persona = self._random_persona().clone()
persona.user_profile.year_of_birth = 2000
persona.user_profile.has_car_driving_license = boolean_pb2.FALSE
return persona
def test_already_has_license(self) -> None:
"""User already has driving license."""
persona = self._random_persona().clone()
persona.user_profile.has_car_driving_license = boolean_pb2.TRUE
score = self._score_persona(persona)
self.assertEqual(score, 0, msg=f'Failed for "{persona.name}"')
def test_license_status_unknown(self) -> None:
"""We don't know whether user has driving license."""
persona = self._random_persona().clone()
persona.user_profile.ClearField('has_car_driving_license')
score = self._score_persona(persona)
self.assertEqual(score, 0, msg=f'Failed for "{persona.name}"')
def test_is_too_young(self) -> None:
"""User is younger than required age for 1 euro driving license program."""
self.now = datetime.datetime(2018, 2, 2)
persona = self._random_persona().clone()
persona.user_profile.year_of_birth = 2004
score = self._score_persona(persona)
self.assertEqual(score, 0, msg=f'Failed for "{persona.name}"')
def test_big_city_not_required(self) -> None:
"""User lives in a large enough city, and job doesn't need a car."""
self.database.job_group_info.insert_one({
'_id': 'A1234',
'requirements': {
'drivingLicenses': [],
},
})
persona = self._random_persona().clone()
persona.project.city.urban_score = 7
persona.project.city.public_transportation_score = 7
persona.project.target_job.job_group.rome_id = 'A1234'
score = self._score_persona(persona)
self.assertEqual(score, 0, msg=f'Failed for "{persona.name}"')
def test_required_by_job(self) -> None:
"""Job group expects people to have a car."""
self.database.job_group_info.insert_one({
'_id': 'A1234',
'requirements': {
'drivingLicenses': [{
'drivingLicense': 'CAR',
'percentRequired': 90,
}],
},
})
persona = self._create_scoreable_persona()
persona.project.target_job.job_group.rome_id = 'A1234'
persona.project.city.urban_score = 7
score = self._score_persona(persona)
self.assertEqual(score, 2, msg=f'Failed for "{persona.name}"')
def test_small_city(self) -> None:
"""Small town people need cars more often."""
persona = self._create_scoreable_persona()
persona.project.city.urban_score = 5
persona.project.city.public_transportation_score = 7
score = self._score_persona(persona)
self.assertGreaterEqual(score, 1, msg=f'Failed for "{persona.name}"')
def test_bad_transport_city(self) -> None:
"""City with bad public transportations forces people to use cars."""
persona = self._create_scoreable_persona()
persona.project.city.urban_score = 7
persona.project.city.public_transportation_score = 3.2
score = self._score_persona(persona)
self.assertGreaterEqual(score, 1, msg=f'Failed for "{persona.name}"')
if __name__ == '__main__':
unittest.main()
| bayesimpact/bob-emploi | frontend/server/modules/test/driving_license_test.py | Python | gpl-3.0 | 14,248 |
""" test File Plugin
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import mock
import unittest
import tempfile
import os
import shutil
import errno
from DIRAC import S_OK
from DIRAC.Resources.Storage.StorageElement import StorageElementItem
def mock_StorageFactory_getConfigStorageName(storageName, referenceType, seConfigPath=None):
resolvedName = storageName
return S_OK(resolvedName)
def mock_StorageFactory_getConfigStorageOptions(storageName, derivedStorageName=None, seConfigPath=None):
"""Get the options associated to the StorageElement as defined in the CS"""
optionsDict = {
"BackendType": "local",
"ReadAccess": "Active",
"WriteAccess": "Active",
"AccessProtocols": ["file"],
"WriteProtocols": ["file"],
}
return S_OK(optionsDict)
def mock_StorageFactory_getConfigStorageProtocols(storageName, derivedStorageName=None, seConfigPath=None):
"""Protocol specific information is present as sections in the Storage configuration"""
protocolDetails = {
"Section": {
"Host": "",
"Path": "/tmp/se",
"PluginName": "File",
"Port": "",
"Protocol": "file",
"SpaceToken": "",
"WSUrl": "",
}
}
return S_OK(protocolDetails)
class TestBase(unittest.TestCase):
    """Base test class. Defines all the methods to test."""
@mock.patch(
"DIRAC.Resources.Storage.StorageFactory.StorageFactory._getConfigStorageName",
side_effect=mock_StorageFactory_getConfigStorageName,
)
@mock.patch(
"DIRAC.Resources.Storage.StorageFactory.StorageFactory._getConfigStorageOptions",
side_effect=mock_StorageFactory_getConfigStorageOptions,
)
@mock.patch(
"DIRAC.Resources.Storage.StorageFactory.StorageFactory._getConfigStorageProtocols",
side_effect=mock_StorageFactory_getConfigStorageProtocols,
)
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
return_value=S_OK(True),
) # Pretend it's local
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
) # Don't send accounting
def setUp(
self,
mk_getConfigStorageName,
mk_getConfigStorageOptions,
mk_getConfigStorageProtocols,
mk_isLocalSE,
mk_addAccountingOperation,
):
self.se = StorageElementItem("FAKE")
self.se.vo = "test"
self.basePath = tempfile.mkdtemp(dir="/tmp")
# Update the basePath of the plugin
self.se.storages[0].basePath = self.basePath
self.srcPath = tempfile.mkdtemp(dir="/tmp")
self.destPath = tempfile.mkdtemp(dir="/tmp")
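        # Logical paths as seen through the SE, with their on-disk sizes filled in below.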
self.existingFile = "/test/file.txt"
self.existingFileSize = 0
self.nonExistingFile = "/test/nonExistingFile.txt"
self.subDir = "/test/subDir"
self.subFile = os.path.join(self.subDir, "subFile.txt")
self.subFileSize = 0
self.FILES = [self.existingFile, self.nonExistingFile, self.subFile]
self.DIRECTORIES = [self.subDir]
self.ALL = self.FILES + self.DIRECTORIES
with open(os.path.join(self.srcPath, self.existingFile.replace("/test/", "")), "w") as f:
f.write("I put something in the file so that it has a size\n")
self.existingFileSize = os.path.getsize(os.path.join(self.srcPath, self.existingFile.replace("/test/", "")))
assert self.existingFileSize
os.mkdir(os.path.join(self.srcPath, os.path.basename(self.subDir)))
with open(os.path.join(self.srcPath, self.subFile.replace("/test/", "")), "w") as f:
f.write("This one should have a size as well\n")
self.subFileSize = os.path.getsize(os.path.join(self.srcPath, self.subFile.replace("/test/", "")))
assert self.subFileSize
def tearDown(self):
shutil.rmtree(self.basePath)
shutil.rmtree(self.srcPath)
shutil.rmtree(self.destPath)
pass
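    # Debug helpers: print the directory trees of the SE base path and of the local destination path.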
def walkAll(self):
for dirname in [self.basePath, self.destPath]:
self.walkPath(dirname)
def walkPath(self, path):
for root, dirs, files in os.walk(path):
print(root)
print(" dirs")
for d in dirs:
print(" ", os.path.join(root, d))
print(" files")
for f in files:
print(" ", os.path.join(root, f))
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
return_value=S_OK(True),
) # Pretend it's local
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
) # Don't send accounting
def test_01_getURL(self, mk_isLocalSE, mk_addAccounting):
"""Testing getURL"""
# Testing the getURL
res = self.se.getURL(self.ALL)
self.assertTrue(res["OK"], res)
self.assertTrue(not res["Value"]["Failed"], res["Value"]["Failed"])
self.assertTrue(len(res["Value"]["Successful"]) == len(self.ALL))
for lfn, url in res["Value"]["Successful"].items():
self.assertEqual(url, self.basePath.rstrip("/") + lfn)
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
return_value=S_OK(True),
) # Pretend it's local
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
) # Don't send accounting
def test_02_FileTest(self, mk_isLocalSE, mk_addAccounting):
"""Testing createDirectory"""
# Putting the files
def localPutFile(fn, size=0):
"""If fn is '/test/fn.txt', it calls
{ '/test/fn.txt' : /tmp/generatedPath/fn.txt}
"""
transfDic = {fn: os.path.join(self.srcPath, fn.replace("/test/", ""))}
return self.se.putFile(transfDic, sourceSize=size)
# wrong size
res = localPutFile(self.existingFile, size=-1)
self.assertTrue(res["OK"], res)
self.assertTrue(self.existingFile in res["Value"]["Failed"], res)
self.assertTrue("not match" in res["Value"]["Failed"][self.existingFile], res)
self.assertTrue(not os.path.exists(self.basePath + self.existingFile))
# Correct size
res = localPutFile(self.existingFile, size=self.existingFileSize)
self.assertTrue(res["OK"], res)
self.assertTrue(self.existingFile in res["Value"]["Successful"], res)
self.assertTrue(os.path.exists(self.basePath + self.existingFile))
# No size
res = localPutFile(self.existingFile)
self.assertTrue(res["OK"], res)
self.assertTrue(self.existingFile in res["Value"]["Successful"], res)
self.assertTrue(os.path.exists(self.basePath + self.existingFile))
# No existing source file
res = localPutFile(self.nonExistingFile)
self.assertTrue(res["OK"], res)
self.assertTrue(self.nonExistingFile in res["Value"]["Failed"], res)
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
# sub file
res = localPutFile(self.subFile)
self.assertTrue(res["OK"], res)
self.assertTrue(self.subFile in res["Value"]["Successful"], res)
self.assertTrue(os.path.exists(self.basePath + self.subFile))
# Directory
res = localPutFile(self.subDir)
self.assertTrue(res["OK"], res)
self.assertTrue(self.subDir in res["Value"]["Failed"])
self.assertTrue(
os.strerror(errno.EISDIR) in res["Value"]["Failed"][self.subDir] or
# Python 3.9.7+ improved the Exception that is raised
"Directory does not exist" in res["Value"]["Failed"][self.subDir],
res,
)
res = self.se.exists(self.FILES)
self.assertTrue(res["OK"], res)
self.assertTrue(not res["Value"]["Failed"], res)
self.assertTrue(res["Value"]["Successful"][self.existingFile], res)
self.assertTrue(not res["Value"]["Successful"][self.nonExistingFile], res)
res = self.se.getFileSize(self.ALL)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Successful"][self.existingFile], self.existingFileSize)
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
self.assertTrue(os.strerror(errno.EISDIR) in res["Value"]["Failed"][self.subDir], res)
res = self.se.getFileMetadata(self.ALL)
self.assertTrue(res["OK"], res)
self.assertTrue(self.existingFile in res["Value"]["Successful"])
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
self.assertTrue(os.strerror(errno.EISDIR) in res["Value"]["Failed"][self.subDir], res)
res = self.se.isFile(self.ALL)
self.assertTrue(res["OK"], res)
self.assertTrue(res["Value"]["Successful"][self.existingFile], res)
self.assertTrue(not res["Value"]["Successful"][self.subDir], res)
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
res = self.se.getFile(self.ALL, localPath=self.destPath)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Successful"][self.existingFile], self.existingFileSize)
self.assertTrue(os.path.exists(os.path.join(self.destPath, os.path.basename(self.existingFile))))
self.assertEqual(res["Value"]["Successful"][self.subFile], self.subFileSize)
self.assertTrue(os.path.exists(os.path.join(self.destPath, os.path.basename(self.subFile))))
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
self.assertTrue(
os.strerror(errno.EISDIR) in res["Value"]["Failed"][self.subDir] or
# Python 3.9.7+ improved the Exception that is raised
"Directory does not exist" in res["Value"]["Failed"][self.subDir],
res,
)
res = self.se.removeFile(self.ALL)
self.assertTrue(res["OK"], res)
self.assertTrue(res["Value"]["Successful"][self.existingFile])
self.assertTrue(not os.path.exists(self.basePath + self.existingFile))
self.assertTrue(res["Value"]["Successful"][self.subFile])
self.assertTrue(not os.path.exists(self.basePath + self.subFile))
self.assertTrue(res["Value"]["Successful"][self.nonExistingFile])
self.assertTrue(os.strerror(errno.EISDIR) in res["Value"]["Failed"][self.subDir])
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
return_value=S_OK(True),
) # Pretend it's local
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
) # Don't send accounting
def test_03_createDirectory(self, mk_isLocalSE, mk_addAccounting):
"""Testing creating directories"""
res = self.se.createDirectory(self.subDir)
self.assertTrue(res["OK"], res)
self.assertTrue(self.subDir in res["Value"]["Successful"])
self.assertTrue(os.path.exists(self.basePath + self.subDir))
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE",
return_value=S_OK(True),
) # Pretend it's local
@mock.patch(
"DIRAC.Resources.Storage.StorageElement.StorageElementItem.addAccountingOperation", return_value=None
) # Don't send accounting
def test_04_putDirectory(self, mk_isLocalSE, mk_addAccounting):
"""Testing putDirectory"""
nonExistingDir = "/test/forsuredoesnotexist"
localdirs = ["/test", nonExistingDir]
# Correct size
res = self.se.putDirectory({"/test": self.srcPath})
self.assertTrue(res["OK"], res)
self.assertTrue("/test" in res["Value"]["Successful"], res)
self.assertEqual(
res["Value"]["Successful"]["/test"], {"Files": 2, "Size": self.existingFileSize + self.subFileSize}
)
self.assertTrue(os.path.exists(self.basePath + "/test"))
self.assertTrue(os.path.exists(self.basePath + self.existingFile))
self.assertTrue(os.path.exists(self.basePath + self.subFile))
# No existing source directory
res = self.se.putDirectory({"/test": nonExistingDir})
self.assertTrue(res["OK"], res)
self.assertTrue("/test" in res["Value"]["Failed"], res)
self.assertEqual(res["Value"]["Failed"]["/test"], {"Files": 0, "Size": 0})
# sub file
res = self.se.putDirectory({"/test": self.existingFile})
self.assertTrue(res["OK"], res)
self.assertTrue("/test" in res["Value"]["Failed"], res)
self.assertEqual(res["Value"]["Failed"]["/test"], {"Files": 0, "Size": 0})
res = self.se.exists(self.DIRECTORIES + localdirs)
self.assertTrue(res["OK"], res)
self.assertTrue(not res["Value"]["Failed"], res)
self.assertTrue(res["Value"]["Successful"][self.subDir], res)
self.assertTrue(not res["Value"]["Successful"][nonExistingDir], res)
res = self.se.getDirectorySize(self.ALL + localdirs)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Successful"][self.subDir], {"Files": 1, "Size": self.subFileSize, "SubDirs": 0})
self.assertEqual(res["Value"]["Successful"]["/test"], {"Files": 1, "Size": self.existingFileSize, "SubDirs": 1})
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
self.assertTrue(os.strerror(errno.ENOTDIR) in res["Value"]["Failed"][self.existingFile], res)
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][nonExistingDir], res)
res = self.se.getDirectoryMetadata(self.ALL + localdirs)
self.assertTrue(res["OK"], res)
self.assertTrue(self.subDir in res["Value"]["Successful"])
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][nonExistingDir], res)
self.assertTrue(os.strerror(errno.ENOTDIR) in res["Value"]["Failed"][self.existingFile], res)
res = self.se.isDirectory(self.ALL + localdirs)
self.assertTrue(res["OK"], res)
self.assertTrue(not res["Value"]["Successful"][self.existingFile])
self.assertTrue(res["Value"]["Successful"][self.subDir], res)
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][nonExistingDir], res)
res = self.se.listDirectory(self.ALL + localdirs)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Successful"][self.subDir], {"Files": [self.subFile], "SubDirs": []})
self.assertEqual(res["Value"]["Successful"]["/test"], {"Files": [self.existingFile], "SubDirs": [self.subDir]})
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][self.nonExistingFile], res)
self.assertTrue(os.strerror(errno.ENOTDIR) in res["Value"]["Failed"][self.existingFile], res)
self.assertTrue(os.strerror(errno.ENOENT) in res["Value"]["Failed"][nonExistingDir], res)
res = self.se.getDirectory(self.ALL + localdirs, localPath=self.destPath)
self.assertTrue(res["OK"], res)
self.assertEqual(
res["Value"]["Successful"]["/test"], {"Files": 2, "Size": self.existingFileSize + self.subFileSize}
)
self.assertTrue(os.path.exists(self.destPath + self.existingFile))
self.assertTrue(os.path.exists(self.destPath + self.subFile))
self.assertEqual(res["Value"]["Successful"][self.subDir], {"Files": 1, "Size": self.subFileSize})
self.assertTrue(os.path.exists(self.destPath + self.subFile.replace("/test", "")))
self.assertEqual(res["Value"]["Failed"][self.nonExistingFile], {"Files": 0, "Size": 0})
self.assertEqual(res["Value"]["Failed"][self.existingFile], {"Files": 0, "Size": 0})
self.assertEqual(res["Value"]["Failed"][nonExistingDir], {"Files": 0, "Size": 0})
res = self.se.removeDirectory(nonExistingDir, recursive=False)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Successful"][nonExistingDir], True)
res = self.se.removeDirectory(nonExistingDir, recursive=True)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Failed"][nonExistingDir], {"FilesRemoved": 0, "SizeRemoved": 0})
res = self.se.removeDirectory(self.nonExistingFile, recursive=False)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Successful"][self.nonExistingFile], True)
res = self.se.removeDirectory(self.nonExistingFile, recursive=True)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Failed"][self.nonExistingFile], {"FilesRemoved": 0, "SizeRemoved": 0})
res = self.se.removeDirectory(self.existingFile, recursive=False)
self.assertTrue(res["OK"], res)
self.assertTrue(os.strerror(errno.ENOTDIR) in res["Value"]["Failed"][self.existingFile], res)
res = self.se.removeDirectory(self.existingFile, recursive=True)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Failed"][self.existingFile], {"FilesRemoved": 0, "SizeRemoved": 0})
res = self.se.removeDirectory("/test", recursive=False)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Successful"]["/test"], True)
self.assertTrue(not os.path.exists(self.basePath + self.existingFile))
self.assertTrue(os.path.exists(self.basePath + self.subFile))
res = self.se.removeDirectory("/test", recursive=True)
self.assertTrue(res["OK"], res)
self.assertEqual(res["Value"]["Successful"]["/test"], {"FilesRemoved": 1, "SizeRemoved": self.subFileSize})
self.assertTrue(not os.path.exists(self.basePath + "/test"))
if __name__ == "__main__":
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestBase)
unittest.TextTestRunner(verbosity=2).run(suite)
| ic-hep/DIRAC | src/DIRAC/Resources/Storage/test/Test_FilePlugin.py | Python | gpl-3.0 | 18,616 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-03-05 16:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ticketing', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FeatureRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(help_text='Enter a short, descriptive name of the feature request.', max_length=300, unique=True)),
('desc', models.TextField()),
('targetdate', models.DateField()),
('url', models.URLField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Enter the name of the Product', max_length=150, unique=True)),
],
),
migrations.AddField(
model_name='featurerequest',
name='client',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tickets', to='ticketing.Product'),
),
]
| himadriganguly/featurerequest | ticketing/migrations/0002_auto_20160305_1647.py | Python | gpl-3.0 | 1,432 |
# coding: utf-8
from websocket import create_connection
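# Minimal client: connect to the local echo endpoint and print incoming messages until the connection closes.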
def ws_handler():
ws = create_connection("ws://localhost:8000/echo")
try:
# ws.send("Hello, world")
while 1:
result = ws.recv()
print(result)
except:
pass
finally:
ws.close()
# with create_connection("ws://localhost:8000/echo") as ws:
# ws.send("Hello world")
# result = ws.recv()
# print(result)
if __name__ == "__main__":
ws_handler()
| fpagyu/glory | pi/client.py | Python | mit | 500 |
#!/usr/bin/python
""" PN CLI vrouter-create/vrouter-delete/vrouter-modify """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: pn_vrouter
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
short_description: CLI command to create/delete/modify a vrouter.
description:
- Execute vrouter-create, vrouter-delete, vrouter-modify command.
- Each fabric, cluster, standalone switch, or virtual network (VNET) can
provide its tenants with a virtual router (vRouter) service that forwards
traffic between networks and implements Layer 3 protocols.
- C(vrouter-create) creates a new vRouter service.
- C(vrouter-delete) deletes a vRouter service.
- C(vrouter-modify) modifies a vRouter service.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the CLI on.
required: False
state:
description:
- State the action to perform. Use 'present' to create vrouter,
'absent' to delete vrouter and 'update' to modify vrouter.
required: True
choices: ['present', 'absent', 'update']
pn_name:
description:
- Specify the name of the vRouter.
required: true
pn_vnet:
description:
- Specify the name of the VNET.
- Required for vrouter-create.
pn_service_type:
description:
- Specify if the vRouter is a dedicated or shared VNET service.
choices: ['dedicated', 'shared']
pn_service_state:
description:
- Specify to enable or disable vRouter service.
choices: ['enable', 'disable']
pn_router_type:
description:
- Specify if the vRouter uses software or hardware.
- Note that if you specify hardware as router type, you cannot assign IP
addresses using DHCP. You must specify a static IP address.
choices: ['hardware', 'software']
pn_hw_vrrp_id:
description:
- Specifies the VRRP ID for a hardware vrouter.
pn_router_id:
description:
- Specify the vRouter IP address.
pn_bgp_as:
description:
- Specify the Autonomous System Number(ASN) if the vRouter runs Border
Gateway Protocol(BGP).
pn_bgp_redistribute:
description:
- Specify how BGP routes are redistributed.
choices: ['static', 'connected', 'rip', 'ospf']
pn_bgp_max_paths:
description:
- Specify the maximum number of paths for BGP. This is a number between
1 and 255 or 0 to unset.
pn_bgp_options:
description:
- Specify other BGP options as a whitespaces separated string within
single quotes ''.
pn_rip_redistribute:
description:
- Specify how RIP routes are redistributed.
choices: ['static', 'connected', 'ospf', 'bgp']
pn_ospf_redistribute:
description:
- Specify how OSPF routes are redistributed.
choices: ['static', 'connected', 'bgp', 'rip']
pn_ospf_options:
description:
- Specify other OSPF options as a whitespaces separated string within
single quotes ''.
"""
EXAMPLES = """
- name: create vrouter
pn_vrouter:
state: 'present'
pn_name: 'ansible-vrouter'
pn_vnet: 'ansible-fab-global'
pn_router_id: 208.74.182.1
- name: delete vrouter
pn_vrouter:
state: 'absent'
pn_name: 'ansible-vrouter'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
stdout:
description: The set of responses from the vrouter command.
returned: always
type: list
stderr:
description: The set of error responses from the vrouter command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
VROUTER_EXISTS = None
VROUTER_NAME_EXISTS = None
def pn_cli(module):
"""
    This method generates the cli prefix used to launch the Netvisor cli.
    It parses the username, password and switch parameters from the module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
def check_cli(module, cli):
"""
    This method checks for idempotency using the vrouter-show command.
A switch can have only one vRouter configuration.
If a vRouter already exists on the given switch, return VROUTER_EXISTS as
True else False.
If a vRouter with the given name exists(on a different switch), return
VROUTER_NAME_EXISTS as True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
:return Global Booleans: VROUTER_EXISTS, VROUTER_NAME_EXISTS
"""
name = module.params['pn_name']
# Global flags
global VROUTER_EXISTS, VROUTER_NAME_EXISTS
# Get the name of the local switch
location = cli + ' switch-setup-show format switch-name'
location = shlex.split(location)
out = module.run_command(location)[1]
location = out.split()[1]
# Check for any vRouters on the switch
check_vrouter = cli + ' vrouter-show location %s ' % location
check_vrouter += 'format name no-show-headers'
check_vrouter = shlex.split(check_vrouter)
out = module.run_command(check_vrouter)[1]
if out:
VROUTER_EXISTS = True
else:
VROUTER_EXISTS = False
# Check for any vRouters with the given name
show = cli + ' vrouter-show format name no-show-headers '
show = shlex.split(show)
out = module.run_command(show)[1]
out = out.split()
if name in out:
VROUTER_NAME_EXISTS = True
else:
VROUTER_NAME_EXISTS = False
def run_cli(module, cli):
"""
This method executes the cli command on the target node(s) and returns the
output. The module then exits based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
command = get_command_from_state(state)
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
stderr=err.strip(),
msg="%s operation failed" % command,
changed=False
)
if out:
module.exit_json(
command=print_cli,
stdout=out.strip(),
msg="%s operation completed" % command,
changed=True
)
else:
module.exit_json(
command=print_cli,
msg="%s operation completed" % command,
changed=True
)
def get_command_from_state(state):
"""
    This method returns the CLI command name corresponding to the specified
    state ('present', 'absent' or 'update').
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'vrouter-create'
if state == 'absent':
command = 'vrouter-delete'
if state == 'update':
command = 'vrouter-modify'
return command
def main():
""" This section is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
            state=dict(required=True, type='str',
choices=['present', 'absent', 'update']),
pn_name=dict(required=True, type='str'),
pn_vnet=dict(type='str'),
pn_service_type=dict(type='str', choices=['dedicated', 'shared']),
pn_service_state=dict(type='str', choices=['enable', 'disable']),
pn_router_type=dict(type='str', choices=['hardware', 'software']),
pn_hw_vrrp_id=dict(type='int'),
pn_router_id=dict(type='str'),
pn_bgp_as=dict(type='int'),
pn_bgp_redistribute=dict(type='str', choices=['static', 'connected',
'rip', 'ospf']),
pn_bgp_max_paths=dict(type='int'),
pn_bgp_options=dict(type='str'),
pn_rip_redistribute=dict(type='str', choices=['static', 'connected',
'bgp', 'ospf']),
pn_ospf_redistribute=dict(type='str', choices=['static', 'connected',
'bgp', 'rip']),
pn_ospf_options=dict(type='str'),
pn_vrrp_track_port=dict(type='str')
),
required_if=(
["state", "present", ["pn_name", "pn_vnet"]],
["state", "absent", ["pn_name"]],
["state", "update", ["pn_name"]]
)
)
# Accessing the arguments
state = module.params['state']
name = module.params['pn_name']
vnet = module.params['pn_vnet']
service_type = module.params['pn_service_type']
service_state = module.params['pn_service_state']
router_type = module.params['pn_router_type']
hw_vrrp_id = module.params['pn_hw_vrrp_id']
router_id = module.params['pn_router_id']
bgp_as = module.params['pn_bgp_as']
bgp_redistribute = module.params['pn_bgp_redistribute']
bgp_max_paths = module.params['pn_bgp_max_paths']
bgp_options = module.params['pn_bgp_options']
rip_redistribute = module.params['pn_rip_redistribute']
ospf_redistribute = module.params['pn_ospf_redistribute']
ospf_options = module.params['pn_ospf_options']
vrrp_track_port = module.params['pn_vrrp_track_port']
command = get_command_from_state(state)
# Building the CLI command string
cli = pn_cli(module)
if command == 'vrouter-delete':
check_cli(module, cli)
if VROUTER_NAME_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter with name %s does not exist' % name
)
cli += ' %s name %s ' % (command, name)
else:
if command == 'vrouter-create':
check_cli(module, cli)
if VROUTER_EXISTS is True:
module.exit_json(
skipped=True,
msg='Maximum number of vRouters has been reached on this '
'switch'
)
if VROUTER_NAME_EXISTS is True:
module.exit_json(
skipped=True,
msg='vRouter with name %s already exists' % name
)
cli += ' %s name %s ' % (command, name)
if vnet:
cli += ' vnet ' + vnet
if service_type:
cli += ' %s-vnet-service ' % service_type
if service_state:
cli += ' ' + service_state
if router_type:
cli += ' router-type ' + router_type
if hw_vrrp_id:
cli += ' hw-vrrp-id ' + str(hw_vrrp_id)
if router_id:
cli += ' router-id ' + router_id
if bgp_as:
cli += ' bgp-as ' + str(bgp_as)
if bgp_redistribute:
cli += ' bgp-redistribute ' + bgp_redistribute
if bgp_max_paths:
cli += ' bgp-max-paths ' + str(bgp_max_paths)
if bgp_options:
cli += ' %s ' % bgp_options
if rip_redistribute:
cli += ' rip-redistribute ' + rip_redistribute
if ospf_redistribute:
cli += ' ospf-redistribute ' + ospf_redistribute
if ospf_options:
cli += ' %s ' % ospf_options
if vrrp_track_port:
cli += ' vrrp-track-port ' + vrrp_track_port
run_cli(module, cli)
# AnsibleModule boilerplate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| t0mk/ansible | lib/ansible/modules/network/netvisor/pn_vrouter.py | Python | gpl-3.0 | 13,328 |
#!/usr/bin/env python
from __future__ import unicode_literals
from setuptools import setup
setup(
name='xcache',
version='0.2',
description='clean caches when needed',
author='Sven R. Kunze',
author_email='[email protected]',
url='https://github.com/srkunze/xcache',
license='MIT',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
],
py_modules=['xcache'],
install_requires=[],
)
| srkunze/xcache | setup.py | Python | mit | 573 |
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""Plotextractor configuration."""
__revision__ = "$Id$"
## CFG_PLOTEXTRACTOR_DESY_BASE --
CFG_PLOTEXTRACTOR_DESY_BASE = 'http://www-library.desy.de/preparch/desy/'
## CFG_PLOTEXTRACTOR_DESY_PIECE --
CFG_PLOTEXTRACTOR_DESY_PIECE = '/desy'
| Panos512/invenio | modules/miscutil/lib/plotextractor_config.py | Python | gpl-2.0 | 1,037 |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Generated by Django 1.10.5 on 2017-04-05 08:26
from __future__ import unicode_literals
from django.db import migrations
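# Data migration: copy the legacy gleason_4_* fields of each FocusRegionAnnotation into a
# dedicated Gleason4Element row; the reverse migration simply deletes all Gleason4Element rows.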
def create_gleason_4_elements(apps, schema_editor):
FocusRegionAnnotation = apps.get_model('clinical_annotations_manager', 'FocusRegionAnnotation')
Gleason4Element = apps.get_model('clinical_annotations_manager', 'Gleason4Element')
for annotation in FocusRegionAnnotation.objects.all():
if annotation.gleason_4_path_json:
g4_element = Gleason4Element(
json_path=annotation.gleason_4_path_json,
area=annotation.gleason_4_area,
cellular_density_helper_json=annotation.gleason_4_cellular_density_helper_json,
cellular_density=annotation.gleason_4_cellular_density,
cells_count=annotation.gleason_4_cells_count,
focus_region_annotation=annotation
)
g4_element.save()
def reverse_create_gleason_4_elements(apps, schema_editor):
Gleason4Element = apps.get_model('clinical_annotations_manager', 'Gleason4Element')
Gleason4Element.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('clinical_annotations_manager', '0007_gleason4element'),
]
operations = [
migrations.RunPython(create_gleason_4_elements, reverse_create_gleason_4_elements),
]
| lucalianas/ProMort | promort/clinical_annotations_manager/migrations/0008_auto_20170405_0826.py | Python | mit | 2,509 |
#
#+ Copyright (c) 2014, 2015 Rikard Lindstrom <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from random import *
def show():
form.start()
form.slider("hue_min", "Hue min", 0.0, 1.0, 0.01, form.get("hue_min"))
form.slider("hue_max", "Hue max", 0.0, 1.0, 0.01, form.get("hue_max"))
form.slider("saturation", "Saturation", 0.0, 1.0, 0.01, form.get("saturation"))
form.slider("value", "Value", 0.0, 1.0, 0.01, form.get("value"))
form.slider("decay", "Decay", 0.0, 1.0, 0.01, form.get("decay"))
form.slider("delay", "Delay", 0, 10, 1, form.get("delay"))
form.finnish()
def init():
config["hue_min"] = 0.0
config["hue_max"] = 0.0
config["saturation"] = 1.0
config["value"] = 1.0
config["decay"] = 0.5
config["delay"] = 1
value = [0.0]
hue = [0.0]
def run():
global value
value = [0.0]*strip.length
global hue
hue = [0.0]*strip.length
d = 0
while True:
value = [config["decay"]*x for x in value]
if (d == 0):
make_led()
d = config["delay"]
else: d -= 1
for n in range(0, strip.length):
if stop: return
strip.hsv( hue[n], config["saturation"], value[n] )
strip.show()
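# Pick a random hue between the configured min and max (the order of the two bounds does not matter).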
def range_hue():
if ( config["hue_min"] < config["hue_max"] ):
hmin = config["hue_min"]
hmax = config["hue_max"]
else:
hmin = config["hue_max"]
hmax = config["hue_min"]
hdiff = hmax - hmin
hue = hmin + hdiff * random()
return hue
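# Light one randomly chosen LED at the configured brightness with a hue from the configured range.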
def make_led():
led = int(strip.length * random())
value[led] = config["value"]
	hue[led] = range_hue()
| ornotermes/WebLights | effects/Fading glitter.py | Python | gpl-3.0 | 2,085 |
from ckan.common import c
from ckanext.ytp.request.helper import get_user_member
import logging
log = logging.getLogger(__name__)
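# Shared check: the action is allowed only if the logged-in user holds a membership
# in the given state for the requested organization.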
def _member_common_access_check(context, data_dict, status):
if not c.userobj:
return {'success': False}
organization_id = data_dict.get("organization_id")
if not organization_id:
return {'success': False}
member = get_user_member(organization_id, status)
if not member:
return {'success': False}
if member.table_name == 'user' and member.table_id == c.userobj.id and member.state == status:
return {'success': True}
return {'success': False}
def member_request_membership_cancel(context, data_dict):
return _member_common_access_check(context, data_dict, 'active')
def member_request_cancel(context, data_dict):
return _member_common_access_check(context, data_dict, 'pending')
| yhteentoimivuuspalvelut/ckanext-ytp-request | ckanext/ytp/request/logic/auth/delete.py | Python | agpl-3.0 | 887 |
#!/bin/python3
import sys
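# Day 16: print the input as an integer, or "Bad String" if it cannot be parsed.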
S = input().strip()
try:
S = int(S)
print(int(S))
except ValueError:
print("Bad String") | kyle8998/Practice-Coding-Questions | hackerrank/30-days-of-code/day-16.py | Python | unlicense | 140 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetProfile
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-talent
# [START jobs_v4beta1_generated_ProfileService_GetProfile_sync]
from google.cloud import talent_v4beta1
def sample_get_profile():
# Create a client
client = talent_v4beta1.ProfileServiceClient()
# Initialize request argument(s)
request = talent_v4beta1.GetProfileRequest(
name="name_value",
)
# Make the request
response = client.get_profile(request=request)
# Handle the response
print(response)
# [END jobs_v4beta1_generated_ProfileService_GetProfile_sync]
| googleapis/python-talent | samples/generated_samples/jobs_v4beta1_generated_profile_service_get_profile_sync.py | Python | apache-2.0 | 1,440 |
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth.models import User
from core.db.manager import DataHubManager
import random
import string
from collections import namedtuple
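# Build a random string of ASCII letters of the given length.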
def random_slug(length):
return ''.join(
random.SystemRandom().choice(
string.ascii_letters) for _ in range(length))
class APIEndpointTests(APITestCase):
"""docstring for APIEndpointTests"""
def setUp(self):
self.username = "delete_me_api_test_user"
self.email = self.username + "@mit.edu"
self.password = self.username
self.user = User.objects.create_user(
self.username, self.email, self.password)
self.client.force_authenticate(user=self.user)
def tearDown(self):
DataHubManager.remove_user(self.username)
class CurrentUserTests(APIEndpointTests):
def test_get_user(self):
url = reverse('api:user')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {
'username': self.username,
'last_login': None,
'email': self.email,
})
class CurrentUserReposTests(APIEndpointTests):
def test_get_user_repos(self):
url = reverse('api:user_repos')
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'repos': []})
class ReposTests(APIEndpointTests):
def test_get_repos(self):
url = reverse('api:repos_all')
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {'repos': []})
class RepoTests(APIEndpointTests):
def test_get_repo(self):
# Make sure it's a 404 when there are no repos.
repo_name = 'repo_one'
url = reverse('api:repo',
kwargs={'repo_base': self.username,
'repo_name': repo_name})
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data,
{'error_type': 'LookupError',
'detail': 'Invalid repository name: repo_one'})
# Create a repo and make sure it's a 200.
with DataHubManager(self.username) as manager:
manager.create_repo(repo_name)
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
{'files': [],
'tables': [],
'collaborators': [],
'views': [],
'cards': [],
'owner': {'username': u'delete_me_api_test_user'}})
def test_patch_repo(self):
repo_name = 'repo_one'
url = reverse('api:repo',
kwargs={'repo_base': self.username,
'repo_name': repo_name})
# Try renaming a repo that doesn't exist
response = self.client.patch(
url, {'new_name': repo_name}, follow=True, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
error_detail = ('Repo not found. '
'You must specify a repo in your query. '
'i.e. select * from REPO_NAME.TABLE_NAME. ')
self.assertEqual(response.data,
{'error_type': 'LookupError',
'detail': error_detail})
with DataHubManager(self.username) as manager:
manager.create_repo(repo_name)
# Try renaming a repo to its current name
response = self.client.patch(
url, {'new_name': 'repo_one'}, follow=True, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data,
{'error_type': 'ValueError',
'detail': 'A repo with that name already exists.'})
# Try renaming for real
response = self.client.patch(
url, {'new_name': 'repo_five_thousand'},
follow=True, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
{'files': [],
'tables': [],
'collaborators': [],
'views': [],
'cards': [],
'owner': {'username': u'delete_me_api_test_user'}})
def test_delete_repo(self):
# Make sure it's a 404 when there are no repos.
repo_name = 'repo_one'
url = reverse('api:repo',
kwargs={'repo_base': self.username,
'repo_name': repo_name})
response = self.client.delete(url, follow=True)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
error_detail = ('Repo not found. '
'You must specify a repo in your query. '
'i.e. select * from REPO_NAME.TABLE_NAME. ')
self.assertEqual(response.data,
{'error_type': 'LookupError',
'detail': error_detail})
        # Create a repo and make sure the delete returns a 204.
with DataHubManager(self.username) as manager:
manager.create_repo(repo_name)
response = self.client.delete(url, follow=True)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(response.data, None)
class ReposForUserTests(APIEndpointTests):
def _expected_response(self, owner, repo_names):
result = []
base = 'http://testserver/api/v1/repos/'
for repo in repo_names:
result.append({'owner': owner,
'href': '{0}{1}/{2}'.format(base, owner, repo),
'repo_name': repo})
return {'repos': result}
def test_get_repos(self):
# Expect an empty array when the user has no repos
url = reverse('api:repos_specific',
kwargs={'repo_base': self.username})
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, self._expected_response(
self.username, []))
# Add some repos and expect they show up
repos = ['foo', 'bar', 'baz']
with DataHubManager(self.username) as manager:
[manager.create_repo(repo) for repo in repos]
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Sort the expected names because the endpoint returns repos sorted by name
self.assertEqual(response.data, self._expected_response(
self.username, sorted(repos)))
def test_create_repo(self):
# Create two repos
url = reverse('api:repos_specific',
kwargs={'repo_base': self.username})
response = self.client.post(
url, {'repo_name': 'repo_one'}, follow=True, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data, self._expected_response(
self.username, ['repo_one']))
response = self.client.post(
url, {'repo_name': 'repo_two'}, follow=True, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data, self._expected_response(
self.username, ['repo_one', 'repo_two']))
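# Each Query bundles a SQL statement with the expected HTTP status and the expected JSON/CSV payloads.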
Query = namedtuple(
'Query', ['sql', 'status_code', 'expect_json', 'expect_csv'])
class QueryTests(APIEndpointTests):
def _queries(self, table):
return [
Query(sql="""CREATE TABLE """ + table + """ (
name varchar (255) NOT NULL,
deliciousness numeric,
is_deep_fried boolean);
""",
status_code=status.HTTP_200_OK,
expect_json=[{'status': 'success'}],
expect_csv=''),
Query(sql="SELECT * FROM " + table + ";",
status_code=status.HTTP_200_OK,
expect_json=[],
expect_csv=''),
Query(sql="INSERT INTO " + table +
" VALUES ('reuben', 25, FALSE);",
status_code=status.HTTP_200_OK,
expect_json=[{'status': 'success'}],
expect_csv=''),
Query(sql="SELECT * FROM " + table + ";",
status_code=status.HTTP_200_OK,
expect_json=[
{'is_deep_fried': False,
'deliciousness': 25,
'name': 'reuben'}],
expect_csv="deliciousness,is_deep_fried,name\r\n"
"25,False,reuben"),
Query(sql="DROP TABLE " + table,
status_code=status.HTTP_200_OK,
expect_json=[{'status': 'success'}],
expect_csv=''),
]
def test_post_query_with_repo(self):
repo_name = 'repo_one'
table_name = 'sandwiches'
repo_table = repo_name + '.' + table_name
queries = self._queries(repo_table)
with DataHubManager(self.username) as manager:
manager.create_repo(repo_name)
url = reverse('api:query_with_repo',
kwargs={'repo_base': self.username,
'repo_name': repo_name})
for q in queries:
response = self.client.post(
url, {'query': q.sql}, follow=True, format='json')
self.assertEqual(response.status_code, q.status_code)
self.assertEqual(response.data.get('rows'), q.expect_json)
def test_post_query_csv_accept_header(self):
repo_name = 'repo_one'
table_name = 'sandwiches'
repo_table = repo_name + '.' + table_name
queries = self._queries(repo_table)
with DataHubManager(self.username) as manager:
manager.create_repo(repo_name)
url = reverse('api:query',
kwargs={'repo_base': self.username})
for q in queries:
# import pdb; pdb.set_trace()
response = self.client.post(
url, {'query': q.sql}, follow=True, format='json',
**{'HTTP_ACCEPT': 'text/csv'})
self.assertEqual(response.status_code, q.status_code)
self.assertEqual(response.content.strip(), q.expect_csv)
def test_post_query_json_accept_header(self):
repo_name = 'repo_one'
table_name = 'sandwiches'
repo_table = repo_name + '.' + table_name
queries = self._queries(repo_table)
with DataHubManager(self.username) as manager:
manager.create_repo(repo_name)
url = reverse('api:query',
kwargs={'repo_base': self.username})
for q in queries:
# import pdb; pdb.set_trace()
response = self.client.post(
url, {'query': q.sql}, follow=True, format='json',
**{'HTTP_ACCEPT': 'application/json'})
self.assertEqual(response.status_code, q.status_code)
self.assertEqual(response.data.get('rows'), q.expect_json)
def test_post_query_csv_suffix(self):
repo_name = 'repo_one'
table_name = 'sandwiches'
repo_table = repo_name + '.' + table_name
queries = self._queries(repo_table)
with DataHubManager(self.username) as manager:
manager.create_repo(repo_name)
url = reverse('api:query',
kwargs={'repo_base': self.username}) + '.csv'
for q in queries:
# import pdb; pdb.set_trace()
response = self.client.post(
url, {'query': q.sql}, follow=True, format='json')
self.assertEqual(response.status_code, q.status_code)
self.assertEqual(response.content.strip(), q.expect_csv)
def test_post_query_json_suffix(self):
repo_name = 'repo_one'
table_name = 'sandwiches'
repo_table = repo_name + '.' + table_name
queries = self._queries(repo_table)
with DataHubManager(self.username) as manager:
manager.create_repo(repo_name)
url = reverse('api:query',
kwargs={'repo_base': self.username}) + '.json'
for q in queries:
# import pdb; pdb.set_trace()
response = self.client.post(
url, {'query': q.sql}, follow=True, format='json')
self.assertEqual(response.status_code, q.status_code)
self.assertEqual(response.data.get('rows'), q.expect_json)
# Test that pagination works
# Test that responses give metadata
| datahuborg/datahub | src/integration_tests/test_api_endpoints.py | Python | mit | 13,248 |
from django import forms
from .models import Page
class PageForm(forms.ModelForm):
class Meta:
model = Page
fields = ["content", "language", "path"]
widgets = {
"language": forms.HiddenInput(),
"path": forms.HiddenInput(),
}
| osmfj/django-restcms | restcms/forms.py | Python | bsd-2-clause | 289 |
from pos.models.user import User, UserSession
from rest_framework import serializers
class CrewSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('first_name', 'last_name', 'credit', 'card', 'is_cashier')
class CrewSessionSerializer(serializers.ModelSerializer):
class Meta:
model = UserSession
fields = ('user', 'django_user')
| nuxis/p0sX-server | p0sx/pos/serializers/user.py | Python | mit | 400 |
import json
import logging
import os
import sys
import socket
import six
from lymph.exceptions import RegistrationFailure, SocketNotCreated
from lymph.core.components import Componentized
from lymph.core.events import Event
from lymph.core.monitoring import metrics
from lymph.core.monitoring.pusher import MonitorPusher
from lymph.core.monitoring.aggregator import Aggregator
from lymph.core.services import ServiceInstance, Service
from lymph.core.rpc import ZmqRPCServer
from lymph.core.interfaces import DefaultInterface
from lymph.core.plugins import Hook
from lymph.core import trace
logger = logging.getLogger(__name__)
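# Build the configured event system and service container (default: ServiceContainer) from a config object.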
def create_container(config):
if 'registry' in config:
logger.warning('global `registry` configuration is deprecated. please use `container.registry` instead.')
config.set('container.registry', config.get_raw('registry'))
event_system = config.create_instance('event_system')
container = config.create_instance(
'container',
default_class='lymph.core.container:ServiceContainer',
events=event_system,
)
return container
class ServiceContainer(Componentized):
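    """Runtime container that hosts service interfaces and wires them to RPC,
    the service registry, the event system and monitoring."""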
def __init__(self, rpc=None, registry=None, events=None, log_endpoint=None, service_name=None, debug=False, monitor_endpoint=None, pool=None):
if pool is None:
pool = trace.Group()
super(ServiceContainer, self).__init__(error_hook=Hook('error_hook'), pool=pool)
self.log_endpoint = log_endpoint
self.backdoor_endpoint = None
self.service_name = service_name
self.fqdn = socket.getfqdn()
self.http_request_hook = Hook('http_request_hook')
self.server = rpc
self.service_registry = registry
self.event_system = events
self.installed_interfaces = {}
self.installed_plugins = []
self.debug = debug
self.monitor_endpoint = monitor_endpoint
self.metrics_aggregator = Aggregator(self._get_metrics, service=self.service_name, host=self.fqdn)
if self.service_registry:
self.add_component(self.service_registry)
if self.event_system:
self.add_component(self.event_system)
self.event_system.install(self)
self.monitor = self.install(MonitorPusher, aggregator=self.metrics_aggregator, endpoint=self.monitor_endpoint, interval=5)
self.add_component(rpc)
rpc.request_handler = self.handle_request
self.install_interface(DefaultInterface, name='lymph')
@classmethod
def from_config(cls, config, **explicit_kwargs):
kwargs = dict(config)
kwargs.pop('class', None)
kwargs.setdefault('monitor_endpoint', os.environ.get('LYMPH_MONITOR'))
kwargs.setdefault('service_name', os.environ.get('LYMPH_SERVICE_NAME'))
kwargs['registry'] = config.create_instance('registry')
kwargs['rpc'] = config.create_instance('rpc', default_class=ZmqRPCServer, ip=kwargs.pop('ip', None), port=kwargs.pop('port', None))
kwargs['pool'] = config.create_instance('pool', default_class='lymph.core.trace:Group')
for key, value in six.iteritems(explicit_kwargs):
if value is not None:
kwargs[key] = value
return cls(**kwargs)
def excepthook(self, type, value, traceback):
logger.log(logging.CRITICAL, 'Uncaught exception', exc_info=(type, value, traceback))
self.error_hook((type, value, traceback))
@property
def endpoint(self):
return self.server.endpoint
@property
def identity(self):
return self.server.identity
def install_interface(self, cls, **kwargs):
interface = self.install(cls, **kwargs)
self.installed_interfaces[interface.name] = interface
for plugin in self.installed_plugins:
plugin.on_interface_installation(interface)
return interface
def install_plugin(self, cls, **kwargs):
plugin = self.install(cls, **kwargs)
self.installed_plugins.append(plugin)
return plugin
def get_shared_socket_fd(self, port):
fds = json.loads(os.environ.get('LYMPH_SHARED_SOCKET_FDS', '{}'))
try:
return fds[str(port)]
except KeyError:
raise SocketNotCreated
@property
def service_types(self):
return self.installed_interfaces.keys()
def subscribe(self, handler, **kwargs):
return self.event_system.subscribe(handler, **kwargs)
def unsubscribe(self, handler):
self.event_system.unsubscribe(handler)
def get_instance_description(self, interface):
description = interface.get_description()
description.update({
'endpoint': self.endpoint,
'identity': self.identity,
'log_endpoint': self.log_endpoint,
'backdoor_endpoint': self.backdoor_endpoint,
'fqdn': self.fqdn,
'ip': self.server.ip,
})
return description
def start(self, register=True):
logger.info('starting %s (%s) at %s (pid=%s)', self.service_name, ', '.join(self.service_types), self.endpoint, os.getpid())
self.on_start()
self.metrics_aggregator.add_tags(identity=self.identity)
if register:
for interface_name, interface in six.iteritems(self.installed_interfaces):
if not interface.register_with_coordinator:
continue
instance = ServiceInstance(**self.get_instance_description(interface))
try:
self.service_registry.register(interface_name, instance)
except RegistrationFailure:
logger.error("registration failed %s, %s", interface_name, interface)
self.stop()
def stop(self, **kwargs):
self.on_stop()
self.pool.kill()
def join(self):
self.pool.join()
def lookup(self, address):
if '://' not in address:
return self.service_registry.get(address)
instance = ServiceInstance(address)
return Service(address, instances=[instance])
def discover(self):
return self.service_registry.discover()
def emit_event(self, event_type, payload, headers=None, **kwargs):
headers = headers or {}
headers.setdefault('trace_id', trace.get_id())
event = Event(event_type, payload, source=self.identity, headers=headers)
self.event_system.emit(event, **kwargs)
def send_request(self, address, subject, body, headers=None):
service = self.lookup(address)
return self.server.send_request(service, subject, body, headers=headers)
def handle_request(self, channel):
interface_name, func_name = channel.request.subject.rsplit('.', 1)
try:
interface = self.installed_interfaces[interface_name]
except KeyError:
logger.warning('unsupported service type: %s', interface_name)
channel.nack(True)
return
try:
interface.handle_request(func_name, channel)
except Exception:
logger.exception('Request error:')
exc_info = sys.exc_info()
try:
self.error_hook(exc_info, extra={
'service': self.service_name,
'interface': interface_name,
'func_name': func_name,
'trace_id': trace.get_id(),
})
finally:
del exc_info
try:
channel.nack(True)
except:
logger.exception('failed to send automatic NACK')
def _get_metrics(self):
for metric in super(ServiceContainer, self)._get_metrics():
yield metric
yield metrics.RawMetric('greenlets.count', len(self.pool))
| alazaro/lymph | lymph/core/container.py | Python | apache-2.0 | 7,867 |
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# yapf: disable
import os
import subprocess
import tempfile
import binary_vars
libvpx_threads = 4
INTRA_IVAL_LOW_LATENCY = 60
RAV1E_SPEED = 4
SVT_SPEED = 2
AOM_SPEED = 2
AOM_RT_SPEED = 5
RAV1E_RT_SPEED = 7
SVT_RT_SPEED = 5
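# Build the rav1e command line for a single-layer AV1 encode; the two-pass 'offline' mode
# chains a fast first pass and a slower second pass with '&&'.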
def rav1e_command(job, temp_dir):
assert job['num_spatial_layers'] == 1
assert job['num_temporal_layers'] == 1
assert job['codec'] == 'av1'
assert job['encoder'] in ['rav1e-1pass', 'rav1e-rt', 'rav1e-all_intra', 'rav1e-offline']
(fd, encoded_filename) = tempfile.mkstemp(dir=temp_dir, suffix=".ivf")
os.close(fd)
(fd, statfile) = tempfile.mkstemp(dir=temp_dir, suffix=".stat")
os.close(fd)
clip = job['clip']
fps = int(clip['fps'] + 0.5)
common_params = [
'-y',
'--output', encoded_filename,
clip['y4m_file']
]
if job['param'] == 'bitrate':
assert len(job['target_bitrates_kbps'])
control_params = [
'--bitrate', job['target_bitrates_kbps'][-1]
]
else:
assert job['qp_value'] != -1
control_params = [
'--quantizer', (job['qp_value'] * 4)
]
encoder = job['encoder']
if encoder == 'rav1e-1pass':
codec_params = [
'--speed', RAV1E_SPEED,
'--low-latency',
'--keyint', INTRA_IVAL_LOW_LATENCY
]
elif encoder == 'rav1e-rt':
codec_params = [
'--low-latency',
'--speed', RAV1E_RT_SPEED,
'--keyint', INTRA_IVAL_LOW_LATENCY
]
elif encoder == 'rav1e-all_intra':
codec_params = [
'--speed', '4',
'--keyint', '1'
]
if encoder == 'rav1e-offline':
pass1_params = [
'--speed', '10',
'--tiles', 8,
'--tune', 'Psychovisual',
'--first-pass', statfile,
]
pass2_params = [
'--second-pass', statfile,
'--keyint', '60',
'--speed', '4'
]
if 'offline' in encoder:
first_pass_command = [binary_vars.RAV1E_ENC_BIN] + pass1_params + control_params + common_params
second_pass_command = [binary_vars.RAV1E_ENC_BIN] + pass2_params + control_params + common_params
command = first_pass_command + ['&&'] + second_pass_command
command = [str(flag) for flag in command]
else:
command = [binary_vars.RAV1E_ENC_BIN] + codec_params + control_params + common_params
command = [str(flag) for flag in command]
encoded_files = [{'spatial-layer': 0,
'temporal-layer': 0, 'filename': encoded_filename
}]
return command, encoded_files
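# Build the SVT-AV1 command line; the two-pass 'offline' mode runs the encoder twice sharing a stat file.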
def svt_command(job, temp_dir):
assert job['num_spatial_layers'] == 1
assert job['num_temporal_layers'] == 1
assert job['codec'] == 'av1'
assert job['encoder'] in ['svt-1pass', 'svt-rt', 'svt-offline', 'svt-all_intra']
(fd, encoded_filename) = tempfile.mkstemp(dir=temp_dir, suffix=".ivf")
os.close(fd)
clip = job['clip']
fps = int(clip['fps'] + 0.5)
common_params = [
'--profile', 0,
'--fps', fps,
'-w', clip['width'],
'-h', clip['height'],
'-i', clip['yuv_file'],
'-b', encoded_filename,
]
if job['param'] == 'bitrate':
assert len(job['target_bitrates_kbps'])
control_params = [
'--tbr', job['target_bitrates_kbps'][0],
'--rc', 1
]
else:
assert job['qp_value'] != -1
control_params = [
'--rc', '0',
'-q', job['qp_value'],
'--min-qp', job['qp_value'],
'--max-qp', (job['qp_value'] + 8),
'--keyint', (INTRA_IVAL_LOW_LATENCY - 1)
]
encoder = job['encoder']
if encoder == 'svt-1pass':
codec_params = [
'--preset', "8",
]
elif encoder == 'svt-rt':
codec_params = [
'--scm', 0,
'--lookahead', 0,
'--preset', SVT_RT_SPEED,
]
elif encoder == 'svt-all_intra':
codec_params = [
'--scm', 0,
'--preset', SVT_SPEED,
]
elif encoder == 'svt-offline':
(fd, statfile) = tempfile.mkstemp(dir=temp_dir, suffix='.stat')
os.close(fd)
first_pass_params = [
'--scm', 0,
'--preset', 8,
'--output-stat-file', statfile
]
second_pass_params = [
'--scm', 0,
'--preset', SVT_SPEED,
'--tile-columns', 3,
'--enable-altrefs', 1,
'--altref-nframes', 7,
'--altref-strength', 5,
'--input-stat-file', statfile
]
if 'offline' in encoder:
first_pass_command = [ binary_vars.SVT_ENC_BIN ] + first_pass_params + control_params + common_params
second_pass_command = [ binary_vars.SVT_ENC_BIN ] + second_pass_params + control_params + common_params
command = first_pass_command + ['&&'] + second_pass_command
else:
command = [binary_vars.SVT_ENC_BIN] + codec_params + control_params + common_params
command = [str(flag) for flag in command]
encoded_files = [{'spatial-layer': 0,
'temporal-layer': 0, 'filename': encoded_filename
}]
return command, encoded_files
def aom_command(job, temp_dir):
assert job['num_spatial_layers'] == 1
assert job['num_temporal_layers'] == 1
assert job['codec'] == 'av1'
assert job['encoder'] in ['aom-good', 'aom-rt', 'aom-all_intra', 'aom-offline']
(fd, first_pass_file) = tempfile.mkstemp(dir=temp_dir, suffix=".fpf")
os.close(fd)
(fd, encoded_filename) = tempfile.mkstemp(dir=temp_dir, suffix=".webm")
os.close(fd)
clip = job['clip']
fps = int(clip['fps'] + 0.5)
common_params = [
'--codec=av1',
'--width=%d' % clip['width'],
'--height=%d' % clip['height'],
'--output=%s' % encoded_filename,
clip['yuv_file']
]
if job['param'] == 'bitrate':
assert len(job['target_bitrates_kbps'])
control_params = [
'--target-bitrate=%d' % job['target_bitrates_kbps'][0],
'--end-usage=cbr'
]
else:
assert job['qp_value'] != -1
control_params = [
'--min-q=%d' % job['qp_value'],
'--max-q=%d' % (job['qp_value'] + 8),
'--end-usage=q'
]
encoder = job['encoder']
if encoder == 'aom-good':
codec_params = [
'--good',
"-p", "2",
"--lag-in-frames=25",
'--cpu-used=3',
"--auto-alt-ref=1",
"--kf-max-dist=150",
"--kf-min-dist=0",
"--drop-frame=0",
"--static-thresh=0",
"--bias-pct=50",
"--minsection-pct=0",
"--maxsection-pct=2000",
"--arnr-maxframes=7",
"--arnr-strength=5",
"--sharpness=0",
"--undershoot-pct=100",
"--overshoot-pct=100",
"--frame-parallel=0",
"--tile-columns=0",
"--profile=0"
]
elif encoder == 'aom-all_intra':
codec_params = [
'--cpu-used=4',
'--kf-max-dist=1',
'--end-usage=q'
]
elif encoder == 'aom-rt':
codec_params = [
'--cpu-used=%d' % AOM_RT_SPEED,
'--disable-warning-prompt',
'--enable-tpl-model=0',
'--deltaq-mode=0',
'--sb-size=0',
'--ivf',
'--profile=0',
'--static-thresh=0',
'--undershoot-pct=50',
'--overshoot-pct=50',
'--buf-sz=1000',
'--buf-initial-sz=500',
'--buf-optimal-sz=600',
'--max-intra-rate=300',
'--passes=1',
'--rt',
'--lag-in-frames=0',
'--noise-sensitivity=0',
'--error-resilient=1',
]
elif encoder == 'aom-offline':
codec_params = [
'--good',
"--passes=2",
'--threads=0',
"--lag-in-frames=25",
'--cpu-used=%d' % AOM_SPEED,
"--auto-alt-ref=1",
"--kf-max-dist=150",
"--kf-min-dist=0",
"--drop-frame=0",
"--static-thresh=0",
"--bias-pct=50",
"--minsection-pct=0",
"--maxsection-pct=2000",
"--arnr-maxframes=7",
"--arnr-strength=5",
"--sharpness=0",
"--undershoot-pct=25",
"--overshoot-pct=25",
"--frame-parallel=1",
"--tile-columns=3",
"--profile=0",
]
command = [binary_vars.AOM_ENC_BIN] + codec_params + control_params + common_params
encoded_files = [{'spatial-layer': 0,
'temporal-layer': 0, 'filename': encoded_filename}]
return (command, encoded_files)
def libvpx_tl_command(job, temp_dir):
# Parameters are intended to be as close as possible to realtime settings used
# in WebRTC.
assert job['num_temporal_layers'] <= 3
# TODO(pbos): Account for low resolution CPU levels (see below).
codec_cpu = 6 if job['codec'] == 'vp8' else 7
layer_strategy = 8 if job['num_temporal_layers'] == 2 else 10
outfile_prefix = '%s/out' % temp_dir
clip = job['clip']
fps = int(clip['fps'] + 0.5)
command = [
binary_vars.VPX_SVC_ENC_BIN,
clip['yuv_file'],
outfile_prefix,
job['codec'],
clip['width'],
clip['height'],
'1',
fps,
codec_cpu,
'0',
libvpx_threads,
layer_strategy
] + job['target_bitrates_kbps']
command = [str(i) for i in command]
encoded_files = [{'spatial-layer': 0, 'temporal-layer': i, 'filename': "%s_%d.ivf" % (outfile_prefix, i)} for i in range(job['num_temporal_layers'])]
    return (command, encoded_files)
def libvpx_command(job, temp_dir):
# Parameters are intended to be as close as possible to realtime settings used
# in WebRTC.
if (job['num_temporal_layers'] > 1):
return libvpx_tl_command(job, temp_dir)
assert job['num_spatial_layers'] == 1
# TODO(pbos): Account for low resolutions (use -4 and 5 for CPU levels).
common_params = [
"--lag-in-frames=0",
"--error-resilient=1",
"--kf-min-dist=3000",
"--kf-max-dist=3000",
"--static-thresh=1",
"--end-usage=cbr",
"--undershoot-pct=100",
"--overshoot-pct=15",
"--buf-sz=1000",
"--buf-initial-sz=500",
"--buf-optimal-sz=600",
"--max-intra-rate=900",
"--resize-allowed=0",
"--drop-frame=0",
"--passes=1",
"--rt",
"--noise-sensitivity=0",
"--threads=%d" % libvpx_threads,
]
if job['codec'] == 'vp8':
codec_params = [
"--codec=vp8",
"--cpu-used=-6",
"--min-q=2",
"--max-q=56",
"--screen-content-mode=0",
]
elif job['codec'] == 'vp9':
codec_params = [
"--codec=vp9",
"--cpu-used=7",
"--min-q=2",
"--max-q=52",
"--aq-mode=3",
]
(fd, encoded_filename) = tempfile.mkstemp(dir=temp_dir, suffix=".webm")
os.close(fd)
clip = job['clip']
# Round FPS. For quality comparisons it's likely close enough to not be
# misrepresentative. From a quality perspective there's no point to fully
# respecting NTSC or other non-integer FPS formats here.
fps = int(clip['fps'] + 0.5)
command = [binary_vars.VPX_ENC_BIN] + codec_params + common_params + [
'--fps=%d/1' % fps,
'--target-bitrate=%d' % job['target_bitrates_kbps'][0],
'--width=%d' % clip['width'],
'--height=%d' % clip['height'],
'--output=%s' % encoded_filename,
clip['yuv_file']
]
encoded_files = [{'spatial-layer': 0, 'temporal-layer': 0, 'filename': encoded_filename}]
return (command, encoded_files)
def openh264_command(job, temp_dir):
assert job['codec'] == 'h264'
# TODO(pbos): Consider AVC support.
assert job['num_spatial_layers'] == 1
# TODO(pbos): Add temporal-layer support (-numtl).
assert job['num_temporal_layers'] == 1
(fd, encoded_filename) = tempfile.mkstemp(dir=temp_dir, suffix=".264")
os.close(fd)
clip = job['clip']
command = [
binary_vars.H264_ENC_BIN,
'-rc', 1,
'-denois', 0,
'-scene', 0,
'-bgd', 0,
'-fs', 0,
'-tarb', job['target_bitrates_kbps'][0],
'-sw', clip['width'],
'-sh', clip['height'],
'-frin', clip['fps'],
'-org', clip['yuv_file'],
'-bf', encoded_filename,
'-numl', 1,
'-dw', 0, clip['width'],
'-dh', 0, clip['height'],
'-frout', 0, clip['fps'],
'-ltarb', 0, job['target_bitrates_kbps'][0],
]
encoded_files = [{'spatial-layer': 0, 'temporal-layer': 0, 'filename': encoded_filename}]
return ([str(i) for i in command], encoded_files)
def yami_command(job, temp_dir):
assert job['num_spatial_layers'] == 1
assert job['num_temporal_layers'] == 1
(fd, encoded_filename) = tempfile.mkstemp(dir=temp_dir, suffix=".ivf")
os.close(fd)
clip = job['clip']
# Round FPS. For quality comparisons it's likely close enough to not be
# misrepresentative. From a quality perspective there's no point to fully
# respecting NTSC or other non-integer FPS formats here.
fps = int(clip['fps'] + 0.5)
command = [
binary_vars.YAMI_ENC_BIN,
'--rcmode', 'CBR',
'--ipperiod', 1,
'--intraperiod', 3000,
'-c', job['codec'].upper(),
'-i', clip['yuv_file'],
'-W', clip['width'],
'-H', clip['height'],
'-f', fps,
'-o', encoded_filename,
'-b', job['target_bitrates_kbps'][0],
]
encoded_files = [{'spatial-layer': 0, 'temporal-layer': 0, 'filename': encoded_filename}]
return ([str(i) for i in command], encoded_files)
def get_encoder_command(encoder):
encoders = [
'aom-good', 'aom-rt', 'aom-all_intra', 'aom-offline', ## AOM CONFIGS
'rav1e-1pass', 'rav1e-rt', 'rav1e-all_intra', 'rav1e-offline', ## RAV1E CONFIGS
'svt-1pass', 'svt-rt', 'svt-all_intra', 'svt-offline', ## SVT CONFIGS
'openh264', ## OPENH264 CONFIGS
'libvpx-rt', ## LIBVPX CONFIGS
'yami' ## YAMI CONFIGS
]
if encoder not in encoders:
return None
if 'aom' in encoder:
return aom_command
elif 'rav1e' in encoder:
return rav1e_command
elif 'svt' in encoder:
return svt_command
elif 'libvpx' in encoder:
return libvpx_command
elif 'openh264' in encoder:
return openh264_command
elif 'yami' in encoder:
return yami_command
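# --- Illustrative usage sketch (not part of the original module) ---
# Shows one way a caller might dispatch a job dict through get_encoder_command()
# and run the resulting command. The job keys mirror the ones asserted in the
# builders above; the subprocess handling (shell join for the '&&'-separated
# two-pass commands) is an assumption, not the project's actual runner.
def _example_run_job(job, temp_dir):
    import subprocess
    build_command = get_encoder_command(job['encoder'])
    if build_command is None:
        raise ValueError('unsupported encoder: %s' % job['encoder'])
    command, encoded_files = build_command(job, temp_dir)
    if '&&' in command:
        # Two-pass encoders return "pass1 && pass2"; run them through a shell.
        subprocess.check_call(' '.join(command), shell=True)
    else:
        subprocess.check_call(command)
    return encoded_files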
| googleinterns/av1-codec-comparison | rtc-video-quality/encoder_commands.py | Python | apache-2.0 | 15,457 |
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
import os,sys
import urllib
############################
# COMMAND LINE INTERFACE #
############################
import argparse
parser = argparse.ArgumentParser(description="Morfix wrapper for console")
parser.add_argument("-v", "--verbose", help="Make the output verbose", action="store_true")
parser.add_argument("word", help="Input word", action="store", type=lambda s: unicode(s,'utf-8'))
parser.add_argument("-l", help="Input language (en or heb)", action="store", default="heb")
def printv(arg1):
"""
Verbose print
"""
if VERBOSE:
print(arg1)
def heb2en(url):
result=os.popen(
'curl -s '+url+' |\
grep -B 4 "default_trans" |\
sed "\
/class.\+diber/d ; \
s/sound.trans.*$//g; s/\s\+/ /g; s/^\s\+//g ; \
s/<\/div>.*$//g ; \
s/<\/div>.*$//g ; \
/--/d ; \
/^$/d ; \
/translation_en/d ; \
s/<.*default_trans.*>// ; \
s/^<span.*word\\">//; \
s/<\/span>// ; \
" \
'
).read()
print(result)
if __name__=="__main__" :
# Parse arguments
args = parser.parse_args()
if args.verbose:
print("Making the command verbose...")
VERBOSE=True
else:
VERBOSE=False
word=args.word
printv(word)
word_urlencoded=urllib.quote_plus(word.encode("utf-8"))
url="www.morfix.co.il/"+word_urlencoded
printv(url)
    # NOTE: both input languages currently go through the same heb2en() scraper.
    if args.l=="heb":
        heb2en(url)
    else:
        heb2en(url)
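# --- Illustrative invocations (not part of the original script) ---
# Assumes curl is available on PATH, since heb2en() shells out to it:
#   ./heb.py שלום
#   ./heb.py -v -l heb שלום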
# vim:ft=python:
| alejandrogallo/dotfiles | bin/heb.py | Python | unlicense | 1,495 |
import logging
import datetime
import pymysql
from urllib.parse import urlsplit
log = logging.getLogger('tyggbot')
class LinkTrackerLink:
@classmethod
def load(cls, cursor, url):
link = cls()
cursor.execute('SELECT * FROM `tb_link_data` WHERE `url`=%s', [url])
row = cursor.fetchone()
if row:
# We found a link matching this URL in the database!
link.id = row['id']
link.url = row['url']
link.times_linked = row['times_linked']
link.first_linked = row['first_linked']
link.last_linked = row['last_linked']
link.needs_sync = False
else:
# No link was found with this URL, create a new one!
link.id = -1
link.url = url
link.times_linked = 0
link.first_linked = datetime.datetime.now()
link.last_linked = datetime.datetime.now()
link.needs_sync = False
return link
def increment(self):
self.times_linked += 1
self.last_linked = datetime.datetime.now()
self.needs_sync = True
def sync(self, cursor):
_first_linked = self.first_linked.strftime('%Y-%m-%d %H:%M:%S')
_last_linked = self.last_linked.strftime('%Y-%m-%d %H:%M:%S')
if self.id == -1:
cursor.execute('INSERT INTO `tb_link_data` (`url`, `times_linked`, `first_linked`, `last_linked`) VALUES (%s, %s, %s, %s)',
[self.url, self.times_linked, _first_linked, _last_linked])
self.id = cursor.lastrowid
else:
cursor.execute('UPDATE `tb_link_data` SET `times_linked`=%s, `last_linked`=%s WHERE `id`=%s',
[self.times_linked, _last_linked, self.id])
class LinkTracker:
def __init__(self, sqlconn):
self.sqlconn = sqlconn
self.links = {}
def add(self, url):
url_data = urlsplit(url)
if url_data.netloc[:4] == 'www.':
netloc = url_data.netloc[4:]
else:
netloc = url_data.netloc
if url_data.path.endswith('/'):
path = url_data.path[:-1]
else:
path = url_data.path
if len(url_data.query) > 0:
query = '?' + url_data.query
else:
query = ''
url = netloc + path + query
if url not in self.links:
self.links[url] = LinkTrackerLink.load(self.sqlconn.cursor(pymysql.cursors.DictCursor), url)
self.links[url].increment()
def sync(self):
self.sqlconn.autocommit(False)
cursor = self.sqlconn.cursor()
for link in [link for k, link in self.links.items() if link.needs_sync]:
link.sync(cursor)
cursor.close()
self.sqlconn.autocommit(True)
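# --- Illustrative usage sketch (not part of the original module) ---
# Assumes a reachable MySQL server with the tb_link_data table; the connection
# parameters below are placeholders.
if __name__ == '__main__':
    sqlconn = pymysql.connect(host='localhost', user='tyggbot', password='', db='tyggbot')
    tracker = LinkTracker(sqlconn)
    tracker.add('https://www.example.com/some/path/?q=1')
    tracker.add('http://example.com/some/path?q=1')  # normalizes to the same key
    tracker.sync()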
| 0rmi/tyggbot | models/linktracker.py | Python | mit | 2,792 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import date, datetime
from dateutil import relativedelta
import json
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from openerp import SUPERUSER_ID, api
import openerp.addons.decimal_precision as dp
from openerp.addons.procurement import procurement
import logging
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# Incoterms
#----------------------------------------------------------
class stock_incoterms(osv.osv):
_name = "stock.incoterms"
_description = "Incoterms"
_columns = {
        'name': fields.char('Name', required=True, help="Incoterms are a series of sales terms. They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices."),
'code': fields.char('Code', size=3, required=True, help="Incoterm Standard Code"),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide an INCOTERM you will not use."),
}
_defaults = {
'active': True,
}
#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
_name = "stock.location"
_description = "Inventory Locations"
_parent_name = "location_id"
_parent_store = True
_parent_order = 'name'
_order = 'parent_left'
_rec_name = 'complete_name'
def _location_owner(self, cr, uid, location, context=None):
''' Return the company owning the location if any '''
return location and (location.usage == 'internal') and location.company_id or False
def _complete_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = m.name
parent = m.location_id
while parent:
res[m.id] = parent.name + ' / ' + res[m.id]
parent = parent.location_id
return res
def _get_sublocations(self, cr, uid, ids, context=None):
""" return all sublocations of the given stock locations (included) """
if context is None:
context = {}
context_with_inactive = context.copy()
context_with_inactive['active_test'] = False
return self.search(cr, uid, [('id', 'child_of', ids)], context=context_with_inactive)
def _name_get(self, cr, uid, location, context=None):
name = location.name
while location.location_id and location.usage != 'view':
location = location.location_id
name = location.name + '/' + name
return name
def name_get(self, cr, uid, ids, context=None):
res = []
for location in self.browse(cr, uid, ids, context=context):
res.append((location.id, self._name_get(cr, uid, location, context=context)))
return res
_columns = {
'name': fields.char('Location Name', required=True, translate=True),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide a location without deleting it."),
'usage': fields.selection([
('supplier', 'Supplier Location'),
('view', 'View'),
('internal', 'Internal Location'),
('customer', 'Customer Location'),
('inventory', 'Inventory'),
('procurement', 'Procurement'),
('production', 'Production'),
('transit', 'Transit Location')],
'Location Type', required=True,
help="""* Supplier Location: Virtual location representing the source location for products coming from your suppliers
\n* View: Virtual location used to create a hierarchical structures for your warehouse, aggregating its child locations ; can't directly contain products
\n* Internal Location: Physical locations inside your own warehouses,
\n* Customer Location: Virtual location representing the destination location for products sent to your customers
\n* Inventory: Virtual location serving as counterpart for inventory operations used to correct stock levels (Physical inventories)
\n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (supplier or production) is not known yet. This location should be empty when the procurement scheduler has finished running.
\n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products
\n* Transit Location: Counterpart location that should be used in inter-companies or inter-warehouses operations
""", select=True),
'complete_name': fields.function(_complete_name, type='char', string="Location Name",
store={'stock.location': (_get_sublocations, ['name', 'location_id', 'active'], 10)}),
'location_id': fields.many2one('stock.location', 'Parent Location', select=True, ondelete='cascade'),
'child_ids': fields.one2many('stock.location', 'location_id', 'Contains'),
'partner_id': fields.many2one('res.partner', 'Owner', help="Owner of the location if not internal"),
'comment': fields.text('Additional Information'),
'posx': fields.integer('Corridor (X)', help="Optional localization details, for information purpose only"),
'posy': fields.integer('Shelves (Y)', help="Optional localization details, for information purpose only"),
'posz': fields.integer('Height (Z)', help="Optional localization details, for information purpose only"),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this location is shared between companies'),
'scrap_location': fields.boolean('Is a Scrap Location?', help='Check this box to allow using this location to put scrapped/damaged goods.'),
'removal_strategy_id': fields.many2one('product.removal', 'Removal Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to take the products from, which lot etc. for this location. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."),
'putaway_strategy_id': fields.many2one('product.putaway', 'Put Away Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to store the products. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."),
'loc_barcode': fields.char('Location Barcode'),
}
_defaults = {
'active': True,
'usage': 'internal',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location', context=c),
'posx': 0,
'posy': 0,
'posz': 0,
'scrap_location': False,
}
_sql_constraints = [('loc_barcode_company_uniq', 'unique (loc_barcode,company_id)', 'The barcode for a location must be unique per company !')]
def create(self, cr, uid, default, context=None):
if not default.get('loc_barcode', False):
default.update({'loc_barcode': default.get('complete_name', False)})
return super(stock_location, self).create(cr, uid, default, context=context)
def get_putaway_strategy(self, cr, uid, location, product, context=None):
''' Returns the location where the product has to be put, if any compliant putaway strategy is found. Otherwise returns None.'''
putaway_obj = self.pool.get('product.putaway')
loc = location
while loc:
if loc.putaway_strategy_id:
res = putaway_obj.putaway_apply(cr, uid, loc.putaway_strategy_id, product, context=context)
if res:
return res
loc = loc.location_id
def _default_removal_strategy(self, cr, uid, context=None):
return 'fifo'
def get_removal_strategy(self, cr, uid, location, product, context=None):
''' Returns the removal strategy to consider for the given product and location.
:param location: browse record (stock.location)
:param product: browse record (product.product)
:rtype: char
'''
if product.categ_id.removal_strategy_id:
return product.categ_id.removal_strategy_id.method
loc = location
while loc:
if loc.removal_strategy_id:
return loc.removal_strategy_id.method
loc = loc.location_id
return self._default_removal_strategy(cr, uid, context=context)
def get_warehouse(self, cr, uid, location, context=None):
"""
Returns warehouse id of warehouse that contains location
:param location: browse record (stock.location)
"""
wh_obj = self.pool.get("stock.warehouse")
whs = wh_obj.search(cr, uid, [('view_location_id.parent_left', '<=', location.parent_left),
('view_location_id.parent_right', '>=', location.parent_left)], context=context)
return whs and whs[0] or False
#----------------------------------------------------------
# Routes
#----------------------------------------------------------
class stock_location_route(osv.osv):
_name = 'stock.location.route'
_description = "Inventory Routes"
_order = 'sequence'
_columns = {
'name': fields.char('Route Name', required=True),
'sequence': fields.integer('Sequence'),
'pull_ids': fields.one2many('procurement.rule', 'route_id', 'Pull Rules', copy=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the route without removing it."),
'push_ids': fields.one2many('stock.location.path', 'route_id', 'Push Rules', copy=True),
'product_selectable': fields.boolean('Applicable on Product'),
'product_categ_selectable': fields.boolean('Applicable on Product Category'),
'warehouse_selectable': fields.boolean('Applicable on Warehouse'),
'supplied_wh_id': fields.many2one('stock.warehouse', 'Supplied Warehouse'),
'supplier_wh_id': fields.many2one('stock.warehouse', 'Supplier Warehouse'),
'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this route is shared between all companies'),
}
_defaults = {
'sequence': lambda self, cr, uid, ctx: 0,
'active': True,
'product_selectable': True,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location.route', context=c),
}
def write(self, cr, uid, ids, vals, context=None):
'''when a route is deactivated, deactivate also its pull and push rules'''
if isinstance(ids, (int, long)):
ids = [ids]
res = super(stock_location_route, self).write(cr, uid, ids, vals, context=context)
if 'active' in vals:
push_ids = []
pull_ids = []
for route in self.browse(cr, uid, ids, context=context):
if route.push_ids:
push_ids += [r.id for r in route.push_ids if r.active != vals['active']]
if route.pull_ids:
pull_ids += [r.id for r in route.pull_ids if r.active != vals['active']]
if push_ids:
self.pool.get('stock.location.path').write(cr, uid, push_ids, {'active': vals['active']}, context=context)
if pull_ids:
self.pool.get('procurement.rule').write(cr, uid, pull_ids, {'active': vals['active']}, context=context)
return res
#----------------------------------------------------------
# Quants
#----------------------------------------------------------
class stock_quant(osv.osv):
"""
Quants are the smallest unit of stock physical instances
"""
_name = "stock.quant"
_description = "Quants"
def _get_quant_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for q in self.browse(cr, uid, ids, context=context):
res[q.id] = q.product_id.code or ''
if q.lot_id:
res[q.id] = q.lot_id.name
res[q.id] += ': ' + str(q.qty) + q.product_id.uom_id.name
return res
def _calc_inventory_value(self, cr, uid, ids, name, attr, context=None):
context = dict(context or {})
res = {}
uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
for quant in self.browse(cr, uid, ids, context=context):
context.pop('force_company', None)
if quant.company_id.id != uid_company_id:
#if the company of the quant is different than the current user company, force the company in the context
#then re-do a browse to read the property fields for the good company.
context['force_company'] = quant.company_id.id
quant = self.browse(cr, uid, quant.id, context=context)
res[quant.id] = self._get_inventory_value(cr, uid, quant, context=context)
return res
def _get_inventory_value(self, cr, uid, quant, context=None):
return quant.product_id.standard_price * quant.qty
_columns = {
'name': fields.function(_get_quant_name, type='char', string='Identifier'),
'product_id': fields.many2one('product.product', 'Product', required=True, ondelete="restrict", readonly=True, select=True),
'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="restrict", readonly=True, select=True),
'qty': fields.float('Quantity', required=True, help="Quantity of products in this quant, in the default unit of measure of the product", readonly=True, select=True),
'package_id': fields.many2one('stock.quant.package', string='Package', help="The package containing this quant", readonly=True, select=True),
'packaging_type_id': fields.related('package_id', 'packaging_id', type='many2one', relation='product.packaging', string='Type of packaging', readonly=True, store=True),
'reservation_id': fields.many2one('stock.move', 'Reserved for Move', help="The move the quant is reserved for", readonly=True, select=True),
'lot_id': fields.many2one('stock.production.lot', 'Lot', readonly=True, select=True),
'cost': fields.float('Unit Cost'),
'owner_id': fields.many2one('res.partner', 'Owner', help="This is the owner of the quant", readonly=True, select=True),
'create_date': fields.datetime('Creation Date', readonly=True),
'in_date': fields.datetime('Incoming Date', readonly=True, select=True),
'history_ids': fields.many2many('stock.move', 'stock_quant_move_rel', 'quant_id', 'move_id', 'Moves', help='Moves that operate(d) on this quant'),
'company_id': fields.many2one('res.company', 'Company', help="The company to which the quants belong", required=True, readonly=True, select=True),
'inventory_value': fields.function(_calc_inventory_value, string="Inventory Value", type='float', readonly=True),
# Used for negative quants to reconcile after compensated by a new positive one
'propagated_from_id': fields.many2one('stock.quant', 'Linked Quant', help='The negative quant this is coming from', readonly=True, select=True),
'negative_move_id': fields.many2one('stock.move', 'Move Negative Quant', help='If this is a negative quant, this will be the move that caused this negative quant.', readonly=True),
'negative_dest_location_id': fields.related('negative_move_id', 'location_dest_id', type='many2one', relation='stock.location', string="Negative Destination Location", readonly=True,
help="Technical field used to record the destination location of a move that created a negative quant"),
}
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.quant', context=c),
}
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
''' Overwrite the read_group in order to sum the function field 'inventory_value' in group by'''
res = super(stock_quant, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
if 'inventory_value' in fields:
for line in res:
if '__domain' in line:
lines = self.search(cr, uid, line['__domain'], context=context)
inv_value = 0.0
for line2 in self.browse(cr, uid, lines, context=context):
inv_value += line2.inventory_value
line['inventory_value'] = inv_value
return res
def action_view_quant_history(self, cr, uid, ids, context=None):
'''
This function returns an action that display the history of the quant, which
mean all the stock moves that lead to this quant creation with this quant quantity.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_move_form2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context={})[0]
move_ids = []
for quant in self.browse(cr, uid, ids, context=context):
move_ids += [move.id for move in quant.history_ids]
result['domain'] = "[('id','in',[" + ','.join(map(str, move_ids)) + "])]"
return result
def quants_reserve(self, cr, uid, quants, move, link=False, context=None):
'''This function reserves quants for the given move (and optionally given link). If the total of quantity reserved is enough, the move's state
is also set to 'assigned'
:param quants: list of tuple(quant browse record or None, qty to reserve). If None is given as first tuple element, the item will be ignored. Negative quants should not be received as argument
:param move: browse record
:param link: browse record (stock.move.operation.link)
'''
toreserve = []
reserved_availability = move.reserved_availability
#split quants if needed
for quant, qty in quants:
if qty <= 0.0 or (quant and quant.qty <= 0.0):
raise osv.except_osv(_('Error!'), _('You can not reserve a negative quantity or a negative quant.'))
if not quant:
continue
self._quant_split(cr, uid, quant, qty, context=context)
toreserve.append(quant.id)
reserved_availability += quant.qty
#reserve quants
if toreserve:
self.write(cr, SUPERUSER_ID, toreserve, {'reservation_id': move.id}, context=context)
#if move has a picking_id, write on that picking that pack_operation might have changed and need to be recomputed
if move.picking_id:
self.pool.get('stock.picking').write(cr, uid, [move.picking_id.id], {'recompute_pack_op': True}, context=context)
#check if move'state needs to be set as 'assigned'
if reserved_availability == move.product_qty and move.state in ('confirmed', 'waiting'):
self.pool.get('stock.move').write(cr, uid, [move.id], {'state': 'assigned'}, context=context)
elif reserved_availability > 0 and not move.partially_available:
self.pool.get('stock.move').write(cr, uid, [move.id], {'partially_available': True}, context=context)
def quants_move(self, cr, uid, quants, move, location_to, location_from=False, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False, context=None):
"""Moves all given stock.quant in the given destination location. Unreserve from current move.
:param quants: list of tuple(browse record(stock.quant) or None, quantity to move)
:param move: browse record (stock.move)
:param location_to: browse record (stock.location) depicting where the quants have to be moved
        :param location_from: optional browse record (stock.location) explaining where the quant has to be taken (may differ from the move source location in case a removal strategy is applied). This parameter is only used to pass to _quant_create if a negative quant must be created
:param lot_id: ID of the lot that must be set on the quants to move
:param owner_id: ID of the partner that must own the quants to move
:param src_package_id: ID of the package that contains the quants to move
:param dest_package_id: ID of the package that must be set on the moved quant
"""
quants_reconcile = []
to_move_quants = []
self._check_location(cr, uid, location_to, context=context)
for quant, qty in quants:
if not quant:
#If quant is None, we will create a quant to move (and potentially a negative counterpart too)
quant = self._quant_create(cr, uid, qty, move, lot_id=lot_id, owner_id=owner_id, src_package_id=src_package_id, dest_package_id=dest_package_id, force_location_from=location_from, force_location_to=location_to, context=context)
else:
self._quant_split(cr, uid, quant, qty, context=context)
quant.refresh()
to_move_quants.append(quant)
quants_reconcile.append(quant)
if to_move_quants:
to_recompute_move_ids = [x.reservation_id.id for x in to_move_quants if x.reservation_id and x.reservation_id.id != move.id]
self.move_quants_write(cr, uid, to_move_quants, move, location_to, dest_package_id, context=context)
self.pool.get('stock.move').recalculate_move_state(cr, uid, to_recompute_move_ids, context=context)
if location_to.usage == 'internal':
if self.search(cr, uid, [('product_id', '=', move.product_id.id), ('qty','<', 0)], limit=1, context=context):
for quant in quants_reconcile:
quant.refresh()
self._quant_reconcile_negative(cr, uid, quant, move, context=context)
def move_quants_write(self, cr, uid, quants, move, location_dest_id, dest_package_id, context=None):
vals = {'location_id': location_dest_id.id,
'history_ids': [(4, move.id)],
'package_id': dest_package_id,
'reservation_id': False}
self.write(cr, SUPERUSER_ID, [q.id for q in quants], vals, context=context)
def quants_get_prefered_domain(self, cr, uid, location, product, qty, domain=None, prefered_domain_list=[], restrict_lot_id=False, restrict_partner_id=False, context=None):
''' This function tries to find quants in the given location for the given domain, by trying to first limit
the choice on the quants that match the first item of prefered_domain_list as well. But if the qty requested is not reached
it tries to find the remaining quantity by looping on the prefered_domain_list (tries with the second item and so on).
Make sure the quants aren't found twice => all the domains of prefered_domain_list should be orthogonal
'''
if domain is None:
domain = []
quants = [(None, qty)]
#don't look for quants in location that are of type production, supplier or inventory.
if location.usage in ['inventory', 'production', 'supplier']:
return quants
res_qty = qty
if not prefered_domain_list:
return self.quants_get(cr, uid, location, product, qty, domain=domain, restrict_lot_id=restrict_lot_id, restrict_partner_id=restrict_partner_id, context=context)
for prefered_domain in prefered_domain_list:
if res_qty > 0:
#try to replace the last tuple (None, res_qty) with something that wasn't chosen at first because of the prefered order
quants.pop()
tmp_quants = self.quants_get(cr, uid, location, product, res_qty, domain=domain + prefered_domain, restrict_lot_id=restrict_lot_id, restrict_partner_id=restrict_partner_id, context=context)
for quant in tmp_quants:
if quant[0]:
res_qty -= quant[1]
quants += tmp_quants
return quants
def quants_get(self, cr, uid, location, product, qty, domain=None, restrict_lot_id=False, restrict_partner_id=False, context=None):
"""
Use the removal strategies of product to search for the correct quants
If you inherit, put the super at the end of your method.
:location: browse record of the parent location where the quants have to be found
:product: browse record of the product to find
:qty in UoM of product
"""
result = []
domain = domain or [('qty', '>', 0.0)]
if restrict_partner_id:
domain += [('owner_id', '=', restrict_partner_id)]
if restrict_lot_id:
domain += [('lot_id', '=', restrict_lot_id)]
if location:
removal_strategy = self.pool.get('stock.location').get_removal_strategy(cr, uid, location, product, context=context)
result += self.apply_removal_strategy(cr, uid, location, product, qty, domain, removal_strategy, context=context)
return result
def apply_removal_strategy(self, cr, uid, location, product, quantity, domain, removal_strategy, context=None):
if removal_strategy == 'fifo':
order = 'in_date, id'
return self._quants_get_order(cr, uid, location, product, quantity, domain, order, context=context)
elif removal_strategy == 'lifo':
order = 'in_date desc, id desc'
return self._quants_get_order(cr, uid, location, product, quantity, domain, order, context=context)
        raise osv.except_osv(_('Error!'), _('Removal strategy %s not implemented.') % (removal_strategy,))
def _quant_create(self, cr, uid, qty, move, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False,
force_location_from=False, force_location_to=False, context=None):
'''Create a quant in the destination location and create a negative quant in the source location if it's an internal location.
'''
if context is None:
context = {}
price_unit = self.pool.get('stock.move').get_price_unit(cr, uid, move, context=context)
location = force_location_to or move.location_dest_id
vals = {
'product_id': move.product_id.id,
'location_id': location.id,
'qty': qty,
'cost': price_unit,
'history_ids': [(4, move.id)],
'in_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': move.company_id.id,
'lot_id': lot_id,
'owner_id': owner_id,
'package_id': dest_package_id,
}
if move.location_id.usage == 'internal':
#if we were trying to move something from an internal location and reach here (quant creation),
#it means that a negative quant has to be created as well.
negative_vals = vals.copy()
negative_vals['location_id'] = force_location_from and force_location_from.id or move.location_id.id
negative_vals['qty'] = -qty
negative_vals['cost'] = price_unit
negative_vals['negative_move_id'] = move.id
negative_vals['package_id'] = src_package_id
negative_quant_id = self.create(cr, SUPERUSER_ID, negative_vals, context=context)
vals.update({'propagated_from_id': negative_quant_id})
#create the quant as superuser, because we want to restrict the creation of quant manually: we should always use this method to create quants
quant_id = self.create(cr, SUPERUSER_ID, vals, context=context)
return self.browse(cr, uid, quant_id, context=context)
def _quant_split(self, cr, uid, quant, qty, context=None):
context = context or {}
if (quant.qty > 0 and quant.qty <= qty) or (quant.qty <= 0 and quant.qty >= qty):
return False
new_quant = self.copy(cr, SUPERUSER_ID, quant.id, default={'qty': quant.qty - qty}, context=context)
self.write(cr, SUPERUSER_ID, quant.id, {'qty': qty}, context=context)
quant.refresh()
return self.browse(cr, uid, new_quant, context=context)
def _get_latest_move(self, cr, uid, quant, context=None):
move = False
for m in quant.history_ids:
if not move or m.date > move.date:
move = m
return move
@api.cr_uid_ids_context
def _quants_merge(self, cr, uid, solved_quant_ids, solving_quant, context=None):
path = []
for move in solving_quant.history_ids:
path.append((4, move.id))
self.write(cr, SUPERUSER_ID, solved_quant_ids, {'history_ids': path}, context=context)
def _quant_reconcile_negative(self, cr, uid, quant, move, context=None):
"""
        When a new quant arrives in a location, try to reconcile it with
        negative quants. If it's possible, apply the cost of the new
        quant to the counterpart of the negative quant.
"""
solving_quant = quant
dom = [('qty', '<', 0)]
if quant.lot_id:
dom += [('lot_id', '=', quant.lot_id.id)]
dom += [('owner_id', '=', quant.owner_id.id)]
dom += [('package_id', '=', quant.package_id.id)]
quants = self.quants_get(cr, uid, quant.location_id, quant.product_id, quant.qty, dom, context=context)
for quant_neg, qty in quants:
if not quant_neg:
continue
to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id)], context=context)
if not to_solve_quant_ids:
continue
solving_qty = qty
solved_quant_ids = []
for to_solve_quant in self.browse(cr, uid, to_solve_quant_ids, context=context):
if solving_qty <= 0:
continue
solved_quant_ids.append(to_solve_quant.id)
self._quant_split(cr, uid, to_solve_quant, min(solving_qty, to_solve_quant.qty), context=context)
solving_qty -= min(solving_qty, to_solve_quant.qty)
remaining_solving_quant = self._quant_split(cr, uid, solving_quant, qty, context=context)
remaining_neg_quant = self._quant_split(cr, uid, quant_neg, -qty, context=context)
#if the reconciliation was not complete, we need to link together the remaining parts
if remaining_neg_quant:
remaining_to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id), ('id', 'not in', solved_quant_ids)], context=context)
if remaining_to_solve_quant_ids:
self.write(cr, SUPERUSER_ID, remaining_to_solve_quant_ids, {'propagated_from_id': remaining_neg_quant.id}, context=context)
#delete the reconciled quants, as it is replaced by the solved quants
self.unlink(cr, SUPERUSER_ID, [quant_neg.id], context=context)
#price update + accounting entries adjustments
self._price_update(cr, uid, solved_quant_ids, solving_quant.cost, context=context)
#merge history (and cost?)
self._quants_merge(cr, uid, solved_quant_ids, solving_quant, context=context)
self.unlink(cr, SUPERUSER_ID, [solving_quant.id], context=context)
solving_quant = remaining_solving_quant
def _price_update(self, cr, uid, ids, newprice, context=None):
self.write(cr, SUPERUSER_ID, ids, {'cost': newprice}, context=context)
def quants_unreserve(self, cr, uid, move, context=None):
related_quants = [x.id for x in move.reserved_quant_ids]
if related_quants:
#if move has a picking_id, write on that picking that pack_operation might have changed and need to be recomputed
if move.picking_id:
self.pool.get('stock.picking').write(cr, uid, [move.picking_id.id], {'recompute_pack_op': True}, context=context)
if move.partially_available:
self.pool.get("stock.move").write(cr, uid, [move.id], {'partially_available': False}, context=context)
self.write(cr, SUPERUSER_ID, related_quants, {'reservation_id': False}, context=context)
def _quants_get_order(self, cr, uid, location, product, quantity, domain=[], orderby='in_date', context=None):
''' Implementation of removal strategies
If it can not reserve, it will return a tuple (None, qty)
'''
if context is None:
context = {}
domain += location and [('location_id', 'child_of', location.id)] or []
domain += [('product_id', '=', product.id)]
if context.get('force_company'):
domain += [('company_id', '=', context.get('force_company'))]
else:
domain += [('company_id', '=', self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id)]
res = []
offset = 0
while quantity > 0:
quants = self.search(cr, uid, domain, order=orderby, limit=10, offset=offset, context=context)
if not quants:
res.append((None, quantity))
break
for quant in self.browse(cr, uid, quants, context=context):
if quantity >= abs(quant.qty):
res += [(quant, abs(quant.qty))]
quantity -= abs(quant.qty)
elif quantity != 0:
res += [(quant, quantity)]
quantity = 0
break
offset += 10
return res
def _check_location(self, cr, uid, location, context=None):
if location.usage == 'view':
raise osv.except_osv(_('Error'), _('You cannot move to a location of type view %s.') % (location.name))
return True
#----------------------------------------------------------
# Stock Picking
#----------------------------------------------------------
class stock_picking(osv.osv):
_name = "stock.picking"
_inherit = ['mail.thread']
_description = "Picking List"
_order = "priority desc, date asc, id desc"
def _set_min_date(self, cr, uid, id, field, value, arg, context=None):
move_obj = self.pool.get("stock.move")
if value:
move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines]
move_obj.write(cr, uid, move_ids, {'date_expected': value}, context=context)
def _set_priority(self, cr, uid, id, field, value, arg, context=None):
move_obj = self.pool.get("stock.move")
if value:
move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines]
move_obj.write(cr, uid, move_ids, {'priority': value}, context=context)
def get_min_max_date(self, cr, uid, ids, field_name, arg, context=None):
""" Finds minimum and maximum dates for picking.
@return: Dictionary of values
"""
res = {}
for id in ids:
res[id] = {'min_date': False, 'max_date': False, 'priority': '1'}
if not ids:
return res
cr.execute("""select
picking_id,
min(date_expected),
max(date_expected),
max(priority)
from
stock_move
where
picking_id IN %s
group by
picking_id""", (tuple(ids),))
for pick, dt1, dt2, prio in cr.fetchall():
res[pick]['min_date'] = dt1
res[pick]['max_date'] = dt2
res[pick]['priority'] = prio
return res
def create(self, cr, user, vals, context=None):
context = context or {}
if ('name' not in vals) or (vals.get('name') in ('/', False)):
ptype_id = vals.get('picking_type_id', context.get('default_picking_type_id', False))
sequence_id = self.pool.get('stock.picking.type').browse(cr, user, ptype_id, context=context).sequence_id.id
vals['name'] = self.pool.get('ir.sequence').get_id(cr, user, sequence_id, 'id', context=context)
return super(stock_picking, self).create(cr, user, vals, context)
def _state_get(self, cr, uid, ids, field_name, arg, context=None):
'''The state of a picking depends on the state of its related stock.move
draft: the picking has no line or any one of the lines is draft
done, draft, cancel: all lines are done / draft / cancel
confirmed, waiting, assigned, partially_available depends on move_type (all at once or partial)
'''
res = {}
for pick in self.browse(cr, uid, ids, context=context):
if (not pick.move_lines) or any([x.state == 'draft' for x in pick.move_lines]):
res[pick.id] = 'draft'
continue
if all([x.state == 'cancel' for x in pick.move_lines]):
res[pick.id] = 'cancel'
continue
if all([x.state in ('cancel', 'done') for x in pick.move_lines]):
res[pick.id] = 'done'
continue
order = {'confirmed': 0, 'waiting': 1, 'assigned': 2}
order_inv = {0: 'confirmed', 1: 'waiting', 2: 'assigned'}
lst = [order[x.state] for x in pick.move_lines if x.state not in ('cancel', 'done')]
if pick.move_type == 'one':
res[pick.id] = order_inv[min(lst)]
else:
#we are in the case of partial delivery, so if all move are assigned, picking
#should be assign too, else if one of the move is assigned, or partially available, picking should be
#in partially available state, otherwise, picking is in waiting or confirmed state
res[pick.id] = order_inv[max(lst)]
if not all(x == 2 for x in lst):
if any(x == 2 for x in lst):
res[pick.id] = 'partially_available'
else:
#if all moves aren't assigned, check if we have one product partially available
for move in pick.move_lines:
if move.partially_available:
res[pick.id] = 'partially_available'
break
return res
def _get_pickings(self, cr, uid, ids, context=None):
res = set()
for move in self.browse(cr, uid, ids, context=context):
if move.picking_id:
res.add(move.picking_id.id)
return list(res)
def _get_pack_operation_exist(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for pick in self.browse(cr, uid, ids, context=context):
res[pick.id] = False
if pick.pack_operation_ids:
res[pick.id] = True
return res
def _get_quant_reserved_exist(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for pick in self.browse(cr, uid, ids, context=context):
res[pick.id] = False
for move in pick.move_lines:
if move.reserved_quant_ids:
res[pick.id] = True
continue
return res
def check_group_lot(self, cr, uid, context=None):
""" This function will return true if we have the setting to use lots activated. """
return self.pool.get('res.users').has_group(cr, uid, 'stock.group_production_lot')
def check_group_pack(self, cr, uid, context=None):
""" This function will return true if we have the setting to use package activated. """
return self.pool.get('res.users').has_group(cr, uid, 'stock.group_tracking_lot')
def action_assign_owner(self, cr, uid, ids, context=None):
for picking in self.browse(cr, uid, ids, context=context):
packop_ids = [op.id for op in picking.pack_operation_ids]
self.pool.get('stock.pack.operation').write(cr, uid, packop_ids, {'owner_id': picking.owner_id.id}, context=context)
_columns = {
'name': fields.char('Reference', select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=False),
'origin': fields.char('Source Document', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Reference of the document", select=True),
'backorder_id': fields.many2one('stock.picking', 'Back Order of', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True, copy=False),
'note': fields.text('Notes', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
        'move_type': fields.selection([('direct', 'Partial'), ('one', 'All at once')], 'Delivery Method', required=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="It specifies goods to be delivered partially or all at once"),
'state': fields.function(_state_get, type="selection", copy=False,
store={
'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_type'], 20),
'stock.move': (_get_pickings, ['state', 'picking_id', 'partially_available'], 20)},
selection=[
('draft', 'Draft'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('partially_available', 'Partially Available'),
('assigned', 'Ready to Transfer'),
('done', 'Transferred'),
], string='Status', readonly=True, select=True, track_visibility='onchange',
help="""
* Draft: not confirmed yet and will not be scheduled until confirmed\n
* Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
* Waiting Availability: still waiting for the availability of products\n
* Partially Available: some products are available and reserved\n
* Ready to Transfer: products reserved, simply waiting for confirmation.\n
* Transferred: has been processed, can't be modified or cancelled anymore\n
* Cancelled: has been cancelled, can't be confirmed anymore"""
),
'priority': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_priority, type='selection', selection=procurement.PROCUREMENT_PRIORITIES, string='Priority',
store={'stock.move': (_get_pickings, ['priority', 'picking_id'], 20)}, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, select=1, help="Priority for this picking. Setting manually a value here would set it as priority for all the moves",
track_visibility='onchange', required=True),
'min_date': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_min_date,
store={'stock.move': (_get_pickings, ['date_expected', 'picking_id'], 20)}, type='datetime', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Scheduled Date', select=1, help="Scheduled time for the first part of the shipment to be processed. Setting manually a value here would set it as expected date for all the stock moves.", track_visibility='onchange'),
'max_date': fields.function(get_min_max_date, multi="min_max_date",
store={'stock.move': (_get_pickings, ['date_expected', 'picking_id'], 20)}, type='datetime', string='Max. Expected Date', select=2, help="Scheduled time for the last part of the shipment to be processed"),
'date': fields.datetime('Creation Date', help="Creation Date, usually the time of the order", select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, track_visibility='onchange'),
'date_done': fields.datetime('Date of Transfer', help="Date of Completion", states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=False),
'move_lines': fields.one2many('stock.move', 'picking_id', 'Internal Moves', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=True),
'quant_reserved_exist': fields.function(_get_quant_reserved_exist, type='boolean', string='Quant already reserved ?', help='technical field used to know if there is already at least one quant reserved on moves of a given picking'),
'partner_id': fields.many2one('res.partner', 'Partner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
'pack_operation_ids': fields.one2many('stock.pack.operation', 'picking_id', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Related Packing Operations'),
'pack_operation_exist': fields.function(_get_pack_operation_exist, type='boolean', string='Pack Operation Exists?', help='technical field for attrs in view'),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, required=True),
'picking_type_code': fields.related('picking_type_id', 'code', type='char', string='Picking Type Code', help="Technical field used to display the correct label on print button in the picking view"),
'owner_id': fields.many2one('res.partner', 'Owner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Default Owner"),
# Used to search on pickings
'product_id': fields.related('move_lines', 'product_id', type='many2one', relation='product.product', string='Product'),
'recompute_pack_op': fields.boolean('Recompute pack operation?', help='True if reserved quants changed, which mean we might need to recompute the package operations', copy=False),
'location_id': fields.related('move_lines', 'location_id', type='many2one', relation='stock.location', string='Location', readonly=True),
'location_dest_id': fields.related('move_lines', 'location_dest_id', type='many2one', relation='stock.location', string='Destination Location', readonly=True),
'group_id': fields.related('move_lines', 'group_id', type='many2one', relation='procurement.group', string='Procurement Group', readonly=True,
store={
'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_lines'], 10),
'stock.move': (_get_pickings, ['group_id', 'picking_id'], 10),
}),
}
_defaults = {
'name': '/',
'state': 'draft',
'move_type': 'direct',
'priority': '1', # normal
'date': fields.datetime.now,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.picking', context=c),
'recompute_pack_op': True,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per company!'),
]
def do_print_picking(self, cr, uid, ids, context=None):
'''This function prints the picking list'''
context = dict(context or {}, active_ids=ids)
return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_picking', context=context)
def action_confirm(self, cr, uid, ids, context=None):
todo = []
todo_force_assign = []
for picking in self.browse(cr, uid, ids, context=context):
if picking.location_id.usage in ('supplier', 'inventory', 'production'):
todo_force_assign.append(picking.id)
for r in picking.move_lines:
if r.state == 'draft':
todo.append(r.id)
if len(todo):
self.pool.get('stock.move').action_confirm(cr, uid, todo, context=context)
if todo_force_assign:
self.force_assign(cr, uid, todo_force_assign, context=context)
return True
def action_assign(self, cr, uid, ids, context=None):
""" Check availability of picking moves.
This has the effect of changing the state and reserve quants on available moves, and may
also impact the state of the picking as it is computed based on move's states.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
if pick.state == 'draft':
self.action_confirm(cr, uid, [pick.id], context=context)
pick.refresh()
#skip the moves that don't need to be checked
move_ids = [x.id for x in pick.move_lines if x.state not in ('draft', 'cancel', 'done')]
if not move_ids:
raise osv.except_osv(_('Warning!'), _('Nothing to check the availability for.'))
self.pool.get('stock.move').action_assign(cr, uid, move_ids, context=context)
return True
def force_assign(self, cr, uid, ids, context=None):
""" Changes state of picking to available if moves are confirmed or waiting.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
move_ids = [x.id for x in pick.move_lines if x.state in ['confirmed', 'waiting']]
self.pool.get('stock.move').force_assign(cr, uid, move_ids, context=context)
#pack_operation might have changed and need to be recomputed
self.write(cr, uid, ids, {'recompute_pack_op': True}, context=context)
return True
def action_cancel(self, cr, uid, ids, context=None):
for pick in self.browse(cr, uid, ids, context=context):
ids2 = [move.id for move in pick.move_lines]
self.pool.get('stock.move').action_cancel(cr, uid, ids2, context)
return True
def action_done(self, cr, uid, ids, context=None):
"""Changes picking state to done by processing the Stock Moves of the Picking
Normally that happens when the button "Done" is pressed on a Picking view.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
todo = []
for move in pick.move_lines:
if move.state == 'draft':
todo.extend(self.pool.get('stock.move').action_confirm(cr, uid, [move.id], context=context))
elif move.state in ('assigned', 'confirmed'):
todo.append(move.id)
if len(todo):
self.pool.get('stock.move').action_done(cr, uid, todo, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
#on picking deletion, cancel its move then unlink them too
move_obj = self.pool.get('stock.move')
context = context or {}
for pick in self.browse(cr, uid, ids, context=context):
move_ids = [move.id for move in pick.move_lines]
move_obj.action_cancel(cr, uid, move_ids, context=context)
move_obj.unlink(cr, uid, move_ids, context=context)
return super(stock_picking, self).unlink(cr, uid, ids, context=context)
def write(self, cr, uid, ids, vals, context=None):
res = super(stock_picking, self).write(cr, uid, ids, vals, context=context)
#if we changed the move lines or the pack operations, we need to recompute the remaining quantities of both
if 'move_lines' in vals or 'pack_operation_ids' in vals:
self.do_recompute_remaining_quantities(cr, uid, ids, context=context)
return res
def _create_backorder(self, cr, uid, picking, backorder_moves=[], context=None):
""" Move all non-done lines into a new backorder picking. If the key 'do_only_split' is given in the context, then move all lines not in context.get('split', []) instead of all non-done lines.
"""
if not backorder_moves:
backorder_moves = picking.move_lines
backorder_move_ids = [x.id for x in backorder_moves if x.state not in ('done', 'cancel')]
if 'do_only_split' in context and context['do_only_split']:
backorder_move_ids = [x.id for x in backorder_moves if x.id not in context.get('split', [])]
if backorder_move_ids:
backorder_id = self.copy(cr, uid, picking.id, {
'name': '/',
'move_lines': [],
'pack_operation_ids': [],
'backorder_id': picking.id,
})
backorder = self.browse(cr, uid, backorder_id, context=context)
self.message_post(cr, uid, picking.id, body=_("Back order <em>%s</em> <b>created</b>.") % (backorder.name), context=context)
move_obj = self.pool.get("stock.move")
move_obj.write(cr, uid, backorder_move_ids, {'picking_id': backorder_id}, context=context)
self.write(cr, uid, [picking.id], {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
self.action_confirm(cr, uid, [backorder_id], context=context)
return backorder_id
return False
@api.cr_uid_ids_context
def recheck_availability(self, cr, uid, picking_ids, context=None):
self.action_assign(cr, uid, picking_ids, context=context)
self.do_prepare_partial(cr, uid, picking_ids, context=context)
def _get_top_level_packages(self, cr, uid, quants_suggested_locations, context=None):
"""This method searches for the higher level packages that can be moved as a single operation, given a list of quants
to move and their suggested destination, and returns the list of matching packages.
"""
# Try to find as much as possible top-level packages that can be moved
pack_obj = self.pool.get("stock.quant.package")
quant_obj = self.pool.get("stock.quant")
top_lvl_packages = set()
quants_to_compare = quants_suggested_locations.keys()
for pack in list(set([x.package_id for x in quants_suggested_locations.keys() if x and x.package_id])):
loop = True
test_pack = pack
good_pack = False
pack_destination = False
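#walk up the package hierarchy as long as every quant of the tested package belongs to the quants to move
#and shares the same suggested destination; the highest such package can be moved as a single operation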
while loop:
pack_quants = pack_obj.get_content(cr, uid, [test_pack.id], context=context)
all_in = True
for quant in quant_obj.browse(cr, uid, pack_quants, context=context):
# If the quant is not among the quants to move, the package cannot be moved as a whole
if quant not in quants_to_compare:
all_in = False
break
else:
#if putaway strat apply, the destination location of each quant may be different (and thus the package should not be taken as a single operation)
if not pack_destination:
pack_destination = quants_suggested_locations[quant]
elif pack_destination != quants_suggested_locations[quant]:
all_in = False
break
if all_in:
good_pack = test_pack
if test_pack.parent_id:
test_pack = test_pack.parent_id
else:
#stop the loop when there's no parent package anymore
loop = False
else:
#stop the loop when the package test_pack is not totally reserved for moves of this picking
#(some quants may be reserved for other picking or not reserved at all)
loop = False
if good_pack:
top_lvl_packages.add(good_pack)
return list(top_lvl_packages)
def _prepare_pack_ops(self, cr, uid, picking, quants, forced_qties, context=None):
""" returns a list of dict, ready to be used in create() of stock.pack.operation.
:param picking: browse record (stock.picking)
:param quants: browse record list (stock.quant). List of quants associated to the picking
:param forced_qties: dictionary showing for each product (keys) its corresponding quantity (value) that is not covered by the quants associated to the picking
"""
def _picking_putaway_apply(product):
location = False
# Search putaway strategy
if product_putaway_strats.get(product.id):
location = product_putaway_strats[product.id]
else:
location = self.pool.get('stock.location').get_putaway_strategy(cr, uid, picking.location_dest_id, product, context=context)
product_putaway_strats[product.id] = location
return location or picking.location_dest_id.id
pack_obj = self.pool.get("stock.quant.package")
quant_obj = self.pool.get("stock.quant")
vals = []
qtys_grouped = {}
#for each quant of the picking, find the suggested location
quants_suggested_locations = {}
product_putaway_strats = {}
for quant in quants:
if quant.qty <= 0:
continue
suggested_location_id = _picking_putaway_apply(quant.product_id)
quants_suggested_locations[quant] = suggested_location_id
#find the packages we can move as a whole
top_lvl_packages = self._get_top_level_packages(cr, uid, quants_suggested_locations, context=context)
# and then create pack operations for the top-level packages found
for pack in top_lvl_packages:
pack_quant_ids = pack_obj.get_content(cr, uid, [pack.id], context=context)
pack_quants = quant_obj.browse(cr, uid, pack_quant_ids, context=context)
vals.append({
'picking_id': picking.id,
'package_id': pack.id,
'product_qty': 1.0,
'location_id': pack.location_id.id,
'location_dest_id': quants_suggested_locations[pack_quants[0]],
})
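#note: an operation with a package but no product means the whole package is moved as one unit, hence product_qty 1.0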
#remove the quants inside the package so that they are excluded from the rest of the computation
for quant in pack_quants:
del quants_suggested_locations[quant]
# Go through all remaining reserved quants and group by product, package, lot, owner, source location and dest location
for quant, dest_location_id in quants_suggested_locations.items():
key = (quant.product_id.id, quant.package_id.id, quant.lot_id.id, quant.owner_id.id, quant.location_id.id, dest_location_id)
if qtys_grouped.get(key):
qtys_grouped[key] += quant.qty
else:
qtys_grouped[key] = quant.qty
# Do the same for the forced quantities (in case of force_assign or incoming shipments for example)
for product, qty in forced_qties.items():
if qty <= 0:
continue
suggested_location_id = _picking_putaway_apply(product)
key = (product.id, False, False, False, picking.location_id.id, suggested_location_id)
if qtys_grouped.get(key):
qtys_grouped[key] += qty
else:
qtys_grouped[key] = qty
# Create the necessary operations for the grouped quants and remaining qtys
for key, qty in qtys_grouped.items():
vals.append({
'picking_id': picking.id,
'product_qty': qty,
'product_id': key[0],
'package_id': key[1],
'lot_id': key[2],
'owner_id': key[3],
'location_id': key[4],
'location_dest_id': key[5],
'product_uom_id': self.pool.get("product.product").browse(cr, uid, key[0], context=context).uom_id.id,
})
return vals
@api.cr_uid_ids_context
def open_barcode_interface(self, cr, uid, picking_ids, context=None):
final_url="/stock/barcode/#action=stock.ui&picking_id="+str(picking_ids[0])
return {'type': 'ir.actions.act_url', 'url':final_url, 'target': 'self',}
@api.cr_uid_ids_context
def do_partial_open_barcode(self, cr, uid, picking_ids, context=None):
self.do_prepare_partial(cr, uid, picking_ids, context=context)
return self.open_barcode_interface(cr, uid, picking_ids, context=context)
@api.cr_uid_ids_context
def do_prepare_partial(self, cr, uid, picking_ids, context=None):
context = context or {}
pack_operation_obj = self.pool.get('stock.pack.operation')
#used to avoid recomputing the remaining quantities at each new pack operation created
ctx = context.copy()
ctx['no_recompute'] = True
#get list of existing operations and delete them
existing_package_ids = pack_operation_obj.search(cr, uid, [('picking_id', 'in', picking_ids)], context=context)
if existing_package_ids:
pack_operation_obj.unlink(cr, uid, existing_package_ids, context)
for picking in self.browse(cr, uid, picking_ids, context=context):
forced_qties = {} # Quantity remaining after calculating reserved quants
picking_quants = []
#Calculate packages, reserved quants, qtys of this picking's moves
for move in picking.move_lines:
if move.state not in ('assigned', 'confirmed'):
continue
move_quants = move.reserved_quant_ids
picking_quants += move_quants
forced_qty = (move.state == 'assigned') and move.product_qty - sum([x.qty for x in move_quants]) or 0
#if we used force_assign() on the move, or if the move is incoming, forced_qty > 0
if forced_qty:
if forced_qties.get(move.product_id):
forced_qties[move.product_id] += forced_qty
else:
forced_qties[move.product_id] = forced_qty
for vals in self._prepare_pack_ops(cr, uid, picking, picking_quants, forced_qties, context=context):
pack_operation_obj.create(cr, uid, vals, context=ctx)
#recompute the remaining quantities all at once
self.do_recompute_remaining_quantities(cr, uid, picking_ids, context=context)
self.write(cr, uid, picking_ids, {'recompute_pack_op': False}, context=context)
@api.cr_uid_ids_context
def do_unreserve(self, cr, uid, picking_ids, context=None):
"""
Unreserve all quants and remove the pack operations for the pickings in picking_ids
"""
moves_to_unreserve = []
pack_line_to_unreserve = []
for picking in self.browse(cr, uid, picking_ids, context=context):
moves_to_unreserve += [m.id for m in picking.move_lines if m.state not in ('done', 'cancel')]
pack_line_to_unreserve += [p.id for p in picking.pack_operation_ids]
if moves_to_unreserve:
if pack_line_to_unreserve:
self.pool.get('stock.pack.operation').unlink(cr, uid, pack_line_to_unreserve, context=context)
self.pool.get('stock.move').do_unreserve(cr, uid, moves_to_unreserve, context=context)
def recompute_remaining_qty(self, cr, uid, picking, context=None):
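"""Rebuild the links (stock.move.operation.link) between the pack operations and the moves of the picking.
Existing links are deleted, then recreated in two passes: first for the quants that can be matched
without ambiguity, then for the remaining quantities.
Returns a tuple (need_rereserve, all_op_processed).
"""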
def _create_link_for_index(operation_id, index, product_id, qty_to_assign, quant_id=False):
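#create a link between the operation and the move stored at `index` in prod2move_ids, for at most
#qty_to_assign; the move entry is removed from the list once its remaining quantity is fully consumed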
move_dict = prod2move_ids[product_id][index]
qty_on_link = min(move_dict['remaining_qty'], qty_to_assign)
self.pool.get('stock.move.operation.link').create(cr, uid, {'move_id': move_dict['move'].id, 'operation_id': operation_id, 'qty': qty_on_link, 'reserved_quant_id': quant_id}, context=context)
if move_dict['remaining_qty'] == qty_on_link:
prod2move_ids[product_id].pop(index)
else:
move_dict['remaining_qty'] -= qty_on_link
return qty_on_link
def _create_link_for_quant(operation_id, quant, qty):
"""create a link for given operation and reserved move of given quant, for the max quantity possible, and returns this quantity"""
if not quant.reservation_id.id:
return _create_link_for_product(operation_id, quant.product_id.id, qty)
qty_on_link = 0
for i in range(0, len(prod2move_ids[quant.product_id.id])):
if prod2move_ids[quant.product_id.id][i]['move'].id != quant.reservation_id.id:
continue
qty_on_link = _create_link_for_index(operation_id, i, quant.product_id.id, qty, quant_id=quant.id)
break
return qty_on_link
def _create_link_for_product(operation_id, product_id, qty):
'''method that creates the link between a given operation and move(s) of given product, for the given quantity.
Returns True if it was possible to create links for the requested quantity (False if there was not enough quantity on stock moves)'''
qty_to_assign = qty
if prod2move_ids.get(product_id):
while prod2move_ids[product_id] and qty_to_assign > 0:
qty_on_link = _create_link_for_index(operation_id, 0, product_id, qty_to_assign, quant_id=False)
qty_to_assign -= qty_on_link
return qty_to_assign == 0
uom_obj = self.pool.get('product.uom')
package_obj = self.pool.get('stock.quant.package')
quant_obj = self.pool.get('stock.quant')
quants_in_package_done = set()
prod2move_ids = {}
still_to_do = []
#make a dictionary giving for each product, the moves and related quantity that can be used in operation links
for move in picking.move_lines:
if not prod2move_ids.get(move.product_id.id):
prod2move_ids[move.product_id.id] = [{'move': move, 'remaining_qty': move.product_qty}]
else:
prod2move_ids[move.product_id.id].append({'move': move, 'remaining_qty': move.product_qty})
need_rereserve = False
#sort the operations in order to give higher priority to those with a package, then a serial number
operations = picking.pack_operation_ids
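#sort key: operations on a full package (package set, no product) get -6, operations with a package -2,
#an extra -1 when a lot is set; the lowest values sort first and are therefore processed first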
operations = sorted(operations, key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0))
#delete existing operations to start again from scratch
cr.execute("DELETE FROM stock_move_operation_link WHERE operation_id in %s", (tuple([x.id for x in operations]),))
#1) first, try to create links when quants can be identified without any doubt
for ops in operations:
#for each operation, create the links with the stock move by seeking on the matching reserved quants,
#and defer the operation if there is some ambiguity on the move to select
if ops.package_id and not ops.product_id:
#entire package
quant_ids = package_obj.get_content(cr, uid, [ops.package_id.id], context=context)
for quant in quant_obj.browse(cr, uid, quant_ids, context=context):
remaining_qty_on_quant = quant.qty
if quant.reservation_id:
#avoid quants being counted twice
quants_in_package_done.add(quant.id)
qty_on_link = _create_link_for_quant(ops.id, quant, quant.qty)
remaining_qty_on_quant -= qty_on_link
if remaining_qty_on_quant:
still_to_do.append((ops, quant.product_id.id, remaining_qty_on_quant))
need_rereserve = True
elif ops.product_id.id:
#Check moves with same product
qty_to_assign = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, ops.product_qty, ops.product_id.uom_id, context=context)
for move_dict in prod2move_ids.get(ops.product_id.id, []):
move = move_dict['move']
for quant in move.reserved_quant_ids:
if not qty_to_assign > 0:
break
if quant.id in quants_in_package_done:
continue
#check if the quant is matching the operation details
if ops.package_id:
flag = quant.package_id and bool(package_obj.search(cr, uid, [('id', 'child_of', [ops.package_id.id])], context=context)) or False
else:
flag = not quant.package_id.id
flag = flag and ((ops.lot_id and ops.lot_id.id == quant.lot_id.id) or not ops.lot_id)
flag = flag and (ops.owner_id.id == quant.owner_id.id)
if flag:
max_qty_on_link = min(quant.qty, qty_to_assign)
qty_on_link = _create_link_for_quant(ops.id, quant, max_qty_on_link)
qty_to_assign -= qty_on_link
if qty_to_assign > 0:
#qty reserved is less than qty put in operations. We need to create a link but it's deferred until after all
#the quants are processed (because they leave no choice on their related move and need to be processed with higher priority)
still_to_do += [(ops, ops.product_id.id, qty_to_assign)]
need_rereserve = True
#2) then, process the remaining part
all_op_processed = True
for ops, product_id, remaining_qty in still_to_do:
all_op_processed = all_op_processed and _create_link_for_product(ops.id, product_id, remaining_qty)
return (need_rereserve, all_op_processed)
def picking_recompute_remaining_quantities(self, cr, uid, picking, context=None):
need_rereserve = False
all_op_processed = True
if picking.pack_operation_ids:
need_rereserve, all_op_processed = self.recompute_remaining_qty(cr, uid, picking, context=context)
return need_rereserve, all_op_processed
@api.cr_uid_ids_context
def do_recompute_remaining_quantities(self, cr, uid, picking_ids, context=None):
for picking in self.browse(cr, uid, picking_ids, context=context):
if picking.pack_operation_ids:
self.recompute_remaining_qty(cr, uid, picking, context=context)
def _prepare_values_extra_move(self, cr, uid, op, product, remaining_qty, context=None):
"""
Creates an extra move when there is no corresponding original move to be copied
"""
picking = op.picking_id
res = {
'picking_id': picking.id,
'location_id': picking.location_id.id,
'location_dest_id': picking.location_dest_id.id,
'product_id': product.id,
'product_uom': product.uom_id.id,
'product_uom_qty': remaining_qty,
'name': _('Extra Move: ') + product.name,
'state': 'draft',
}
return res
def _create_extra_moves(self, cr, uid, picking, context=None):
'''This function creates move lines on a picking, at the time of do_transfer, based on
unexpected product transfers (or quantities exceeding the planned ones) found in the pack operations.
'''
move_obj = self.pool.get('stock.move')
operation_obj = self.pool.get('stock.pack.operation')
moves = []
for op in picking.pack_operation_ids:
for product_id, remaining_qty in operation_obj._get_remaining_prod_quantities(cr, uid, op, context=context).items():
if remaining_qty > 0:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
vals = self._prepare_values_extra_move(cr, uid, op, product, remaining_qty, context=context)
moves.append(move_obj.create(cr, uid, vals, context=context))
if moves:
move_obj.action_confirm(cr, uid, moves, context=context)
return moves
def rereserve_pick(self, cr, uid, ids, context=None):
"""
This can be used to provide a button that rereserves taking into account the existing pack operations
"""
for pick in self.browse(cr, uid, ids, context=context):
self.rereserve_quants(cr, uid, pick, move_ids = [x.id for x in pick.move_lines], context=context)
def rereserve_quants(self, cr, uid, picking, move_ids=[], context=None):
""" Unreserve quants then try to reassign quants."""
stock_move_obj = self.pool.get('stock.move')
if not move_ids:
self.do_unreserve(cr, uid, [picking.id], context=context)
self.action_assign(cr, uid, [picking.id], context=context)
else:
stock_move_obj.do_unreserve(cr, uid, move_ids, context=context)
stock_move_obj.action_assign(cr, uid, move_ids, context=context)
@api.cr_uid_ids_context
def do_enter_transfer_details(self, cr, uid, picking, context=None):
if not context:
context = {}
context.update({
'active_model': self._name,
'active_ids': picking,
'active_id': len(picking) and picking[0] or False
})
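#create the transfer details wizard record, pre-filled from the first selected picking, and return its view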
created_id = self.pool['stock.transfer_details'].create(cr, uid, {'picking_id': len(picking) and picking[0] or False}, context)
return self.pool['stock.transfer_details'].wizard_view(cr, uid, created_id, context)
@api.cr_uid_ids_context
def do_transfer(self, cr, uid, picking_ids, context=None):
"""
If no pack operation, we do simple action_done of the picking
Otherwise, do the pack operations
"""
if not context:
context = {}
stock_move_obj = self.pool.get('stock.move')
for picking in self.browse(cr, uid, picking_ids, context=context):
if not picking.pack_operation_ids:
self.action_done(cr, uid, [picking.id], context=context)
continue
else:
need_rereserve, all_op_processed = self.picking_recompute_remaining_quantities(cr, uid, picking, context=context)
#create extra moves in the picking (unexpected product moves coming from pack operations)
todo_move_ids = []
if not all_op_processed:
todo_move_ids += self._create_extra_moves(cr, uid, picking, context=context)
picking.refresh()
#split move lines eventually
toassign_move_ids = []
for move in picking.move_lines:
remaining_qty = move.remaining_qty
if move.state in ('done', 'cancel'):
#ignore stock moves cancelled or already done
continue
elif move.state == 'draft':
toassign_move_ids.append(move.id)
if remaining_qty == 0:
if move.state in ('draft', 'assigned', 'confirmed'):
todo_move_ids.append(move.id)
elif remaining_qty > 0 and remaining_qty < move.product_qty:
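#part of the move was processed: split off the remaining quantity into a new move; the original
#(processed part) will be set to done and the new move is re-assigned below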
new_move = stock_move_obj.split(cr, uid, move, remaining_qty, context=context)
todo_move_ids.append(move.id)
#Assign move as it was assigned before
toassign_move_ids.append(new_move)
if need_rereserve or not all_op_processed:
if not picking.location_id.usage in ("supplier", "production", "inventory"):
self.rereserve_quants(cr, uid, picking, move_ids=todo_move_ids, context=context)
self.do_recompute_remaining_quantities(cr, uid, [picking.id], context=context)
if todo_move_ids and not context.get('do_only_split'):
self.pool.get('stock.move').action_done(cr, uid, todo_move_ids, context=context)
elif context.get('do_only_split'):
context = dict(context, split=todo_move_ids)
picking.refresh()
self._create_backorder(cr, uid, picking, context=context)
if toassign_move_ids:
stock_move_obj.action_assign(cr, uid, toassign_move_ids, context=context)
return True
@api.cr_uid_ids_context
def do_split(self, cr, uid, picking_ids, context=None):
""" just split the picking (create a backorder) without making it 'done' """
if context is None:
context = {}
ctx = context.copy()
ctx['do_only_split'] = True
return self.do_transfer(cr, uid, picking_ids, context=ctx)
def get_next_picking_for_ui(self, cr, uid, context=None):
""" returns the next pickings to process. Used in the barcode scanner UI"""
if context is None:
context = {}
domain = [('state', 'in', ('assigned', 'partially_available'))]
if context.get('default_picking_type_id'):
domain.append(('picking_type_id', '=', context['default_picking_type_id']))
return self.search(cr, uid, domain, context=context)
def action_done_from_ui(self, cr, uid, picking_id, context=None):
""" called when button 'done' is pushed in the barcode scanner UI """
#write qty_done into field product_qty for every package_operation before doing the transfer
pack_op_obj = self.pool.get('stock.pack.operation')
for operation in self.browse(cr, uid, picking_id, context=context).pack_operation_ids:
pack_op_obj.write(cr, uid, operation.id, {'product_qty': operation.qty_done}, context=context)
self.do_transfer(cr, uid, [picking_id], context=context)
#return id of next picking to work on
return self.get_next_picking_for_ui(cr, uid, context=context)
@api.cr_uid_ids_context
def action_pack(self, cr, uid, picking_ids, operation_filter_ids=None, context=None):
""" Create a package with the current pack_operation_ids of the picking that aren't yet in a pack.
Used in the barcode scanner UI and the normal interface as well.
operation_filter_ids is used by the barcode scanner interface to specify a subset of operations to pack"""
if operation_filter_ids is None:
operation_filter_ids = []
stock_operation_obj = self.pool.get('stock.pack.operation')
package_obj = self.pool.get('stock.quant.package')
stock_move_obj = self.pool.get('stock.move')
for picking_id in picking_ids:
operation_search_domain = [('picking_id', '=', picking_id), ('result_package_id', '=', False)]
if operation_filter_ids != []:
operation_search_domain.append(('id', 'in', operation_filter_ids))
operation_ids = stock_operation_obj.search(cr, uid, operation_search_domain, context=context)
pack_operation_ids = []
if operation_ids:
for operation in stock_operation_obj.browse(cr, uid, operation_ids, context=context):
#If we haven't done all qty in operation, we have to split into 2 operations
op = operation
if (operation.qty_done < operation.product_qty):
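#split: the copied operation holds the quantity actually done and is the one that gets packed;
#the original operation keeps the remainder with qty_done reset to 0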
new_operation = stock_operation_obj.copy(cr, uid, operation.id, {'product_qty': operation.qty_done,'qty_done': operation.qty_done}, context=context)
stock_operation_obj.write(cr, uid, operation.id, {'product_qty': operation.product_qty - operation.qty_done,'qty_done': 0, 'lot_id': False}, context=context)
op = stock_operation_obj.browse(cr, uid, new_operation, context=context)
pack_operation_ids.append(op.id)
if op.product_id and op.location_id and op.location_dest_id:
stock_move_obj.check_tracking_product(cr, uid, op.product_id, op.lot_id.id, op.location_id, op.location_dest_id, context=context)
package_id = package_obj.create(cr, uid, {}, context=context)
stock_operation_obj.write(cr, uid, pack_operation_ids, {'result_package_id': package_id}, context=context)
return True
def process_product_id_from_ui(self, cr, uid, picking_id, product_id, op_id, increment=True, context=None):
return self.pool.get('stock.pack.operation')._search_and_increment(cr, uid, picking_id, [('product_id', '=', product_id),('id', '=', op_id)], increment=increment, context=context)
def process_barcode_from_ui(self, cr, uid, picking_id, barcode_str, visible_op_ids, context=None):
'''This function is called each time the barcode scanner reads an input'''
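#the barcode is matched against locations, then products, then lots, then packages; the first match is returned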
lot_obj = self.pool.get('stock.production.lot')
package_obj = self.pool.get('stock.quant.package')
product_obj = self.pool.get('product.product')
stock_operation_obj = self.pool.get('stock.pack.operation')
stock_location_obj = self.pool.get('stock.location')
answer = {'filter_loc': False, 'operation_id': False}
#check if the barcode corresponds to a location
matching_location_ids = stock_location_obj.search(cr, uid, [('loc_barcode', '=', barcode_str)], context=context)
if matching_location_ids:
#if we have a location, return immediately with the location name
location = stock_location_obj.browse(cr, uid, matching_location_ids[0], context=None)
answer['filter_loc'] = stock_location_obj._name_get(cr, uid, location, context=None)
answer['filter_loc_id'] = matching_location_ids[0]
return answer
#check if the barcode corresponds to a product
matching_product_ids = product_obj.search(cr, uid, ['|', ('ean13', '=', barcode_str), ('default_code', '=', barcode_str)], context=context)
if matching_product_ids:
op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('product_id', '=', matching_product_ids[0])], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context)
answer['operation_id'] = op_id
return answer
#check if the barcode corresponds to a lot
matching_lot_ids = lot_obj.search(cr, uid, [('name', '=', barcode_str)], context=context)
if matching_lot_ids:
lot = lot_obj.browse(cr, uid, matching_lot_ids[0], context=context)
op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('product_id', '=', lot.product_id.id), ('lot_id', '=', lot.id)], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context)
answer['operation_id'] = op_id
return answer
#check if the barcode corresponds to a package
matching_package_ids = package_obj.search(cr, uid, [('name', '=', barcode_str)], context=context)
if matching_package_ids:
op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('package_id', '=', matching_package_ids[0])], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context)
answer['operation_id'] = op_id
return answer
return answer
class stock_production_lot(osv.osv):
_name = 'stock.production.lot'
_inherit = ['mail.thread']
_description = 'Lot/Serial'
_columns = {
'name': fields.char('Serial Number', required=True, help="Unique Serial Number"),
'ref': fields.char('Internal Reference', help="Internal reference number in case it differs from the manufacturer's serial number"),
'product_id': fields.many2one('product.product', 'Product', required=True, domain=[('type', '<>', 'service')]),
'quant_ids': fields.one2many('stock.quant', 'lot_id', 'Quants', readonly=True),
'create_date': fields.datetime('Creation Date'),
}
_defaults = {
'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'stock.lot.serial'),
'product_id': lambda x, y, z, c: c.get('product_id', False),
}
_sql_constraints = [
('name_ref_uniq', 'unique (name, ref, product_id)', 'The combination of serial number, internal reference and product must be unique !'),
]
def action_traceability(self, cr, uid, ids, context=None):
""" It traces the information of lots
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: A dictionary of values
"""
quant_obj = self.pool.get("stock.quant")
quants = quant_obj.search(cr, uid, [('lot_id', 'in', ids)], context=context)
moves = set()
for quant in quant_obj.browse(cr, uid, quants, context=context):
moves |= {move.id for move in quant.history_ids}
if moves:
return {
'domain': "[('id','in',[" + ','.join(map(str, list(moves))) + "])]",
'name': _('Traceability'),
'view_mode': 'tree,form',
'view_type': 'form',
'context': {'tree_view_ref': 'stock.view_move_tree'},
'res_model': 'stock.move',
'type': 'ir.actions.act_window',
}
return False
# ----------------------------------------------------
# Move
# ----------------------------------------------------
class stock_move(osv.osv):
_name = "stock.move"
_description = "Stock Move"
_order = 'date_expected desc, id'
_log_create = False
def get_price_unit(self, cr, uid, move, context=None):
""" Returns the unit price to store on the quant """
return move.price_unit or move.product_id.standard_price
def name_get(self, cr, uid, ids, context=None):
res = []
for line in self.browse(cr, uid, ids, context=context):
name = line.location_id.name + ' > ' + line.location_dest_id.name
if line.product_id.code:
name = line.product_id.code + ': ' + name
if line.picking_id.origin:
name = line.picking_id.origin + '/ ' + name
res.append((line.id, name))
return res
def _quantity_normalize(self, cr, uid, ids, name, args, context=None):
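"""Compute product_qty: the move quantity (product_uom_qty) converted into the product's default UoM."""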
uom_obj = self.pool.get('product.uom')
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = uom_obj._compute_qty_obj(cr, uid, m.product_uom, m.product_uom_qty, m.product_id.uom_id, round=False, context=context)
return res
def _get_remaining_qty(self, cr, uid, ids, field_name, args, context=None):
uom_obj = self.pool.get('product.uom')
res = {}
for move in self.browse(cr, uid, ids, context=context):
qty = move.product_qty
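#remaining qty = move quantity minus all quantities already linked to pack operations (both in the product default UoM)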
for record in move.linked_move_operation_ids:
qty -= record.qty
#converting the remaining quantity in the move UoM
res[move.id] = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, qty, move.product_uom, round=False, context=context)
return res
def _get_lot_ids(self, cr, uid, ids, field_name, args, context=None):
res = dict.fromkeys(ids, False)
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
res[move.id] = [q.lot_id.id for q in move.quant_ids if q.lot_id]
else:
res[move.id] = [q.lot_id.id for q in move.reserved_quant_ids if q.lot_id]
return res
def _get_product_availability(self, cr, uid, ids, field_name, args, context=None):
quant_obj = self.pool.get('stock.quant')
res = dict.fromkeys(ids, False)
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
res[move.id] = move.product_qty
else:
sublocation_ids = self.pool.get('stock.location').search(cr, uid, [('id', 'child_of', [move.location_id.id])], context=context)
quant_ids = quant_obj.search(cr, uid, [('location_id', 'in', sublocation_ids), ('product_id', '=', move.product_id.id), ('reservation_id', '=', False)], context=context)
availability = 0
for quant in quant_obj.browse(cr, uid, quant_ids, context=context):
availability += quant.qty
res[move.id] = min(move.product_qty, availability)
return res
def _get_string_qty_information(self, cr, uid, ids, field_name, args, context=None):
settings_obj = self.pool.get('stock.config.settings')
uom_obj = self.pool.get('product.uom')
res = dict.fromkeys(ids, '')
for move in self.browse(cr, uid, ids, context=context):
if move.state in ('draft', 'done', 'cancel') or move.location_id.usage != 'internal':
res[move.id] = '' # 'not applicable' or 'n/a' could work too
continue
total_available = min(move.product_qty, move.reserved_availability + move.availability)
total_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, total_available, move.product_uom, context=context)
info = str(total_available)
#look in the settings if we need to display the UoM name or not
config_ids = settings_obj.search(cr, uid, [], limit=1, order='id DESC', context=context)
if config_ids:
stock_settings = settings_obj.browse(cr, uid, config_ids[0], context=context)
if stock_settings.group_uom:
info += ' ' + move.product_uom.name
if move.reserved_availability:
if move.reserved_availability != total_available:
#part of the available quantity is reserved for this move and part is available but not reserved
reserved_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, move.reserved_availability, move.product_uom, context=context)
info += _(' (%s reserved)') % str(reserved_available)
else:
#all available quantity is assigned
info += _(' (reserved)')
res[move.id] = info
return res
def _get_reserved_availability(self, cr, uid, ids, field_name, args, context=None):
res = dict.fromkeys(ids, 0)
for move in self.browse(cr, uid, ids, context=context):
res[move.id] = sum([quant.qty for quant in move.reserved_quant_ids])
return res
def _get_move(self, cr, uid, ids, context=None):
res = set()
for quant in self.browse(cr, uid, ids, context=context):
if quant.reservation_id:
res.add(quant.reservation_id.id)
return list(res)
def _get_move_ids(self, cr, uid, ids, context=None):
res = []
for picking in self.browse(cr, uid, ids, context=context):
res += [x.id for x in picking.move_lines]
return res
def _get_moves_from_prod(self, cr, uid, ids, context=None):
if ids:
return self.pool.get('stock.move').search(cr, uid, [('product_id', 'in', ids)], context=context)
return []
def _set_product_qty(self, cr, uid, id, field, value, arg, context=None):
""" The meaning of product_qty field changed lately and is now a functional field computing the quantity
in the default product UoM. This code has been added to raise an error if a write is made given a value
for `product_qty`, where the same write should set the `product_uom_qty` field instead, in order to
detect errors.
"""
raise osv.except_osv(_('Programming Error!'), _('The requested operation cannot be processed because of a programming error setting the `product_qty` field instead of the `product_uom_qty`.'))
_columns = {
'name': fields.char('Description', required=True, select=True),
'priority': fields.selection(procurement.PROCUREMENT_PRIORITIES, 'Priority'),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'date': fields.datetime('Date', required=True, select=True, help="Move date: scheduled date until move is done, then date of actual move processing", states={'done': [('readonly', True)]}),
'date_expected': fields.datetime('Expected Date', states={'done': [('readonly', True)]}, required=True, select=True, help="Scheduled date for the processing of this move"),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True, domain=[('type', '<>', 'service')], states={'done': [('readonly', True)]}),
'product_qty': fields.function(_quantity_normalize, fnct_inv=_set_product_qty, _type='float', store={
'stock.move': (lambda self, cr, uid, ids, ctx: ids, ['product_id', 'product_uom_qty', 'product_uom'], 20),
'product.product': (_get_moves_from_prod, ['uom_id'], 20),
}, string='Quantity',
digits_compute=dp.get_precision('Product Unit of Measure'),
help='Quantity in the default UoM of the product'),
'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
required=True, states={'done': [('readonly', True)]},
help="This is the quantity of products from an inventory "
"point of view. For moves in the state 'done', this is the "
"quantity of products that were actually moved. For other "
"moves, this is the quantity of product that is planned to "
"be moved. Lowering this quantity does not generate a "
"backorder. Changing this quantity on assigned moves affects "
"the product reservation, and should be done with care."
),
'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True, states={'done': [('readonly', True)]}),
'product_uos_qty': fields.float('Quantity (UOS)', digits_compute=dp.get_precision('Product Unit of Measure'), states={'done': [('readonly', True)]}),
'product_uos': fields.many2one('product.uom', 'Product UOS', states={'done': [('readonly', True)]}),
'product_packaging': fields.many2one('product.packaging', 'Preferred Packaging', help="It specifies attributes of packaging like type, quantity of packaging, etc."),
'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True, states={'done': [('readonly', True)]}, help="Sets a location if you produce at a fixed location. This can be a partner location if you subcontract the manufacturing operations."),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True, states={'done': [('readonly', True)]}, select=True, help="Location where the system will stock the finished products."),
'partner_id': fields.many2one('res.partner', 'Destination Address ', states={'done': [('readonly', True)]}, help="Optional address where goods are to be delivered, specifically used for allotment"),
'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Optional: next stock move when chaining them", select=True, copy=False),
'move_orig_ids': fields.one2many('stock.move', 'move_dest_id', 'Original Move', help="Optional: previous stock move when chaining them", select=True),
'picking_id': fields.many2one('stock.picking', 'Reference', select=True, states={'done': [('readonly', True)]}),
'note': fields.text('Notes'),
'state': fields.selection([('draft', 'New'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Move'),
('confirmed', 'Waiting Availability'),
('assigned', 'Available'),
('done', 'Done'),
], 'Status', readonly=True, select=True, copy=False,
help= "* New: When the stock move is created and not yet confirmed.\n"\
"* Waiting Another Move: This state can be seen when a move is waiting for another one, for example in a chained flow.\n"\
"* Waiting Availability: This state is reached when the procurement resolution is not straight forward. It may need the scheduler to run, a component to me manufactured...\n"\
"* Available: When products are reserved, it is set to \'Available\'.\n"\
"* Done: When the shipment is processed, the state is \'Done\'."),
'partially_available': fields.boolean('Partially Available', readonly=True, help="Checks if the move has some stock reserved", copy=False),
'price_unit': fields.float('Unit Price', help="Technical field used to record the product cost set by the user during a picking confirmation (when costing method used is 'average price' or 'real'). Value given in company currency and in product uom."), # as it's a technical field, we intentionally don't provide the digits attribute
'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
'split_from': fields.many2one('stock.move', string="Move Split From", help="Technical field used to track the origin of a split move, which can be useful in case of debug", copy=False),
'backorder_id': fields.related('picking_id', 'backorder_id', type='many2one', relation="stock.picking", string="Back Order of", select=True),
'origin': fields.char("Source"),
'procure_method': fields.selection([('make_to_stock', 'Default: Take From Stock'), ('make_to_order', 'Advanced: Apply Procurement Rules')], 'Supply Method', required=True,
help="""By default, the system will take from the stock in the source location and passively wait for availability. The other possibility allows you to directly create a procurement on the source location (and thus ignore its current stock) to gather products. If we want to chain moves and have this one to wait for the previous, this second option should be chosen."""),
# used for colors in tree views:
'scrapped': fields.related('location_dest_id', 'scrap_location', type='boolean', relation='stock.location', string='Scrapped', readonly=True),
'quant_ids': fields.many2many('stock.quant', 'stock_quant_move_rel', 'move_id', 'quant_id', 'Moved Quants'),
'reserved_quant_ids': fields.one2many('stock.quant', 'reservation_id', 'Reserved quants'),
'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'move_id', string='Linked Operations', readonly=True, help='Operations that impact this move for the computation of the remaining quantities'),
'remaining_qty': fields.function(_get_remaining_qty, type='float', string='Remaining Quantity',
digits_compute=dp.get_precision('Product Unit of Measure'), states={'done': [('readonly', True)]},),
'procurement_id': fields.many2one('procurement.order', 'Procurement'),
'group_id': fields.many2one('procurement.group', 'Procurement Group'),
'rule_id': fields.many2one('procurement.rule', 'Procurement Rule', help='The pull rule that created this stock move'),
'push_rule_id': fields.many2one('stock.location.path', 'Push Rule', help='The push rule that created this stock move'),
'propagate': fields.boolean('Propagate cancel and split', help='If checked, when this move is cancelled, cancel the linked move too'),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type'),
'inventory_id': fields.many2one('stock.inventory', 'Inventory'),
'lot_ids': fields.function(_get_lot_ids, type='many2many', relation='stock.production.lot', string='Lots'),
'origin_returned_move_id': fields.many2one('stock.move', 'Origin return move', help='move that created the return move', copy=False),
'returned_move_ids': fields.one2many('stock.move', 'origin_returned_move_id', 'All returned moves', help='Optional: all returned moves created from this move'),
'reserved_availability': fields.function(_get_reserved_availability, type='float', string='Quantity Reserved', readonly=True, help='Quantity that has already been reserved for this move'),
'availability': fields.function(_get_product_availability, type='float', string='Quantity Available', readonly=True, help='Quantity in stock that can still be reserved for this move'),
'string_availability_info': fields.function(_get_string_qty_information, type='text', string='Availability', readonly=True, help='Show various information on stock availability for this move'),
'restrict_lot_id': fields.many2one('stock.production.lot', 'Lot', help="Technical field used to depict a restriction on the lot of quants to consider when marking this move as 'done'"),
'restrict_partner_id': fields.many2one('res.partner', 'Owner ', help="Technical field used to depict a restriction on the ownership of quants to consider when marking this move as 'done'"),
'route_ids': fields.many2many('stock.location.route', 'stock_location_route_move', 'move_id', 'route_id', 'Destination route', help="Preferred route to be followed by the procurement order"),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', help="Technical field depicting the warehouse to consider for the route selection on the next procurement (if any)."),
}
def _default_location_destination(self, cr, uid, context=None):
context = context or {}
if context.get('default_picking_type_id', False):
pick_type = self.pool.get('stock.picking.type').browse(cr, uid, context['default_picking_type_id'], context=context)
return pick_type.default_location_dest_id and pick_type.default_location_dest_id.id or False
return False
def _default_location_source(self, cr, uid, context=None):
context = context or {}
if context.get('default_picking_type_id', False):
pick_type = self.pool.get('stock.picking.type').browse(cr, uid, context['default_picking_type_id'], context=context)
return pick_type.default_location_src_id and pick_type.default_location_src_id.id or False
return False
def _default_destination_address(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id.partner_id.id
_defaults = {
'location_id': _default_location_source,
'location_dest_id': _default_location_destination,
'partner_id': _default_destination_address,
'state': 'draft',
'priority': '1',
'product_uom_qty': 1.0,
'scrapped': False,
'date': fields.datetime.now,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.move', context=c),
'date_expected': fields.datetime.now,
'procure_method': 'make_to_stock',
'propagate': True,
'partially_available': False,
}
def _check_uom(self, cr, uid, ids, context=None):
for move in self.browse(cr, uid, ids, context=context):
if move.product_id.uom_id.category_id.id != move.product_uom.category_id.id:
return False
return True
_constraints = [
(_check_uom,
'You try to move a product using a UoM that is not compatible with the UoM of the product moved. Please use an UoM in the same UoM category.',
['product_uom']),
]
@api.cr_uid_ids_context
def do_unreserve(self, cr, uid, move_ids, context=None):
quant_obj = self.pool.get("stock.quant")
for move in self.browse(cr, uid, move_ids, context=context):
if move.state in ('done', 'cancel'):
raise osv.except_osv(_('Operation Forbidden!'), _('Cannot unreserve a done move'))
quant_obj.quants_unreserve(cr, uid, move, context=context)
if self.find_move_ancestors(cr, uid, move, context=context):
self.write(cr, uid, [move.id], {'state': 'waiting'}, context=context)
else:
self.write(cr, uid, [move.id], {'state': 'confirmed'}, context=context)
def _prepare_procurement_from_move(self, cr, uid, move, context=None):
origin = (move.group_id and (move.group_id.name + ":") or "") + (move.rule_id and move.rule_id.name or move.origin or "/")
group_id = move.group_id and move.group_id.id or False
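#by default propagate the move's procurement group; the rule may override it with a fixed group or remove it entirely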
if move.rule_id:
if move.rule_id.group_propagation_option == 'fixed' and move.rule_id.group_id:
group_id = move.rule_id.group_id.id
elif move.rule_id.group_propagation_option == 'none':
group_id = False
return {
'name': move.rule_id and move.rule_id.name or "/",
'origin': origin,
'company_id': move.company_id and move.company_id.id or False,
'date_planned': move.date,
'product_id': move.product_id.id,
'product_qty': move.product_qty,
'product_uom': move.product_uom.id,
'product_uos_qty': (move.product_uos and move.product_uos_qty) or move.product_qty,
'product_uos': (move.product_uos and move.product_uos.id) or move.product_uom.id,
'location_id': move.location_id.id,
'move_dest_id': move.id,
'group_id': group_id,
'route_ids': [(4, x.id) for x in move.route_ids],
'warehouse_id': move.warehouse_id.id or (move.picking_type_id and move.picking_type_id.warehouse_id.id or False),
'priority': move.priority,
}
def _push_apply(self, cr, uid, moves, context=None):
push_obj = self.pool.get("stock.location.path")
for move in moves:
#1) if the move is already chained, there is no need to check push rules
#2) if the move is a returned move, we don't want to check push rules, as returning a returned move is the only decent way
# to receive goods without triggering the push rules again (which would duplicate chained operations)
if not move.move_dest_id and not move.origin_returned_move_id:
domain = [('location_from_id', '=', move.location_dest_id.id)]
#priority goes to the route defined on the product and product category
route_ids = [x.id for x in move.product_id.route_ids + move.product_id.categ_id.total_route_ids]
rules = push_obj.search(cr, uid, domain + [('route_id', 'in', route_ids)], order='route_sequence, sequence', context=context)
if not rules:
#then we search on the warehouse if a rule can apply
wh_route_ids = []
if move.warehouse_id:
wh_route_ids = [x.id for x in move.warehouse_id.route_ids]
elif move.picking_type_id and move.picking_type_id.warehouse_id:
wh_route_ids = [x.id for x in move.picking_type_id.warehouse_id.route_ids]
if wh_route_ids:
rules = push_obj.search(cr, uid, domain + [('route_id', 'in', wh_route_ids)], order='route_sequence, sequence', context=context)
if not rules:
#if no specialized push rule has been found yet, we try to find a general one (without route)
rules = push_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context)
if rules:
rule = push_obj.browse(cr, uid, rules[0], context=context)
push_obj._apply(cr, uid, rule, move, context=context)
return True
def _create_procurement(self, cr, uid, move, context=None):
""" This will create a procurement order """
return self.pool.get("procurement.order").create(cr, uid, self._prepare_procurement_from_move(cr, uid, move, context=context))
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
# Check that we do not modify a stock.move which is done
frozen_fields = set(['product_qty', 'product_uom', 'product_uos_qty', 'product_uos', 'location_id', 'location_dest_id', 'product_id'])
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
if frozen_fields.intersection(vals):
raise osv.except_osv(_('Operation Forbidden!'),
_('Quantities, Units of Measure, Products and Locations cannot be modified on stock moves that have already been processed (except by the Administrator).'))
propagated_changes_dict = {}
#propagation of quantity change
if vals.get('product_uom_qty'):
propagated_changes_dict['product_uom_qty'] = vals['product_uom_qty']
if vals.get('product_uom_id'):
propagated_changes_dict['product_uom_id'] = vals['product_uom_id']
#propagation of expected date:
propagated_date_field = False
if vals.get('date_expected'):
#propagate any manual change of the expected date
propagated_date_field = 'date_expected'
elif (vals.get('state', '') == 'done' and vals.get('date')):
#propagate also any delta observed when setting the move as done
propagated_date_field = 'date'
if not context.get('do_not_propagate', False) and (propagated_date_field or propagated_changes_dict):
#any propagation is (maybe) needed
for move in self.browse(cr, uid, ids, context=context):
if move.move_dest_id and move.propagate:
if 'date_expected' in propagated_changes_dict:
propagated_changes_dict.pop('date_expected')
if propagated_date_field:
current_date = datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT)
new_date = datetime.strptime(vals.get(propagated_date_field), DEFAULT_SERVER_DATETIME_FORMAT)
delta = new_date - current_date
if abs(delta.days) >= move.company_id.propagation_minimum_delta:
old_move_date = datetime.strptime(move.move_dest_id.date_expected, DEFAULT_SERVER_DATETIME_FORMAT)
new_move_date = (old_move_date + relativedelta.relativedelta(days=delta.days or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
propagated_changes_dict['date_expected'] = new_move_date
#For pushed moves as well as for pulled moves, propagate by recursive call of write().
#Note that, for pulled moves we intentionally don't propagate on the procurement.
if propagated_changes_dict:
self.write(cr, uid, [move.move_dest_id.id], propagated_changes_dict, context=context)
return super(stock_move, self).write(cr, uid, ids, vals, context=context)
def onchange_quantity(self, cr, uid, ids, product_id, product_qty, product_uom, product_uos):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_qty: Changed Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_uos_qty': 0.00
}
warning = {}
if (not product_id) or (product_qty <= 0.0):
result['product_qty'] = 0.0
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
# Warn if the quantity was decreased
if ids:
for move in self.read(cr, uid, ids, ['product_qty']):
if product_qty < move['product_qty']:
warning.update({
'title': _('Information'),
'message': _("By changing this quantity here, you accept the "
"new quantity as complete: Odoo will not "
"automatically generate a back order.")})
break
if product_uos and product_uom and (product_uom != product_uos):
result['product_uos_qty'] = product_qty * uos_coeff['uos_coeff']
else:
result['product_uos_qty'] = product_qty
return {'value': result, 'warning': warning}
def onchange_uos_quantity(self, cr, uid, ids, product_id, product_uos_qty,
product_uos, product_uom):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_uos_qty: Changed UoS Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_uom_qty': 0.00
}
if (not product_id) or (product_uos_qty <= 0.0):
result['product_uos_qty'] = 0.0
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
# No warning if the quantity was decreased to avoid double warnings:
# The clients should call onchange_quantity too anyway
if product_uos and product_uom and (product_uom != product_uos):
result['product_uom_qty'] = product_uos_qty / uos_coeff['uos_coeff']
else:
result['product_uom_qty'] = product_uos_qty
return {'value': result}
def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False, loc_dest_id=False, partner_id=False):
""" On change of product id, if finds UoM, UoS, quantity and UoS quantity.
@param prod_id: Changed Product id
@param loc_id: Source location id
@param loc_dest_id: Destination location id
@param partner_id: Address id of partner
@return: Dictionary of values
"""
if not prod_id:
return {}
user = self.pool.get('res.users').browse(cr, uid, uid)
lang = user and user.lang or False
if partner_id:
addr_rec = self.pool.get('res.partner').browse(cr, uid, partner_id)
if addr_rec:
lang = addr_rec and addr_rec.lang or False
ctx = {'lang': lang}
product = self.pool.get('product.product').browse(cr, uid, [prod_id], context=ctx)[0]
uos_id = product.uos_id and product.uos_id.id or False
result = {
'name': product.partner_ref,
'product_uom': product.uom_id.id,
'product_uos': uos_id,
'product_uom_qty': 1.00,
'product_uos_qty': self.pool.get('stock.move').onchange_quantity(cr, uid, ids, prod_id, 1.00, product.uom_id.id, uos_id)['value']['product_uos_qty'],
}
if loc_id:
result['location_id'] = loc_id
if loc_dest_id:
result['location_dest_id'] = loc_dest_id
return {'value': result}
@api.cr_uid_ids_context
def _picking_assign(self, cr, uid, move_ids, procurement_group, location_from, location_to, context=None):
"""Assign a picking on the given move_ids, which is a list of move supposed to share the same procurement_group, location_from and location_to
(and company). Those attributes are also given as parameters.
"""
pick_obj = self.pool.get("stock.picking")
picks = pick_obj.search(cr, uid, [
('group_id', '=', procurement_group),
('location_id', '=', location_from),
('location_dest_id', '=', location_to),
('state', 'in', ['draft', 'confirmed', 'waiting'])], context=context)
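#reuse an existing open picking (draft/confirmed/waiting) matching the group and locations; otherwise create a new one from the first move's data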
if picks:
pick = picks[0]
else:
move = self.browse(cr, uid, move_ids, context=context)[0]
values = {
'origin': move.origin,
'company_id': move.company_id and move.company_id.id or False,
'move_type': move.group_id and move.group_id.move_type or 'direct',
'partner_id': move.partner_id.id or False,
'picking_type_id': move.picking_type_id and move.picking_type_id.id or False,
}
pick = pick_obj.create(cr, uid, values, context=context)
return self.write(cr, uid, move_ids, {'picking_id': pick}, context=context)
def onchange_date(self, cr, uid, ids, date, date_expected, context=None):
""" On change of Scheduled Date gives a Move date.
@param date_expected: Scheduled Date
@param date: Move Date
@return: Move Date
"""
if not date_expected:
date_expected = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return {'value': {'date': date_expected}}
def attribute_price(self, cr, uid, move, context=None):
"""
Attribute price to move, important in inter-company moves or receipts with only one partner
"""
if not move.price_unit:
price = move.product_id.standard_price
self.write(cr, uid, [move.id], {'price_unit': price})
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms stock move or put it in waiting if it's linked to another move.
@return: List of ids.
"""
if isinstance(ids, (int, long)):
ids = [ids]
states = {
'confirmed': [],
'waiting': []
}
to_assign = {}
for move in self.browse(cr, uid, ids, context=context):
self.attribute_price(cr, uid, move, context=context)
state = 'confirmed'
#if the move is preceded, then it's waiting (if preceding move is done, then action_assign has been called already and its state is already available)
if move.move_orig_ids:
state = 'waiting'
#if the move is split and any of its ancestors was preceded, then it's waiting as well
elif move.split_from:
move2 = move.split_from
while move2 and state != 'waiting':
if move2.move_orig_ids:
state = 'waiting'
move2 = move2.split_from
states[state].append(move.id)
if not move.picking_id and move.picking_type_id:
key = (move.group_id.id, move.location_id.id, move.location_dest_id.id)
if key not in to_assign:
to_assign[key] = []
to_assign[key].append(move.id)
for move in self.browse(cr, uid, states['confirmed'], context=context):
if move.procure_method == 'make_to_order':
self._create_procurement(cr, uid, move, context=context)
states['waiting'].append(move.id)
states['confirmed'].remove(move.id)
for state, write_ids in states.items():
if len(write_ids):
self.write(cr, uid, write_ids, {'state': state})
#assign picking in batch for all confirmed move that share the same details
for key, move_ids in to_assign.items():
procurement_group, location_from, location_to = key
self._picking_assign(cr, uid, move_ids, procurement_group, location_from, location_to, context=context)
moves = self.browse(cr, uid, ids, context=context)
self._push_apply(cr, uid, moves, context=context)
return ids
def force_assign(self, cr, uid, ids, context=None):
""" Changes the state to assigned.
@return: True
"""
return self.write(cr, uid, ids, {'state': 'assigned'}, context=context)
def check_tracking_product(self, cr, uid, product, lot_id, location, location_dest, context=None):
check = False
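        #a lot/serial number is required when the product tracks all moves, when it tracks incoming moves
        #(supplier/transit/inventory -> internal) or when it tracks outgoing moves (internal -> customer/transit)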
if product.track_all and not location_dest.usage == 'inventory':
check = True
elif product.track_incoming and location.usage in ('supplier', 'transit', 'inventory') and location_dest.usage == 'internal':
check = True
elif product.track_outgoing and location_dest.usage in ('customer', 'transit') and location.usage == 'internal':
check = True
if check and not lot_id:
raise osv.except_osv(_('Warning!'), _('You must assign a serial number for the product %s') % (product.name))
def check_tracking(self, cr, uid, move, lot_id, context=None):
""" Checks if serial number is assigned to stock move or not and raise an error if it had to.
"""
self.check_tracking_product(cr, uid, move.product_id, lot_id, move.location_id, move.location_dest_id, context=context)
def action_assign(self, cr, uid, ids, context=None):
""" Checks the product type and accordingly writes the state.
"""
context = context or {}
quant_obj = self.pool.get("stock.quant")
to_assign_moves = []
main_domain = {}
todo_moves = []
operations = set()
for move in self.browse(cr, uid, ids, context=context):
if move.state not in ('confirmed', 'waiting', 'assigned'):
continue
if move.location_id.usage in ('supplier', 'inventory', 'production'):
to_assign_moves.append(move.id)
#in case the move is returned, we want to try to find quants before forcing the assignment
if not move.origin_returned_move_id:
continue
if move.product_id.type == 'consu':
to_assign_moves.append(move.id)
continue
else:
todo_moves.append(move)
#we always keep the quants already assigned and try to find the remaining quantity on quants not assigned only
main_domain[move.id] = [('reservation_id', '=', False), ('qty', '>', 0)]
                #if the move is preceded, restrict the choice of quants to the ones moved previously in the original move
ancestors = self.find_move_ancestors(cr, uid, move, context=context)
if move.state == 'waiting' and not ancestors:
                    #if the waiting move doesn't have any ancestor yet (PO/MO not confirmed yet), don't find any quant available in stock
main_domain[move.id] += [('id', '=', False)]
elif ancestors:
main_domain[move.id] += [('history_ids', 'in', ancestors)]
#if the move is returned from another, restrict the choice of quants to the ones that follow the returned move
if move.origin_returned_move_id:
main_domain[move.id] += [('history_ids', 'in', move.origin_returned_move_id.id)]
for link in move.linked_move_operation_ids:
operations.add(link.operation_id)
        # Check all ops and sort them: we want to process the full packages first, then the operations with a lot, then the rest
operations = list(operations)
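        #the negative weights make full packages (a package without product) sort first, followed by operations
        #on a package, then operations with a lot, and finally the remaining operations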
operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0))
for ops in operations:
#first try to find quants based on specific domains given by linked operations
for record in ops.linked_move_operation_ids:
move = record.move_id
if move.id in main_domain:
domain = main_domain[move.id] + self.pool.get('stock.move.operation.link').get_specific_domain(cr, uid, record, context=context)
qty = record.qty
if qty:
quants = quant_obj.quants_get_prefered_domain(cr, uid, ops.location_id, move.product_id, qty, domain=domain, prefered_domain_list=[], restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
quant_obj.quants_reserve(cr, uid, quants, move, record, context=context)
for move in todo_moves:
if move.linked_move_operation_ids:
continue
move.refresh()
#then if the move isn't totally assigned, try to find quants without any specific domain
if move.state != 'assigned':
qty_already_assigned = move.reserved_availability
qty = move.product_qty - qty_already_assigned
quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, qty, domain=main_domain[move.id], prefered_domain_list=[], restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
quant_obj.quants_reserve(cr, uid, quants, move, context=context)
        #force the assignment of consumable products and of moves incoming from supplier/inventory/production locations
if to_assign_moves:
self.force_assign(cr, uid, to_assign_moves, context=context)
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels the moves and if all moves are cancelled it cancels the picking.
@return: True
"""
procurement_obj = self.pool.get('procurement.order')
context = context or {}
procs_to_check = []
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
raise osv.except_osv(_('Operation Forbidden!'),
_('You cannot cancel a stock move that has been set to \'Done\'.'))
if move.reserved_quant_ids:
self.pool.get("stock.quant").quants_unreserve(cr, uid, move, context=context)
if context.get('cancel_procurement'):
if move.propagate:
procurement_ids = procurement_obj.search(cr, uid, [('move_dest_id', '=', move.id)], context=context)
procurement_obj.cancel(cr, uid, procurement_ids, context=context)
else:
if move.move_dest_id:
if move.propagate:
self.action_cancel(cr, uid, [move.move_dest_id.id], context=context)
elif move.move_dest_id.state == 'waiting':
#If waiting, the chain will be broken and we are not sure if we can still wait for it (=> could take from stock instead)
self.write(cr, uid, [move.move_dest_id.id], {'state': 'confirmed'}, context=context)
if move.procurement_id:
# Does the same as procurement check, only eliminating a refresh
procs_to_check.append(move.procurement_id.id)
res = self.write(cr, uid, ids, {'state': 'cancel', 'move_dest_id': False}, context=context)
if procs_to_check:
procurement_obj.check(cr, uid, procs_to_check, context=context)
return res
def _check_package_from_moves(self, cr, uid, ids, context=None):
pack_obj = self.pool.get("stock.quant.package")
packs = set()
for move in self.browse(cr, uid, ids, context=context):
packs |= set([q.package_id for q in move.quant_ids if q.package_id and q.qty > 0])
return pack_obj._check_location_constraint(cr, uid, list(packs), context=context)
def find_move_ancestors(self, cr, uid, move, context=None):
        '''Find the first-level ancestors of the given move'''
ancestors = []
move2 = move
while move2:
ancestors += [x.id for x in move2.move_orig_ids]
            #loop on split_from to find the ancestors of split moves, but only if the move has no direct ancestor (priority goes to direct ancestors)
move2 = not move2.move_orig_ids and move2.split_from or False
return ancestors
@api.cr_uid_ids_context
def recalculate_move_state(self, cr, uid, move_ids, context=None):
        '''Recompute the state of the given moves because their reserved quants were used to fulfill another operation'''
for move in self.browse(cr, uid, move_ids, context=context):
vals = {}
reserved_quant_ids = move.reserved_quant_ids
if len(reserved_quant_ids) > 0 and not move.partially_available:
vals['partially_available'] = True
if len(reserved_quant_ids) == 0 and move.partially_available:
vals['partially_available'] = False
if move.state == 'assigned':
if self.find_move_ancestors(cr, uid, move, context=context):
vals['state'] = 'waiting'
else:
vals['state'] = 'confirmed'
if vals:
self.write(cr, uid, [move.id], vals, context=context)
def action_done(self, cr, uid, ids, context=None):
""" Process completely the moves given as ids and if all moves are done, it will finish the picking.
"""
context = context or {}
picking_obj = self.pool.get("stock.picking")
quant_obj = self.pool.get("stock.quant")
todo = [move.id for move in self.browse(cr, uid, ids, context=context) if move.state == "draft"]
if todo:
ids = self.action_confirm(cr, uid, todo, context=context)
pickings = set()
procurement_ids = []
#Search operations that are linked to the moves
operations = set()
move_qty = {}
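        #move_qty keeps track, per move, of the remaining quantity to process; it is decreased below as the
        #linked pack operations are handled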
for move in self.browse(cr, uid, ids, context=context):
move_qty[move.id] = move.product_qty
for link in move.linked_move_operation_ids:
operations.add(link.operation_id)
#Sort operations according to entire packages first, then package + lot, package only, lot only
operations = list(operations)
operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0))
for ops in operations:
if ops.picking_id:
pickings.add(ops.picking_id.id)
main_domain = [('qty', '>', 0)]
for record in ops.linked_move_operation_ids:
move = record.move_id
self.check_tracking(cr, uid, move, not ops.product_id and ops.package_id.id or ops.lot_id.id, context=context)
prefered_domain = [('reservation_id', '=', move.id)]
fallback_domain = [('reservation_id', '=', False)]
fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]
prefered_domain_list = [prefered_domain] + [fallback_domain] + [fallback_domain2]
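                #quants are looked up by order of preference: first the ones already reserved for this move,
                #then the unreserved ones, and finally the ones reserved for other moves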
dom = main_domain + self.pool.get('stock.move.operation.link').get_specific_domain(cr, uid, record, context=context)
quants = quant_obj.quants_get_prefered_domain(cr, uid, ops.location_id, move.product_id, record.qty, domain=dom, prefered_domain_list=prefered_domain_list,
restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
if ops.result_package_id.id:
#if a result package is given, all quants go there
quant_dest_package_id = ops.result_package_id.id
elif ops.product_id and ops.package_id:
                    #if a package and a product are given, we will remove the quants from the pack.
quant_dest_package_id = False
else:
#otherwise we keep the current pack of the quant, which may mean None
quant_dest_package_id = ops.package_id.id
quant_obj.quants_move(cr, uid, quants, move, ops.location_dest_id, location_from=ops.location_id, lot_id=ops.lot_id.id, owner_id=ops.owner_id.id, src_package_id=ops.package_id.id, dest_package_id=quant_dest_package_id, context=context)
# Handle pack in pack
if not ops.product_id and ops.package_id and ops.result_package_id.id != ops.package_id.parent_id.id:
self.pool.get('stock.quant.package').write(cr, SUPERUSER_ID, [ops.package_id.id], {'parent_id': ops.result_package_id.id}, context=context)
move_qty[move.id] -= record.qty
        #Check for remaining quantities and unreserve/check move_dest_id
move_dest_ids = set()
for move in self.browse(cr, uid, ids, context=context):
if move_qty[move.id] > 0: # (=In case no pack operations in picking)
main_domain = [('qty', '>', 0)]
prefered_domain = [('reservation_id', '=', move.id)]
fallback_domain = [('reservation_id', '=', False)]
fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]
prefered_domain_list = [prefered_domain] + [fallback_domain] + [fallback_domain2]
self.check_tracking(cr, uid, move, move.restrict_lot_id.id, context=context)
qty = move_qty[move.id]
quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, qty, domain=main_domain, prefered_domain_list=prefered_domain_list, restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
quant_obj.quants_move(cr, uid, quants, move, move.location_dest_id, lot_id=move.restrict_lot_id.id, owner_id=move.restrict_partner_id.id, context=context)
# If the move has a destination, add it to the list to reserve
if move.move_dest_id and move.move_dest_id.state in ('waiting', 'confirmed'):
move_dest_ids.add(move.move_dest_id.id)
if move.procurement_id:
procurement_ids.append(move.procurement_id.id)
#unreserve the quants and make them available for other operations/moves
quant_obj.quants_unreserve(cr, uid, move, context=context)
# Check the packages have been placed in the correct locations
self._check_package_from_moves(cr, uid, ids, context=context)
#set the move as done
self.write(cr, uid, ids, {'state': 'done', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
self.pool.get('procurement.order').check(cr, uid, procurement_ids, context=context)
#assign destination moves
if move_dest_ids:
self.action_assign(cr, uid, list(move_dest_ids), context=context)
        #check picking state to set the date_done if needed
done_picking = []
for picking in picking_obj.browse(cr, uid, list(pickings), context=context):
if picking.state == 'done' and not picking.date_done:
done_picking.append(picking.id)
if done_picking:
picking_obj.write(cr, uid, done_picking, {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
context = context or {}
for move in self.browse(cr, uid, ids, context=context):
if move.state not in ('draft', 'cancel'):
raise osv.except_osv(_('User Error!'), _('You can only delete draft moves.'))
return super(stock_move, self).unlink(cr, uid, ids, context=context)
def action_scrap(self, cr, uid, ids, quantity, location_id, restrict_lot_id=False, restrict_partner_id=False, context=None):
""" Move the scrap/damaged product into scrap location
@param cr: the database cursor
@param uid: the user id
@param ids: ids of stock move object to be scrapped
@param quantity : specify scrap qty
@param location_id : specify scrap location
@param context: context arguments
@return: Scraped lines
"""
#quantity should be given in MOVE UOM
if quantity <= 0:
raise osv.except_osv(_('Warning!'), _('Please provide a positive quantity to scrap.'))
res = []
for move in self.browse(cr, uid, ids, context=context):
source_location = move.location_id
if move.state == 'done':
source_location = move.location_dest_id
            #Previously used to prevent scrapping from a virtual location, but not necessary anymore
#if source_location.usage != 'internal':
#restrict to scrap from a virtual location because it's meaningless and it may introduce errors in stock ('creating' new products from nowhere)
#raise osv.except_osv(_('Error!'), _('Forbidden operation: it is not allowed to scrap products from a virtual location.'))
move_qty = move.product_qty
uos_qty = quantity / move_qty * move.product_uos_qty
default_val = {
'location_id': source_location.id,
'product_uom_qty': quantity,
'product_uos_qty': uos_qty,
'state': move.state,
'scrapped': True,
'location_dest_id': location_id,
'restrict_lot_id': restrict_lot_id,
'restrict_partner_id': restrict_partner_id,
}
new_move = self.copy(cr, uid, move.id, default_val)
res += [new_move]
product_obj = self.pool.get('product.product')
for product in product_obj.browse(cr, uid, [move.product_id.id], context=context):
if move.picking_id:
uom = product.uom_id.name if product.uom_id else ''
message = _("%s %s %s has been <b>moved to</b> scrap.") % (quantity, uom, product.name)
move.picking_id.message_post(body=message)
self.action_done(cr, uid, res, context=context)
return res
def split(self, cr, uid, move, qty, restrict_lot_id=False, restrict_partner_id=False, context=None):
""" Splits qty from move move into a new move
:param move: browse record
:param qty: float. quantity to split (given in product UoM)
:param restrict_lot_id: optional production lot that can be given in order to force the new move to restrict its choice of quants to this lot.
:param restrict_partner_id: optional partner that can be given in order to force the new move to restrict its choice of quants to the ones belonging to this partner.
        :param context: dictionary. Can contain the special key 'source_location_id' in order to force the source location when copying the move
returns the ID of the backorder move created
"""
if move.state in ('done', 'cancel'):
            raise osv.except_osv(_('Error'), _('You cannot split a move that is already done.'))
if move.state == 'draft':
            #we forbid splitting a draft move because, if it is not confirmed yet, it may be replaced by several other moves in
            #case of a phantom BoM (with the mrp module), and we don't want to deal with that complexity by copying a product that will be exploded.
raise osv.except_osv(_('Error'), _('You cannot split a draft move. It needs to be confirmed first.'))
if move.product_qty <= qty or qty == 0:
return move.id
uom_obj = self.pool.get('product.uom')
context = context or {}
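        #convert the quantity to split (given in the product's default UoM) into the move's UoM and scale the
        #UoS quantity proportionally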
uom_qty = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, qty, move.product_uom)
uos_qty = uom_qty * move.product_uos_qty / move.product_uom_qty
defaults = {
'product_uom_qty': uom_qty,
'product_uos_qty': uos_qty,
'procure_method': 'make_to_stock',
'restrict_lot_id': restrict_lot_id,
'restrict_partner_id': restrict_partner_id,
'split_from': move.id,
'procurement_id': move.procurement_id.id,
'move_dest_id': move.move_dest_id.id,
}
if context.get('source_location_id'):
defaults['location_id'] = context['source_location_id']
new_move = self.copy(cr, uid, move.id, defaults)
ctx = context.copy()
ctx['do_not_propagate'] = True
self.write(cr, uid, [move.id], {
'product_uom_qty': move.product_uom_qty - uom_qty,
'product_uos_qty': move.product_uos_qty - uos_qty,
}, context=ctx)
if move.move_dest_id and move.propagate and move.move_dest_id.state not in ('done', 'cancel'):
new_move_prop = self.split(cr, uid, move.move_dest_id, qty, context=context)
self.write(cr, uid, [new_move], {'move_dest_id': new_move_prop}, context=context)
        #returning the first element of the list returned by action_confirm is ok because we checked the move
        #wouldn't be exploded (and thus action_confirm should always return a list of length 1)
return self.action_confirm(cr, uid, [new_move], context=context)[0]
def get_code_from_locs(self, cr, uid, move, location_id=False, location_dest_id=False, context=None):
"""
Returns the code the picking type should have. This can easily be used
to check if a move is internal or not
move, location_id and location_dest_id are browse records
"""
code = 'internal'
src_loc = location_id or move.location_id
dest_loc = location_dest_id or move.location_dest_id
if src_loc.usage == 'internal' and dest_loc.usage != 'internal':
code = 'outgoing'
if src_loc.usage != 'internal' and dest_loc.usage == 'internal':
code = 'incoming'
return code
class stock_inventory(osv.osv):
_name = "stock.inventory"
_description = "Inventory"
def _get_move_ids_exist(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for inv in self.browse(cr, uid, ids, context=context):
res[inv.id] = False
if inv.move_ids:
res[inv.id] = True
return res
def _get_available_filters(self, cr, uid, context=None):
"""
        This function returns the list of filters allowed according to the options checked
        in the 'Settings > Warehouse' menu.
:rtype: list of tuple
"""
#default available choices
res_filter = [('none', _('All products')), ('product', _('One product only'))]
settings_obj = self.pool.get('stock.config.settings')
config_ids = settings_obj.search(cr, uid, [], limit=1, order='id DESC', context=context)
        #If the configuration has never been updated, all fields are false by default and so should not be displayed
if not config_ids:
return res_filter
stock_settings = settings_obj.browse(cr, uid, config_ids[0], context=context)
if stock_settings.group_stock_tracking_owner:
res_filter.append(('owner', _('One owner only')))
res_filter.append(('product_owner', _('One product for a specific owner')))
if stock_settings.group_stock_tracking_lot:
res_filter.append(('lot', _('One Lot/Serial Number')))
if stock_settings.group_stock_packaging:
res_filter.append(('pack', _('A Pack')))
return res_filter
def _get_total_qty(self, cr, uid, ids, field_name, args, context=None):
res = {}
for inv in self.browse(cr, uid, ids, context=context):
res[inv.id] = sum([x.product_qty for x in inv.line_ids])
return res
INVENTORY_STATE_SELECTION = [
('draft', 'Draft'),
('cancel', 'Cancelled'),
('confirm', 'In Progress'),
('done', 'Validated'),
]
_columns = {
'name': fields.char('Inventory Reference', required=True, readonly=True, states={'draft': [('readonly', False)]}, help="Inventory Name."),
'date': fields.datetime('Inventory Date', required=True, readonly=True, help="The date that will be used for the stock level check of the products and the validation of the stock move related to this inventory."),
'line_ids': fields.one2many('stock.inventory.line', 'inventory_id', 'Inventories', readonly=False, states={'done': [('readonly', True)]}, help="Inventory Lines.", copy=True),
'move_ids': fields.one2many('stock.move', 'inventory_id', 'Created Moves', help="Inventory Moves.", states={'done': [('readonly', True)]}),
'state': fields.selection(INVENTORY_STATE_SELECTION, 'Status', readonly=True, select=True, copy=False),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, readonly=True, states={'draft': [('readonly', False)]}),
'location_id': fields.many2one('stock.location', 'Inventoried Location', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_id': fields.many2one('product.product', 'Inventoried Product', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Product to focus your inventory on a particular Product."),
'package_id': fields.many2one('stock.quant.package', 'Inventoried Pack', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Pack to focus your inventory on a particular Pack."),
'partner_id': fields.many2one('res.partner', 'Inventoried Owner', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Owner to focus your inventory on a particular Owner."),
'lot_id': fields.many2one('stock.production.lot', 'Inventoried Lot/Serial Number', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Lot/Serial Number to focus your inventory on a particular Lot/Serial Number.", copy=False),
'move_ids_exist': fields.function(_get_move_ids_exist, type='boolean', string=' Stock Move Exists?', help='technical field for attrs in view'),
'filter': fields.selection(_get_available_filters, 'Selection Filter', required=True),
'total_qty': fields.function(_get_total_qty, type="float"),
}
def _default_stock_location(self, cr, uid, context=None):
try:
warehouse = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'warehouse0')
return warehouse.lot_stock_id.id
except:
return False
_defaults = {
'date': fields.datetime.now,
'state': 'draft',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
'location_id': _default_stock_location,
'filter': 'none',
}
def reset_real_qty(self, cr, uid, ids, context=None):
inventory = self.browse(cr, uid, ids[0], context=context)
line_ids = [line.id for line in inventory.line_ids]
self.pool.get('stock.inventory.line').write(cr, uid, line_ids, {'product_qty': 0})
return True
def _inventory_line_hook(self, cr, uid, inventory_line, move_vals):
""" Creates a stock move from an inventory line
@param inventory_line:
@param move_vals:
@return:
"""
return self.pool.get('stock.move').create(cr, uid, move_vals)
def action_done(self, cr, uid, ids, context=None):
""" Finish the inventory
@return: True
"""
for inv in self.browse(cr, uid, ids, context=context):
for inventory_line in inv.line_ids:
if inventory_line.product_qty < 0 and inventory_line.product_qty != inventory_line.theoretical_qty:
                    raise osv.except_osv(_('Warning'), _('You cannot set a negative product quantity in an inventory line:\n\t%s - qty: %s') % (inventory_line.product_id.name, inventory_line.product_qty))
self.action_check(cr, uid, [inv.id], context=context)
inv.refresh()
self.write(cr, uid, [inv.id], {'state': 'done'}, context=context)
self.post_inventory(cr, uid, inv, context=context)
return True
def post_inventory(self, cr, uid, inv, context=None):
#The inventory is posted as a single step which means quants cannot be moved from an internal location to another using an inventory
#as they will be moved to inventory loss, and other quants will be created to the encoded quant location. This is a normal behavior
        #as quants cannot be reused from the inventory location (users can still manually move the products before/after the inventory if they want).
move_obj = self.pool.get('stock.move')
move_obj.action_done(cr, uid, [x.id for x in inv.move_ids], context=context)
def _create_stock_move(self, cr, uid, inventory, todo_line, context=None):
stock_move_obj = self.pool.get('stock.move')
product_obj = self.pool.get('product.product')
inventory_location_id = product_obj.browse(cr, uid, todo_line['product_id'], context=context).property_stock_inventory.id
vals = {
'name': _('INV:') + (inventory.name or ''),
'product_id': todo_line['product_id'],
'product_uom': todo_line['product_uom_id'],
'date': inventory.date,
'company_id': inventory.company_id.id,
'inventory_id': inventory.id,
'state': 'assigned',
'restrict_lot_id': todo_line.get('prod_lot_id'),
'restrict_partner_id': todo_line.get('partner_id'),
}
if todo_line['product_qty'] < 0:
#found more than expected
vals['location_id'] = inventory_location_id
vals['location_dest_id'] = todo_line['location_id']
vals['product_uom_qty'] = -todo_line['product_qty']
else:
#found less than expected
vals['location_id'] = todo_line['location_id']
vals['location_dest_id'] = inventory_location_id
vals['product_uom_qty'] = todo_line['product_qty']
return stock_move_obj.create(cr, uid, vals, context=context)
def action_check(self, cr, uid, ids, context=None):
""" Checks the inventory and computes the stock move to do
@return: True
"""
inventory_line_obj = self.pool.get('stock.inventory.line')
stock_move_obj = self.pool.get('stock.move')
for inventory in self.browse(cr, uid, ids, context=context):
#first remove the existing stock moves linked to this inventory
move_ids = [move.id for move in inventory.move_ids]
stock_move_obj.unlink(cr, uid, move_ids, context=context)
for line in inventory.line_ids:
                #compare the checked quantities on inventory lines to the theoretical ones
inventory_line_obj._resolve_inventory_line(cr, uid, line, context=context)
def action_cancel_draft(self, cr, uid, ids, context=None):
""" Cancels the stock move and change inventory state to draft.
@return: True
"""
for inv in self.browse(cr, uid, ids, context=context):
self.pool.get('stock.move').action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
self.write(cr, uid, [inv.id], {'state': 'draft'}, context=context)
return True
def action_cancel_inventory(self, cr, uid, ids, context=None):
self.action_cancel_draft(cr, uid, ids, context=context)
def prepare_inventory(self, cr, uid, ids, context=None):
inventory_line_obj = self.pool.get('stock.inventory.line')
for inventory in self.browse(cr, uid, ids, context=context):
#clean the existing inventory lines before redoing an inventory proposal
line_ids = [line.id for line in inventory.line_ids]
inventory_line_obj.unlink(cr, uid, line_ids, context=context)
#compute the inventory lines and create them
vals = self._get_inventory_lines(cr, uid, inventory, context=context)
for product_line in vals:
inventory_line_obj.create(cr, uid, product_line, context=context)
return self.write(cr, uid, ids, {'state': 'confirm', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
def _get_inventory_lines(self, cr, uid, inventory, context=None):
location_obj = self.pool.get('stock.location')
product_obj = self.pool.get('product.product')
location_ids = location_obj.search(cr, uid, [('id', 'child_of', [inventory.location_id.id])], context=context)
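        #the WHERE clause below is built with %s placeholders only; the values are passed separately through
        #the args tuple so the query stays parameterized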
domain = ' location_id in %s'
args = (tuple(location_ids),)
if inventory.partner_id:
domain += ' and owner_id = %s'
args += (inventory.partner_id.id,)
if inventory.lot_id:
domain += ' and lot_id = %s'
args += (inventory.lot_id.id,)
if inventory.product_id:
domain += ' and product_id = %s'
args += (inventory.product_id.id,)
if inventory.package_id:
domain += ' and package_id = %s'
args += (inventory.package_id.id,)
cr.execute('''
SELECT product_id, sum(qty) as product_qty, location_id, lot_id as prod_lot_id, package_id, owner_id as partner_id
FROM stock_quant WHERE''' + domain + '''
GROUP BY product_id, location_id, lot_id, package_id, partner_id
''', args)
vals = []
for product_line in cr.dictfetchall():
            #replace the None values in the dictionary by False, because falsy values are tested later on
for key, value in product_line.items():
if not value:
product_line[key] = False
product_line['inventory_id'] = inventory.id
product_line['theoretical_qty'] = product_line['product_qty']
if product_line['product_id']:
product = product_obj.browse(cr, uid, product_line['product_id'], context=context)
product_line['product_uom_id'] = product.uom_id.id
vals.append(product_line)
return vals
class stock_inventory_line(osv.osv):
_name = "stock.inventory.line"
_description = "Inventory Line"
_order = "inventory_id, location_name, product_code, product_name, prodlot_name"
def _get_product_name_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('product_id', 'in', ids)], context=context)
def _get_location_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('location_id', 'in', ids)], context=context)
def _get_prodlot_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('prod_lot_id', 'in', ids)], context=context)
_columns = {
'inventory_id': fields.many2one('stock.inventory', 'Inventory', ondelete='cascade', select=True),
'location_id': fields.many2one('stock.location', 'Location', required=True, select=True),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
'package_id': fields.many2one('stock.quant.package', 'Pack', select=True),
'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_qty': fields.float('Checked Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
'company_id': fields.related('inventory_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, select=True, readonly=True),
'prod_lot_id': fields.many2one('stock.production.lot', 'Serial Number', domain="[('product_id','=',product_id)]"),
'state': fields.related('inventory_id', 'state', type='char', string='Status', readonly=True),
'theoretical_qty': fields.float('Theoretical Quantity', readonly=True),
'partner_id': fields.many2one('res.partner', 'Owner'),
'product_name': fields.related('product_id', 'name', type='char', string='Product Name', store={
'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
'product_code': fields.related('product_id', 'default_code', type='char', string='Product Code', store={
'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
'location_name': fields.related('location_id', 'complete_name', type='char', string='Location Name', store={
'stock.location': (_get_location_change, ['name', 'location_id', 'active'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['location_id'], 20),}),
'prodlot_name': fields.related('prod_lot_id', 'name', type='char', string='Serial Number Name', store={
'stock.production.lot': (_get_prodlot_change, ['name'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['prod_lot_id'], 20),}),
}
_defaults = {
'product_qty': 1,
}
def create(self, cr, uid, values, context=None):
if context is None:
context = {}
product_obj = self.pool.get('product.product')
if 'product_id' in values and not 'product_uom_id' in values:
values['product_uom_id'] = product_obj.browse(cr, uid, values.get('product_id'), context=context).uom_id.id
return super(stock_inventory_line, self).create(cr, uid, values, context=context)
def _resolve_inventory_line(self, cr, uid, inventory_line, context=None):
stock_move_obj = self.pool.get('stock.move')
diff = inventory_line.theoretical_qty - inventory_line.product_qty
if not diff:
return
        #each line where the difference between the theoretical and checked quantities is not 0 needs a corrective stock move
vals = {
'name': _('INV:') + (inventory_line.inventory_id.name or ''),
'product_id': inventory_line.product_id.id,
'product_uom': inventory_line.product_uom_id.id,
'date': inventory_line.inventory_id.date,
'company_id': inventory_line.inventory_id.company_id.id,
'inventory_id': inventory_line.inventory_id.id,
'state': 'confirmed',
'restrict_lot_id': inventory_line.prod_lot_id.id,
'restrict_partner_id': inventory_line.partner_id.id,
}
inventory_location_id = inventory_line.product_id.property_stock_inventory.id
if diff < 0:
#found more than expected
vals['location_id'] = inventory_location_id
vals['location_dest_id'] = inventory_line.location_id.id
vals['product_uom_qty'] = -diff
else:
#found less than expected
vals['location_id'] = inventory_line.location_id.id
vals['location_dest_id'] = inventory_location_id
vals['product_uom_qty'] = diff
return stock_move_obj.create(cr, uid, vals, context=context)
def restrict_change(self, cr, uid, ids, theoretical_qty, context=None):
if ids and theoretical_qty:
            #if the user tries to modify a line prepared by openerp, reject the change and display an error message explaining how to proceed
old_value = self.browse(cr, uid, ids[0], context=context)
return {
'value': {
'product_id': old_value.product_id.id,
'product_uom_id': old_value.product_uom_id.id,
'location_id': old_value.location_id.id,
'prod_lot_id': old_value.prod_lot_id.id,
'package_id': old_value.package_id.id,
'partner_id': old_value.partner_id.id,
},
'warning': {
'title': _('Error'),
                    'message': _('You can only change the checked quantity of an existing inventory line. If you want to modify other values, please set the checked quantity to 0 and create a new inventory line.')
}
}
return {}
def on_change_product_id(self, cr, uid, ids, product, uom, theoretical_qty, context=None):
""" Changes UoM
@param location_id: Location id
@param product: Changed product_id
@param uom: UoM product
@return: Dictionary of changed values
"""
if ids and theoretical_qty:
return self.restrict_change(cr, uid, ids, theoretical_qty, context=context)
if not product:
return {'value': {'product_uom_id': False}}
obj_product = self.pool.get('product.product').browse(cr, uid, product, context=context)
return {'value': {'product_uom_id': uom or obj_product.uom_id.id}}
#----------------------------------------------------------
# Stock Warehouse
#----------------------------------------------------------
class stock_warehouse(osv.osv):
_name = "stock.warehouse"
_description = "Warehouse"
_columns = {
'name': fields.char('Warehouse Name', required=True, select=True),
'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, select=True),
'partner_id': fields.many2one('res.partner', 'Address'),
'view_location_id': fields.many2one('stock.location', 'View Location', required=True, domain=[('usage', '=', 'view')]),
'lot_stock_id': fields.many2one('stock.location', 'Location Stock', domain=[('usage', '=', 'internal')], required=True),
'code': fields.char('Short Name', size=5, required=True, help="Short name used to identify your warehouse"),
        'route_ids': fields.many2many('stock.location.route', 'stock_route_warehouse', 'warehouse_id', 'route_id', 'Routes', domain="[('warehouse_selectable', '=', True)]", help='Default routes through the warehouse'),
'reception_steps': fields.selection([
('one_step', 'Receive goods directly in stock (1 step)'),
('two_steps', 'Unload in input location then go to stock (2 steps)'),
('three_steps', 'Unload in input location, go through a quality control before being admitted in stock (3 steps)')], 'Incoming Shipments',
help="Default incoming route to follow", required=True),
'delivery_steps': fields.selection([
('ship_only', 'Ship directly from stock (Ship only)'),
('pick_ship', 'Bring goods to output location before shipping (Pick + Ship)'),
            ('pick_pack_ship', 'Make packages into a dedicated location, then bring them to the output location for shipping (Pick + Pack + Ship)')], 'Outgoing Shipments',
help="Default outgoing route to follow", required=True),
'wh_input_stock_loc_id': fields.many2one('stock.location', 'Input Location'),
'wh_qc_stock_loc_id': fields.many2one('stock.location', 'Quality Control Location'),
'wh_output_stock_loc_id': fields.many2one('stock.location', 'Output Location'),
'wh_pack_stock_loc_id': fields.many2one('stock.location', 'Packing Location'),
'mto_pull_id': fields.many2one('procurement.rule', 'MTO rule'),
'pick_type_id': fields.many2one('stock.picking.type', 'Pick Type'),
'pack_type_id': fields.many2one('stock.picking.type', 'Pack Type'),
'out_type_id': fields.many2one('stock.picking.type', 'Out Type'),
'in_type_id': fields.many2one('stock.picking.type', 'In Type'),
'int_type_id': fields.many2one('stock.picking.type', 'Internal Type'),
'crossdock_route_id': fields.many2one('stock.location.route', 'Crossdock Route'),
'reception_route_id': fields.many2one('stock.location.route', 'Receipt Route'),
'delivery_route_id': fields.many2one('stock.location.route', 'Delivery Route'),
'resupply_from_wh': fields.boolean('Resupply From Other Warehouses'),
'resupply_wh_ids': fields.many2many('stock.warehouse', 'stock_wh_resupply_table', 'supplied_wh_id', 'supplier_wh_id', 'Resupply Warehouses'),
'resupply_route_ids': fields.one2many('stock.location.route', 'supplied_wh_id', 'Resupply Routes',
help="Routes will be created for these resupply warehouses and you can select them on products and product categories"),
'default_resupply_wh_id': fields.many2one('stock.warehouse', 'Default Resupply Warehouse', help="Goods will always be resupplied from this warehouse"),
}
def onchange_filter_default_resupply_wh_id(self, cr, uid, ids, default_resupply_wh_id, resupply_wh_ids, context=None):
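        #resolve_2many_commands converts the x2many command list received from the client into plain dicts,
        #from which the ids of the currently selected resupply warehouses can be extracted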
resupply_wh_ids = set([x['id'] for x in (self.resolve_2many_commands(cr, uid, 'resupply_wh_ids', resupply_wh_ids, ['id']))])
if default_resupply_wh_id: #If we are removing the default resupply, we don't have default_resupply_wh_id
resupply_wh_ids.add(default_resupply_wh_id)
resupply_wh_ids = list(resupply_wh_ids)
return {'value': {'resupply_wh_ids': resupply_wh_ids}}
def _get_external_transit_location(self, cr, uid, warehouse, context=None):
        ''' returns the browse record of the inter-company transit location, if found'''
data_obj = self.pool.get('ir.model.data')
location_obj = self.pool.get('stock.location')
try:
inter_wh_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_inter_wh')[1]
except:
return False
return location_obj.browse(cr, uid, inter_wh_loc, context=context)
def _get_inter_wh_route(self, cr, uid, warehouse, wh, context=None):
return {
'name': _('%s: Supply Product from %s') % (warehouse.name, wh.name),
'warehouse_selectable': False,
'product_selectable': True,
'product_categ_selectable': True,
'supplied_wh_id': warehouse.id,
'supplier_wh_id': wh.id,
}
def _create_resupply_routes(self, cr, uid, warehouse, supplier_warehouses, default_resupply_wh, context=None):
route_obj = self.pool.get('stock.location.route')
pull_obj = self.pool.get('procurement.rule')
#create route selectable on the product to resupply the warehouse from another one
external_transit_location = self._get_external_transit_location(cr, uid, warehouse, context=context)
internal_transit_location = warehouse.company_id.internal_transit_location_id
input_loc = warehouse.wh_input_stock_loc_id
if warehouse.reception_steps == 'one_step':
input_loc = warehouse.lot_stock_id
for wh in supplier_warehouses:
transit_location = wh.company_id.id == warehouse.company_id.id and internal_transit_location or external_transit_location
if transit_location:
output_loc = wh.wh_output_stock_loc_id
if wh.delivery_steps == 'ship_only':
output_loc = wh.lot_stock_id
                    # Create extra MTO rule (only for 'ship only' because in the other cases MTO rules already exist)
mto_pull_vals = self._get_mto_pull_rule(cr, uid, wh, [(output_loc, transit_location, wh.out_type_id.id)], context=context)[0]
pull_obj.create(cr, uid, mto_pull_vals, context=context)
inter_wh_route_vals = self._get_inter_wh_route(cr, uid, warehouse, wh, context=context)
inter_wh_route_id = route_obj.create(cr, uid, vals=inter_wh_route_vals, context=context)
values = [(output_loc, transit_location, wh.out_type_id.id, wh), (transit_location, input_loc, warehouse.in_type_id.id, warehouse)]
pull_rules_list = self._get_supply_pull_rules(cr, uid, warehouse, values, inter_wh_route_id, context=context)
for pull_rule in pull_rules_list:
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#if the warehouse is also set as default resupply method, assign this route automatically to the warehouse
if default_resupply_wh and default_resupply_wh.id == wh.id:
self.write(cr, uid, [warehouse.id], {'route_ids': [(4, inter_wh_route_id)]}, context=context)
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
'reception_steps': 'one_step',
'delivery_steps': 'ship_only',
}
_sql_constraints = [
('warehouse_name_uniq', 'unique(name, company_id)', 'The name of the warehouse must be unique per company!'),
('warehouse_code_uniq', 'unique(code, company_id)', 'The code of the warehouse must be unique per company!'),
]
def _get_partner_locations(self, cr, uid, ids, context=None):
''' returns a tuple made of the browse record of customer location and the browse record of supplier location'''
data_obj = self.pool.get('ir.model.data')
location_obj = self.pool.get('stock.location')
try:
customer_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_customers')[1]
supplier_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_suppliers')[1]
except:
customer_loc = location_obj.search(cr, uid, [('usage', '=', 'customer')], context=context)
customer_loc = customer_loc and customer_loc[0] or False
supplier_loc = location_obj.search(cr, uid, [('usage', '=', 'supplier')], context=context)
supplier_loc = supplier_loc and supplier_loc[0] or False
if not (customer_loc and supplier_loc):
raise osv.except_osv(_('Error!'), _('Can\'t find any customer or supplier location.'))
return location_obj.browse(cr, uid, [customer_loc, supplier_loc], context=context)
def switch_location(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
location_obj = self.pool.get('stock.location')
new_reception_step = new_reception_step or warehouse.reception_steps
new_delivery_step = new_delivery_step or warehouse.delivery_steps
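        #(de)activate the intermediate input/quality control and output/packing locations according to the new
        #number of reception and delivery steps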
if warehouse.reception_steps != new_reception_step:
location_obj.write(cr, uid, [warehouse.wh_input_stock_loc_id.id, warehouse.wh_qc_stock_loc_id.id], {'active': False}, context=context)
if new_reception_step != 'one_step':
location_obj.write(cr, uid, warehouse.wh_input_stock_loc_id.id, {'active': True}, context=context)
if new_reception_step == 'three_steps':
location_obj.write(cr, uid, warehouse.wh_qc_stock_loc_id.id, {'active': True}, context=context)
if warehouse.delivery_steps != new_delivery_step:
location_obj.write(cr, uid, [warehouse.wh_output_stock_loc_id.id, warehouse.wh_pack_stock_loc_id.id], {'active': False}, context=context)
if new_delivery_step != 'ship_only':
location_obj.write(cr, uid, warehouse.wh_output_stock_loc_id.id, {'active': True}, context=context)
if new_delivery_step == 'pick_pack_ship':
location_obj.write(cr, uid, warehouse.wh_pack_stock_loc_id.id, {'active': True}, context=context)
return True
def _get_reception_delivery_route(self, cr, uid, warehouse, route_name, context=None):
return {
'name': self._format_routename(cr, uid, warehouse, route_name, context=context),
'product_categ_selectable': True,
'product_selectable': False,
'sequence': 10,
}
def _get_supply_pull_rules(self, cr, uid, supplied_warehouse, values, new_route_id, context=None):
pull_rules_list = []
for from_loc, dest_loc, pick_type_id, warehouse in values:
pull_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': new_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
                'procure_method': warehouse.lot_stock_id.id != from_loc.id and 'make_to_order' or 'make_to_stock', # first part of the resupply route is MTS
'warehouse_id': supplied_warehouse.id,
'propagate_warehouse_id': warehouse.id,
})
return pull_rules_list
def _get_push_pull_rules(self, cr, uid, warehouse, active, values, new_route_id, context=None):
first_rule = True
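        #only the first pull rule of the chain takes the goods from stock (make_to_stock); the following steps
        #are chained to the previous one and therefore created as make_to_order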
push_rules_list = []
pull_rules_list = []
for from_loc, dest_loc, pick_type_id in values:
push_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_from_id': from_loc.id,
'location_dest_id': dest_loc.id,
'route_id': new_route_id,
'auto': 'manual',
'picking_type_id': pick_type_id,
'active': active,
'warehouse_id': warehouse.id,
})
pull_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': new_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
'procure_method': first_rule is True and 'make_to_stock' or 'make_to_order',
'active': active,
'warehouse_id': warehouse.id,
})
first_rule = False
return push_rules_list, pull_rules_list
def _get_mto_route(self, cr, uid, context=None):
route_obj = self.pool.get('stock.location.route')
data_obj = self.pool.get('ir.model.data')
try:
mto_route_id = data_obj.get_object_reference(cr, uid, 'stock', 'route_warehouse0_mto')[1]
except:
mto_route_id = route_obj.search(cr, uid, [('name', 'like', _('Make To Order'))], context=context)
mto_route_id = mto_route_id and mto_route_id[0] or False
if not mto_route_id:
raise osv.except_osv(_('Error!'), _('Can\'t find any generic Make To Order route.'))
return mto_route_id
def _check_remove_mto_resupply_rules(self, cr, uid, warehouse, context=None):
""" Checks that the moves from the different """
pull_obj = self.pool.get('procurement.rule')
mto_route_id = self._get_mto_route(cr, uid, context=context)
rules = pull_obj.search(cr, uid, ['&', ('location_src_id', '=', warehouse.lot_stock_id.id), ('location_id.usage', '=', 'transit')], context=context)
pull_obj.unlink(cr, uid, rules, context=context)
def _get_mto_pull_rule(self, cr, uid, warehouse, values, context=None):
mto_route_id = self._get_mto_route(cr, uid, context=context)
res = []
for value in values:
from_loc, dest_loc, pick_type_id = value
res += [{
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context) + _(' MTO'),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': mto_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
'procure_method': 'make_to_order',
'active': True,
'warehouse_id': warehouse.id,
}]
return res
def _get_crossdock_route(self, cr, uid, warehouse, route_name, context=None):
return {
'name': self._format_routename(cr, uid, warehouse, route_name, context=context),
'warehouse_selectable': False,
'product_selectable': True,
'product_categ_selectable': True,
'active': warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step',
'sequence': 20,
}
def create_routes(self, cr, uid, ids, warehouse, context=None):
wh_route_ids = []
route_obj = self.pool.get('stock.location.route')
pull_obj = self.pool.get('procurement.rule')
push_obj = self.pool.get('stock.location.path')
routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context)
#create reception route and rules
route_name, values = routes_dict[warehouse.reception_steps]
route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context)
reception_route_id = route_obj.create(cr, uid, route_vals, context=context)
wh_route_ids.append((4, reception_route_id))
push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, reception_route_id, context=context)
#create the push/pull rules
for push_rule in push_rules_list:
push_obj.create(cr, uid, vals=push_rule, context=context)
for pull_rule in pull_rules_list:
#all pull rules in reception route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location
pull_rule['procure_method'] = 'make_to_order'
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#create MTS route and pull rules for delivery and a specific route MTO to be set on the product
route_name, values = routes_dict[warehouse.delivery_steps]
route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context)
#create the route and its pull rules
delivery_route_id = route_obj.create(cr, uid, route_vals, context=context)
wh_route_ids.append((4, delivery_route_id))
dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, delivery_route_id, context=context)
for pull_rule in pull_rules_list:
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#create MTO pull rule and link it to the generic MTO route
mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0]
mto_pull_id = pull_obj.create(cr, uid, mto_pull_vals, context=context)
#create a route for cross dock operations, that can be set on products and product categories
route_name, values = routes_dict['crossdock']
crossdock_route_vals = self._get_crossdock_route(cr, uid, warehouse, route_name, context=context)
crossdock_route_id = route_obj.create(cr, uid, vals=crossdock_route_vals, context=context)
wh_route_ids.append((4, crossdock_route_id))
dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step', values, crossdock_route_id, context=context)
for pull_rule in pull_rules_list:
# Fixed cross-dock is logically mto
pull_rule['procure_method'] = 'make_to_order'
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#create route selectable on the product to resupply the warehouse from another one
self._create_resupply_routes(cr, uid, warehouse, warehouse.resupply_wh_ids, warehouse.default_resupply_wh_id, context=context)
#return routes and mto pull rule to store on the warehouse
return {
'route_ids': wh_route_ids,
'mto_pull_id': mto_pull_id,
'reception_route_id': reception_route_id,
'delivery_route_id': delivery_route_id,
'crossdock_route_id': crossdock_route_id,
}
def change_route(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
picking_type_obj = self.pool.get('stock.picking.type')
pull_obj = self.pool.get('procurement.rule')
push_obj = self.pool.get('stock.location.path')
route_obj = self.pool.get('stock.location.route')
new_reception_step = new_reception_step or warehouse.reception_steps
new_delivery_step = new_delivery_step or warehouse.delivery_steps
#change the default source and destination location and (de)activate picking types
input_loc = warehouse.wh_input_stock_loc_id
if new_reception_step == 'one_step':
input_loc = warehouse.lot_stock_id
output_loc = warehouse.wh_output_stock_loc_id
if new_delivery_step == 'ship_only':
output_loc = warehouse.lot_stock_id
picking_type_obj.write(cr, uid, warehouse.in_type_id.id, {'default_location_dest_id': input_loc.id}, context=context)
picking_type_obj.write(cr, uid, warehouse.out_type_id.id, {'default_location_src_id': output_loc.id}, context=context)
picking_type_obj.write(cr, uid, warehouse.pick_type_id.id, {'active': new_delivery_step != 'ship_only'}, context=context)
picking_type_obj.write(cr, uid, warehouse.pack_type_id.id, {'active': new_delivery_step == 'pick_pack_ship'}, context=context)
routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context)
#update delivery route and rules: unlink the existing rules of the warehouse delivery route and recreate it
pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.delivery_route_id.pull_ids], context=context)
route_name, values = routes_dict[new_delivery_step]
route_obj.write(cr, uid, warehouse.delivery_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context)
dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.delivery_route_id.id, context=context)
#create the pull rules
for pull_rule in pull_rules_list:
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#update receipt route and rules: unlink the existing rules of the warehouse receipt route and recreate it
pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.pull_ids], context=context)
push_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.push_ids], context=context)
route_name, values = routes_dict[new_reception_step]
route_obj.write(cr, uid, warehouse.reception_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context)
push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.reception_route_id.id, context=context)
#create the push/pull rules
for push_rule in push_rules_list:
push_obj.create(cr, uid, vals=push_rule, context=context)
for pull_rule in pull_rules_list:
#all pull rules in receipt route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location
pull_rule['procure_method'] = 'make_to_order'
pull_obj.create(cr, uid, vals=pull_rule, context=context)
route_obj.write(cr, uid, warehouse.crossdock_route_id.id, {'active': new_reception_step != 'one_step' and new_delivery_step != 'ship_only'}, context=context)
#change MTO rule
dummy, values = routes_dict[new_delivery_step]
mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0]
pull_obj.write(cr, uid, warehouse.mto_pull_id.id, mto_pull_vals, context=context)
return True
def create_sequences_and_picking_types(self, cr, uid, warehouse, context=None):
seq_obj = self.pool.get('ir.sequence')
picking_type_obj = self.pool.get('stock.picking.type')
#create new sequences
in_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence in'), 'prefix': warehouse.code + '/IN/', 'padding': 5}, context=context)
out_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence out'), 'prefix': warehouse.code + '/OUT/', 'padding': 5}, context=context)
pack_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence packing'), 'prefix': warehouse.code + '/PACK/', 'padding': 5}, context=context)
pick_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence picking'), 'prefix': warehouse.code + '/PICK/', 'padding': 5}, context=context)
int_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence internal'), 'prefix': warehouse.code + '/INT/', 'padding': 5}, context=context)
wh_stock_loc = warehouse.lot_stock_id
wh_input_stock_loc = warehouse.wh_input_stock_loc_id
wh_output_stock_loc = warehouse.wh_output_stock_loc_id
wh_pack_stock_loc = warehouse.wh_pack_stock_loc_id
#fetch customer and supplier locations, for references
customer_loc, supplier_loc = self._get_partner_locations(cr, uid, warehouse.id, context=context)
#create in, out, internal picking types for warehouse
input_loc = wh_input_stock_loc
if warehouse.reception_steps == 'one_step':
input_loc = wh_stock_loc
output_loc = wh_output_stock_loc
if warehouse.delivery_steps == 'ship_only':
output_loc = wh_stock_loc
#choose the next available color for the picking types of this warehouse
color = 0
available_colors = [c%9 for c in range(3, 12)] # put flashy colors first
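        #range(3, 12) modulo 9 yields the colors 3..8 followed by 0..2, so the flashy colors come first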
all_used_colors = self.pool.get('stock.picking.type').search_read(cr, uid, [('warehouse_id', '!=', False), ('color', '!=', False)], ['color'], order='color')
#don't use sets to preserve the list order
for x in all_used_colors:
if x['color'] in available_colors:
available_colors.remove(x['color'])
if available_colors:
color = available_colors[0]
        #order the picking types with a sequence allowing to have the following order for each warehouse: reception, internal, pick, pack, ship.
max_sequence = self.pool.get('stock.picking.type').search_read(cr, uid, [], ['sequence'], order='sequence desc')
max_sequence = max_sequence and max_sequence[0]['sequence'] or 0
in_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Receipts'),
'warehouse_id': warehouse.id,
'code': 'incoming',
'sequence_id': in_seq_id,
'default_location_src_id': supplier_loc.id,
'default_location_dest_id': input_loc.id,
'sequence': max_sequence + 1,
'color': color}, context=context)
out_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Delivery Orders'),
'warehouse_id': warehouse.id,
'code': 'outgoing',
'sequence_id': out_seq_id,
'return_picking_type_id': in_type_id,
'default_location_src_id': output_loc.id,
'default_location_dest_id': customer_loc.id,
'sequence': max_sequence + 4,
'color': color}, context=context)
picking_type_obj.write(cr, uid, [in_type_id], {'return_picking_type_id': out_type_id}, context=context)
int_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Internal Transfers'),
'warehouse_id': warehouse.id,
'code': 'internal',
'sequence_id': int_seq_id,
'default_location_src_id': wh_stock_loc.id,
'default_location_dest_id': wh_stock_loc.id,
'active': True,
'sequence': max_sequence + 2,
'color': color}, context=context)
pack_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Pack'),
'warehouse_id': warehouse.id,
'code': 'internal',
'sequence_id': pack_seq_id,
'default_location_src_id': wh_pack_stock_loc.id,
'default_location_dest_id': output_loc.id,
'active': warehouse.delivery_steps == 'pick_pack_ship',
'sequence': max_sequence + 3,
'color': color}, context=context)
pick_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Pick'),
'warehouse_id': warehouse.id,
'code': 'internal',
'sequence_id': pick_seq_id,
'default_location_src_id': wh_stock_loc.id,
'default_location_dest_id': wh_pack_stock_loc.id,
'active': warehouse.delivery_steps != 'ship_only',
'sequence': max_sequence + 2,
'color': color}, context=context)
#write picking types on WH
vals = {
'in_type_id': in_type_id,
'out_type_id': out_type_id,
'pack_type_id': pack_type_id,
'pick_type_id': pick_type_id,
'int_type_id': int_type_id,
}
super(stock_warehouse, self).write(cr, uid, warehouse.id, vals=vals, context=context)
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals is None:
vals = {}
data_obj = self.pool.get('ir.model.data')
seq_obj = self.pool.get('ir.sequence')
picking_type_obj = self.pool.get('stock.picking.type')
location_obj = self.pool.get('stock.location')
#create view location for warehouse
loc_vals = {
'name': _(vals.get('code')),
'usage': 'view',
'location_id': data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_locations')[1],
}
if vals.get('company_id'):
loc_vals['company_id'] = vals.get('company_id')
wh_loc_id = location_obj.create(cr, uid, loc_vals, context=context)
vals['view_location_id'] = wh_loc_id
#create all location
def_values = self.default_get(cr, uid, {'reception_steps', 'delivery_steps'})
reception_steps = vals.get('reception_steps', def_values['reception_steps'])
delivery_steps = vals.get('delivery_steps', def_values['delivery_steps'])
context_with_inactive = context.copy()
context_with_inactive['active_test'] = False
sub_locations = [
{'name': _('Stock'), 'active': True, 'field': 'lot_stock_id'},
{'name': _('Input'), 'active': reception_steps != 'one_step', 'field': 'wh_input_stock_loc_id'},
{'name': _('Quality Control'), 'active': reception_steps == 'three_steps', 'field': 'wh_qc_stock_loc_id'},
{'name': _('Output'), 'active': delivery_steps != 'ship_only', 'field': 'wh_output_stock_loc_id'},
{'name': _('Packing Zone'), 'active': delivery_steps == 'pick_pack_ship', 'field': 'wh_pack_stock_loc_id'},
]
for values in sub_locations:
loc_vals = {
'name': values['name'],
'usage': 'internal',
'location_id': wh_loc_id,
'active': values['active'],
}
if vals.get('company_id'):
loc_vals['company_id'] = vals.get('company_id')
location_id = location_obj.create(cr, uid, loc_vals, context=context_with_inactive)
vals[values['field']] = location_id
#create WH
new_id = super(stock_warehouse, self).create(cr, uid, vals=vals, context=context)
warehouse = self.browse(cr, uid, new_id, context=context)
self.create_sequences_and_picking_types(cr, uid, warehouse, context=context)
warehouse.refresh()
#create routes and push/pull rules
new_objects_dict = self.create_routes(cr, uid, new_id, warehouse, context=context)
self.write(cr, uid, warehouse.id, new_objects_dict, context=context)
return new_id
def _format_rulename(self, cr, uid, obj, from_loc, dest_loc, context=None):
return obj.code + ': ' + from_loc.name + ' -> ' + dest_loc.name
def _format_routename(self, cr, uid, obj, name, context=None):
return obj.name + ': ' + name
def get_routes_dict(self, cr, uid, ids, warehouse, context=None):
#fetch customer and supplier locations, for references
customer_loc, supplier_loc = self._get_partner_locations(cr, uid, ids, context=context)
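        # each key maps a reception/delivery configuration to a route name and the list of
        # (source location, destination location, picking type id) steps composing that route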
return {
'one_step': (_('Receipt in 1 step'), []),
'two_steps': (_('Receipt in 2 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]),
'three_steps': (_('Receipt in 3 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_qc_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_qc_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]),
'crossdock': (_('Cross-Dock'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
'ship_only': (_('Ship Only'), [(warehouse.lot_stock_id, customer_loc, warehouse.out_type_id.id)]),
'pick_ship': (_('Pick + Ship'), [(warehouse.lot_stock_id, warehouse.wh_output_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
'pick_pack_ship': (_('Pick + Pack + Ship'), [(warehouse.lot_stock_id, warehouse.wh_pack_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_pack_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.pack_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
}
def _handle_renaming(self, cr, uid, warehouse, name, code, context=None):
location_obj = self.pool.get('stock.location')
route_obj = self.pool.get('stock.location.route')
pull_obj = self.pool.get('procurement.rule')
push_obj = self.pool.get('stock.location.path')
#rename location
location_id = warehouse.lot_stock_id.location_id.id
location_obj.write(cr, uid, location_id, {'name': code}, context=context)
#rename route and push-pull rules
for route in warehouse.route_ids:
route_obj.write(cr, uid, route.id, {'name': route.name.replace(warehouse.name, name, 1)}, context=context)
for pull in route.pull_ids:
pull_obj.write(cr, uid, pull.id, {'name': pull.name.replace(warehouse.name, name, 1)}, context=context)
for push in route.push_ids:
                push_obj.write(cr, uid, push.id, {'name': push.name.replace(warehouse.name, name, 1)}, context=context)
#change the mto pull rule name
if warehouse.mto_pull_id.id:
pull_obj.write(cr, uid, warehouse.mto_pull_id.id, {'name': warehouse.mto_pull_id.name.replace(warehouse.name, name, 1)}, context=context)
def _check_delivery_resupply(self, cr, uid, warehouse, new_location, change_to_multiple, context=None):
""" Will check if the resupply routes from this warehouse follow the changes of number of delivery steps """
#Check routes that are being delivered by this warehouse and change the rule going to transit location
route_obj = self.pool.get("stock.location.route")
pull_obj = self.pool.get("procurement.rule")
routes = route_obj.search(cr, uid, [('supplier_wh_id','=', warehouse.id)], context=context)
pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_id.usage', '=', 'transit')], context=context)
if pulls:
pull_obj.write(cr, uid, pulls, {'location_src_id': new_location, 'procure_method': change_to_multiple and "make_to_order" or "make_to_stock"}, context=context)
# Create or clean MTO rules
mto_route_id = self._get_mto_route(cr, uid, context=context)
if not change_to_multiple:
# If single delivery we should create the necessary MTO rules for the resupply
# pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context)
pull_recs = pull_obj.browse(cr, uid, pulls, context=context)
transfer_locs = list(set([x.location_id for x in pull_recs]))
vals = [(warehouse.lot_stock_id , x, warehouse.out_type_id.id) for x in transfer_locs]
mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, vals, context=context)
for mto_pull_val in mto_pull_vals:
pull_obj.create(cr, uid, mto_pull_val, context=context)
else:
            # We need to delete all the MTO pull rules, otherwise they risk being used by the system
pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context)
if pulls:
pull_obj.unlink(cr, uid, pulls, context=context)
def _check_reception_resupply(self, cr, uid, warehouse, new_location, context=None):
"""
Will check if the resupply routes to this warehouse follow the changes of number of receipt steps
"""
#Check routes that are being delivered by this warehouse and change the rule coming from transit location
route_obj = self.pool.get("stock.location.route")
pull_obj = self.pool.get("procurement.rule")
routes = route_obj.search(cr, uid, [('supplied_wh_id','=', warehouse.id)], context=context)
        pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_src_id.usage', '=', 'transit')], context=context)
if pulls:
pull_obj.write(cr, uid, pulls, {'location_id': new_location}, context=context)
def _check_resupply(self, cr, uid, warehouse, reception_new, delivery_new, context=None):
if reception_new:
old_val = warehouse.reception_steps
new_val = reception_new
change_to_one = (old_val != 'one_step' and new_val == 'one_step')
change_to_multiple = (old_val == 'one_step' and new_val != 'one_step')
if change_to_one or change_to_multiple:
new_location = change_to_one and warehouse.lot_stock_id.id or warehouse.wh_input_stock_loc_id.id
self._check_reception_resupply(cr, uid, warehouse, new_location, context=context)
if delivery_new:
old_val = warehouse.delivery_steps
new_val = delivery_new
change_to_one = (old_val != 'ship_only' and new_val == 'ship_only')
change_to_multiple = (old_val == 'ship_only' and new_val != 'ship_only')
if change_to_one or change_to_multiple:
new_location = change_to_one and warehouse.lot_stock_id.id or warehouse.wh_output_stock_loc_id.id
self._check_delivery_resupply(cr, uid, warehouse, new_location, change_to_multiple, context=context)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
seq_obj = self.pool.get('ir.sequence')
route_obj = self.pool.get('stock.location.route')
context_with_inactive = context.copy()
context_with_inactive['active_test'] = False
for warehouse in self.browse(cr, uid, ids, context=context_with_inactive):
#first of all, check if we need to delete and recreate route
if vals.get('reception_steps') or vals.get('delivery_steps'):
#activate and deactivate location according to reception and delivery option
self.switch_location(cr, uid, warehouse.id, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context)
# switch between route
self.change_route(cr, uid, ids, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context_with_inactive)
# Check if we need to change something to resupply warehouses and associated MTO rules
self._check_resupply(cr, uid, warehouse, vals.get('reception_steps'), vals.get('delivery_steps'), context=context)
warehouse.refresh()
if vals.get('code') or vals.get('name'):
name = warehouse.name
#rename sequence
if vals.get('name'):
name = vals.get('name', warehouse.name)
self._handle_renaming(cr, uid, warehouse, name, vals.get('code', warehouse.code), context=context_with_inactive)
if warehouse.in_type_id:
                    seq_obj.write(cr, uid, warehouse.in_type_id.sequence_id.id, {'name': name + _(' Sequence in'), 'prefix': vals.get('code', warehouse.code) + '/IN/'}, context=context)
                    seq_obj.write(cr, uid, warehouse.out_type_id.sequence_id.id, {'name': name + _(' Sequence out'), 'prefix': vals.get('code', warehouse.code) + '/OUT/'}, context=context)
                    seq_obj.write(cr, uid, warehouse.pack_type_id.sequence_id.id, {'name': name + _(' Sequence packing'), 'prefix': vals.get('code', warehouse.code) + '/PACK/'}, context=context)
                    seq_obj.write(cr, uid, warehouse.pick_type_id.sequence_id.id, {'name': name + _(' Sequence picking'), 'prefix': vals.get('code', warehouse.code) + '/PICK/'}, context=context)
                    seq_obj.write(cr, uid, warehouse.int_type_id.sequence_id.id, {'name': name + _(' Sequence internal'), 'prefix': vals.get('code', warehouse.code) + '/INT/'}, context=context)
if vals.get('resupply_wh_ids') and not vals.get('resupply_route_ids'):
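                # resupply_wh_ids is a many2many field, so vals carries ORM command tuples;
                # only the (6, 0, ids) "replace the whole set" command is handled below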
for cmd in vals.get('resupply_wh_ids'):
if cmd[0] == 6:
new_ids = set(cmd[2])
old_ids = set([wh.id for wh in warehouse.resupply_wh_ids])
to_add_wh_ids = new_ids - old_ids
if to_add_wh_ids:
supplier_warehouses = self.browse(cr, uid, list(to_add_wh_ids), context=context)
self._create_resupply_routes(cr, uid, warehouse, supplier_warehouses, warehouse.default_resupply_wh_id, context=context)
to_remove_wh_ids = old_ids - new_ids
if to_remove_wh_ids:
to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', 'in', list(to_remove_wh_ids))], context=context)
if to_remove_route_ids:
route_obj.unlink(cr, uid, to_remove_route_ids, context=context)
else:
#not implemented
pass
if 'default_resupply_wh_id' in vals:
if vals.get('default_resupply_wh_id') == warehouse.id:
raise osv.except_osv(_('Warning'),_('The default resupply warehouse should be different than the warehouse itself!'))
if warehouse.default_resupply_wh_id:
#remove the existing resupplying route on the warehouse
to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', '=', warehouse.default_resupply_wh_id.id)], context=context)
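                        # (3, id) unlinks the route from route_ids without deleting the route itself;
                        # the (4, id) command used further down re-links an existing route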
for inter_wh_route_id in to_remove_route_ids:
self.write(cr, uid, [warehouse.id], {'route_ids': [(3, inter_wh_route_id)]})
if vals.get('default_resupply_wh_id'):
#assign the new resupplying route on all products
to_assign_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', '=', vals.get('default_resupply_wh_id'))], context=context)
for inter_wh_route_id in to_assign_route_ids:
self.write(cr, uid, [warehouse.id], {'route_ids': [(4, inter_wh_route_id)]})
return super(stock_warehouse, self).write(cr, uid, ids, vals=vals, context=context)
def get_all_routes_for_wh(self, cr, uid, warehouse, context=None):
route_obj = self.pool.get("stock.location.route")
all_routes = [route.id for route in warehouse.route_ids]
all_routes += route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id)], context=context)
all_routes += [warehouse.mto_pull_id.route_id.id]
return all_routes
def view_all_routes_for_wh(self, cr, uid, ids, context=None):
all_routes = []
for wh in self.browse(cr, uid, ids, context=context):
all_routes += self.get_all_routes_for_wh(cr, uid, wh, context=context)
domain = [('id', 'in', all_routes)]
return {
'name': _('Warehouse\'s Routes'),
'domain': domain,
'res_model': 'stock.location.route',
'type': 'ir.actions.act_window',
'view_id': False,
'view_mode': 'tree,form',
'view_type': 'form',
'limit': 20
}
class stock_location_path(osv.osv):
_name = "stock.location.path"
_description = "Pushed Flows"
_order = "name"
def _get_rules(self, cr, uid, ids, context=None):
res = []
for route in self.browse(cr, uid, ids, context=context):
res += [x.id for x in route.push_ids]
return res
_columns = {
'name': fields.char('Operation Name', required=True),
'company_id': fields.many2one('res.company', 'Company'),
'route_id': fields.many2one('stock.location.route', 'Route'),
'location_from_id': fields.many2one('stock.location', 'Source Location', ondelete='cascade', select=1, required=True),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', ondelete='cascade', select=1, required=True),
'delay': fields.integer('Delay (days)', help="Number of days to do this transition"),
'picking_type_id': fields.many2one('stock.picking.type', 'Type of the new Operation', required=True, help="This is the picking type associated with the different pickings"),
'auto': fields.selection(
[('auto','Automatic Move'), ('manual','Manual Operation'),('transparent','Automatic No Step Added')],
'Automatic Move',
required=True, select=1,
help="This is used to define paths the product has to follow within the location tree.\n" \
"The 'Automatic Move' value will create a stock move after the current one that will be "\
"validated automatically. With 'Manual Operation', the stock move has to be validated "\
"by a worker. With 'Automatic No Step Added', the location is replaced in the original move."
),
'propagate': fields.boolean('Propagate cancel and split', help='If checked, when the previous move is cancelled or split, the move generated by this move will too'),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the rule without removing it."),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse'),
'route_sequence': fields.related('route_id', 'sequence', string='Route Sequence',
store={
'stock.location.route': (_get_rules, ['sequence'], 10),
'stock.location.path': (lambda self, cr, uid, ids, c={}: ids, ['route_id'], 10),
}),
'sequence': fields.integer('Sequence'),
}
_defaults = {
'auto': 'auto',
'delay': 0,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'procurement.order', context=c),
'propagate': True,
'active': True,
}
def _prepare_push_apply(self, cr, uid, rule, move, context=None):
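        # build the values for the stock.move copy that will materialize the next push step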
newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return {
'location_id': move.location_dest_id.id,
'location_dest_id': rule.location_dest_id.id,
'date': newdate,
'company_id': rule.company_id and rule.company_id.id or False,
'date_expected': newdate,
'picking_id': False,
'picking_type_id': rule.picking_type_id and rule.picking_type_id.id or False,
'propagate': rule.propagate,
'push_rule_id': rule.id,
'warehouse_id': rule.warehouse_id and rule.warehouse_id.id or False,
}
def _apply(self, cr, uid, rule, move, context=None):
move_obj = self.pool.get('stock.move')
newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
if rule.auto == 'transparent':
old_dest_location = move.location_dest_id.id
move_obj.write(cr, uid, [move.id], {
'date': newdate,
'date_expected': newdate,
'location_dest_id': rule.location_dest_id.id
})
move.refresh()
#avoid looping if a push rule is not well configured
if rule.location_dest_id.id != old_dest_location:
#call again push_apply to see if a next step is defined
move_obj._push_apply(cr, uid, [move], context=context)
else:
vals = self._prepare_push_apply(cr, uid, rule, move, context=context)
move_id = move_obj.copy(cr, uid, move.id, vals, context=context)
move_obj.write(cr, uid, [move.id], {
'move_dest_id': move_id,
})
move_obj.action_confirm(cr, uid, [move_id], context=None)
# -------------------------
# Packaging related stuff
# -------------------------
from openerp.report import report_sxw
class stock_package(osv.osv):
"""
These are the packages, containing quants and/or other packages
"""
_name = "stock.quant.package"
_description = "Physical Packages"
_parent_name = "parent_id"
_parent_store = True
_parent_order = 'name'
_order = 'parent_left'
def name_get(self, cr, uid, ids, context=None):
res = self._complete_name(cr, uid, ids, 'complete_name', None, context=context)
return res.items()
def _complete_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = m.name
parent = m.parent_id
while parent:
res[m.id] = parent.name + ' / ' + res[m.id]
parent = parent.parent_id
return res
def _get_packages(self, cr, uid, ids, context=None):
"""Returns packages from quants for store"""
res = set()
for quant in self.browse(cr, uid, ids, context=context):
if quant.package_id:
res.add(quant.package_id.id)
return list(res)
def _get_packages_to_relocate(self, cr, uid, ids, context=None):
res = set()
for pack in self.browse(cr, uid, ids, context=context):
res.add(pack.id)
if pack.parent_id:
res.add(pack.parent_id.id)
return list(res)
def _get_package_info(self, cr, uid, ids, name, args, context=None):
default_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
res = dict((res_id, {'location_id': False, 'company_id': default_company_id, 'owner_id': False}) for res_id in ids)
for pack in self.browse(cr, uid, ids, context=context):
if pack.quant_ids:
res[pack.id]['location_id'] = pack.quant_ids[0].location_id.id
res[pack.id]['owner_id'] = pack.quant_ids[0].owner_id and pack.quant_ids[0].owner_id.id or False
res[pack.id]['company_id'] = pack.quant_ids[0].company_id.id
elif pack.children_ids:
res[pack.id]['location_id'] = pack.children_ids[0].location_id and pack.children_ids[0].location_id.id or False
res[pack.id]['owner_id'] = pack.children_ids[0].owner_id and pack.children_ids[0].owner_id.id or False
res[pack.id]['company_id'] = pack.children_ids[0].company_id and pack.children_ids[0].company_id.id or False
return res
_columns = {
'name': fields.char('Package Reference', select=True, copy=False),
'complete_name': fields.function(_complete_name, type='char', string="Package Name",),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
        'packaging_id': fields.many2one('product.packaging', 'Packaging', help="This field should be completed only if everything inside the package shares the same product, otherwise it doesn't really make sense.", select=True),
'ul_id': fields.many2one('product.ul', 'Logistic Unit'),
'location_id': fields.function(_get_package_info, type='many2one', relation='stock.location', string='Location', multi="package",
store={
'stock.quant': (_get_packages, ['location_id'], 10),
'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
}, readonly=True, select=True),
'quant_ids': fields.one2many('stock.quant', 'package_id', 'Bulk Content', readonly=True),
'parent_id': fields.many2one('stock.quant.package', 'Parent Package', help="The package containing this item", ondelete='restrict', readonly=True),
'children_ids': fields.one2many('stock.quant.package', 'parent_id', 'Contained Packages', readonly=True),
'company_id': fields.function(_get_package_info, type="many2one", relation='res.company', string='Company', multi="package",
store={
'stock.quant': (_get_packages, ['company_id'], 10),
'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
}, readonly=True, select=True),
'owner_id': fields.function(_get_package_info, type='many2one', relation='res.partner', string='Owner', multi="package",
store={
'stock.quant': (_get_packages, ['owner_id'], 10),
'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
}, readonly=True, select=True),
}
_defaults = {
'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').get(cr, uid, 'stock.quant.package') or _('Unknown Pack')
}
def _check_location_constraint(self, cr, uid, packs, context=None):
'''checks that all quants in a package are stored in the same location. This function cannot be used
as a constraint because it needs to be checked on pack operations (they may not call write on the
package)
'''
quant_obj = self.pool.get('stock.quant')
for pack in packs:
parent = pack
while parent.parent_id:
parent = parent.parent_id
quant_ids = self.get_content(cr, uid, [parent.id], context=context)
quants = [x for x in quant_obj.browse(cr, uid, quant_ids, context=context) if x.qty > 0]
location_id = quants and quants[0].location_id.id or False
            if not all(quant.location_id.id == location_id for quant in quants):
raise osv.except_osv(_('Error'), _('Everything inside a package should be in the same location'))
return True
def action_print(self, cr, uid, ids, context=None):
context = dict(context or {}, active_ids=ids)
return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_package_barcode_small', context=context)
def unpack(self, cr, uid, ids, context=None):
quant_obj = self.pool.get('stock.quant')
for package in self.browse(cr, uid, ids, context=context):
quant_ids = [quant.id for quant in package.quant_ids]
quant_obj.write(cr, uid, quant_ids, {'package_id': package.parent_id.id or False}, context=context)
children_package_ids = [child_package.id for child_package in package.children_ids]
self.write(cr, uid, children_package_ids, {'parent_id': package.parent_id.id or False}, context=context)
#delete current package since it contains nothing anymore
self.unlink(cr, uid, ids, context=context)
return self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'stock', 'action_package_view', context=context)
def get_content(self, cr, uid, ids, context=None):
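        # gather the quants of the given packages and of all their child packages
        # ('child_of' also matches the packages themselves)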
child_package_ids = self.search(cr, uid, [('id', 'child_of', ids)], context=context)
return self.pool.get('stock.quant').search(cr, uid, [('package_id', 'in', child_package_ids)], context=context)
def get_content_package(self, cr, uid, ids, context=None):
quants_ids = self.get_content(cr, uid, ids, context=context)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'stock', 'quantsact', context=context)
res['domain'] = [('id', 'in', quants_ids)]
return res
def _get_product_total_qty(self, cr, uid, package_record, product_id, context=None):
        ''' find the total of the given product 'product_id' inside the given package 'package_id' '''
quant_obj = self.pool.get('stock.quant')
all_quant_ids = self.get_content(cr, uid, [package_record.id], context=context)
total = 0
for quant in quant_obj.browse(cr, uid, all_quant_ids, context=context):
if quant.product_id.id == product_id:
total += quant.qty
return total
def _get_all_products_quantities(self, cr, uid, package_id, context=None):
'''This function computes the different product quantities for the given package
'''
quant_obj = self.pool.get('stock.quant')
res = {}
for quant in quant_obj.browse(cr, uid, self.get_content(cr, uid, package_id, context=context)):
if quant.product_id.id not in res:
res[quant.product_id.id] = 0
res[quant.product_id.id] += quant.qty
return res
def copy_pack(self, cr, uid, id, default_pack_values=None, default=None, context=None):
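        # duplicate the package itself, then duplicate the pack operations that produced it
        # so the copies point to the newly created package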
stock_pack_operation_obj = self.pool.get('stock.pack.operation')
if default is None:
default = {}
new_package_id = self.copy(cr, uid, id, default_pack_values, context=context)
default['result_package_id'] = new_package_id
op_ids = stock_pack_operation_obj.search(cr, uid, [('result_package_id', '=', id)], context=context)
for op_id in op_ids:
stock_pack_operation_obj.copy(cr, uid, op_id, default, context=context)
class stock_pack_operation(osv.osv):
_name = "stock.pack.operation"
_description = "Packing Operation"
def _get_remaining_prod_quantities(self, cr, uid, operation, context=None):
'''Get the remaining quantities per product on an operation with a package. This function returns a dictionary'''
#if the operation doesn't concern a package, it's not relevant to call this function
if not operation.package_id or operation.product_id:
return {operation.product_id.id: operation.remaining_qty}
#get the total of products the package contains
res = self.pool.get('stock.quant.package')._get_all_products_quantities(cr, uid, operation.package_id.id, context=context)
#reduce by the quantities linked to a move
for record in operation.linked_move_operation_ids:
if record.move_id.product_id.id not in res:
res[record.move_id.product_id.id] = 0
res[record.move_id.product_id.id] -= record.qty
return res
def _get_remaining_qty(self, cr, uid, ids, name, args, context=None):
uom_obj = self.pool.get('product.uom')
res = {}
for ops in self.browse(cr, uid, ids, context=context):
res[ops.id] = 0
if ops.package_id and not ops.product_id:
                #don't try to compute the remaining quantity for packages because it's not relevant (a package could include different products).
#should use _get_remaining_prod_quantities instead
continue
else:
qty = ops.product_qty
if ops.product_uom_id:
qty = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, ops.product_qty, ops.product_id.uom_id, context=context)
for record in ops.linked_move_operation_ids:
qty -= record.qty
#converting the remaining quantity in the pack operation UoM
if ops.product_uom_id:
qty = uom_obj._compute_qty_obj(cr, uid, ops.product_id.uom_id, qty, ops.product_uom_id, context=context)
res[ops.id] = qty
return res
def product_id_change(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None):
res = self.on_change_tests(cr, uid, ids, product_id, product_uom_id, product_qty, context=context)
if product_id and not product_uom_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
res['value']['product_uom_id'] = product.uom_id.id
return res
def on_change_tests(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None):
res = {'value': {}}
uom_obj = self.pool.get('product.uom')
if product_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
product_uom_id = product_uom_id or product.uom_id.id
selected_uom = uom_obj.browse(cr, uid, product_uom_id, context=context)
if selected_uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {
'title': _('Warning: wrong UoM!'),
                    'message': _('The selected UoM for product %s is not compatible with the UoM set on the product form. \nPlease choose a UoM within the same UoM category.') % (product.name)
}
if product_qty and 'warning' not in res:
rounded_qty = uom_obj._compute_qty(cr, uid, product_uom_id, product_qty, product_uom_id, round=True)
if rounded_qty != product_qty:
res['warning'] = {
'title': _('Warning: wrong quantity!'),
'message': _('The chosen quantity for product %s is not compatible with the UoM rounding. It will be automatically converted at confirmation') % (product.name)
}
return res
_columns = {
'picking_id': fields.many2one('stock.picking', 'Stock Picking', help='The stock operation where the packing has been made', required=True),
'product_id': fields.many2one('product.product', 'Product', ondelete="CASCADE"), # 1
'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure'),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'qty_done': fields.float('Quantity Processed', digits_compute=dp.get_precision('Product Unit of Measure')),
'package_id': fields.many2one('stock.quant.package', 'Source Package'), # 2
'lot_id': fields.many2one('stock.production.lot', 'Lot/Serial Number'),
'result_package_id': fields.many2one('stock.quant.package', 'Destination Package', help="If set, the operations are packed into this package", required=False, ondelete='cascade'),
'date': fields.datetime('Date', required=True),
'owner_id': fields.many2one('res.partner', 'Owner', help="Owner of the quants"),
#'update_cost': fields.boolean('Need cost update'),
'cost': fields.float("Cost", help="Unit Cost for this product line"),
'currency': fields.many2one('res.currency', string="Currency", help="Currency in which Unit cost is expressed", ondelete='CASCADE'),
'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'operation_id', string='Linked Moves', readonly=True, help='Moves impacted by this operation for the computation of the remaining quantities'),
'remaining_qty': fields.function(_get_remaining_qty, type='float', string='Remaining Qty'),
'location_id': fields.many2one('stock.location', 'Source Location', required=True),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True),
'processed': fields.selection([('true','Yes'), ('false','No')],'Has been processed?', required=True),
}
_defaults = {
'date': fields.date.context_today,
'qty_done': 0,
'processed': lambda *a: 'false',
}
def write(self, cr, uid, ids, vals, context=None):
context = context or {}
res = super(stock_pack_operation, self).write(cr, uid, ids, vals, context=context)
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get("no_recompute"):
pickings = vals.get('picking_id') and [vals['picking_id']] or list(set([x.picking_id.id for x in self.browse(cr, uid, ids, context=context)]))
self.pool.get("stock.picking").do_recompute_remaining_quantities(cr, uid, pickings, context=context)
return res
def create(self, cr, uid, vals, context=None):
context = context or {}
res_id = super(stock_pack_operation, self).create(cr, uid, vals, context=context)
if vals.get("picking_id") and not context.get("no_recompute"):
self.pool.get("stock.picking").do_recompute_remaining_quantities(cr, uid, [vals['picking_id']], context=context)
return res_id
def action_drop_down(self, cr, uid, ids, context=None):
''' Used by barcode interface to say that pack_operation has been moved from src location
to destination location, if qty_done is less than product_qty than we have to split the
operation in two to process the one with the qty moved
'''
processed_ids = []
move_obj = self.pool.get("stock.move")
        for pack_op in self.browse(cr, uid, ids, context=context):
if pack_op.product_id and pack_op.location_id and pack_op.location_dest_id:
move_obj.check_tracking_product(cr, uid, pack_op.product_id, pack_op.lot_id.id, pack_op.location_id, pack_op.location_dest_id, context=context)
op = pack_op.id
if pack_op.qty_done < pack_op.product_qty:
# we split the operation in two
op = self.copy(cr, uid, pack_op.id, {'product_qty': pack_op.qty_done, 'qty_done': pack_op.qty_done}, context=context)
self.write(cr, uid, [pack_op.id], {'product_qty': pack_op.product_qty - pack_op.qty_done, 'qty_done': 0, 'lot_id': False}, context=context)
processed_ids.append(op)
self.write(cr, uid, processed_ids, {'processed': 'true'}, context=context)
def create_and_assign_lot(self, cr, uid, id, name, context=None):
''' Used by barcode interface to create a new lot and assign it to the operation
'''
        obj = self.browse(cr, uid, id, context=context)
product_id = obj.product_id.id
val = {'product_id': product_id}
new_lot_id = False
if name:
lots = self.pool.get('stock.production.lot').search(cr, uid, ['&', ('name', '=', name), ('product_id', '=', product_id)], context=context)
if lots:
new_lot_id = lots[0]
val.update({'name': name})
if not new_lot_id:
new_lot_id = self.pool.get('stock.production.lot').create(cr, uid, val, context=context)
self.write(cr, uid, id, {'lot_id': new_lot_id}, context=context)
def _search_and_increment(self, cr, uid, picking_id, domain, filter_visible=False, visible_op_ids=False, increment=True, context=None):
'''Search for an operation with given 'domain' in a picking, if it exists increment the qty (+1) otherwise create it
:param domain: list of tuple directly reusable as a domain
context can receive a key 'current_package_id' with the package to consider for this operation
returns True
'''
if context is None:
context = {}
#if current_package_id is given in the context, we increase the number of items in this package
package_clause = [('result_package_id', '=', context.get('current_package_id', False))]
existing_operation_ids = self.search(cr, uid, [('picking_id', '=', picking_id)] + domain + package_clause, context=context)
todo_operation_ids = []
if existing_operation_ids:
if filter_visible:
todo_operation_ids = [val for val in existing_operation_ids if val in visible_op_ids]
else:
todo_operation_ids = existing_operation_ids
if todo_operation_ids:
#existing operation found for the given domain and picking => increment its quantity
operation_id = todo_operation_ids[0]
op_obj = self.browse(cr, uid, operation_id, context=context)
qty = op_obj.qty_done
if increment:
qty += 1
else:
qty -= 1 if qty >= 1 else 0
if qty == 0 and op_obj.product_qty == 0:
#we have a line with 0 qty set, so delete it
self.unlink(cr, uid, [operation_id], context=context)
return False
self.write(cr, uid, [operation_id], {'qty_done': qty}, context=context)
else:
#no existing operation found for the given domain and picking => create a new one
picking_obj = self.pool.get("stock.picking")
picking = picking_obj.browse(cr, uid, picking_id, context=context)
values = {
'picking_id': picking_id,
'product_qty': 0,
'location_id': picking.location_id.id,
'location_dest_id': picking.location_dest_id.id,
'qty_done': 1,
}
for key in domain:
var_name, dummy, value = key
uom_id = False
if var_name == 'product_id':
uom_id = self.pool.get('product.product').browse(cr, uid, value, context=context).uom_id.id
update_dict = {var_name: value}
if uom_id:
update_dict['product_uom_id'] = uom_id
values.update(update_dict)
operation_id = self.create(cr, uid, values, context=context)
return operation_id
class stock_move_operation_link(osv.osv):
"""
Table making the link between stock.moves and stock.pack.operations to compute the remaining quantities on each of these objects
"""
_name = "stock.move.operation.link"
_description = "Link between stock moves and pack operations"
_columns = {
'qty': fields.float('Quantity', help="Quantity of products to consider when talking about the contribution of this pack operation towards the remaining quantity of the move (and inverse). Given in the product main uom."),
'operation_id': fields.many2one('stock.pack.operation', 'Operation', required=True, ondelete="cascade"),
'move_id': fields.many2one('stock.move', 'Move', required=True, ondelete="cascade"),
'reserved_quant_id': fields.many2one('stock.quant', 'Reserved Quant', help="Technical field containing the quant that created this link between an operation and a stock move. Used at the stock_move_obj.action_done() time to avoid seeking a matching quant again"),
}
def get_specific_domain(self, cr, uid, record, context=None):
'''Returns the specific domain to consider for quant selection in action_assign() or action_done() of stock.move,
having the record given as parameter making the link between the stock move and a pack operation'''
op = record.operation_id
domain = []
if op.package_id and op.product_id:
#if removing a product from a box, we restrict the choice of quants to this box
domain.append(('package_id', '=', op.package_id.id))
elif op.package_id:
#if moving a box, we allow to take everything from inside boxes as well
domain.append(('package_id', 'child_of', [op.package_id.id]))
else:
#if not given any information about package, we don't open boxes
domain.append(('package_id', '=', False))
#if lot info is given, we restrict choice to this lot otherwise we can take any
if op.lot_id:
domain.append(('lot_id', '=', op.lot_id.id))
#if owner info is given, we restrict to this owner otherwise we restrict to no owner
if op.owner_id:
domain.append(('owner_id', '=', op.owner_id.id))
else:
domain.append(('owner_id', '=', False))
return domain
class stock_warehouse_orderpoint(osv.osv):
"""
Defines Minimum stock rules.
"""
_name = "stock.warehouse.orderpoint"
_description = "Minimum Inventory Rule"
def subtract_procurements(self, cr, uid, orderpoint, context=None):
        '''This function returns the quantity of product that needs to be deducted from the orderpoint computed quantity because there's already a procurement created with the aim to fulfill it.
'''
qty = 0
uom_obj = self.pool.get("product.uom")
for procurement in orderpoint.procurement_ids:
if procurement.state in ('cancel', 'done'):
continue
procurement_qty = uom_obj._compute_qty_obj(cr, uid, procurement.product_uom, procurement.product_qty, procurement.product_id.uom_id, context=context)
for move in procurement.move_ids:
#need to add the moves in draft as they aren't in the virtual quantity + moves that have not been created yet
                if move.state not in ('draft',):
#if move is already confirmed, assigned or done, the virtual stock is already taking this into account so it shouldn't be deducted
procurement_qty -= move.product_qty
qty += procurement_qty
return qty
def _check_product_uom(self, cr, uid, ids, context=None):
'''
Check if the UoM has the same category as the product standard UoM
'''
if not context:
context = {}
for rule in self.browse(cr, uid, ids, context=context):
if rule.product_id.uom_id.category_id.id != rule.product_uom.category_id.id:
return False
return True
def action_view_proc_to_process(self, cr, uid, ids, context=None):
act_obj = self.pool.get('ir.actions.act_window')
mod_obj = self.pool.get('ir.model.data')
proc_ids = self.pool.get('procurement.order').search(cr, uid, [('orderpoint_id', 'in', ids), ('state', 'not in', ('done', 'cancel'))], context=context)
result = mod_obj.get_object_reference(cr, uid, 'procurement', 'do_view_procurements')
if not result:
return False
result = act_obj.read(cr, uid, [result[1]], context=context)[0]
result['domain'] = "[('id', 'in', [" + ','.join(map(str, proc_ids)) + "])]"
return result
_columns = {
'name': fields.char('Name', required=True, copy=False),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the orderpoint without removing it."),
'logic': fields.selection([('max', 'Order to Max'), ('price', 'Best price (not yet active!)')], 'Reordering Mode', required=True),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True, ondelete="cascade"),
'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="cascade"),
'product_id': fields.many2one('product.product', 'Product', required=True, ondelete='cascade', domain=[('type', '=', 'product')]),
'product_uom': fields.related('product_id', 'uom_id', type='many2one', relation='product.uom', string='Product Unit of Measure', readonly=True, required=True),
'product_min_qty': fields.float('Minimum Quantity', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="When the virtual stock goes below the Min Quantity specified for this field, Odoo generates "\
"a procurement to bring the forecasted quantity to the Max Quantity."),
'product_max_qty': fields.float('Maximum Quantity', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="When the virtual stock goes below the Min Quantity, Odoo generates "\
"a procurement to bring the forecasted quantity to the Quantity specified as Max Quantity."),
'qty_multiple': fields.float('Qty Multiple', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="The procurement quantity will be rounded up to this multiple. If it is 0, the exact quantity will be used. "),
'procurement_ids': fields.one2many('procurement.order', 'orderpoint_id', 'Created Procurements'),
'group_id': fields.many2one('procurement.group', 'Procurement Group', help="Moves created through this orderpoint will be put in this procurement group. If none is given, the moves generated by procurement rules will be grouped into one big picking.", copy=False),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'active': lambda *a: 1,
'logic': lambda *a: 'max',
'qty_multiple': lambda *a: 1,
'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').get(cr, uid, 'stock.orderpoint') or '',
'product_uom': lambda self, cr, uid, context: context.get('product_uom', False),
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.warehouse.orderpoint', context=context)
}
_sql_constraints = [
('qty_multiple_check', 'CHECK( qty_multiple >= 0 )', 'Qty Multiple must be greater than or equal to zero.'),
]
_constraints = [
        (_check_product_uom, 'You have to select a product unit of measure in the same category as the default unit of measure of the product', ['product_id', 'product_uom']),
]
def default_get(self, cr, uid, fields, context=None):
warehouse_obj = self.pool.get('stock.warehouse')
res = super(stock_warehouse_orderpoint, self).default_get(cr, uid, fields, context)
# default 'warehouse_id' and 'location_id'
if 'warehouse_id' not in res:
warehouse_ids = res.get('company_id') and warehouse_obj.search(cr, uid, [('company_id', '=', res['company_id'])], limit=1, context=context) or []
res['warehouse_id'] = warehouse_ids and warehouse_ids[0] or False
if 'location_id' not in res:
res['location_id'] = res.get('warehouse_id') and warehouse_obj.browse(cr, uid, res['warehouse_id'], context).lot_stock_id.id or False
return res
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
""" Finds location id for changed warehouse.
@param warehouse_id: Changed id of warehouse.
@return: Dictionary of values.
"""
if warehouse_id:
w = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
v = {'location_id': w.lot_stock_id.id}
return {'value': v}
return {}
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
""" Finds UoM for changed product.
@param product_id: Changed id of product.
@return: Dictionary of values.
"""
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
d = {'product_uom': [('category_id', '=', prod.uom_id.category_id.id)]}
v = {'product_uom': prod.uom_id.id}
return {'value': v, 'domain': d}
return {'domain': {'product_uom': []}}
class stock_picking_type(osv.osv):
_name = "stock.picking.type"
_description = "The picking type determines the picking view"
_order = 'sequence'
def open_barcode_interface(self, cr, uid, ids, context=None):
        final_url = "/stock/barcode/#action=stock.ui&picking_type_id=" + (str(ids[0]) if len(ids) else '0')
return {'type': 'ir.actions.act_url', 'url': final_url, 'target': 'self'}
def _get_tristate_values(self, cr, uid, ids, field_name, arg, context=None):
picking_obj = self.pool.get('stock.picking')
res = {}
for picking_type_id in ids:
#get last 10 pickings of this type
picking_ids = picking_obj.search(cr, uid, [('picking_type_id', '=', picking_type_id), ('state', '=', 'done')], order='date_done desc', limit=10, context=context)
tristates = []
for picking in picking_obj.browse(cr, uid, picking_ids, context=context):
if picking.date_done > picking.date:
                    tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('Late'), 'value': -1})
                elif picking.backorder_id:
                    tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('Backorder exists'), 'value': 0})
                else:
                    tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('OK'), 'value': 1})
res[picking_type_id] = json.dumps(tristates)
return res
def _get_picking_count(self, cr, uid, ids, field_names, arg, context=None):
obj = self.pool.get('stock.picking')
domains = {
'count_picking_draft': [('state', '=', 'draft')],
'count_picking_waiting': [('state', '=', 'confirmed')],
'count_picking_ready': [('state', 'in', ('assigned', 'partially_available'))],
'count_picking': [('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))],
'count_picking_late': [('min_date', '<', time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)), ('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))],
'count_picking_backorders': [('backorder_id', '!=', False), ('state', 'in', ('confirmed', 'assigned', 'waiting', 'partially_available'))],
}
result = {}
for field in domains:
data = obj.read_group(cr, uid, domains[field] +
[('state', 'not in', ('done', 'cancel')), ('picking_type_id', 'in', ids)],
['picking_type_id'], ['picking_type_id'], context=context)
count = dict(map(lambda x: (x['picking_type_id'] and x['picking_type_id'][0], x['picking_type_id_count']), data))
for tid in ids:
result.setdefault(tid, {})[field] = count.get(tid, 0)
for tid in ids:
if result[tid]['count_picking']:
result[tid]['rate_picking_late'] = result[tid]['count_picking_late'] * 100 / result[tid]['count_picking']
result[tid]['rate_picking_backorders'] = result[tid]['count_picking_backorders'] * 100 / result[tid]['count_picking']
else:
result[tid]['rate_picking_late'] = 0
result[tid]['rate_picking_backorders'] = 0
return result
def onchange_picking_code(self, cr, uid, ids, picking_code=False):
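        # pre-fill sensible default source/destination locations for the chosen operation type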
if not picking_code:
return False
obj_data = self.pool.get('ir.model.data')
stock_loc = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_stock')
result = {
'default_location_src_id': stock_loc,
'default_location_dest_id': stock_loc,
}
if picking_code == 'incoming':
result['default_location_src_id'] = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_suppliers')
elif picking_code == 'outgoing':
result['default_location_dest_id'] = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_customers')
return {'value': result}
def _get_name(self, cr, uid, ids, field_names, arg, context=None):
return dict(self.name_get(cr, uid, ids, context=context))
def name_get(self, cr, uid, ids, context=None):
"""Overides orm name_get method to display 'Warehouse_name: PickingType_name' """
if context is None:
context = {}
if not isinstance(ids, list):
ids = [ids]
res = []
if not ids:
return res
for record in self.browse(cr, uid, ids, context=context):
name = record.name
if record.warehouse_id:
name = record.warehouse_id.name + ': ' +name
if context.get('special_shortened_wh_name'):
if record.warehouse_id:
name = record.warehouse_id.name
else:
name = _('Customer') + ' (' + record.name + ')'
res.append((record.id, name))
return res
def _default_warehouse(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context)
res = self.pool.get('stock.warehouse').search(cr, uid, [('company_id', '=', user.company_id.id)], limit=1, context=context)
return res and res[0] or False
_columns = {
'name': fields.char('Picking Type Name', translate=True, required=True),
'complete_name': fields.function(_get_name, type='char', string='Name'),
'color': fields.integer('Color'),
'sequence': fields.integer('Sequence', help="Used to order the 'All Operations' kanban view"),
'sequence_id': fields.many2one('ir.sequence', 'Reference Sequence', required=True),
'default_location_src_id': fields.many2one('stock.location', 'Default Source Location'),
'default_location_dest_id': fields.many2one('stock.location', 'Default Destination Location'),
'code': fields.selection([('incoming', 'Suppliers'), ('outgoing', 'Customers'), ('internal', 'Internal')], 'Type of Operation', required=True),
'return_picking_type_id': fields.many2one('stock.picking.type', 'Picking Type for Returns'),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', ondelete='cascade'),
'active': fields.boolean('Active'),
# Statistics for the kanban view
'last_done_picking': fields.function(_get_tristate_values,
type='char',
string='Last 10 Done Pickings'),
'count_picking_draft': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_ready': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_waiting': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_late': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_backorders': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'rate_picking_late': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'rate_picking_backorders': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
}
_defaults = {
'warehouse_id': _default_warehouse,
'active': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| StefanRijnhart/odoo | addons/stock/stock.py | Python | agpl-3.0 | 253,130 |
import curses
import functools
from stem.control import EventType, Controller
from stem.util import str_tools
# colors that curses can handle
COLOR_LIST = {
"red": curses.COLOR_RED,
"green": curses.COLOR_GREEN,
"yellow": curses.COLOR_YELLOW,
"blue": curses.COLOR_BLUE,
"cyan": curses.COLOR_CYAN,
"magenta": curses.COLOR_MAGENTA,
"black": curses.COLOR_BLACK,
"white": curses.COLOR_WHITE,
}
GRAPH_WIDTH = 40
GRAPH_HEIGHT = 8
DOWNLOAD_COLOR = "green"
UPLOAD_COLOR = "blue"
def main():
with Controller.from_port(port = 9051) as controller:
controller.authenticate()
try:
# This makes curses initialize and call draw_bandwidth_graph() with a
# reference to the screen, followed by additional arguments (in this
# case just the controller).
curses.wrapper(draw_bandwidth_graph, controller)
except KeyboardInterrupt:
pass # the user hit ctrl+c
def draw_bandwidth_graph(stdscr, controller):
window = Window(stdscr)
# (downloaded, uploaded) tuples for the last 40 seconds
bandwidth_rates = [(0, 0)] * GRAPH_WIDTH
# Making a partial that wraps the window and bandwidth_rates with a function
# for Tor to call when it gets a BW event. This causes the 'window' and
# 'bandwidth_rates' to be provided as the first two arguments whenever
# 'bw_event_handler()' is called.
bw_event_handler = functools.partial(_handle_bandwidth_event, window, bandwidth_rates)
# Registering this listener with Tor. Tor reports a BW event each second.
controller.add_event_listener(bw_event_handler, EventType.BW)
# Pause the main thread until the user hits any key... and no, don't you dare
# ask where the 'any' key is. :P
stdscr.getch()
def _handle_bandwidth_event(window, bandwidth_rates, event):
# callback for when tor provides us with a BW event
bandwidth_rates.insert(0, (event.read, event.written))
  del bandwidth_rates[GRAPH_WIDTH:]  # truncate old values in place so the shared list stays bounded
_render_graph(window, bandwidth_rates)
def _render_graph(window, bandwidth_rates):
window.erase()
download_rates = [entry[0] for entry in bandwidth_rates]
upload_rates = [entry[1] for entry in bandwidth_rates]
# show the latest values at the top
label = "Downloaded (%s/s):" % str_tools.size_label(download_rates[0], 1)
window.addstr(0, 1, label, DOWNLOAD_COLOR, curses.A_BOLD)
label = "Uploaded (%s/s):" % str_tools.size_label(upload_rates[0], 1)
window.addstr(0, GRAPH_WIDTH + 7, label, UPLOAD_COLOR, curses.A_BOLD)
# draw the graph bounds in KB
max_download_rate = max(download_rates)
max_upload_rate = max(upload_rates)
window.addstr(1, 1, "%4i" % (max_download_rate / 1024), DOWNLOAD_COLOR)
window.addstr(GRAPH_HEIGHT, 1, " 0", DOWNLOAD_COLOR)
window.addstr(1, GRAPH_WIDTH + 7, "%4i" % (max_upload_rate / 1024), UPLOAD_COLOR)
window.addstr(GRAPH_HEIGHT, GRAPH_WIDTH + 7, " 0", UPLOAD_COLOR)
# draw the graph
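  # each column is scaled against the highest rate seen so far; max(..., 1)
  # avoids dividing by zero while no traffic has been observed yet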
for col in range(GRAPH_WIDTH):
col_height = GRAPH_HEIGHT * download_rates[col] / max(max_download_rate, 1)
for row in range(col_height):
window.addstr(GRAPH_HEIGHT - row, col + 6, " ", DOWNLOAD_COLOR, curses.A_STANDOUT)
col_height = GRAPH_HEIGHT * upload_rates[col] / max(max_upload_rate, 1)
for row in range(col_height):
window.addstr(GRAPH_HEIGHT - row, col + GRAPH_WIDTH + 12, " ", UPLOAD_COLOR, curses.A_STANDOUT)
window.refresh()
class Window(object):
"""
Simple wrapper for the curses standard screen object.
"""
def __init__(self, stdscr):
self._stdscr = stdscr
# Mappings of names to the curses color attribute. Initially these all
# reference black text, but if the terminal can handle color then
# they're set with that foreground color.
self._colors = dict([(color, 0) for color in COLOR_LIST])
# allows for background transparency
try:
curses.use_default_colors()
except curses.error:
pass
# makes the cursor invisible
try:
curses.curs_set(0)
except curses.error:
pass
# initializes colors if the terminal can handle them
try:
if curses.has_colors():
color_pair = 1
for name, foreground in COLOR_LIST.items():
background = -1 # allows for default (possibly transparent) background
curses.init_pair(color_pair, foreground, background)
self._colors[name] = curses.color_pair(color_pair)
color_pair += 1
except curses.error:
pass
def addstr(self, y, x, msg, color = None, attr = curses.A_NORMAL):
# Curses throws an error if we try to draw a message that spans out of the
# window's bounds (... seriously?), so doing our best to avoid that.
if color is not None:
if color not in self._colors:
recognized_colors = ", ".join(self._colors.keys())
raise ValueError("The '%s' color isn't recognized: %s" % (color, recognized_colors))
attr |= self._colors[color]
max_y, max_x = self._stdscr.getmaxyx()
if max_x > x and max_y > y:
try:
self._stdscr.addstr(y, x, msg[:max_x - x], attr)
except:
pass # maybe an edge case while resizing the window
def erase(self):
self._stdscr.erase()
def refresh(self):
self._stdscr.refresh()
if __name__ == '__main__':
main()
| tparks5/tor-stem | docs/_static/example/event_listening.py | Python | lgpl-3.0 | 5,286 |
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.metrics import classification_report, confusion_matrix
from imblearn.under_sampling import RandomUnderSampler
import numpy as np
import dill as pickle
import pandas as pd
from evaltestcvbs import EvalTestCVBS as Eval
import information_gain_ratio as igr
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
def evaluate_model(model, X_train, y_train):
'''
INPUT
- model: this is a classification model from sklearn
- X_train: 2d array of the features
- y_train: 1d array of the target
OUTPUT
- information about the model's accuracy using 10
fold cross validation
- model: the fit model
Returns the model
'''
print(np.mean(cross_val_score(model, X_train, y_train,
cv=10, n_jobs=-1, verbose=10)))
model.fit(X_train, y_train)
return model
def balance_classes(sm, X, y):
'''
INPUT
- sm: imblearn oversampling/undersampling method
- X: 2d array of features
- y: 1d array of targets
OUTPUT
- X (balanced feature set)
- y (balanced feature set)
Returns X and y after being fit with the resampling method
'''
X, y = sm.fit_sample(X, y)
return X, y
def view_classification_report(model, X_test, y_test):
'''
INPUT
- model: an sklearn classifier model that has already been fit
- X_test: 2d array of the features
- y_test: 1d array of the target
OUTPUT
    - information on the classification performance of the model
Returns none
'''
print(classification_report(y_test, model.predict(X_test)))
def write_model_to_pkl(model, model_name):
'''
INPUT
- model_name: str, this is the name of the model
- model: the sklearn classification model that will be saved
OUTPUT
- saves the model to a pkl file
Returns None
'''
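    # A matching load later on might look like this (hedged sketch, path and
    # mode mirror the dump below):
    #   with open('models/{}_model.pkl'.format(model_name), 'rb') as f:
    #       model = pickle.load(f)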
    with open('models/{}_model.pkl'.format(model_name), 'wb') as f:
pickle.dump(model, f)
def view_feature_importances(df, model):
'''
INPUT
- df: dataframe which has the original data
- model: this is the sklearn classification model that has
already been fit (work with tree based models)
OUTPUT
- prints the feature importances in descending order
Returns nothing
'''
columns = df.columns
features = model.feature_importances_
featimps = []
for column, feature in zip(columns, features):
featimps.append([column, feature])
print(pd.DataFrame(featimps, columns=['Features',
'Importances']).sort_values(by='Importances',
ascending=False))
def gridsearch(paramgrid, model, X_train, y_train):
'''
INPUT
    - paramgrid: dictionary of lists containing parameters and
    hyperparameters
- X_train: 2d array of features
- y_train: 1d array of class labels
OUTPUT
- best_model: a fit sklearn classifier with the best parameters
- the gridsearch object
Performs grid search cross validation and
returns the best model and the gridsearch object
'''
gridsearch = GridSearchCV(model,
paramgrid,
n_jobs=-1,
verbose=10,
cv=10)
gridsearch.fit(X_train, y_train)
best_model = gridsearch.best_estimator_
print('these are the parameters of the best model')
print(best_model)
    print('\nthis is the best score')
print(gridsearch.best_score_)
return best_model, gridsearch
def get_igr_attribute_weights(X_train_b, y_train_b, df):
'''
INPUT
- X_train_b: 2d array of features from balanced class values
- y_train b: 1d array of balanced y values
- df: original dataframe from which data was loaded
OUTPUT
- numpy array
Returns an array of the different attribute weights
'''
bdf = pd.DataFrame(X_train_b, columns=df.columns)
weights = []
for attribute in bdf.columns:
weights.append(igr.information_gain_ratio_categorical(attribute,
bdf,
y_train_b))
return np.array(weights)
if __name__ == "__main__":
df = pd.read_csv('data/training_df.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
user_id_array = df.pop('id')
y = df.pop('label')
y = y.values
X = df.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2)
X_train_b, y_train_b = balance_classes(RandomUnderSampler(),
X_train, y_train)
X_test_b, y_test_b = balance_classes(RandomUnderSampler(),
X_test, y_test)
weights = get_igr_attribute_weights(X_train_b, y_train_b, df)
X_train_bw = X_train_b * weights
# paramgrid = {'n_estimators': [1000],
# 'loss': ['exponential'],
# 'max_features': ['auto'],
# 'min_samples_split': [22],
# 'min_samples_leaf': [5],
# 'max_depth': [3],
# 'subsample': [.5]}
# paramgrid = {'n_estimators': [200],
# 'max_features': ['auto'],
# 'criterion': ['gini', 'entropy'],
# 'min_samples_split': [15, 16, 17, 18, 19, 20, 21, 22, 23],
# 'min_samples_leaf': [5, 6, 7, 8],
# 'max_depth': [12, 13, 14, 15, 16, 17],
# 'bootstrap': [True]}
# paramgrid = {'kernel': ['rbf'],
# 'gamma': [.01, 'auto', 1.0, 5.0, 10.0, 11, 12, 13],
# 'C': [.001, .01, .1, 1, 5]}
# model = SVC(probability=True)
model = RandomForestClassifier(n_jobs=-1)
# model = GradientBoostingClassifier()
# model, gridsearch = gridsearch(paramgrid, model, X_train_bw, y_train_b)
model = evaluate_model(model, X_train_bw, y_train_b)
print("\nthis is the model performance on the training data\n")
view_classification_report(model, X_train_b, y_train_b)
confusion_matrix(y_train_b, model.predict(X_train_b))
print("this is the model performance on the test data\n")
view_classification_report(model, X_test_b, y_test_b)
confusion_matrix(y_test_b, model.predict(X_test_b))
print("this is the model performance on different split ratios\n")
etcb = Eval(model, .05, .5, .05, 100)
etcb.evaluate_data(X_test_b, y_test_b)
etcb.plot_performance()
# print("\nthese are the model feature importances\n")
# view_feature_importances(df, model)
print(model)
# write_model_to_pkl(model, 'tuned_gboostc')
| brityboy/BotBoosted | src/classification_model.py | Python | mit | 7,022 |
# -*- coding: utf-8 -*-
import os
import os.path
import sys
import re
import time
import math
import shutil
import calendar
import boto.ec2
import boto.ec2.blockdevicemapping
import boto.ec2.networkinterface
from nixops.backends import MachineDefinition, MachineState
from nixops.nix_expr import Function, Call, RawValue
from nixops.resources.ebs_volume import EBSVolumeState
from nixops.resources.elastic_ip import ElasticIPState
import nixops.resources.ec2_common
import nixops.util
import nixops.ec2_utils
import nixops.known_hosts
from xml import etree
class EC2InstanceDisappeared(Exception):
pass
class EC2Definition(MachineDefinition):
"""Definition of an EC2 machine."""
@classmethod
def get_type(cls):
return "ec2"
def __init__(self, xml, config):
MachineDefinition.__init__(self, xml, config)
self.access_key_id = config["ec2"]["accessKeyId"]
self.region = config["ec2"]["region"]
self.zone = config["ec2"]["zone"]
self.ami = config["ec2"]["ami"]
if self.ami == "":
raise Exception("no AMI defined for EC2 machine ‘{0}’".format(self.name))
self.instance_type = config["ec2"]["instanceType"]
self.key_pair = config["ec2"]["keyPair"]
self.private_key = config["ec2"]["privateKey"]
self.security_groups = config["ec2"]["securityGroups"]
self.placement_group = config["ec2"]["placementGroup"]
self.instance_profile = config["ec2"]["instanceProfile"]
self.tags = config["ec2"]["tags"]
self.root_disk_size = config["ec2"]["ebsInitialRootDiskSize"]
self.spot_instance_price = config["ec2"]["spotInstancePrice"]
self.ebs_optimized = config["ec2"]["ebsOptimized"]
self.subnet_id = config["ec2"]["subnetId"]
self.associate_public_ip_address = config["ec2"]["associatePublicIpAddress"]
self.use_private_ip_address = config["ec2"]["usePrivateIpAddress"]
self.security_group_ids = config["ec2"]["securityGroupIds"]
self.block_device_mapping = {_xvd_to_sd(k): v for k, v in config["ec2"]["blockDeviceMapping"].iteritems()}
self.elastic_ipv4 = config["ec2"]["elasticIPv4"]
self.dns_hostname = config["route53"]["hostName"]
self.dns_ttl = config["route53"]["ttl"]
self.route53_access_key_id = config["route53"]["accessKeyId"]
self.route53_use_public_dns_name = config["route53"]["usePublicDNSName"]
def show_type(self):
return "{0} [{1}]".format(self.get_type(), self.region or self.zone or "???")
def host_key_type(self):
return "ed25519" if nixops.util.parse_nixos_version(self.config["nixosRelease"]) >= ["15", "09"] else "dsa"
class EC2State(MachineState, nixops.resources.ec2_common.EC2CommonState):
"""State of an EC2 machine."""
@classmethod
def get_type(cls):
return "ec2"
state = nixops.util.attr_property("state", MachineState.MISSING, int) # override
# We need to store this in machine state so wait_for_ip knows what to wait for
# Really it seems like this whole class should be parameterized by its definition.
# (or the state shouldn't be doing the polling)
public_ipv4 = nixops.util.attr_property("publicIpv4", None)
private_ipv4 = nixops.util.attr_property("privateIpv4", None)
public_dns_name = nixops.util.attr_property("publicDnsName", None)
use_private_ip_address = nixops.util.attr_property("ec2.usePrivateIpAddress", False, type=bool)
associate_public_ip_address = nixops.util.attr_property("ec2.associatePublicIpAddress", False, type=bool)
elastic_ipv4 = nixops.util.attr_property("ec2.elasticIpv4", None)
access_key_id = nixops.util.attr_property("ec2.accessKeyId", None)
region = nixops.util.attr_property("ec2.region", None)
zone = nixops.util.attr_property("ec2.zone", None)
ami = nixops.util.attr_property("ec2.ami", None)
instance_type = nixops.util.attr_property("ec2.instanceType", None)
key_pair = nixops.util.attr_property("ec2.keyPair", None)
public_host_key = nixops.util.attr_property("ec2.publicHostKey", None)
private_host_key = nixops.util.attr_property("ec2.privateHostKey", None)
private_key_file = nixops.util.attr_property("ec2.privateKeyFile", None)
instance_profile = nixops.util.attr_property("ec2.instanceProfile", None)
security_groups = nixops.util.attr_property("ec2.securityGroups", None, 'json')
placement_group = nixops.util.attr_property("ec2.placementGroup", None, 'json')
block_device_mapping = nixops.util.attr_property("ec2.blockDeviceMapping", {}, 'json')
root_device_type = nixops.util.attr_property("ec2.rootDeviceType", None)
backups = nixops.util.attr_property("ec2.backups", {}, 'json')
dns_hostname = nixops.util.attr_property("route53.hostName", None)
dns_ttl = nixops.util.attr_property("route53.ttl", None, int)
route53_access_key_id = nixops.util.attr_property("route53.accessKeyId", None)
client_token = nixops.util.attr_property("ec2.clientToken", None)
spot_instance_request_id = nixops.util.attr_property("ec2.spotInstanceRequestId", None)
spot_instance_price = nixops.util.attr_property("ec2.spotInstancePrice", None)
subnet_id = nixops.util.attr_property("ec2.subnetId", None)
first_boot = nixops.util.attr_property("ec2.firstBoot", True, type=bool)
virtualization_type = nixops.util.attr_property("ec2.virtualizationType", None)
def __init__(self, depl, name, id):
MachineState.__init__(self, depl, name, id)
self._conn = None
self._conn_vpc = None
self._conn_route53 = None
self._cached_instance = None
def _reset_state(self):
"""Discard all state pertaining to an instance."""
with self.depl._db:
self.state = MachineState.MISSING
self.associate_public_ip_address = None
self.use_private_ip_address = None
self.vm_id = None
self.public_ipv4 = None
self.private_ipv4 = None
self.public_dns_name = None
self.elastic_ipv4 = None
self.region = None
self.zone = None
self.ami = None
self.instance_type = None
self.key_pair = None
self.public_host_key = None
self.private_host_key = None
self.instance_profile = None
self.security_groups = None
self.placement_group = None
self.tags = {}
self.block_device_mapping = {}
self.root_device_type = None
self.backups = {}
self.dns_hostname = None
self.dns_ttl = None
self.subnet_id = None
self.client_token = None
self.spot_instance_request_id = None
def get_ssh_name(self):
retVal = None
if self.use_private_ip_address:
if not self.private_ipv4:
raise Exception("EC2 machine '{0}' does not have a private IPv4 address (yet)".format(self.name))
retVal = self.private_ipv4
else:
if not self.public_ipv4:
raise Exception("EC2 machine ‘{0}’ does not have a public IPv4 address (yet)".format(self.name))
retVal = self.public_ipv4
return retVal
def get_ssh_private_key_file(self):
if self.private_key_file: return self.private_key_file
if self._ssh_private_key_file: return self._ssh_private_key_file
for r in self.depl.active_resources.itervalues():
if isinstance(r, nixops.resources.ec2_keypair.EC2KeyPairState) and \
r.state == nixops.resources.ec2_keypair.EC2KeyPairState.UP and \
r.keypair_name == self.key_pair:
return self.write_ssh_private_key(r.private_key)
return None
def get_ssh_flags(self, scp=False):
file = self.get_ssh_private_key_file()
return super(EC2State, self).get_ssh_flags(scp) + (["-i", file] if file else [])
def get_physical_spec(self):
block_device_mapping = {}
for k, v in self.block_device_mapping.items():
if (v.get('encrypt', False)
and v.get('encryptionType', "luks") == "luks"
and v.get('passphrase', "") == ""
and v.get('generatedKey', "") != ""):
block_device_mapping[_sd_to_xvd(k)] = {
'passphrase': Call(RawValue("pkgs.lib.mkOverride 10"),
v['generatedKey']),
}
return {
'imports': [
RawValue("<nixpkgs/nixos/modules/virtualisation/amazon-image.nix>")
],
('deployment', 'ec2', 'blockDeviceMapping'): block_device_mapping,
('deployment', 'ec2', 'instanceId'): self.vm_id,
('ec2', 'hvm'): self.virtualization_type == "hvm",
}
def get_physical_backup_spec(self, backupid):
val = {}
if backupid in self.backups:
for dev, snap in self.backups[backupid].items():
if not dev.startswith("/dev/sda"):
val[_sd_to_xvd(dev)] = { 'disk': Call(RawValue("pkgs.lib.mkOverride 10"), snap)}
val = { ('deployment', 'ec2', 'blockDeviceMapping'): val }
else:
val = RawValue("{{}} /* No backup found for id '{0}' */".format(backupid))
return Function("{ config, pkgs, ... }", val)
def get_keys(self):
keys = MachineState.get_keys(self)
# Ugly: we have to add the generated keys because they're not
# there in the first evaluation (though they are present in
# the final nix-build). Had to hardcode the default here to
# make the old way of defining keys work.
for k, v in self.block_device_mapping.items():
if v.get('encrypt', False) and v.get('passphrase', "") == "" and v.get('generatedKey', "") != "" and v.get('encryptionType', "luks") == "luks":
keys["luks-" + _sd_to_xvd(k).replace('/dev/', '')] = { 'text': v['generatedKey'], 'group': 'root', 'permissions': '0600', 'user': 'root'}
return keys
def show_type(self):
s = super(EC2State, self).show_type()
if self.zone or self.region: s = "{0} [{1}; {2}]".format(s, self.zone or self.region, self.instance_type)
return s
@property
def resource_id(self):
return self.vm_id
def address_to(self, m):
if isinstance(m, EC2State): # FIXME: only if we're in the same region
return m.private_ipv4
return MachineState.address_to(self, m)
def connect(self):
if self._conn: return self._conn
self._conn = nixops.ec2_utils.connect(self.region, self.access_key_id)
return self._conn
def connect_vpc(self):
if self._conn_vpc:
return self._conn_vpc
self._conn_vpc = nixops.ec2_utils.connect_vpc(self.region, self.access_key_id)
return self._conn_vpc
def connect_route53(self):
if self._conn_route53:
return
# Get the secret access key from the environment or from ~/.ec2-keys.
(access_key_id, secret_access_key) = nixops.ec2_utils.fetch_aws_secret_key(self.route53_access_key_id)
self._conn_route53 = boto.connect_route53(access_key_id, secret_access_key)
def _get_spot_instance_request_by_id(self, request_id, allow_missing=False):
"""Get spot instance request object by id."""
self.connect()
result = self._conn.get_all_spot_instance_requests([request_id])
if len(result) == 0:
if allow_missing:
return None
raise EC2InstanceDisappeared("Spot instance request ‘{0}’ disappeared!".format(request_id))
return result[0]
def _get_instance(self, instance_id=None, allow_missing=False, update=False):
"""Get instance object for this machine, with caching"""
if not instance_id: instance_id = self.vm_id
assert instance_id
if not self._cached_instance:
self.connect()
try:
instances = self._conn.get_only_instances([instance_id])
except boto.exception.EC2ResponseError as e:
if allow_missing and e.error_code == "InvalidInstanceID.NotFound":
instances = []
else:
raise
if len(instances) == 0:
if allow_missing:
return None
raise EC2InstanceDisappeared("EC2 instance ‘{0}’ disappeared!".format(instance_id))
self._cached_instance = instances[0]
elif update:
self._cached_instance.update()
if self._cached_instance.launch_time:
self.start_time = calendar.timegm(time.strptime(self._cached_instance.launch_time, "%Y-%m-%dT%H:%M:%S.000Z"))
return self._cached_instance
def _get_snapshot_by_id(self, snapshot_id):
"""Get snapshot object by instance id."""
self.connect()
snapshots = self._conn.get_all_snapshots([snapshot_id])
if len(snapshots) != 1:
raise Exception("unable to find snapshot ‘{0}’".format(snapshot_id))
return snapshots[0]
def _wait_for_ip(self):
self.log_start("waiting for IP address... ".format(self.name))
def _instance_ip_ready(ins):
ready = True
if self.associate_public_ip_address and not ins.ip_address:
ready = False
if self.use_private_ip_address and not ins.private_ip_address:
ready = False
return ready
while True:
instance = self._get_instance(update=True)
self.log_continue("[{0}] ".format(instance.state))
if instance.state not in {"pending", "running", "scheduling", "launching", "stopped"}:
raise Exception("EC2 instance ‘{0}’ failed to start (state is ‘{1}’)".format(self.vm_id, instance.state))
if instance.state != "running":
time.sleep(3)
continue
if _instance_ip_ready(instance):
break
time.sleep(3)
self.log_end("{0} / {1}".format(instance.ip_address, instance.private_ip_address))
with self.depl._db:
self.private_ipv4 = instance.private_ip_address
self.public_ipv4 = instance.ip_address
self.public_dns_name = instance.public_dns_name
self.ssh_pinged = False
nixops.known_hosts.update(self.public_ipv4, self._ip_for_ssh_key(), self.public_host_key)
def _ip_for_ssh_key(self):
if self.use_private_ip_address:
return self.private_ipv4
else:
return self.public_ipv4
def _booted_from_ebs(self):
return self.root_device_type == "ebs"
def update_block_device_mapping(self, k, v):
x = self.block_device_mapping
if v == None:
x.pop(k, None)
else:
x[k] = v
self.block_device_mapping = x
def get_backups(self):
if not self.region: return {}
self.connect()
backups = {}
current_volumes = set([v['volumeId'] for v in self.block_device_mapping.values()])
for b_id, b in self.backups.items():
backups[b_id] = {}
backup_status = "complete"
info = []
for k, v in self.block_device_mapping.items():
if not k in b.keys():
backup_status = "incomplete"
info.append("{0} - {1} - Not available in backup".format(self.name, _sd_to_xvd(k)))
else:
snapshot_id = b[k]
try:
snapshot = self._get_snapshot_by_id(snapshot_id)
snapshot_status = snapshot.update()
info.append("progress[{0},{1},{2}] = {3}".format(self.name, _sd_to_xvd(k), snapshot_id, snapshot_status))
if snapshot_status != '100%':
backup_status = "running"
except boto.exception.EC2ResponseError as e:
if e.error_code != "InvalidSnapshot.NotFound": raise
info.append("{0} - {1} - {2} - Snapshot has disappeared".format(self.name, _sd_to_xvd(k), snapshot_id))
backup_status = "unavailable"
backups[b_id]['status'] = backup_status
backups[b_id]['info'] = info
return backups
def remove_backup(self, backup_id, keep_physical=False):
self.log('removing backup {0}'.format(backup_id))
self.connect()
_backups = self.backups
if not backup_id in _backups.keys():
self.warn('backup {0} not found, skipping'.format(backup_id))
else:
if not keep_physical:
for dev, snapshot_id in _backups[backup_id].items():
snapshot = None
try:
snapshot = self._get_snapshot_by_id(snapshot_id)
except:
self.warn('snapshot {0} not found, skipping'.format(snapshot_id))
if not snapshot is None:
self.log('removing snapshot {0}'.format(snapshot_id))
self._retry(lambda: snapshot.delete())
_backups.pop(backup_id)
self.backups = _backups
def backup(self, defn, backup_id):
self.connect()
self.log("backing up machine ‘{0}’ using id ‘{1}’".format(self.name, backup_id))
backup = {}
_backups = self.backups
for k, v in self.block_device_mapping.items():
snapshot = self._retry(lambda: self._conn.create_snapshot(volume_id=v['volumeId']))
self.log("+ created snapshot of volume ‘{0}’: ‘{1}’".format(v['volumeId'], snapshot.id))
snapshot_tags = {}
snapshot_tags.update(defn.tags)
snapshot_tags.update(self.get_common_tags())
snapshot_tags['Name'] = "{0} - {3} [{1} - {2}]".format(self.depl.description, self.name, k, backup_id)
self._retry(lambda: self._conn.create_tags([snapshot.id], snapshot_tags))
backup[k] = snapshot.id
_backups[backup_id] = backup
self.backups = _backups
def restore(self, defn, backup_id, devices=[]):
self.stop()
self.log("restoring machine ‘{0}’ to backup ‘{1}’".format(self.name, backup_id))
for d in devices:
self.log(" - {0}".format(d))
for k, v in self.block_device_mapping.items():
if devices == [] or _sd_to_xvd(k) in devices:
# detach disks
volume = nixops.ec2_utils.get_volume_by_id(self.connect(), v['volumeId'])
if volume and volume.update() == "in-use":
self.log("detaching volume from ‘{0}’".format(self.name))
volume.detach()
# attach backup disks
snapshot_id = self.backups[backup_id][k]
self.log("creating volume from snapshot ‘{0}’".format(snapshot_id))
new_volume = self._conn.create_volume(size=0, snapshot=snapshot_id, zone=self.zone)
# Check if original volume is available, aka detached from the machine.
if volume:
nixops.ec2_utils.wait_for_volume_available(self._conn, volume.id, self.logger)
# Check if new volume is available.
nixops.ec2_utils.wait_for_volume_available(self._conn, new_volume.id, self.logger)
self.log("attaching volume ‘{0}’ to ‘{1}’".format(new_volume.id, self.name))
new_volume.attach(self.vm_id, k)
new_v = self.block_device_mapping[k]
if v.get('partOfImage', False) or v.get('charonDeleteOnTermination', False) or v.get('deleteOnTermination', False):
new_v['charonDeleteOnTermination'] = True
self._delete_volume(v['volumeId'], True)
new_v['volumeId'] = new_volume.id
self.update_block_device_mapping(k, new_v)
def create_after(self, resources, defn):
# EC2 instances can require key pairs, IAM roles, security
# groups, EBS volumes and elastic IPs. FIXME: only depend on
# the specific key pair / role needed for this instance.
return {r for r in resources if
isinstance(r, nixops.resources.ec2_keypair.EC2KeyPairState) or
isinstance(r, nixops.resources.iam_role.IAMRoleState) or
isinstance(r, nixops.resources.ec2_security_group.EC2SecurityGroupState) or
isinstance(r, nixops.resources.ec2_placement_group.EC2PlacementGroupState) or
isinstance(r, nixops.resources.ebs_volume.EBSVolumeState) or
isinstance(r, nixops.resources.elastic_ip.ElasticIPState)}
def attach_volume(self, device, volume_id):
volume = nixops.ec2_utils.get_volume_by_id(self.connect(), volume_id)
if volume.status == "in-use" and \
self.vm_id != volume.attach_data.instance_id and \
self.depl.logger.confirm("volume ‘{0}’ is in use by instance ‘{1}’, "
"are you sure you want to attach this volume?".format(volume_id, volume.attach_data.instance_id)):
self.log_start("detaching volume ‘{0}’ from instance ‘{1}’... ".format(volume_id, volume.attach_data.instance_id))
volume.detach()
def check_available():
res = volume.update()
self.log_continue("[{0}] ".format(res))
return res == 'available'
nixops.util.check_wait(check_available)
self.log_end('')
if volume.update() != "available":
self.log("force detaching volume ‘{0}’ from instance ‘{1}’...".format(volume_id, volume.attach_data.instance_id))
volume.detach(True)
nixops.util.check_wait(check_available)
self.log_start("attaching volume ‘{0}’ as ‘{1}’... ".format(volume_id, _sd_to_xvd(device)))
if self.vm_id != volume.attach_data.instance_id:
# Attach it.
self._conn.attach_volume(volume_id, self.vm_id, device)
def check_attached():
volume.update()
res = volume.attach_data.status
self.log_continue("[{0}] ".format(res or "not-attached"))
return res == 'attached'
# If volume is not in attached state, wait for it before going on.
if volume.attach_data.status != "attached":
nixops.util.check_wait(check_attached)
# Wait until the device is visible in the instance.
def check_dev():
res = self.run_command("test -e {0}".format(_sd_to_xvd(device)), check=False)
return res == 0
nixops.util.check_wait(check_dev)
self.log_end('')
def _assign_elastic_ip(self, elastic_ipv4, check):
instance = self._get_instance()
# Assign or release an elastic IP address, if given.
if (self.elastic_ipv4 or "") != elastic_ipv4 or (instance.ip_address != elastic_ipv4) or check:
if elastic_ipv4 != "":
# wait until machine is in running state
self.log_start("waiting for machine to be in running state... ".format(self.name))
while True:
self.log_continue("[{0}] ".format(instance.state))
if instance.state == "running":
break
if instance.state not in {"running", "pending"}:
raise Exception(
"EC2 instance ‘{0}’ failed to reach running state (state is ‘{1}’)"
.format(self.vm_id, instance.state))
time.sleep(3)
instance = self._get_instance(update=True)
self.log_end("")
addresses = self._conn.get_all_addresses(addresses=[elastic_ipv4])
if addresses[0].instance_id != "" \
and addresses[0].instance_id is not None \
and addresses[0].instance_id != self.vm_id \
and not self.depl.logger.confirm(
"are you sure you want to associate IP address ‘{0}’, which is currently in use by instance ‘{1}’?".format(
elastic_ipv4, addresses[0].instance_id)):
raise Exception("elastic IP ‘{0}’ already in use...".format(elastic_ipv4))
else:
self.log("associating IP address ‘{0}’...".format(elastic_ipv4))
addresses[0].associate(self.vm_id)
self.log_start("waiting for address to be associated with this machine... ")
instance = self._get_instance(update=True)
while True:
self.log_continue("[{0}] ".format(instance.ip_address))
if instance.ip_address == elastic_ipv4:
break
time.sleep(3)
instance = self._get_instance(update=True)
self.log_end("")
nixops.known_hosts.update(self.public_ipv4, elastic_ipv4, self.public_host_key)
with self.depl._db:
self.elastic_ipv4 = elastic_ipv4
self.public_ipv4 = elastic_ipv4
self.ssh_pinged = False
elif self.elastic_ipv4 != None:
self.log("disassociating IP address ‘{0}’...".format(self.elastic_ipv4))
self._conn.disassociate_address(public_ip=self.elastic_ipv4)
with self.depl._db:
self.elastic_ipv4 = None
self.public_ipv4 = None
self.ssh_pinged = False
def _get_network_interfaces(self, defn):
groups = defn.security_group_ids
sg_names = filter(lambda g: not g.startswith('sg-'), defn.security_group_ids)
if sg_names != []:
self.connect_vpc()
vpc_id = self._conn_vpc.get_all_subnets([defn.subnet_id])[0].vpc_id
groups = map(lambda g: nixops.ec2_utils.name_to_security_group(self._conn, g, vpc_id), defn.security_group_ids)
return boto.ec2.networkinterface.NetworkInterfaceCollection(
boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=defn.subnet_id,
associate_public_ip_address=defn.associate_public_ip_address,
groups=groups
)
)
def create_instance(self, defn, zone, devmap, user_data, ebs_optimized):
common_args = dict(
instance_type=defn.instance_type,
placement=zone,
key_name=defn.key_pair,
placement_group=defn.placement_group,
block_device_map=devmap,
user_data=user_data,
image_id=defn.ami,
ebs_optimized=ebs_optimized
)
if defn.instance_profile.startswith("arn:") :
common_args['instance_profile_arn'] = defn.instance_profile
else:
common_args['instance_profile_name'] = defn.instance_profile
if defn.subnet_id != "":
if defn.security_groups != [] and defn.security_groups != ["default"]:
raise Exception("‘deployment.ec2.securityGroups’ is incompatible with ‘deployment.ec2.subnetId’")
common_args['network_interfaces'] = self._get_network_interfaces(defn)
else:
common_args['security_groups'] = defn.security_groups
if defn.spot_instance_price:
if self.spot_instance_request_id is None:
# FIXME: Should use a client token here, but
# request_spot_instances doesn't support one.
request = self._retry(
lambda: self._conn.request_spot_instances(price=defn.spot_instance_price/100.0, **common_args)
)[0]
with self.depl._db:
self.spot_instance_price = defn.spot_instance_price
self.spot_instance_request_id = request.id
common_tags = self.get_common_tags()
tags = {'Name': "{0} [{1}]".format(self.depl.description, self.name)}
tags.update(defn.tags)
tags.update(common_tags)
self._retry(lambda: self._conn.create_tags([self.spot_instance_request_id], tags))
self.log_start("waiting for spot instance request ‘{0}’ to be fulfilled... ".format(self.spot_instance_request_id))
while True:
request = self._get_spot_instance_request_by_id(self.spot_instance_request_id)
self.log_continue("[{0}] ".format(request.status.code))
if request.status.code == "fulfilled": break
time.sleep(3)
self.log_end("")
instance = self._retry(lambda: self._get_instance(instance_id=request.instance_id))
return instance
else:
# Use a client token to ensure that instance creation is
# idempotent; i.e., if we get interrupted before recording
# the instance ID, we'll get the same instance ID on the
# next run.
if not self.client_token:
with self.depl._db:
self.client_token = nixops.util.generate_random_string(length=48) # = 64 ASCII chars
self.state = self.STARTING
reservation = self._retry(lambda: self._conn.run_instances(
client_token=self.client_token, **common_args), error_codes = ['InvalidParameterValue', 'UnauthorizedOperation' ])
assert len(reservation.instances) == 1
return reservation.instances[0]
def _cancel_spot_request(self):
if self.spot_instance_request_id is None: return
self.log_start("cancelling spot instance request ‘{0}’... ".format(self.spot_instance_request_id))
# Cancel the request.
request = self._get_spot_instance_request_by_id(self.spot_instance_request_id, allow_missing=True)
if request is not None:
request.cancel()
# Wait until it's really cancelled. It's possible that the
# request got fulfilled while we were cancelling it. In that
# case, record the instance ID.
while True:
request = self._get_spot_instance_request_by_id(self.spot_instance_request_id, allow_missing=True)
if request is None: break
self.log_continue("[{0}] ".format(request.status.code))
if request.instance_id is not None and request.instance_id != self.vm_id:
if self.vm_id is not None:
raise Exception("spot instance request got fulfilled unexpectedly as instance ‘{0}’".format(request.instance_id))
self.vm_id = request.instance_id
if request.state != 'open': break
time.sleep(3)
self.log_end("")
self.spot_instance_request_id = None
def after_activation(self, defn):
# Detach volumes that are no longer in the deployment spec.
for k, v in self.block_device_mapping.items():
if k not in defn.block_device_mapping and not v.get('partOfImage', False):
if v.get('disk', '').startswith("ephemeral"):
raise Exception("cannot detach ephemeral device ‘{0}’ from EC2 instance ‘{1}’"
.format(_sd_to_xvd(k), self.name))
assert v.get('volumeId', None)
self.log("detaching device ‘{0}’...".format(_sd_to_xvd(k)))
volumes = self._conn.get_all_volumes([],
filters={'attachment.instance-id': self.vm_id, 'attachment.device': k, 'volume-id': v['volumeId']})
assert len(volumes) <= 1
if len(volumes) == 1:
device = _sd_to_xvd(k)
if v.get('encrypt', False) and v.get('encryptionType', "luks") == "luks":
dm = device.replace("/dev/", "/dev/mapper/")
self.run_command("umount -l {0}".format(dm), check=False)
self.run_command("cryptsetup luksClose {0}".format(device.replace("/dev/", "")), check=False)
else:
self.run_command("umount -l {0}".format(device), check=False)
if not self._conn.detach_volume(volumes[0].id, instance_id=self.vm_id, device=k):
raise Exception("unable to detach volume ‘{0}’ from EC2 machine ‘{1}’".format(v['volumeId'], self.name))
# FIXME: Wait until the volume is actually detached.
if v.get('charonDeleteOnTermination', False) or v.get('deleteOnTermination', False):
self._delete_volume(v['volumeId'])
self.update_block_device_mapping(k, None)
def create(self, defn, check, allow_reboot, allow_recreate):
assert isinstance(defn, EC2Definition)
if self.state != self.UP:
check = True
self.set_common_state(defn)
# Figure out the access key.
self.access_key_id = defn.access_key_id or nixops.ec2_utils.get_access_key_id()
if not self.access_key_id:
raise Exception("please set ‘deployment.ec2.accessKeyId’, $EC2_ACCESS_KEY or $AWS_ACCESS_KEY_ID")
self.private_key_file = defn.private_key or None
if self.region is None:
self.region = defn.region
elif self.region != defn.region:
self.warn("cannot change region of a running instance")
self.connect()
# Stop the instance (if allowed) to change instance attributes
# such as the type.
if self.vm_id and allow_reboot and self._booted_from_ebs() and self.instance_type != defn.instance_type:
self.stop()
check = True
# Check whether the instance hasn't been killed behind our
# backs. Restart stopped instances.
if self.vm_id and check:
instance = self._get_instance(allow_missing=True)
if instance is None or instance.state in {"shutting-down", "terminated"}:
if not allow_recreate:
raise Exception("EC2 instance ‘{0}’ went away; use ‘--allow-recreate’ to create a new one".format(self.name))
self.log("EC2 instance went away (state ‘{0}’), will recreate".format(instance.state if instance else "gone"))
self._reset_state()
self.region = defn.region
elif instance.state == "stopped":
self.log("EC2 instance was stopped, restarting...")
# Modify the instance type, if desired.
if self.instance_type != defn.instance_type:
self.log("changing instance type from ‘{0}’ to ‘{1}’...".format(self.instance_type, defn.instance_type))
instance.modify_attribute("instanceType", defn.instance_type)
self.instance_type = defn.instance_type
# When we restart, we'll probably get a new IP. So forget the current one.
self.public_ipv4 = None
self.private_ipv4 = None
instance.start()
self.state = self.STARTING
resize_root = False
# Create the instance.
if not self.vm_id:
self.log("creating EC2 instance (AMI ‘{0}’, type ‘{1}’, region ‘{2}’)...".format(
defn.ami, defn.instance_type, self.region))
if not self.client_token and not self.spot_instance_request_id:
self._reset_state()
self.region = defn.region
self.connect()
# Figure out whether this AMI is EBS-backed.
amis = self._conn.get_all_images([defn.ami])
if len(amis) == 0:
raise Exception("AMI ‘{0}’ does not exist in region ‘{1}’".format(defn.ami, self.region))
ami = self._conn.get_all_images([defn.ami])[0]
self.root_device_type = ami.root_device_type
# Check if we need to resize the root disk
resize_root = defn.root_disk_size != 0 and ami.root_device_type == 'ebs'
# Set the initial block device mapping to the ephemeral
# devices defined in the spec. These cannot be changed
# later.
devmap = boto.ec2.blockdevicemapping.BlockDeviceMapping()
devs_mapped = {}
for k, v in defn.block_device_mapping.iteritems():
if re.match("/dev/sd[a-e]", k) and not v['disk'].startswith("ephemeral"):
raise Exception("non-ephemeral disk not allowed on device ‘{0}’; use /dev/xvdf or higher".format(_sd_to_xvd(k)))
if v['disk'].startswith("ephemeral"):
devmap[k] = boto.ec2.blockdevicemapping.BlockDeviceType(ephemeral_name=v['disk'])
self.update_block_device_mapping(k, v)
root_device = ami.root_device_name
if resize_root:
devmap[root_device] = ami.block_device_mapping[root_device]
devmap[root_device].size = defn.root_disk_size
devmap[root_device].encrypted = None
# If we're attaching any EBS volumes, then make sure that
# we create the instance in the right placement zone.
zone = defn.zone or None
for k, v in defn.block_device_mapping.iteritems():
if not v['disk'].startswith("vol-"): continue
# Make note of the placement zone of the volume.
volume = nixops.ec2_utils.get_volume_by_id(self._conn, v['disk'])
if not zone:
self.log("starting EC2 instance in zone ‘{0}’ due to volume ‘{1}’".format(
volume.zone, v['disk']))
zone = volume.zone
elif zone != volume.zone:
raise Exception("unable to start EC2 instance ‘{0}’ in zone ‘{1}’ because volume ‘{2}’ is in zone ‘{3}’"
.format(self.name, zone, v['disk'], volume.zone))
# Do we want an EBS-optimized instance?
prefer_ebs_optimized = False
for k, v in defn.block_device_mapping.iteritems():
if v['volumeType'] != "standard":
prefer_ebs_optimized = True
# if we have PIOPS volume and instance type supports EBS Optimized flags, then use ebs_optimized
ebs_optimized = prefer_ebs_optimized and defn.ebs_optimized
# Generate a public/private host key.
if not self.public_host_key:
(private, public) = nixops.util.create_key_pair(type=defn.host_key_type())
with self.depl._db:
self.public_host_key = public
self.private_host_key = private
user_data = "SSH_HOST_{2}_KEY_PUB:{0}\nSSH_HOST_{2}_KEY:{1}\n".format(
self.public_host_key, self.private_host_key.replace("\n", "|"),
defn.host_key_type().upper())
instance = self.create_instance(defn, zone, devmap, user_data, ebs_optimized)
with self.depl._db:
self.vm_id = instance.id
self.ami = defn.ami
self.instance_type = defn.instance_type
self.key_pair = defn.key_pair
self.security_groups = defn.security_groups
self.placement_group = defn.placement_group
self.zone = instance.placement
self.client_token = None
self.private_host_key = None
# Cancel spot instance request, it isn't needed after the
# instance has been provisioned.
self._cancel_spot_request()
# There is a short time window during which EC2 doesn't
# know the instance ID yet. So wait until it does.
if self.state != self.UP or check:
while True:
if self._get_instance(allow_missing=True): break
self.log("EC2 instance ‘{0}’ not known yet, waiting...".format(self.vm_id))
time.sleep(3)
if not self.virtualization_type:
self.virtualization_type = self._get_instance().virtualization_type
# Warn about some EC2 options that we cannot update for an existing instance.
if self.instance_type != defn.instance_type:
self.warn("cannot change type of a running instance (use ‘--allow-reboot’)")
if defn.zone and self.zone != defn.zone:
self.warn("cannot change availability zone of a running instance")
if set(defn.security_groups) != set(self.security_groups):
self.warn(
'cannot change security groups of an existing instance (from [{0}] to [{1}])'.format(
", ".join(set(self.security_groups)),
", ".join(set(defn.security_groups)))
)
if defn.placement_group != (self.placement_group or ""):
self.warn(
'cannot change placement group of an existing instance (from ‘{0}’ to ‘{1}’)'.format(
self.placement_group or "",
defn.placement_group)
)
# Reapply tags if they have changed.
common_tags = defn.tags
if defn.owners != []:
common_tags['Owners'] = ", ".join(defn.owners)
self.update_tags(self.vm_id, user_tags=common_tags, check=check)
# Assign the elastic IP. If necessary, dereference the resource.
elastic_ipv4 = defn.elastic_ipv4
if elastic_ipv4.startswith("res-"):
res = self.depl.get_typed_resource(elastic_ipv4[4:], "elastic-ip")
elastic_ipv4 = res.public_ipv4
self._assign_elastic_ip(elastic_ipv4, check)
with self.depl._db:
self.use_private_ip_address = defn.use_private_ip_address
self.associate_public_ip_address = defn.associate_public_ip_address
# Wait for the IP address.
if (self.associate_public_ip_address and not self.public_ipv4) \
or \
(self.use_private_ip_address and not self.private_ipv4) \
or \
check:
self._wait_for_ip()
if defn.dns_hostname:
self._update_route53(defn)
# Wait until the instance is reachable via SSH.
self.wait_for_ssh(check=check)
# Generate a new host key on the instance and restart
# sshd. This is necessary because we can't count on the
# instance data to remain secret. FIXME: not atomic.
if "NixOps auto-generated key" in self.public_host_key:
self.log("replacing temporary host key...")
key_type = defn.host_key_type()
new_key = self.run_command(
"rm -f /etc/ssh/ssh_host_{0}_key*; systemctl restart sshd; cat /etc/ssh/ssh_host_{0}_key.pub"
.format(key_type),
capture_stdout=True).rstrip()
self.public_host_key = new_key
nixops.known_hosts.update(None, self._ip_for_ssh_key(), self.public_host_key)
# Resize the root filesystem. On NixOS >= 15.09, this is done
# by the initrd.
if resize_root and nixops.util.parse_nixos_version(defn.config["nixosRelease"]) < ["15", "09"]:
self.log('resizing root disk...')
self.run_command("resize2fs {0}".format(_sd_to_xvd(root_device)))
# Add disks that were in the original device mapping of image.
if self.first_boot:
for k, dm in self._get_instance().block_device_mapping.items():
if k not in self.block_device_mapping and dm.volume_id:
bdm = {'volumeId': dm.volume_id, 'partOfImage': True}
self.update_block_device_mapping(k, bdm)
self.first_boot = False
# Detect if volumes were manually detached. If so, reattach
# them.
for k, v in self.block_device_mapping.items():
if k not in self._get_instance().block_device_mapping.keys() and not v.get('needsAttach', False) and v.get('volumeId', None):
self.warn("device ‘{0}’ was manually detached!".format(_sd_to_xvd(k)))
v['needsAttach'] = True
self.update_block_device_mapping(k, v)
# Detect if volumes were manually destroyed.
for k, v in self.block_device_mapping.items():
if v.get('needsAttach', False):
volume = nixops.ec2_utils.get_volume_by_id(self._conn, v['volumeId'], allow_missing=True)
if volume: continue
if not allow_recreate:
raise Exception("volume ‘{0}’ (used by EC2 instance ‘{1}’) no longer exists; "
"run ‘nixops stop’, then ‘nixops deploy --allow-recreate’ to create a new, empty volume"
.format(v['volumeId'], self.name))
self.warn("volume ‘{0}’ has disappeared; will create an empty volume to replace it".format(v['volumeId']))
self.update_block_device_mapping(k, None)
# Create missing volumes.
for k, v in defn.block_device_mapping.iteritems():
volume = None
if v['disk'] == '':
if k in self.block_device_mapping: continue
self.log("creating EBS volume of {0} GiB...".format(v['size']))
ebs_encrypt = v.get('encryptionType', "luks") == "ebs"
volume = self._conn.create_volume(size=v['size'], zone=self.zone, volume_type=v['volumeType'], iops=v['iops'], encrypted=ebs_encrypt)
v['volumeId'] = volume.id
elif v['disk'].startswith("vol-"):
if k in self.block_device_mapping:
cur_volume_id = self.block_device_mapping[k]['volumeId']
if cur_volume_id != v['disk']:
raise Exception("cannot attach EBS volume ‘{0}’ to ‘{1}’ because volume ‘{2}’ is already attached there".format(v['disk'], k, cur_volume_id))
continue
v['volumeId'] = v['disk']
elif v['disk'].startswith("res-"):
res_name = v['disk'][4:]
res = self.depl.get_typed_resource(res_name, "ebs-volume")
if res.state != self.UP:
raise Exception("EBS volume ‘{0}’ has not been created yet".format(res_name))
assert res.volume_id
if k in self.block_device_mapping:
cur_volume_id = self.block_device_mapping[k]['volumeId']
if cur_volume_id != res.volume_id:
raise Exception("cannot attach EBS volume ‘{0}’ to ‘{1}’ because volume ‘{2}’ is already attached there".format(res_name, k, cur_volume_id))
continue
v['volumeId'] = res.volume_id
elif v['disk'].startswith("snap-"):
if k in self.block_device_mapping: continue
self.log("creating volume from snapshot ‘{0}’...".format(v['disk']))
volume = self._conn.create_volume(size=v['size'], snapshot=v['disk'], zone=self.zone, volume_type=v['volumeType'], iops=v['iops'])
v['volumeId'] = volume.id
else:
if k in self.block_device_mapping:
v['needsAttach'] = False
self.update_block_device_mapping(k, v)
continue
raise Exception("adding device mapping ‘{0}’ to a running instance is not (yet) supported".format(v['disk']))
# ‘charonDeleteOnTermination’ denotes whether we have to
# delete the volume. This is distinct from
# ‘deleteOnTermination’ for backwards compatibility with
# the time that we still used auto-created volumes.
v['charonDeleteOnTermination'] = v['deleteOnTermination']
v['needsAttach'] = True
self.update_block_device_mapping(k, v)
# Wait for volume to get to available state for newly
# created volumes only (EC2 sometimes returns weird
# temporary states for newly created volumes, e.g. shortly
# in-use). Doing this after updating the device mapping
# state, to make it recoverable in case an exception
# happens (e.g. in other machine's deployments).
if volume: nixops.ec2_utils.wait_for_volume_available(self._conn, volume.id, self.logger)
# Always apply tags to the volumes we just created.
for k, v in self.block_device_mapping.items():
if not (('disk' in v and not (v['disk'].startswith("ephemeral")
or v['disk'].startswith("res-")
or v['disk'].startswith("vol-")))
or 'partOfImage' in v): continue
volume_tags = {}
volume_tags.update(common_tags)
volume_tags.update(defn.tags)
volume_tags['Name'] = "{0} [{1} - {2}]".format(self.depl.description, self.name, _sd_to_xvd(k))
self._retry(lambda: self._conn.create_tags([v['volumeId']], volume_tags))
# Attach missing volumes.
for k, v in self.block_device_mapping.items():
if v.get('needsAttach', False):
self.attach_volume(k, v['volumeId'])
del v['needsAttach']
self.update_block_device_mapping(k, v)
# FIXME: process changes to the deleteOnTermination flag.
# Auto-generate LUKS keys if the model didn't specify one.
for k, v in self.block_device_mapping.items():
if v.get('encrypt', False) and v.get('passphrase', "") == "" and v.get('generatedKey', "") == "" and v.get('encryptionType', "luks") == "luks":
v['generatedKey'] = nixops.util.generate_random_string(length=256)
self.update_block_device_mapping(k, v)
def _update_route53(self, defn):
import boto.route53
import boto.route53.record
self.dns_hostname = defn.dns_hostname
self.dns_ttl = defn.dns_ttl
self.route53_access_key_id = defn.route53_access_key_id
self.route53_use_public_dns_name = defn.route53_use_public_dns_name
record_type = 'CNAME' if self.route53_use_public_dns_name else 'A'
dns_value = self.public_dns_name if self.route53_use_public_dns_name else self.public_ipv4
self.log('sending Route53 DNS: {0} {1} {2}'.format(self.dns_hostname, record_type, dns_value))
self.connect_route53()
hosted_zone = ".".join(self.dns_hostname.split(".")[1:])
zones = self._conn_route53.get_all_hosted_zones()
def testzone(hosted_zone, zone):
"""returns True if there is a subcomponent match"""
hostparts = hosted_zone.split(".")
zoneparts = zone.Name.split(".")[:-1] # strip the last ""
return hostparts[::-1][:len(zoneparts)][::-1] == zoneparts
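        # e.g. a hosted_zone of "example.com" matches a zone.Name of
        # "example.com." and also a parent zone such as "com.", but not a
        # longer child zone like "foo.example.com."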
zones = [zone for zone in zones['ListHostedZonesResponse']['HostedZones'] if testzone(hosted_zone, zone)]
if len(zones) == 0:
raise Exception('hosted zone for {0} not found'.format(hosted_zone))
# use hosted zone with longest match
zones = sorted(zones, cmp=lambda a, b: cmp(len(a.Name), len(b.Name)), reverse=True)
zoneid = zones[0]['Id'].split("/")[2]
dns_name = '{0}.'.format(self.dns_hostname)
prev_a_rrs = [prev for prev
in self._conn_route53.get_all_rrsets(
hosted_zone_id=zoneid,
type="A",
name=dns_name
)
if prev.name == dns_name
and prev.type == "A"]
prev_cname_rrs = [prev for prev
in self._conn_route53.get_all_rrsets(
hosted_zone_id=zoneid,
type="CNAME",
name=self.dns_hostname
)
if prev.name == dns_name
and prev.type == "CNAME"]
changes = boto.route53.record.ResourceRecordSets(connection=self._conn_route53, hosted_zone_id=zoneid)
if len(prev_a_rrs) > 0:
for prevrr in prev_a_rrs:
change = changes.add_change("DELETE", self.dns_hostname, "A", ttl=prevrr.ttl)
change.add_value(",".join(prevrr.resource_records))
if len(prev_cname_rrs) > 0:
for prevrr in prev_cname_rrs:
change = changes.add_change("DELETE", prevrr.name, "CNAME", ttl=prevrr.ttl)
change.add_value(",".join(prevrr.resource_records))
change = changes.add_change("CREATE", self.dns_hostname, record_type, ttl=self.dns_ttl)
change.add_value(dns_value)
self._commit_route53_changes(changes)
def _commit_route53_changes(self, changes):
"""Commit changes, but retry PriorRequestNotComplete errors."""
retry = 3
while True:
try:
retry -= 1
return changes.commit()
except boto.route53.exception.DNSServerError, e:
code = e.body.split("<Code>")[1]
code = code.split("</Code>")[0]
if code != 'PriorRequestNotComplete' or retry < 0:
raise e
time.sleep(1)
def _delete_volume(self, volume_id, allow_keep=False):
if not self.depl.logger.confirm("are you sure you want to destroy EBS volume ‘{0}’?".format(volume_id)):
if allow_keep:
return
else:
raise Exception("not destroying EBS volume ‘{0}’".format(volume_id))
self.log("destroying EBS volume ‘{0}’...".format(volume_id))
volume = nixops.ec2_utils.get_volume_by_id(self.connect(), volume_id, allow_missing=True)
if not volume: return
nixops.util.check_wait(lambda: volume.update() == 'available')
volume.delete()
def destroy(self, wipe=False):
self._cancel_spot_request()
if not (self.vm_id or self.client_token): return True
if not self.depl.logger.confirm("are you sure you want to destroy EC2 machine ‘{0}’?".format(self.name)): return False
self.log_start("destroying EC2 machine... ".format(self.name))
# Find the instance, either by its ID or by its client token.
# The latter allows us to destroy instances that were "leaked"
# in create() due to it being interrupted after the instance
# was created but before it registered the ID in the database.
self.connect()
instance = None
if self.vm_id:
instance = self._get_instance(allow_missing=True)
else:
reservations = self._conn.get_all_instances(filters={'client-token': self.client_token})
if len(reservations) > 0:
instance = reservations[0].instances[0]
if instance:
instance.terminate()
# Wait until it's really terminated.
while True:
self.log_continue("[{0}] ".format(instance.state))
if instance.state == "terminated": break
time.sleep(3)
instance = self._get_instance(update=True)
self.log_end("")
nixops.known_hosts.update(self.public_ipv4, None, self.public_host_key)
# Destroy volumes created for this instance.
for k, v in self.block_device_mapping.items():
if v.get('charonDeleteOnTermination', False):
self._delete_volume(v['volumeId'])
self.update_block_device_mapping(k, None)
return True
def stop(self):
if not self._booted_from_ebs():
self.warn("cannot stop non-EBS-backed instance")
return
self.log_start("stopping EC2 machine... ")
instance = self._get_instance()
instance.stop() # no-op if the machine is already stopped
self.state = self.STOPPING
# Wait until it's really stopped.
def check_stopped():
instance = self._get_instance(update=True)
self.log_continue("[{0}] ".format(instance.state))
if instance.state == "stopped":
return True
if instance.state not in {"running", "stopping"}:
raise Exception(
"EC2 instance ‘{0}’ failed to stop (state is ‘{1}’)"
.format(self.vm_id, instance.state))
return False
if not nixops.util.check_wait(check_stopped, initial=3, max_tries=300, exception=False): # = 15 min
# If stopping times out, then do an unclean shutdown.
self.log_end("(timed out)")
self.log_start("force-stopping EC2 machine... ")
instance.stop(force=True)
if not nixops.util.check_wait(check_stopped, initial=3, max_tries=100, exception=False): # = 5 min
# Amazon docs suggest doing a force stop twice...
self.log_end("(timed out)")
self.log_start("force-stopping EC2 machine... ")
instance.stop(force=True)
nixops.util.check_wait(check_stopped, initial=3, max_tries=100) # = 5 min
self.log_end("")
self.state = self.STOPPED
self.ssh_master = None
def start(self):
if not self._booted_from_ebs():
return
self.log("starting EC2 machine...")
instance = self._get_instance()
instance.start() # no-op if the machine is already started
self.state = self.STARTING
# Wait until it's really started, and obtain its new IP
# address. Warn the user if the IP address has changed (which
# is generally the case).
prev_private_ipv4 = self.private_ipv4
prev_public_ipv4 = self.public_ipv4
if self.elastic_ipv4:
self.log("restoring previously attached elastic IP")
self._assign_elastic_ip(self.elastic_ipv4, True)
self._wait_for_ip()
if prev_private_ipv4 != self.private_ipv4 or prev_public_ipv4 != self.public_ipv4:
self.warn("IP address has changed, you may need to run ‘nixops deploy’")
self.wait_for_ssh(check=True)
self.send_keys()
def _check(self, res):
if not self.vm_id:
res.exists = False
return
self.connect()
instance = self._get_instance(allow_missing=True)
old_state = self.state
#self.log("instance state is ‘{0}’".format(instance.state if instance else "gone"))
if instance is None or instance.state in {"shutting-down", "terminated"}:
self.state = self.MISSING
return
res.exists = True
if instance.state == "pending":
res.is_up = False
self.state = self.STARTING
elif instance.state == "running":
res.is_up = True
res.disks_ok = True
for k, v in self.block_device_mapping.items():
if k not in instance.block_device_mapping.keys() and v.get('volumeId', None):
res.disks_ok = False
res.messages.append("volume ‘{0}’ not attached to ‘{1}’".format(v['volumeId'], _sd_to_xvd(k)))
volume = nixops.ec2_utils.get_volume_by_id(self.connect(), v['volumeId'], allow_missing=True)
if not volume:
res.messages.append("volume ‘{0}’ no longer exists".format(v['volumeId']))
if k in instance.block_device_mapping.keys() and instance.block_device_mapping[k].status != 'attached' :
res.disks_ok = False
res.messages.append("volume ‘{0}’ on device ‘{1}’ has unexpected state: ‘{2}’".format(v['volumeId'], _sd_to_xvd(k), instance.block_device_mapping[k].status))
if self.private_ipv4 != instance.private_ip_address or self.public_ipv4 != instance.ip_address:
self.warn("IP address has changed, you may need to run ‘nixops deploy’")
self.private_ipv4 = instance.private_ip_address
self.public_ipv4 = instance.ip_address
MachineState._check(self, res)
elif instance.state == "stopping":
res.is_up = False
self.state = self.STOPPING
elif instance.state == "stopped":
res.is_up = False
self.state = self.STOPPED
# check for scheduled events
instance_status = self._conn.get_all_instance_status(instance_ids=[instance.id])
for ist in instance_status:
if ist.events:
for e in ist.events:
res.messages.append("Event ‘{0}’:".format(e.code))
res.messages.append(" * {0}".format(e.description))
res.messages.append(" * {0} - {1}".format(e.not_before, e.not_after))
def reboot(self, hard=False):
self.log("rebooting EC2 machine...")
instance = self._get_instance()
instance.reboot()
self.state = self.STARTING
def get_console_output(self):
if not self.vm_id:
            raise Exception("cannot get console output of non-existent machine ‘{0}’".format(self.name))
self.connect()
return self._conn.get_console_output(self.vm_id).output or "(not available)"
def next_charge_time(self):
if not self.start_time:
return None
# EC2 instances are paid for by the hour.
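        # e.g. an instance that has been up for 90 minutes (uptime = 5400 s)
        # rounds up to 2 hours, so the next charge falls at start_time + 7200.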
uptime = time.time() - self.start_time
return self.start_time + int(math.ceil(uptime / 3600.0) * 3600.0)
def _xvd_to_sd(dev):
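    # e.g. "/dev/xvdf" -> "/dev/sdf"; _sd_to_xvd below is the inverse mapping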
return dev.replace("/dev/xvd", "/dev/sd")
def _sd_to_xvd(dev):
return dev.replace("/dev/sd", "/dev/xvd")
| fpletz/nixops | nixops/backends/ec2.py | Python | lgpl-3.0 | 62,815 |
__all__ = ["zeroSR1"]
import numpy as np
import scipy.linalg
import datetime
import sys
def zeroSR1(fcnGrad, h, prox, options):
"""
ZEROSR1 Solves smooth + nonsmooth/constrained optimization problems
xk,nit, stepSizes = zeroSR1(fcnGrad, h, prox_h, opts)
This uses the zero-memory SR1 method (quasi-Newton) to solve:
min_x f(x) + h(x)
where
'fcnGrad' calculates f(x) and its gradient at x,
and h(x) is a non-smooth term that can be infinite-valued (a constraint),
    so long as you present a function 'prox' that computes diagonal plus
rank-1 projections. The 'prox' function should accept at least three inputs:
'h' is the non-smooth function, and prox_h is a function with
3 or 4 inputs that returns:
y = prox_h( x0 , d, v, )
where
y = argmin_x h(x) + 1/2||x-x0||^2_B
and
B = inv(H) = inv( diag(D) + v*v' )
or, for the case with 4 arguments, y = prox_h( x0, d, v, sigma )
then B = inv( diag(D) + sigma*v*v' ) where sigma should be +1 or -1
The 4 argument case only matters when opts.SR1=true and opts.BB_type=1
        or opts.SR1=true, opts.BB_type=2 and opts.SR1_diagWeight > 1
If 'prox_h' isn't provided or is [], it defaults to the identity mapping, which corresponds
to the case when h=0.
    'prox_h' is meant to be given by something like prox_rank1_l1
e.g.,
prox = @(x0,d,v) prox_rank1_l1( x0, d, v, lambda );
or, for 4 arguments,
prox = @(x0,d,v,varargin) prox_rank1_l1( x0, d, v, lambda, [], varargin{:} );
"opts" is a dictionary with additional options
opts = {'tol': 1e-6, 'grad_tol' : 1e-6, 'nmax' : 1000, 'N' : N, 'L': normQ, 'verbose': 25}
- 'tol': final tolerance in function
- 'grad_tol': final tolerance in gradient
- 'nmax': maximum number of iterations
- 'N': size of signal (optional)
- 'x0': initial estimation of the signal (optional)
- 'L': estimation of the Lipschitz constant (or diagonal scaling)
- 'verbose': step size for the printing (=0 no printing)
Stephen Becker and Jalal Fadili, Nov 24 2011 -- Dec 2012
Copied from zeroSR1.m Dec 11 2012
Feb 28 2014, unnesting all functions to make compatible with octave.
See also proximalGradient.m
Python version directly translated from Matlab version (including comments): A. Asensio Ramos (March 12, 2015)
"""
start = datetime.datetime.now()
if (('N' in options) & ('x0' not in options)):
N = options['N']
xk = np.zeros((N,1))
elif (('N' not in options) & ('x0' in options)):
        xk = options['x0'].copy()
        N = len(xk)
    else:
        print "I have no way to know the size of the signal to retrieve. Please set options['N'] or options['x0']"
sys.exit(1)
maxStag = 10
SR1 = True
BB = True
nMax = options['nmax']
L = options['L']
Sigma = 1
BB_type = 2
if ((SR1) & (BB_type == 1)):
print("zeroSR1:experimental - With zero-memory SR1, BB_type=1 is an untested feature")
Sigma = -1
SR1_diagWeight = 0.8*(BB_type==2) + 1.0*(BB_type==1)
if ((SR1) & (BB_type == 2) & (SR1_diagWeight > 1)):
Sigma = -1
skipBB = False
stag = 0
fxOld = np.inf
t = 1.0 / L
stepSizes = np.zeros((nMax,1+SR1))
# Initialization
xk_old = xk
f, gradient = fcnGrad(xk)
f_xk = np.empty([])
gradientOld = gradient.copy()
# Begin algorithm
for nIteration in range(nMax):
# "sk" and "yk" are the vectors that will give us quasi-Newton
# information (and also used in BB step, since that can be
# seen as a quasi-Newton method)
sk = xk - xk_old
yk = gradient - gradientOld
if ((nIteration > 0) & (np.linalg.norm(yk) < 1e-13)):
print("zeroSR1:zeroChangeInGradient. Gradient isn't changing, try changing L")
yk = np.asarray([])
skipBB = True
        # Find an initial stepsize
if ((BB) & (nIteration > 0) & (not skipBB)):
if (BB_type == 1):
t = np.linalg.norm(sk)**2 / (sk.T.dot(yk)) # eq (1.6) in Dai/Fletcher. This is longer
else:
t = sk.T.dot(yk) / np.linalg.norm(yk)**2 # eq (1.7) in Dai/Fletcher. This is shorter
if (t < 1e-14):
print("Curvature condition violated!")
stag = np.inf
if (SR1):
            # we cannot take a full BB step, otherwise we exactly satisfy the secant
# equation, and there is no need for a rank-1 correction.
t = SR1_diagWeight*t # SR1_diagWeights is a scalar less than 1 like 0.6
H0 = lambda x: t*x
diagH = t*np.ones((N,1))
else:
t = 1.0 / L
H0 = lambda x: t*x
diagH = t*np.ones((N,1))
skipBB = False
stepSizes[nIteration,0] = t
# ---------------------------------------------------------------------
# -- Quasi-Newton -- Requires: H0, and builds H
# ---------------------------------------------------------------------
if ((SR1) & (nIteration > 0) & (yk.size != 0)):
gs = yk.T.dot(sk)
if (gs < 0):
print("Serious curvature condition problem!")
stag = np.inf
H0 = lambda x: diagH * x
vk = sk - H0(yk)
vkyk = vk.T.dot(yk)
Sigma_local = np.sign(vkyk[0])
if ((Sigma_local * vkyk) <= 0):
print("Warning: violated curvature conditions")
vk = []
H = H0
stepSizes[nIteration,1] = 0
else:
vk /= np.sqrt(Sigma_local * vkyk)
H = lambda x: H0(x) + Sigma_local * vk.dot(vk.T.dot(x))
stepSizes[nIteration,1] = vk.T.dot(vk)
else:
Sigma_local = Sigma
H = H0
vk = []
# ---------------------------------
# Make the proximal update
# ---------------------------------
p = H(-gradient) # Scaled descent direction. H includes the stepsize
xk_old = xk.copy()
if (Sigma_local != 1):
xk = prox(xk_old + p, diagH, vk, Sigma_local)
else:
xk = prox(xk_old + p, diagH, vk)
norm_grad = np.linalg.norm(xk - xk_old)
if ( (np.any(np.isnan(xk))) | (np.linalg.norm(xk) > 1.e10)):
stag = np.inf
xk = xk_old
print("Prox algorithm failed, probably due to numerical cancellations")
# Update function and gradient
gradientOld = gradient.copy()
f_xk, gradient = fcnGrad(xk)
fx = f_xk + h(xk)
df = np.abs(fx - fxOld) / np.abs(fxOld)
fxOld = fx.copy()
# Print iteration and test for stopping
if ((df < options['tol']) | (t < 1e-10) | (np.any(np.isnan(fx))) | (norm_grad < options['grad_tol'])):
stag += 1
if ((options['verbose'] != 0)):
if (((nIteration+1) % options['verbose'] == 0) | (stag > maxStag)):
try:
print "Iter: {0:5d}, f: {1:.3e}, df: {2:.2e}, ||grad||: {3:.2e}, step: {4:.2e}".format(nIteration+1, fx, df, norm_grad, t[0,0])
except:
print "Iter: {0:5d}".format(nIteration+1)
if (stag > maxStag):
delta = datetime.datetime.now() - start
print "Quitting. Reached tolerance. Ellapsed time: {0:2f} s".format(delta.total_seconds())
break
return xk, nIteration, stepSizes | aasensio/pyzeroSR1 | pyzeroSR1/zeroSR1.py | Python | mit | 6,583 |
# Python program for implementation of Selection
# Sort
import sys
A = [64, 25, 12, 22, 11]
# Traverse through all array elements
for i in range(len(A)):
# Find the minimum element in remaining
# unsorted array
min_idx = i
for j in range(i+1, len(A)):
if A[min_idx] > A[j]:
min_idx = j
# Swap the found minimum element with
# the first element
A[i], A[min_idx] = A[min_idx], A[i]
# Driver code to test above
print ("Sorted array")
for i in range(len(A)):
print("%d" %A[i]),
| Harnek/algorithms | Sorting/SelectionSort.py | Python | mit | 500 |
import sys # this allows you to read the user input from keyboard also called "stdin"
import classOne # This imports all the classOne functions
import classTwo # This imports all the classTwo functions
import classThree # This imports all the classThree functions
import classFour # This imports all the classFour functions
TIMEOUT=10 # this is the amount of time you will wait for an answer in Seconds. 10 means 10 seconds
MAX_CLASS=5
QUIZ_INSTRUCTIONS = """
Get ready for the quiz. You will have 10 questions out of which you
will need 8 right to win the prize. You will have """ + str(TIMEOUT) + """ seconds
to answer each question.Press Enter to start."""
def getUsersClass():
    ''' This function gets the user's class. It compares the class with MAX_CLASS and
    returns False if it is out of range. The class also has to be a natural number. '''
print("Please tell me which Class you are in? ")
try:
usersClass = int(sys.stdin.readline().strip())
if (usersClass < 1 or usersClass > MAX_CLASS) :
print("No Quiz available for Class " + str(usersClass))
return False
else :
return usersClass
except :
print("Exception")
return False
if __name__ == '__main__':
while(True) :
usersClass = getUsersClass()
if (usersClass != False) :
break
print(QUIZ_INSTRUCTIONS)
sys.stdin.readline()
if (usersClass == 1) :
classOne.classOneQuiz()
elif (usersClass == 2) :
classTwo.classTwoQuiz()
elif(usersClass == 3):
classThree.classThreeQuiz()
elif(usersClass == 4):
classFour.classFourQuiz()
| nischal2002/m-quiz-2016 | quiz.py | Python | mit | 1,704 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# zipUnzip.py
# Created on: 2014-09-08
# Description: Unzips or zips a directory
# ---------------------------------------------------------------------------
# This script is zips/unzips a directory.
# Currently set up to unzip (unzipDir(inputDir).
# Comment out unzipDir(inputDir) and uncomment zipDir(inputDir) to zip a
# directory.
import os
import zipfile
try:
import zlib
mode= zipfile.ZIP_DEFLATED
except:
mode= zipfile.ZIP_STORED
def unzipDir(inputDir):
for root, dirs, files in os.walk(inputDir):
for f in files:
if f.endswith('.zip'):
inFile = os.path.join(root, f)
print 'Working on', inFile
outDir = os.path.join(root, f[:-4])
if not os.path.isdir(outDir):
os.mkdir(outDir)
print 'Created',outDir
else:
continue
with zipfile.ZipFile(inFile,'r') as z:
z.extractall(outDir)
print f,'was successful.'
print 'Done.'
def zipDir(inputDir):
zipFileName = os.path.join(inputDir,'zipfile.zip')
print zipFileName
zip= zipfile.ZipFile(zipFileName, 'w', mode)
for root, dirs, files in os.walk(inputDir):
for f in files:
if f.endswith('.xml'):
fileName = os.path.join(root,f)
zip.write(fileName, arcname=f)
print 'ZIP CREATED'
zip.close()
print 'Done.'
inputDir = r'C:\Users\mart3565\Downloads\hennepin11102014'
unzipDir(inputDir)
#zipDir(inputDir)
| borchert/metadata-tools | general/zipUnzip.py | Python | mit | 1,713 |
#!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils.core import setup
from os.path import abspath, join, dirname
setup(
name="cql",
version="1.0.4",
description="Cassandra Query Language driver",
long_description=open(abspath(join(dirname(__file__), 'README'))).read(),
maintainer='Apache Cassandra development team',
maintainer_email='[email protected]',
url="http://cassandra.apache.org",
packages=["cql", "cql.cassandra"],
scripts=["cqlsh"],
requires=["thrift"],
provides=["cql"],
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Database :: Front-Ends",
],
)
| segfault/apache_cassandra | drivers/py/setup.py | Python | apache-2.0 | 1,564 |
# Based on iwidgets2.2.0/tests/entryfield.test code.
import tkinter
import Test
import Pmw
Test.initialise()
_myValidators = {
'hello' : (lambda s: s == 'hello', len),
}
c = Pmw.EntryField
kw_1 = {'entry_width' : 12, 'labelpos' : 'n', 'label_text' : 'Entry Field:'}
tests_1 = (
(c.pack, (), {'padx' : 10, 'pady' : 10, 'fill' : 'both', 'expand' : 1}),
(Test.num_options, (), 10),
('errorbackground', 'red'),
('hull_background', 'yellow'),
('label_background', 'yellow'),
('entry_background', 'yellow'),
('hull_show', 'X', 'TclError: unknown option "-show"'),
('entry_show', ''),
('entry_borderwidth', 4),
('entry_borderwidth', 2),
('command', Test.callback),
('hull_cursor', 'gumby'),
('entry_exportselection', 0),
('label_foreground', 'Green'),
('entry_foreground', 'Green'),
('label_foreground', 'Black'),
('entry_foreground', 'Black'),
('label_highlightcolor', 'Red'),
('entry_highlightcolor', 'Red'),
('entry_highlightthickness', 2),
('entry_insertbackground', 'Yellow'),
('entry_insertbackground', 'Black'),
('entry_insertborderwidth', 1),
('entry_insertborderwidth', 0),
('entry_insertofftime', 400),
('entry_insertontime', 700),
('entry_insertwidth', 3),
('invalidcommand', Test.callback),
('entry_justify', 'right'),
('entry_justify', 'center'),
('entry_justify', 'left'),
('label_text', 'Label'),
('entry_relief', 'raised'),
('entry_relief', 'sunken'),
('entry_state', 'disabled'),
('entry_state', 'normal'),
('entry_background', 'GhostWhite'),
('validate', 'numeric'),
('validate', 'alphabetic'),
('entry_width', 30),
('validate', 'bogus',
"ValueError: bad validate value \"bogus\": must be a function or one " +
"of the standard validators ('alphabetic', 'alphanumeric', 'date', " +
"'hexadecimal', 'integer', 'numeric', 'real', 'time') or extra " +
"validators ()"),
('relief', 'bogus', 'KeyError: Unknown option "relief" for EntryField'),
(c.invoke, (), 1),
(c.interior, (), tkinter.Frame),
(c.clear, ()),
(c.get, (), ''),
(c.insert, ('end', 'Test String')),
(c.get, (), 'Test String'),
(c.delete, (0, 'end')),
(c.insert, ('end', 'Another Test')),
(c.icursor, 'end'),
(c.index, 'end', 12),
(c.selection_from, 0),
(c.selection_to, 'end'),
(c.xview, '3'),
(c.clear, ()),
(c.insert, ('end', '100')),
('validate', {'validator' : 'real', 'min' : 10}),
(c.setentry, '50', 1),
(c.setentry, 'hello', 0),
('extravalidators', _myValidators),
('validate', 'hello'),
(c.setentry, 'hello', 1),
(c.setentry, 'foo', 0),
(c.valid, (), 1),
(c.cget, 'entry_background', 'GhostWhite'),
('entry_textvariable', Test.stringvar),
(c.checkentry, (), 0),
(c.cget, 'entry_background', 'red'),
)
tests_2 = (
(c.pack, (), {'padx' : 10, 'pady' : 10}),
)
alltests = [(tests_1, kw_1)]
poslist = ('nw', 'n', 'ne', 'en', 'e', 'es', 'se', 's', 'sw', 'ws', 'w', 'wn',)
for pos in poslist:
kw_2 = {
'labelpos' : pos,
'label_text' : 'Entry Field',
}
alltests.append((tests_2, kw_2))
testData = ((c, alltests),)
if __name__ == '__main__':
Test.runTests(testData)
| wolf29f/iCook | iCook/Pmw/Pmw_2_0_0/tests/EntryField_test.py | Python | gpl-2.0 | 3,257 |
#!/usr/bin/python3
# ------------------------------------------------------------------------------
# Python
import os
# ------------------------------------------------------------------------------
def gen_salt(length=512):
    # Read `length` random bytes from the OS CSPRNG and return them encoded
    # as a hexadecimal string (2 * length characters).
    uran = os.urandom(length)
    return uran.hex()
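# Example usage (added for illustration, not part of the original module):
# a 32-byte salt encodes to a 64-character hex string.
if __name__ == '__main__':
    salt = gen_salt(32)
    print(len(salt), salt)  # 64 <hex digits>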
| der-Daniel/Alohomora-Python | app/salt.py | Python | gpl-2.0 | 283 |
from django.db import models
from django.contrib.auth.models import User
class Tag(models.Model):
name = models.CharField(max_length=250)
owner = models.ForeignKey(User)
def __unicode__(self):
return self.name
class Family(models.Model):
husband_name = models.CharField('Husband: Name', max_length=250, blank=True)
husband_id = models.CharField('Husband: FamilyTree ID', max_length=50, blank=True)
wife_name = models.CharField('Wife: Name', max_length=250, blank=True)
wife_id = models.CharField('Wife: FamilyTree ID', max_length=50, blank=True)
notes = models.TextField(blank=True)
date_created = models.DateTimeField(auto_now_add=True)
tags = models.ManyToManyField(Tag, related_name='families', blank=True, null=True)
owner = models.ForeignKey(User)
starred = models.BooleanField(default=False)
class Meta:
verbose_name_plural = "families"
def __unicode__(self):
return ' / '.join([self.husband_name, self.wife_name])
def html(self):
return ' <span>/</span> '.join([self.husband_name, self.wife_name])
# Return incomplete items
def incomplete_items(self):
return self.items.filter(completed=False).order_by('order')
# Return latest incomplete items
def latest_incomplete_items(self):
return self.items.filter(completed=False).order_by('-date_created')
# Return incomplete items label
def incomplete_items_label(self):
num_items = len(self.incomplete_items())
return '{} item{}'.format(num_items, 's' if num_items != 1 else '')
# Return completed items
def completed_items(self):
return self.items.filter(completed=True).order_by('-date_completed')
class Item(models.Model):
title = models.CharField(max_length=500)
family = models.ForeignKey(Family, related_name='items', blank=True, null=True)
completed = models.BooleanField(default=False)
date_completed = models.DateTimeField(blank=True, null=True)
date_created = models.DateTimeField(auto_now_add=True)
tags = models.ManyToManyField(Tag, related_name='items', blank=True, null=True)
order = models.IntegerField(default=0)
notes = models.TextField(blank=True)
owner = models.ForeignKey(User)
starred = models.BooleanField(default=False)
def __unicode__(self):
return self.title
| bencrowder/gent | gent/models.py | Python | mit | 2,368 |
'''
Created on Nov 27, 2015
@author: ionut
'''
def ip2int(ip):
o = map(int, ip.split('.'))
    if len(o) != 4:
        raise Exception('invalid input')
for d in o:
if d < 0:
raise Exception('invalid input')
if d > 255:
raise Exception('invalid input')
res = (16777216 * o[0]) + (65536 * o[1]) + (256 * o[2]) + o[3]
return res
def int2ip(ipnum):
o1 = int(ipnum / 16777216) % 256
o2 = int(ipnum / 65536) % 256
o3 = int(ipnum / 256) % 256
o4 = int(ipnum) % 256
return '%s.%s.%s.%s' % (o1, o2, o3, o4)
def sh2rec(start_addr, doc):
return {
'range': (int2ip(start_addr), int2ip(doc[0])),
'country': doc[1],
'county': doc[2],
'city': doc[3]
}
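# Example round trip (added for illustration, not part of the original module;
# the location values below are made up):
# 1.2.3.4 == 1*16777216 + 2*65536 + 3*256 + 4 == 16909060
if __name__ == '__main__':
    assert ip2int('1.2.3.4') == 16909060
    assert int2ip(16909060) == '1.2.3.4'
    print(sh2rec(ip2int('10.0.0.0'), (ip2int('10.0.0.255'), 'US', 'Example County', 'Example City')))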
| iticus/iplocpy | utils.py | Python | mit | 779 |
# Copyright (C) 2014 Andrey Antukh <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import uuid
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.db import models
from django.apps import apps
from django.utils.functional import cached_property
from django.conf import settings
from django_pgjson.fields import JsonField
from taiga.mdrender.service import get_diff_of_htmls
from .choices import HistoryType
from .choices import HISTORY_TYPE_CHOICES
from taiga.base.utils.diff import make_diff as make_diff_from_dicts
def _generate_uuid():
return str(uuid.uuid1())
class HistoryEntry(models.Model):
"""
Domain model that represents a history
entry storage table.
    It is used to store object changes and
    comments.
"""
id = models.CharField(primary_key=True, max_length=255, unique=True,
editable=False, default=_generate_uuid)
user = JsonField(blank=True, default=None, null=True)
created_at = models.DateTimeField(default=timezone.now)
type = models.SmallIntegerField(choices=HISTORY_TYPE_CHOICES)
key = models.CharField(max_length=255, null=True, default=None, blank=True, db_index=True)
# Stores the last diff
diff = JsonField(null=True, default=None)
# Stores the last complete frozen object snapshot
snapshot = JsonField(null=True, default=None)
    # Stores the values of all identifiers referenced in the diff
values = JsonField(null=True, default=None)
# Stores a comment
comment = models.TextField(blank=True)
comment_html = models.TextField(blank=True)
delete_comment_date = models.DateTimeField(null=True, blank=True, default=None)
delete_comment_user = JsonField(blank=True, default=None, null=True)
    # Flag to mark some history entries as hidden.
    # Hidden history entries are important to save
    # but not important to preview.
    # Order fields are a good example of such fields.
is_hidden = models.BooleanField(default=False)
    # Flag to mark some history entries as complete
    # snapshots. The rest are partial snapshots.
is_snapshot = models.BooleanField(default=False)
_importing = None
@cached_property
def is_change(self):
return self.type == HistoryType.change
@cached_property
def is_create(self):
return self.type == HistoryType.create
@cached_property
def is_delete(self):
return self.type == HistoryType.delete
@cached_property
def owner(self):
pk = self.user["pk"]
model = apps.get_model("users", "User")
return model.objects.get(pk=pk)
@cached_property
def values_diff(self):
result = {}
users_keys = ["assigned_to", "owner"]
def resolve_diff_value(key):
value = None
diff = get_diff_of_htmls(
self.diff[key][0] or "",
self.diff[key][1] or ""
)
if diff:
key = "{}_diff".format(key)
value = (None, diff)
return (key, value)
def resolve_value(field, key):
data = self.values[field]
key = str(key)
if key not in data:
return None
return data[key]
for key in self.diff:
value = None
# Note: Hack to prevent description_diff propagation
# on old HistoryEntry objects.
if key == "description_diff":
continue
elif key == "content_diff":
continue
elif key == "blocked_note_diff":
continue
elif key in["description", "content", "blocked_note"]:
(key, value) = resolve_diff_value(key)
elif key in users_keys:
value = [resolve_value("users", x) for x in self.diff[key]]
elif key == "watchers":
value = [[resolve_value("users", x) for x in self.diff[key][0]],
[resolve_value("users", x) for x in self.diff[key][1]]]
elif key == "points":
points = {}
pointsold = self.diff["points"][0]
pointsnew = self.diff["points"][1]
                # if there were no previous points, every entry is new
if pointsold is None:
for role_id, point_id in pointsnew.items():
role_name = resolve_value("roles", role_id)
points[role_name] = [None, resolve_value("points", point_id)]
else:
for role_id, point_id in pointsnew.items():
role_name = resolve_value("roles", role_id)
oldpoint_id = pointsold.get(role_id, None)
points[role_name] = [resolve_value("points", oldpoint_id),
resolve_value("points", point_id)]
                # Drop points entries whose value did not change.
for role in dict(points):
values = points[role]
if values[1] == values[0]:
del points[role]
if points:
value = points
elif key == "attachments":
attachments = {
"new": [],
"changed": [],
"deleted": [],
}
oldattachs = {x["id"]:x for x in self.diff["attachments"][0]}
newattachs = {x["id"]:x for x in self.diff["attachments"][1]}
for aid in set(tuple(oldattachs.keys()) + tuple(newattachs.keys())):
if aid in oldattachs and aid in newattachs:
changes = make_diff_from_dicts(oldattachs[aid], newattachs[aid],
excluded_keys=("filename", "url"))
if changes:
change = {
"filename": newattachs.get(aid, {}).get("filename", ""),
"url": newattachs.get(aid, {}).get("url", ""),
"changes": changes
}
attachments["changed"].append(change)
elif aid in oldattachs and aid not in newattachs:
attachments["deleted"].append(oldattachs[aid])
elif aid not in oldattachs and aid in newattachs:
attachments["new"].append(newattachs[aid])
if attachments["new"] or attachments["changed"] or attachments["deleted"]:
value = attachments
elif key == "custom_attributes":
custom_attributes = {
"new": [],
"changed": [],
"deleted": [],
}
oldcustattrs = {x["id"]:x for x in self.diff["custom_attributes"][0] or []}
newcustattrs = {x["id"]:x for x in self.diff["custom_attributes"][1] or []}
for aid in set(tuple(oldcustattrs.keys()) + tuple(newcustattrs.keys())):
if aid in oldcustattrs and aid in newcustattrs:
changes = make_diff_from_dicts(oldcustattrs[aid], newcustattrs[aid],
excluded_keys=("name"))
if changes:
change = {
"name": newcustattrs.get(aid, {}).get("name", ""),
"changes": changes
}
custom_attributes["changed"].append(change)
elif aid in oldcustattrs and aid not in newcustattrs:
custom_attributes["deleted"].append(oldcustattrs[aid])
elif aid not in oldcustattrs and aid in newcustattrs:
custom_attributes["new"].append(newcustattrs[aid])
if custom_attributes["new"] or custom_attributes["changed"] or custom_attributes["deleted"]:
value = custom_attributes
elif key in self.values:
value = [resolve_value(key, x) for x in self.diff[key]]
else:
value = self.diff[key]
if not value:
continue
result[key] = value
return result
class Meta:
ordering = ["created_at"]
| astagi/taiga-back | taiga/projects/history/models.py | Python | agpl-3.0 | 9,174 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# -----------------------------------------------------------------
# Package initialization file
# -----------------------------------------------------------------
## \package pts.modeling.simulation TO DO
#
# This package ...
#
# -----------------------------------------------------------------
# Import classes to make them available at the level of this subpackage
| SKIRT/PTS | modeling/simulation/__init__.py | Python | agpl-3.0 | 694 |
"""
Analysis of the performance of two clustering methods
on various subsets of our county-level cancer risk data set.
In particular, we will compare these two clustering methods in three areas:
* Efficiency - Which method computes clusterings more efficiently?
* Automation - Which method requires less human supervision
to generate reasonable clusterings?
* Quality - Which method generates clusterings with less error?"""
from timeit import timeit
import matplotlib.pyplot as plt
setup = """from random import random, uniform
from cluster import Cluster
from closest_pair_and_clustering import fast_closest_pair, slow_closest_pair
def gen_random_clusters(num_clusters):
for dummy_i in range(num_clusters):
yield Cluster(set(), uniform(-1.0, 1.0), uniform(-1.0, 1.0), 0, 0)
cluster_lists = [list(gen_random_clusters(num_clusters)) for num_clusters in range(2, 201)]"""
code_for_fast = """fast_closest_pair(cluster_lists[{}])"""
times_for_fast = [timeit(stmt=code_for_fast.format(idx),
setup=setup,
number=50) for idx in range(199)]
code_for_slow = """slow_closest_pair(cluster_lists[{}])"""
times_for_slow = [timeit(stmt=code_for_slow.format(idx),
setup=setup,
number=50) for idx in range(199)]
plt.plot(times_for_fast, color='g', label='Fast Method')
plt.plot(times_for_slow, color='b', label='Slow Method')
plt.title('Slow and fast Closest pair - Desktop', fontsize=18, color='#ff8800')
plt.xlabel('Number of clusters', fontsize=14, color='#ff8800')
plt.ylabel('Time in seconds', fontsize=14, color='#ff8800')
plt.legend(loc='best', labels=['Fast Method', 'Slow Method'])
plt.show()
# plt.savefig('Q1', dpi=300, format='png', transparent=False, orientation='landscape', bbox_inches='tight', pad_inches=0.3)
| MohamedAbdultawab/FOC_RiceUniv | algorithmic-thinking-2/module-3-project-and-application/02_application-3-comparison-of-clustering-algorithms/Q1_using_timeit.py | Python | gpl-3.0 | 1,860 |
# -*- coding: utf-8 -*-
# This file is part of emesene.
#
# emesene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# emesene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with emesene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''This module contains the ChatInput class'''
import logging
import os
import PyQt4.QtGui as QtGui
import PyQt4.QtCore as QtCore
from PyQt4.QtCore import Qt
import e3
import gui
from HTMLParser import HTMLParser
from gui.qt4ui import Utils
log = logging.getLogger('qt4ui.widgets.ChatInput')
class ChatInput (QtGui.QTextEdit):
'''A widget suited for editing chat lines. Provides as-you-type
smileys, color settings and font settings, chat line history'''
NAME = 'Input Text'
DESCRIPTION = 'A widget to enter messages on the conversation'
AUTHOR = 'Gabriele "Whisky" Visconti'
WEBSITE = ''
style_changed = QtCore.pyqtSignal()
def __init__(self, on_send_message, on_cycle_history,
send_typing_notification, parent=None):
'''Constructor'''
QtGui.QTextEdit.__init__(self, parent)
self._smiley_dict = {}
self._max_shortcut_len = 0
self.on_send_message = on_send_message
self.on_cycle_history = on_cycle_history
self.send_typing_notification = send_typing_notification
self._emote_theme = gui.theme.emote_theme
self._qt_color = QtGui.QColor(Qt.black)
# typing notification
self.typing_timer = QtCore.QTimer()
self.typing_timer.setSingleShot(False)
self.typing_timer.timeout.connect(self.on_send_typing_notification)
# emesene's
def update_style(self, style):
'''update style'''
self.e3_style = style
def set_smiley_dict(self, smiley_dict):
'''Sets the smiley recognized by this widget'''
shortcuts = smiley_dict.keys()
for shortcut in shortcuts:
path = unicode(self._emote_theme.emote_to_path(shortcut))
if not path:
log.warning('No image path for: \t%s, %s'
% (shortcut, smiley_dict[shortcut]))
continue
shortcut = unicode(shortcut)
path = os.path.abspath(path[7:])
self._smiley_dict[shortcut] = path
current_len = len(shortcut)
if current_len > self._max_shortcut_len:
self._max_shortcut_len = current_len
def insert_text_after_cursor(self, text):
'''Insert given text at current cursor's position'''
text = unicode(text)
for i in range(len(text)):
# It's a little bit dirty, but seems to work....
fake_event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress, 0,
Qt.NoModifier, text[i])
self.keyPressEvent(fake_event)
def _insert_char(self, char):
'''Inserts a single char, checking for smileys'''
# this method uses python's builtin string type, not QString
max_shortcut_len = self._max_shortcut_len
shortcuts = self._smiley_dict.keys()
cursor = self.textCursor()
text_search = unicode(char)
i = 0
while i < max_shortcut_len - 1:
# TODO: check if the returned QChar is valid
last_char = self.document().characterAt(cursor.position() - 1 - i)
if last_char.isPrint():
last_char = QtCore.QString(last_char)
text_search = unicode(last_char) + text_search
i += 1
length = len(text_search)
if text_search in shortcuts:
for i in range(length - 1):
cursor.deletePreviousChar()
self._insert_image_resource(text_search)
cursor.insertImage(text_search)
# Prevent the color from changing:
self.setTextColor(self._qt_color)
return True
return False
def _insert_image_resource(self, shortcut):
'''Appends an image resource to this widget's
QTextDocument'''
image = QtGui.QImage(self._smiley_dict[shortcut])
self.document().addResource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(shortcut), image)
def _get_e3_style(self):
'''Returns the font style in e3 format'''
qt_font = self._get_qt_font()
e3_color = Utils.qcolor_to_e3_color(self._qt_color)
e3_style = Utils.qfont_to_style(qt_font, e3_color)
return e3_style
def _set_e3_style(self, e3_style):
'''Sets the font style, given an e3 style'''
qt_color = Utils.e3_color_to_qcolor(e3_style.color)
qt_font = QtGui.QFont()
qt_font.setFamily(e3_style.font)
qt_font.setBold(e3_style.bold)
qt_font.setItalic(e3_style.italic)
qt_font.setStrikeOut(e3_style.strike)
qt_font.setPointSize(e3_style.size)
self._set_qt_color(qt_color)
self._set_qt_font(qt_font)
e3_style = property(_get_e3_style, _set_e3_style)
def _set_qt_font(self, new_font):
'''sets the font style in qt format'''
old_font = self._get_qt_font()
self.document().setDefaultFont(new_font)
if old_font != new_font:
self.style_changed.emit()
def _get_qt_font(self):
'''Returns the default font in qt format'''
return self.document().defaultFont()
def _set_qt_color(self, new_color):
'''Sets the color'''
old_color = self._qt_color
self._qt_color = new_color
cursor = self.textCursor()
cursor_position = cursor.position()
cursor.select(QtGui.QTextCursor.Document)
char_format = QtGui.QTextCharFormat()
char_format.setForeground(QtGui.QBrush(new_color))
cursor.mergeCharFormat(char_format)
cursor.setPosition(cursor_position)
        # We need this because the merge above doesn't affect the last
        # block when that block is empty (i.e. an empty QTextEdit, or a
        # document that ends with an image and therefore has an empty
        # block after it). It is not sufficient on its own either, since
        # setTextColor() only changes the current block's format, so both
        # parts are needed.
self.setTextColor(new_color)
# -------------------- QT_OVERRIDE
def keyPressEvent(self, event):
'''handles special key combinations: Return, CTRL+Return,
CTRL+UP, CTRL+DOWN'''
# pylint: disable=C0103
if event.key() == Qt.Key_Return:
if event.modifiers() == Qt.ControlModifier:
temp = QtGui.QKeyEvent(QtCore.QEvent.KeyPress,
Qt.Key_Return,
Qt.NoModifier,
event.text(),
event.isAutoRepeat(),
event.count())
event = temp
else:
self._on_send_btn_clicked()
return
if (event.key() == Qt.Key_Up or event.key() == Qt.Key_P) and \
event.modifiers() == Qt.ControlModifier:
self.on_cycle_history()
return
if (event.key() == Qt.Key_Down or event.key() == Qt.Key_N) and \
event.modifiers() == Qt.ControlModifier:
self.on_cycle_history(1)
return
if event.text().length() > 0:
if not self.typing_timer.isActive():
self.send_typing_notification()
self.typing_timer.start(3000)
if self._insert_char(event.text()):
return
QtGui.QTextEdit.keyPressEvent(self, event)
def on_send_typing_notification(self):
self.typing_timer.stop()
def _on_send_btn_clicked(self):
'''Slot called when the user presses Enter in
the chat line editor. Sends the message'''
message_string = unicode(self.toPlainText())
if len(message_string) == 0:
return
self.clear()
self.on_send_message(message_string)
def _get_text(self):
return unicode(self.toPlainText())
def _set_text(self, text):
self.clear()
self.insert_text_after_cursor(text)
text = property(fget=_get_text, fset=_set_text)
def canInsertFromMimeData(self, source):
'''Makes only plain text insertable'''
if source.hasText():
return True
else:
return False
def insertFromMimeData(self, source):
'''Inserts from mime data'''
self.insert_text_after_cursor(source.text())
def createMimeDataFromSelection(self):
'''Creates a mime data object from selection'''
mime_data = QtGui.QTextEdit.createMimeDataFromSelection(self)
if mime_data.hasHtml():
parser = MyHTMLParser()
parser.feed(mime_data.html())
mime_data.setText(parser.get_data())
return mime_data
def toPlainText(self):
'''Gets a plain text representation of the contents'''
parser = MyHTMLParser()
parser.feed(self.toHtml())
return parser.get_data()
class MyHTMLParser (HTMLParser):
'''This class parses html text, collecting plain
text and substituting <img> tags with a proper
smiley shortcut if any'''
def __init__(self):
'''Constructor'''
HTMLParser.__init__(self)
self._in_body = False
self._data = ''
def reset(self):
'''Resets the parser'''
HTMLParser.reset(self)
self._in_body = False
self._data = ""
def feed(self, html_string):
'''Feeds the parser with an html string to parse'''
if isinstance(html_string, QtCore.QString):
html_string = unicode(html_string)
HTMLParser.feed(self, html_string)
def handle_starttag(self, tag, attrs):
'''Handle opening tags'''
if self._in_body:
if tag == "body":
raise NameError("Malformed HTML")
if tag == "img":
src = attrs[0][1]
self._data += src
else:
if tag == "body":
self._in_body = True
def handle_endtag(self, tag):
'''Handle closing tags'''
if self._in_body:
if tag == "body":
self._in_body = False
def handle_data(self, data):
'''Handle data sequences'''
if self._in_body:
self._data += data
def handle_charref(self, name):
self._data += Utils.unescape(u'&%s;' % name)
def handle_entityref(self, name):
self._data += Utils.unescape(u'&%s;' % name)
def get_data(self):
'''returns parsed string'''
# [1:] is to trim the leading line break.
return self._data[1:]
| tiancj/emesene | emesene/gui/qt4ui/widgets/ChatInput.py | Python | gpl-3.0 | 11,377 |
#!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from rest_framework import permissions
class EmailPermissions(permissions.IsAuthenticated):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
# update email address to primary
if request.method in ['PUT', 'PATCH']:
return not obj.is_primary and obj.is_valid
# delete email address
if request.method in ['DELETE']:
return not obj.is_primary
return True
| glomium/elmnt.de | useraccounts/permissions.py | Python | mit | 597 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2018, Eric Jacob <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_ltm_profile_client_ssl
short_description: BIG-IP ltm client-ssl profile module
description:
- You can use the client-ssl component to create, modify, or delete a custom Client SSL profile, or display a custom
or default Client SSL profile.
version_added: "2.4"
author:
- "Gabriel Fortin (@GabrielFortin)"
options:
    alert_timeout:
description:
- Specifies the maximum time period in seconds to keep the SSL session active after alert message is sent.
default: 10
    allow_non_ssl:
description:
- Enables or disables non-SSL connections.
default: disabled
choices: ['enabled', 'disabled']
app-service:
description:
- Specifies the name of the application service to which the profile belongs.
authenticate:
description:
- Specifies how often the system authenticates a user.
default: once
authenticate_depth:
description:
- Specifies the authenticate depth.
default: 9
ca_file:
description:
- Specifies the certificate authority (CA) file name.
cache_size:
description:
- Specifies the SSL session cache size.
default: 262144
cache_timeout:
description:
- Specifies the SSL session cache timeout value.
default: 3600
choices: range(0, 86401)
cert:
description:
- This option is deprecated and is maintained here for backward compatibility reasons.
- Please check cert_key_chain option to add certificate, key, passphrase and chain to the profile.
cert_extension_includes:
description:
- Specifies the extensions of the web server certificates to be included in the generated certificates using
SSL Forward Proxy.
cert_key_chain:
description:
- Adds, deletes, or replaces a set of certificate, key, passphrase, chain and OCSP Stapling Parameters
object.
cert_lookup_by_ipaddr_port:
description:
- Specifies whether to perform certificate look up by IP address and port number.
chain:
description:
- This option is deprecated and is maintained here for backward compatibility reasons.
- Please check cert_key_chain option to add certificate, key, passphrase and chain to the profile.
ciphers:
description:
- Specifies a cipher name.
default: DEFAULT
client_cert_ca:
description:
- Specifies the client cert certificate authority name.
crl_file:
description:
- Specifies the certificate revocation list file name.
defaults_from:
description:
- This setting specifies the profile that you want to use as the parent profile.
default: clientssl
description:
description:
- User defined description.
handshake_timeout:
description:
- Specifies the handshake timeout in seconds.
default: 10
key:
description:
- This option is deprecated and is maintained here for backward compatibility reasons.
- Please check cert_key_chain option to add certificate, key, passphrase and chain to the profile.
mod_ssl_methods:
description:
- Enables or disables ModSSL method emulation.
default: disabled
choices: ['enabled', 'disabled']
mode:
description:
- Specifies the profile mode, which enables or disables SSL processing.
default: enabled
choices: ['enabled', 'disabled']
name:
description:
- Specifies a unique name for the component.
required: true
tm_options:
description:
- Enables options, including some industry-related workarounds.
default: dont-insert-empty-fragments
choices: [
'all-bugfixes', 'cipher-server-preference', 'dont-insert-empty-fragments', 'ephemeral-rsa',
'microsoft-big-sslv3-buffer', 'microsoft-sess-id-bug', 'msie-sslv2-rsa-padding', 'netscape-ca-dn-bug',
'netscape-challenge-bug', 'netscape-demo-cipher-change-bug', 'netscape-reuse-cipher-change-bug',
'no-session-resumption-on-renegotiation', 'no-ssl', 'no-sslv2', 'no-sslv3', 'no-tls', 'no-tlsv1',
            'no-tlsv1.1', 'no-tlsv1.2', 'no-dtls', 'passive-close', 'none', 'pkcs1-check-1', 'pkcs1-check-2', 'single-dh-use',
'ssleay-080-client-dh-bug', 'sslref2-reuse-cert-type-bug', 'tls-d5-bug', 'tls-rollback-bug'
]
partition:
description:
- Displays the administrative partition within which the component resides.
passphrase:
description:
- This option is deprecated and is maintained here for backward compatibility reasons.
- Please check cert_key_chain option to add certificate, key, passphrase and chain to the profile.
peer_cert_mode:
description:
- Specifies the peer certificate mode.
default: ignore
choices: ['ignore', 'require']
peer_no_renegotiate_timeout:
description:
- Specifies the timeout in seconds when the server sends Hello Request and waits for ClientHello before it
sends Alert with fatal alert.
default: 10
port:
description:
- Specifies a service for the data channel port used for this client-ssl profile.
proxy_ssl:
description:
- Enabling this option requires a corresponding server ssl profile with proxy-ssl enabled to perform
transparent SSL decryption.
choices: ['enabled', 'disabled']
proxy_ssl_passthrough:
description:
- This allows Proxy SSL to passthrough the traffic when ciphersuite negotiated between the client and server
is not supported.
default: disabled
choices: ['enabled', 'disabled']
proxy_ca_cert:
description:
- Specifies the name of the certificate file that is used as the certification authority certificate when
SSL forward proxy feature is enabled.
proxy_ca_key:
description:
- Specifies the name of the key file that is used as the certification authority key when SSL forward proxy
feature is enabled.
proxy_ca_passphrase:
description:
- Specifies the passphrase of the key file that is used as the certification authority key when SSL forward
proxy feature is enabled.
renegotiate_max_record_delay:
description:
- Specifies the maximum number of SSL records that the traffic management system can receive before it
renegotiates an SSL session.
default: indefinite
renegotiate_period:
description:
- Specifies the number of seconds required to renegotiate an SSL session.
default: indefinite
renegotiate_size:
description:
- Specifies the size of the application data, in megabytes, that is transmitted over the secure channel.
default: Indefinite
renegotiation:
description:
- Specifies whether renegotiations are enabled.
default: enabled
choices: ['enabled', 'disabled']
retain_certificate:
description:
- APM module requires storing certificate in SSL session.
default: true
type: bool
secure_renegotiation:
description:
- Specifies the secure renegotiation mode.
default: require
choices: ['request', 'require', 'require-strict']
max_renegotiations_per_minute:
description:
- Specifies the maximum number of renegotiation attempts allowed in a minute.
default: 5
server_name:
description:
- Specifies the server names to be matched with SNI (server name indication) extension information in
ClientHello from a client connection.
session_mirroring:
description:
- Specifies the name of the application service to which the profile belongs.
session_ticket:
description:
- Specifies the name of the application service to which the profile belongs.
sni_default:
description:
- When true, this profile is the default SSL profile when the server name in a client connection does not
match any configured server names, or a client connection does not specify any server name at all.
type: bool
sni_require:
description:
- When this option is enabled, a client connection that does not specify a known server name or does not
support SNI extension will be rejected.
choices: ['enabled', 'disabled']
source_ip_blacklist:
description:
- Specifies the data group name of source ip blacklist when SSL forward proxy bypass feature is enabled.
source_ip_whitelist:
description:
- Specifies the data group name of source ip whitelist when SSL forward proxy bypass feature is enabled.
ssl_forward_proxy:
description:
- Enables or disables SSL forward proxy feature.
default: disabled
choices: ['enabled', 'disabled']
ssl_forward_proxy_bypass:
description:
- Enables or disables SSL forward proxy bypass feature.
default: disabled
choices: ['enabled', 'disabled']
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
strict_resume:
description:
- Specifies the name of the application service to which the profile belongs.
unclean_shutdown:
description:
- When enabled, the SSL profile performs unclean shutdowns of all SSL connections without exchanging the
required SSL shutdown alerts.
default: enabled
choices: ['enabled', 'disabled']
generic_alert:
description:
- Enables or disables generic-alert.
choices: ['enabled', 'disabled']
ssl_sign_hash:
description:
- Specifies SSL sign hash algorithm which is used to sign and verify SSL Server Key Exchange and Certificate
Verify messages for the specified SSL profiles.
default: sha1
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Create LTM Client SSL profile
f5bigip_ltm_profile_client_ssl:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: my_client_ssl_profile
partition: Common
key: /Common/exemple.localhost.key
cert: /Common/exemple.localhost.crt
ciphers: DEFAULT:!SSLv2:!SSLv3:!TLSv1
tm_options:
- dont-insert-empty-fragments
- single-dh-use
- no-sslv2
- no-sslv3
- no-tlsv1
state: present
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import range
from ansible_common_f5.base import F5_ACTIVATION_CHOICES
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
@property
def argument_spec(self):
argument_spec = dict(
alert_timeout=dict(type='int'),
allow_non_ssl=dict(type='str', choices=F5_ACTIVATION_CHOICES),
app_service=dict(type='str'),
authenticate=dict(type='str'),
authenticate_depth=dict(type='int'),
ca_file=dict(type='str'),
cache_size=dict(type='int'),
cache_timeout=dict(type='int', choices=range(0, 86401)),
cert=dict(type='str'),
# cert_extension_includes=dict(type='list'),
# cert_key_chain=dict(type='list'),
cert_lookup_by_ipaddr_port=dict(type='str'),
chain=dict(type='str'),
ciphers=dict(type='str'),
client_cert_ca=dict(type='str'),
crl_file=dict(type='str'),
defaults_from=dict(type='str'),
description=dict(type='str'),
handshake_timeout=dict(type='int'),
key=dict(type='str'),
mod_ssl_methods=dict(type='str', choices=F5_ACTIVATION_CHOICES),
mode=dict(type='str', choices=F5_ACTIVATION_CHOICES),
tm_options=dict(type='list'),
passphrase=dict(type='str', no_log=True),
peer_cert_mode=dict(type='str', choices=['ignore', 'require']),
peer_no_renegotiate_timeout=dict(type='int'),
proxy_ssl=dict(type='str', choices=F5_ACTIVATION_CHOICES),
proxy_ssl_passthrough=dict(type='str', choices=F5_ACTIVATION_CHOICES),
proxy_ca_cert=dict(type='str'),
proxy_ca_key=dict(type='str'),
proxy_ca_passphrase=dict(type='str', no_log=True),
renegotiate_max_record_delay=dict(type='str'),
renegotiate_period=dict(type='str'),
renegotiate_size=dict(type='str'),
renegotiation=dict(type='str', choices=F5_ACTIVATION_CHOICES),
retain_certificate=dict(type='bool'),
secure_renegotiation=dict(type='str'),
max_renegotiations_per_minute=dict(type='int'),
server_name=dict(type='str'),
session_mirroring=dict(type='str'),
session_ticket=dict(type='str'),
sni_default=dict(type='bool'),
sni_require=dict(type='str', choices=F5_ACTIVATION_CHOICES),
source_ip_blacklist=dict(type='str'),
source_ip_whitelist=dict(type='str'),
ssl_forward_proxy=dict(type='str', choices=F5_ACTIVATION_CHOICES),
ssl_forward_proxy_bypass=dict(type='str', choices=F5_ACTIVATION_CHOICES),
strict_resume=dict(type='str', choices=F5_ACTIVATION_CHOICES),
unclean_shutdown=dict(type='str', choices=F5_ACTIVATION_CHOICES),
generic_alert=dict(type='str', choices=F5_ACTIVATION_CHOICES),
ssl_sign_hash=dict(type='str')
)
argument_spec.update(F5_PROVIDER_ARGS)
argument_spec.update(F5_NAMED_OBJ_ARGS)
return argument_spec
@property
def supports_check_mode(self):
return True
class F5BigIpLtmProfileClientSsl(F5BigIpNamedObject):
def _set_crud_methods(self):
self._methods = {
'create': self._api.tm.ltm.profile.client_ssls.client_ssl.create,
'read': self._api.tm.ltm.profile.client_ssls.client_ssl.load,
'update': self._api.tm.ltm.profile.client_ssls.client_ssl.update,
'delete': self._api.tm.ltm.profile.client_ssls.client_ssl.delete,
'exists': self._api.tm.ltm.profile.client_ssls.client_ssl.exists
}
def main():
params = ModuleParams()
module = AnsibleModule(argument_spec=params.argument_spec, supports_check_mode=params.supports_check_mode)
try:
obj = F5BigIpLtmProfileClientSsl(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except Exception as exc:
module.fail_json(msg=str(exc))
if __name__ == '__main__':
main()
| GabrielFortin/ansible-module-f5bigip | library/f5bigip_ltm_profile_client_ssl.py | Python | apache-2.0 | 16,293 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['LinearTrend'] , ['Seasonal_DayOfMonth'] , ['SVR'] ); | antoinecarme/pyaf | tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_LinearTrend_Seasonal_DayOfMonth_SVR.py | Python | bsd-3-clause | 160 |
from ingenico.connect.sdk.data_object import DataObject
from ingenico.connect.sdk.domain.dispute.dispute_response import DisputeResponse
from ingenico.connect.sdk.domain.payment.payment_response import PaymentResponse
from ingenico.connect.sdk.domain.refund.refund_response import RefundResponse
from ingenico.connect.sdk.domain.payout.payout_response import PayoutResponse
from ingenico.connect.sdk.domain.token.token_response import TokenResponse
class WebhooksEvent(DataObject):
__api_version = None
__id = None
__created = None
__merchant_id = None
__type = None
__payment = None
__refund = None
__payout = None
__token = None
__dispute = None
@property
def api_version(self):
return self.__api_version
@api_version.setter
def api_version(self, api_version):
self.__api_version = api_version
@property
def id(self):
return self.__id
@id.setter
def id(self, id):
self.__id = id
@property
def created(self):
return self.__created
@created.setter
def created(self, created):
self.__created = created
@property
def merchant_id(self):
return self.__merchant_id
@merchant_id.setter
    def merchant_id(self, merchant_id):
        self.__merchant_id = merchant_id
@property
def type(self):
return self.__type
@type.setter
def type(self, type):
self.__type = type
@property
def payment(self):
return self.__payment
@payment.setter
def payment(self, payment):
self.__payment = payment
@property
def refund(self):
return self.__refund
@refund.setter
def refund(self, refund):
self.__refund = refund
@property
def payout(self):
return self.__payout
@payout.setter
def payout(self, payout):
self.__payout = payout
@property
def token(self):
return self.__token
@token.setter
def token(self, token):
self.__token = token
@property
def dispute(self):
return self.__dispute
@dispute.setter
def dispute(self, dispute):
self.__dispute = dispute
def to_dictionary(self):
dictionary = super(WebhooksEvent, self).to_dictionary()
if self.__api_version is not None:
dictionary['apiVersion'] = self.__api_version
if self.__id is not None:
dictionary['id'] = self.__id
if self.__created is not None:
dictionary['created'] = self.__created
if self.__merchant_id is not None:
dictionary['merchantId'] = self.__merchant_id
if self.__type is not None:
dictionary['type'] = self.__type
if self.__payment is not None:
dictionary['payment'] = self.__payment
if self.__refund is not None:
dictionary['refund'] = self.__refund
if self.__payout is not None:
dictionary['payout'] = self.__payout
if self.__token is not None:
dictionary['token'] = self.__token
if self.__dispute is not None:
dictionary['dispute'] = self.__dispute
return dictionary
def from_dictionary(self, dictionary):
super(WebhooksEvent, self).from_dictionary(dictionary)
if 'apiVersion' in dictionary:
self.__api_version = dictionary['apiVersion']
if 'id' in dictionary:
self.__id = dictionary['id']
if 'created' in dictionary:
self.__created = dictionary['created']
if 'merchantId' in dictionary:
self.__merchant_id = dictionary['merchantId']
if 'type' in dictionary:
self.__type = dictionary['type']
if 'payment' in dictionary:
if not isinstance(dictionary['payment'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['payment']))
value = PaymentResponse()
self.__payment = value.from_dictionary(dictionary['payment'])
if 'payout' in dictionary:
if not isinstance(dictionary['payout'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['payout']))
value = PayoutResponse()
self.__payout = value.from_dictionary(dictionary['payout'])
if 'refund' in dictionary:
if not isinstance(dictionary['refund'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['refund']))
value = RefundResponse()
self.__refund = value.from_dictionary(dictionary['refund'])
if 'token' in dictionary:
if not isinstance(dictionary['token'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['token']))
value = TokenResponse()
self.__token = value.from_dictionary(dictionary['token'])
if 'dispute' in dictionary:
if not isinstance(dictionary['dispute'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['dispute']))
value = DisputeResponse()
self.__dispute = value.from_dictionary(dictionary['dispute'])
return self
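# Example usage (added for illustration, not part of the original SDK; the
# field values below are hypothetical):
if __name__ == '__main__':
    event = WebhooksEvent().from_dictionary({
        'apiVersion': 'v1',
        'id': '8ee793f6-4553-4749-85dc-f2ef095c5ab0',
        'created': '2018-01-01T00:00:00+01:00',
        'merchantId': '20000',
        'type': 'payment.paid',
    })
    print(event.type + ' for merchant ' + event.merchant_id)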
| Ingenico-ePayments/connect-sdk-python2 | ingenico/connect/sdk/domain/webhooks/web_hooks_event.py | Python | mit | 5,274 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import urllib2
from util.global_def import get_msg
from util.message import Msg
def check_access_status():
print(get_msg(Msg.check_network_connection))
try:
urllib2.urlopen('http://google.com', timeout=3)
print(get_msg(Msg.network_status_succeed))
return True
except urllib2.URLError:
pass
print(get_msg(Msg.network_status_fail))
return False
__CHECKED = False
__REACHABLE = False
def reachable():
global __CHECKED, __REACHABLE
if not __CHECKED:
__REACHABLE = check_access_status()
__CHECKED = True
return __REACHABLE
if __name__ == '__main__':
print(reachable())
| r-kan/reminder | util/network.py | Python | mit | 759 |
"""
Return types from api classes interface for the SOAP kegg api.
"""
from datetime import datetime
from collections import namedtuple
from operator import methodcaller
Definition = namedtuple(
"Definition",
["entry_id",
"definition"]
)
def _Definition_from_items(items):
""" Definition 'items' tuple from a list of items
"""
items_list = []
for name, val in items:
if isinstance(name, list):
name = name[0]
if isinstance(val, list):
val = val[0]
items_list.append((str(name), str(val)))
return Definition(**dict(items_list))
def _Definition_from_str(text):
"""
Return a `Definition` item by parsing a tab separated string `text`
i.e. text must be of the form '<entry_id>\t<definition>'
"""
return Definition(*text.split("\t", 1))
Definition.from_items = staticmethod(_Definition_from_items)
Definition.from_str = staticmethod(_Definition_from_str)
OrganismSummary = namedtuple(
"OrganismSummary",
["entry_id",
"org_code",
"name",
"lineage"],
)
def OrganismSummary_from_str(string):
#string = string.decode("utf8")
return OrganismSummary(*string.split("\t"))
OrganismSummary.from_str = staticmethod(OrganismSummary_from_str)
BInfo = namedtuple(
'BInfo',
["entry_id",
"definition",
"name",
"release",
"curator",
"contents",
"last_update",
"supported_formats"]
)
def _BInfo_from_text(text):
""" Parse the return string from info into a new BInfo instance.
"""
lines = text.splitlines()
name, definition = lines[0].split(" ", 1)
definition = definition.strip()
entry_id, release = lines[1].split(" ", 1)
_, release = release.strip().split(" ", 1)
curator = lines[2].strip()
contents = "\n".join(map(methodcaller("strip"), lines[3:]))
return BInfo(entry_id, definition, name, release, curator,
contents, None, None)
BInfo.from_text = staticmethod(_BInfo_from_text)
Link = namedtuple("Link", ["entry_id1", "entry_id2"])
SSDBRelation = namedtuple(
"SSDBRelation", ["genes_id1",
"genes_id2",
"sw_score",
"bit_score",
"identity",
"overlap",
"start_position1",
"end_position1",
"start_position2",
"end_position2",
"best_flag_1to2",
"best_flag_2to1",
"definition1",
"definition2",
"length1",
"length2"
])
MotifResult = namedtuple(
"MotifResult", ["motif_id",
"definition",
"genes_id",
"start_position",
"end_position",
"score",
"evalue"
])
LinkDBRelation = namedtuple(
"LinkDBRelation", ["entry_id1",
"entry_id2",
"type",
"path"
])
PathwayElement = namedtuple(
"PathwayElement", ["element_id",
"type",
"names",
"components"
])
PathwayElementRelation = namedtuple(
"PathwayElementRelation", ["element_id1",
"element_id2",
"type",
"subtypes"
])
Subtype = namedtuple(
"Subtype", ["element_id",
"relation",
"type",
])
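# Example usage (added for illustration, not part of the original module):
if __name__ == "__main__":
    d = Definition.from_str("K00001\talcohol dehydrogenase")
    print(d)  # Definition(entry_id='K00001', definition='alcohol dehydrogenase')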
| tomazc/orange-bio | orangecontrib/bio/kegg/types.py | Python | gpl-3.0 | 3,716 |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2018 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from inspire_dojson.utils import get_record_ref, get_recid_from_ref
from inspire_matcher import match
from inspire_utils.dedupers import dedupe_list
from inspire_utils.record import get_value
from inspirehep.modules.refextract import config
def _add_match_to_reference(reference, matched_recid, es_index):
"""Modifies a reference to include its record id."""
if es_index == 'records-data':
reference['record'] = get_record_ref(matched_recid, 'data')
elif es_index == 'records-hep':
reference['record'] = get_record_ref(matched_recid, 'literature')
def match_reference_with_config(reference, config, previous_matched_recid=None):
"""Match a reference using inspire-matcher given the config.
Args:
reference (dict): the metadata of the reference.
config (dict): the list of inspire-matcher configurations for queries.
previous_matched_recid (int): the record id of the last matched
reference from the list of references.
Returns:
dict: the matched reference.
"""
# XXX: avoid this type casting.
try:
reference['reference']['publication_info']['year'] = str(
reference['reference']['publication_info']['year'])
except KeyError:
pass
matched_recids = [matched_record['_source']['control_number'] for matched_record in match(reference, config)]
matched_recids = dedupe_list(matched_recids)
same_as_previous = any(matched_recid == previous_matched_recid for matched_recid in matched_recids)
if len(matched_recids) == 1:
_add_match_to_reference(reference, matched_recids[0], config['index'])
elif same_as_previous:
_add_match_to_reference(reference, previous_matched_recid, config['index'])
# XXX: avoid this type casting.
try:
reference['reference']['publication_info']['year'] = int(
reference['reference']['publication_info']['year'])
except KeyError:
pass
return reference
def match_reference(reference, previous_matched_recid=None):
"""Match a reference using inspire-matcher.
Args:
reference (dict): the metadata of a reference.
previous_matched_recid (int): the record id of the last matched
reference from the list of references.
Returns:
dict: the matched reference.
"""
if reference.get('curated_relation'):
return reference
config_unique_identifiers = config.REFERENCE_MATCHER_UNIQUE_IDENTIFIERS_CONFIG
config_texkey = config.REFERENCE_MATCHER_TEXKEY_CONFIG
config_default_publication_info = config.REFERENCE_MATCHER_DEFAULT_PUBLICATION_INFO_CONFIG
config_jcap_and_jhep_publication_info = config.REFERENCE_MATCHER_JHEP_AND_JCAP_PUBLICATION_INFO_CONFIG
config_data = config.REFERENCE_MATCHER_DATA_CONFIG
journal_title = get_value(reference, 'reference.publication_info.journal_title')
config_publication_info = config_jcap_and_jhep_publication_info if \
journal_title in ['JCAP', 'JHEP'] else config_default_publication_info
configs = [config_unique_identifiers, config_publication_info, config_texkey, config_data]
matches = (match_reference_with_config(reference, config, previous_matched_recid) for config in configs)
matches = (matched_record for matched_record in matches if 'record' in matched_record)
reference = next(matches, reference)
return reference
def match_references(references):
"""Match references to their respective records in INSPIRE.
Args:
references (list): the list of references.
Returns:
list: the matched references.
"""
matched_references, previous_matched_recid = [], None
for ref in references:
ref = match_reference(ref, previous_matched_recid)
matched_references.append(ref)
if 'record' in ref:
previous_matched_recid = get_recid_from_ref(ref['record'])
return matched_references
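# Illustrative sketch (not part of the original module): a minimal, hand-built
# reference dict run through match_references(). Only keys that the functions
# above actually read are used; the values themselves are hypothetical.
def _example_match_references():
    references = [{
        'curated_relation': False,
        'reference': {
            'publication_info': {
                'journal_title': 'JHEP',
                'year': 2018,
            },
        },
    }]
    # Each reference gains a 'record' key when a unique match is found.
    return match_references(references)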
| inspirehep/inspire-next | inspirehep/modules/refextract/matcher.py | Python | gpl-3.0 | 4,916 |
import logging
import functools
import collections
import idna
import regex
import unicodedata
import synapse.exc as s_exc
import synapse.data as s_data
import synapse.lib.crypto.coin as s_coin
logger = logging.getLogger(__name__)
tldlist = list(s_data.get('iana.tlds'))
tldlist.extend([
'bit',
'onion',
])
tldlist.sort(key=lambda x: len(x))
tldlist.reverse()
tldcat = '|'.join(tldlist)
fqdn_re = regex.compile(r'((?:[a-z0-9_-]{1,63}\.){1,10}(?:%s))' % tldcat)
idna_disallowed = r'\$+<->\^`|~\u00A8\u00AF\u00B4\u00B8\u02D8-\u02DD\u037A\u0384\u0385\u1FBD\u1FBF-\u1FC1\u1FCD-\u1FCF\u1FDD-\u1FDF\u1FED-\u1FEF\u1FFD\u1FFE\u207A\u207C\u208A\u208C\u2100\u2101\u2105\u2106\u2474-\u24B5\u2A74-\u2A76\u2FF0-\u2FFB\u309B\u309C\u3200-\u321E\u3220-\u3243\u33C2\u33C7\u33D8\uFB29\uFC5E-\uFC63\uFDFA\uFDFB\uFE62\uFE64-\uFE66\uFE69\uFE70\uFE72\uFE74\uFE76\uFE78\uFE7A\uFE7C\uFE7E\uFF04\uFF0B\uFF1C-\uFF1E\uFF3E\uFF40\uFF5C\uFF5E\uFFE3\uFFFC\uFFFD\U0001F100-\U0001F10A\U0001F110-\U0001F129\U000E0100-\U000E01EF'
udots = regex.compile(r'[\u3002\uff0e\uff61]')
# avoid thread safety issues due to uts46_remap() importing uts46data
idna.encode('init', uts46=True)
inverse_prefixs = {
'[': ']',
'<': '>',
'{': '}',
'(': ')',
}
def fqdn_prefix_check(match: regex.Match):
mnfo = match.groupdict()
valu = mnfo.get('valu')
prefix = mnfo.get('prefix')
cbfo = {}
if prefix is not None:
new_valu = valu.rstrip(inverse_prefixs.get(prefix))
if new_valu != valu:
valu = new_valu
cbfo['match'] = valu
return valu, cbfo
def fqdn_check(match: regex.Match):
mnfo = match.groupdict()
valu = mnfo.get('valu')
nval = unicodedata.normalize('NFKC', valu)
nval = regex.sub(udots, '.', nval)
nval = nval.strip().strip('.')
try:
idna.encode(nval, uts46=True).decode('utf8')
except idna.IDNAError:
try:
nval.encode('idna').decode('utf8').lower()
except UnicodeError:
return None, {}
return valu, {}
# these must be ordered from most specific to least specific to allow first=True to work
scrape_types = [ # type: ignore
('inet:url', r'(?P<prefix>[\\{<\(\[]?)(?P<valu>[a-zA-Z][a-zA-Z0-9]*://(?(?=[,.]+[ \'\"\t\n\r\f\v])|[^ \'\"\t\n\r\f\v])+)',
{'callback': fqdn_prefix_check}),
('inet:email', r'(?=(?:[^a-z0-9_.+-]|^)(?P<valu>[a-z0-9_\.\-+]{1,256}@(?:[a-z0-9_-]{1,63}\.){1,10}(?:%s))(?:[^a-z0-9_.-]|[.\s]|$))' % tldcat, {}),
('inet:server', r'(?P<valu>(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?):[0-9]{1,5})', {}),
('inet:ipv4', r'(?P<valu>(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))', {}),
('inet:fqdn', r'(?=(?:[^\p{L}\p{M}\p{N}\p{S}\u3002\uff0e\uff61_.-]|^|[' + idna_disallowed + '])(?P<valu>(?:((?![' + idna_disallowed + r'])[\p{L}\p{M}\p{N}\p{S}_-]){1,63}[\u3002\uff0e\uff61\.]){1,10}(?:' + tldcat + r'))(?:[^\p{L}\p{M}\p{N}\p{S}\u3002\uff0e\uff61_.-]|[\u3002\uff0e\uff61.]([\p{Z}\p{Cc}]|$)|$|[' + idna_disallowed + r']))', {'callback': fqdn_check}),
('hash:md5', r'(?=(?:[^A-Za-z0-9]|^)(?P<valu>[A-Fa-f0-9]{32})(?:[^A-Za-z0-9]|$))', {}),
('hash:sha1', r'(?=(?:[^A-Za-z0-9]|^)(?P<valu>[A-Fa-f0-9]{40})(?:[^A-Za-z0-9]|$))', {}),
('hash:sha256', r'(?=(?:[^A-Za-z0-9]|^)(?P<valu>[A-Fa-f0-9]{64})(?:[^A-Za-z0-9]|$))', {}),
('it:sec:cve', r'(?:[^a-z0-9]|^)(?P<valu>CVE-[0-9]{4}-[0-9]{4,})(?:[^a-z0-9]|$)', {}),
('crypto:currency:address', r'(?=(?:[^A-Za-z0-9]|^)(?P<valu>[1][a-zA-HJ-NP-Z0-9]{25,39})(?:[^A-Za-z0-9]|$))',
{'callback': s_coin.btc_base58_check}),
('crypto:currency:address', r'(?=(?:[^A-Za-z0-9]|^)(?P<valu>3[a-zA-HJ-NP-Z0-9]{33})(?:[^A-Za-z0-9]|$))',
{'callback': s_coin.btc_base58_check}),
('crypto:currency:address', r'(?=(?:[^A-Za-z0-9]|^)(?P<valu>(bc|bcrt|tb)1[qpzry9x8gf2tvdw0s3jn54khce6mua7l]{3,71})(?:[^A-Za-z0-9]|$))',
{'callback': s_coin.btc_bech32_check}),
('crypto:currency:address', r'(?=(?:[^A-Za-z0-9]|^)(?P<valu>0x[A-Fa-f0-9]{40})(?:[^A-Za-z0-9]|$))',
{'callback': s_coin.eth_check}),
('crypto:currency:address', r'(?=(?:[^A-Za-z0-9]|^)(?P<valu>(bitcoincash|bchtest):[qpzry9x8gf2tvdw0s3jn54khce6mua7l]{42})(?:[^A-Za-z0-9]|$))',
{'callback': s_coin.bch_check}),
('crypto:currency:address', r'(?=(?:[^A-Za-z0-9]|^)(?P<valu>[xr][a-zA-HJ-NP-Z0-9]{25,46})(?:[^A-Za-z0-9]|$))',
{'callback': s_coin.xrp_check}),
('crypto:currency:address', r'(?=(?:[^A-Za-z0-9]|^)(?P<valu>[1a-z][a-zA-HJ-NP-Z0-9]{46,47})(?:[^A-Za-z0-9]|$))',
{'callback': s_coin.substrate_check}),
('crypto:currency:address', r'(?=(?:[^A-Za-z0-9]|^)(?P<valu>(DdzFF|Ae2td)[a-zA-HJ-NP-Z0-9]{54,99})(?:[^A-Za-z0-9]|$))',
{'callback': s_coin.cardano_byron_check}),
('crypto:currency:address', r'(?=(?:[^A-Za-z0-9]|^)(?P<valu>addr1[qpzry9x8gf2tvdw0s3jn54khce6mua7l]{53})(?:[^A-Za-z0-9]|$))',
{'callback': s_coin.cardano_shelly_check}),
]
_regexes = collections.defaultdict(list)
for (name, rule, opts) in scrape_types:
blob = (regex.compile(rule, regex.IGNORECASE), opts)
_regexes[name].append(blob)
def getForms():
'''
Get a list of forms recognized by the scrape APIs.
Returns:
list: A list of form values.
'''
return sorted(_regexes.keys())
FANGS = {
'hxxp:': 'http:',
'hxxps:': 'https:',
'hxxp[:]': 'http:',
'hxxps[:]': 'https:',
'hxxp[://]': 'http://',
'hxxps[://]': 'https://',
'hxxp(:)': 'http:',
'hxxps(:)': 'https:',
'[.]': '.',
'[.]': '.',
'[。]': '。',
'[。]': '。',
'(.)': '.',
'(.)': '.',
'(。)': '。',
'(。)': '。',
'[:]': ':',
'fxp': 'ftp',
'fxps': 'ftps',
'[at]': '@',
'[@]': '@',
}
def genFangRegex(fangs, flags=regex.IGNORECASE):
# Fangs must be matches of equal or smaller length in order for the
# contextScrape API to function.
for src, dst in fangs.items():
if len(dst) > len(src):
raise s_exc.BadArg(mesg=f'fang dst[{dst}] must be <= in length to src[{src}]',
src=src, dst=dst)
restr = "|".join(map(regex.escape, fangs.keys()))
re = regex.compile(restr, flags)
return re
re_fang = genFangRegex(FANGS)
def refang_text(txt):
'''
    Remove address de-fanging in text blobs, e.g. example[.]com to example.com
    Matches to keys in FANGS are case-insensitive, but replacement will always be
with the lowercase version of the re-fanged value.
For example, HXXP://FOO.COM will be returned as http://FOO.COM
Returns:
(str): Re-fanged text blob
'''
return re_fang.sub(lambda match: FANGS[match.group(0).lower()], txt)
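# Illustrative sketch (not in the original module): refang_text() only applies the
# FANGS mapping, so a defanged URL such as 'hxxp[://]example[.]com' comes back as
# 'http://example.com'.
def _example_refang_text():
    return refang_text('visit hxxp[://]example[.]com for details')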
def _refang2_func(match: regex.Match, offsets: dict, fangs: dict):
# This callback exploits the fact that known de-fanging strategies either
# do in-place transforms, or transforms which increase the target string
# size. By re-fanging, we are compressing the old string into a new string
# of potentially a smaller size. We record the offset where any transform
# affects the contents of the string. This means, downstream, we can avoid
# have to go back to the source text if there were **no** transforms done.
# This relies on the prior assertions of refang sizing.
group = match.group(0)
ret = fangs[group.lower()]
rlen = len(ret)
mlen = len(group)
span = match.span(0)
consumed = offsets.get('_consumed', 0)
offs = span[0] - consumed
nv = mlen - rlen
# For offsets, we record the nv + 1 since the now-compressed string
# has one character represented by mlen - rlen + 1 characters in the
# original string.
offsets[offs] = nv + 1
offsets['_consumed'] = consumed + nv
return ret
def refang_text2(txt: str, re: regex.Regex =re_fang, fangs: dict =FANGS):
'''
    Remove address de-fanging in text blobs, e.g. example[.]com to example.com
    Notes:
        Matches to keys in FANGS are case-insensitive, but replacement will
always be with the lowercase version of the re-fanged value.
For example, ``HXXP://FOO.COM`` will be returned as ``http://FOO.COM``
Args:
txt (str): The text to re-fang.
Returns:
tuple(str, dict): A tuple containing the new text, and a dictionary
containing offset information where the new text was altered with
respect to the original text.
'''
    # The _consumed key is an offset used to track how many chars have been
# consumed while the cb is called. This is because the match group
# span values are based on their original string locations, and will not
# produce values which can be cleanly mapped backwards.
offsets = {'_consumed': 0}
cb = functools.partial(_refang2_func, offsets=offsets, fangs=fangs)
# Start applying FANGs and modifying the info to match the output
ret = re.sub(cb, txt)
# Remove the _consumed key since it is no longer useful for later use.
offsets.pop('_consumed')
return ret, offsets
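# Illustrative sketch (not in the original module): refang_text2() additionally
# reports where the refanged text was compressed. For the input below,
# 'hxxp[://]' (9 chars) shrinks to 'http://' (7 chars) and '[.]' shrinks to '.',
# so per the offset logic above the result should be {0: 3, 14: 3}.
def _example_refang_text2():
    newtext, offsets = refang_text2('hxxp[://]example[.]com')
    # newtext == 'http://example.com'
    # offsets maps positions in newtext to how many source characters that
    # position stands for when mapping a match back to the original text.
    return newtext, offsets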
def _rewriteRawValu(text: str, offsets: dict, info: dict):
# Our match offset. This is the match offset value into the refanged text.
offset = info.get('offset')
# We need to see if there are values in the offsets which are less than our
# match offset and increment our base offset by them. This gives us a
# shift into the original string where we would find the actual offset.
    # This can be represented as a comprehension but I find that the loop
# below is easier to read.
baseoff = 0
for k, v in offsets.items():
if k < offset:
baseoff = baseoff + offsets[k] - 1
# If our return valu is not a str, then base our text recovery on the
# original regex matched valu.
valu = info.get('valu')
if not isinstance(valu, str):
valu = info.get('match')
# Start enumerating each character in our valu, incrementing the end_offset
# by 1, or the recorded offset difference in offsets dictionary.
end_offset = offset
for i, c in enumerate(valu, start=offset):
end_offset = end_offset + offsets.get(i, 1)
# Extract a new match and push the match and new offset into info
match = text[baseoff + offset: baseoff + end_offset]
info['match'] = match
info['offset'] = baseoff + offset
def genMatches(text: str, regx: regex.Regex, opts: dict):
'''
Generate regular expression matches for a blob of text.
Args:
text (str): The text to generate matches for.
        regx (regex.Regex): A compiled regex object. The regex must contain a named match group for ``valu``.
opts (dict): An options dictionary.
Notes:
        The dictionaries yielded by this function contain the following keys:
        match
            The raw matching text found in the input text.
offset
The offset into the text where the match was found.
valu
The resulting value - this may be altered by callbacks.
The options dictionary can contain a ``callback`` key. This function is expected to take a single argument,
a regex.Match object, and return a tuple of the new valu and info dictionary. The new valu is used as the
``valu`` key in the returned dictionary, and any other information in the info dictionary is pushed into
the return dictionary as well.
Yields:
dict: A dictionary of match results.
'''
cb = opts.get('callback')
for valu in regx.finditer(text): # type: regex.Match
raw_span = valu.span('valu')
raw_valu = valu.group('valu')
info = {
'match': raw_valu,
'offset': raw_span[0]
}
if cb:
# CB is expected to return a tufo of <new valu, info>
valu, cbfo = cb(valu)
if valu is None:
continue
# Smash cbfo into our info dict
info.update(**cbfo)
else:
valu = raw_valu
info['valu'] = valu
yield info
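# Illustrative sketch (not in the original module): genMatches() with a custom
# pattern and callback. Both are hypothetical; the only requirements are a named
# 'valu' group in the regex and a callback returning a (valu, info-dict) tuple.
def _example_genmatches():
    pattern = regex.compile(r'(?P<valu>[a-z]+)-\d+')
    def _upper_cb(match):
        return match.group('valu').upper(), {'note': 'uppercased'}
    # Yields dicts like {'match': 'foo', 'offset': 0, 'valu': 'FOO', 'note': 'uppercased'}
    return list(genMatches('foo-1 bar-2', pattern, {'callback': _upper_cb}))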
def contextScrape(text, form=None, refang=True, first=False):
'''
Scrape types from a blob of text and yield info dictionaries.
Args:
text (str): Text to scrape.
        form (str): Optional form to scrape. If present, only scrape items which match the provided form.
refang (bool): Whether to remove de-fanging schemes from text before scraping.
first (bool): If true, only yield the first item scraped.
Notes:
        The dictionaries yielded by this function contain the following keys:
match
The raw matching text found in the input text.
offset
The offset into the text where the match was found.
valu
The resulting value.
form
The corresponding form for the valu.
Returns:
(dict): Yield info dicts of results.
'''
scrape_text = text
offsets = {}
if refang:
scrape_text, offsets = refang_text2(text)
for ruletype, blobs in _regexes.items():
if form and form != ruletype:
continue
for (regx, opts) in blobs:
for info in genMatches(scrape_text, regx, opts):
info['form'] = ruletype
if refang and offsets:
_rewriteRawValu(text, offsets, info)
yield info
if first:
return
def scrape(text, ptype=None, refang=True, first=False):
'''
Scrape types from a blob of text and return node tuples.
Args:
text (str): Text to scrape.
ptype (str): Optional ptype to scrape. If present, only scrape items which match the provided type.
refang (bool): Whether to remove de-fanging schemes from text before scraping.
first (bool): If true, only yield the first item scraped.
Returns:
(str, object): Yield tuples of node ndef values.
'''
for info in contextScrape(text, form=ptype, refang=refang, first=first):
yield info.get('form'), info.get('valu')
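# Illustrative sketch (not in the original module): top-level usage of scrape().
# The input text is made up; the exact forms yielded depend on the regexes above.
def _example_scrape():
    text = 'see hxxps[://]example[.]com and contact [email protected]'
    # Yields (form, valu) tuples such as ('inet:url', 'https://example.com'),
    # ('inet:email', '[email protected]') and ('inet:fqdn', 'example.com').
    return list(scrape(text))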
| vertexproject/synapse | synapse/lib/scrape.py | Python | apache-2.0 | 14,179 |
# -*- coding: utf-8 -*-
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along
# with this program. If not, see http://www.gnu.org/licenses/agpl-3.0.html.
"""Helper functions to graft the current UI on to the new data model."""
import json
from django.urls import reverse
from django.utils.http import urlquote_plus
def is_dependent(subtitle_language):
"""Return whether the language is "dependent" on another one.
Basically, whether it's a translation or not.
"""
return subtitle_language.get_translation_source_language_code() != None
def get_widget_url(subtitle_language, mode=None, task_id=None):
# duplicates
# unisubs.widget.SubtitleDialogOpener.prototype.openDialogOrRedirect_
video = subtitle_language.video
video_url = video.get_video_url()
config = {
"videoID": video.video_id,
"videoURL": video_url,
"effectiveVideoURL": video_url,
"languageCode": subtitle_language.language_code,
"subLanguagePK": subtitle_language.pk,
"originalLanguageCode": video.language,
"mode": mode,
"task": task_id,
}
if is_dependent(subtitle_language):
config['baseLanguageCode'] = subtitle_language.get_translation_source_language_code()
return (reverse('onsite_widget') +
'?config=' + urlquote_plus(json.dumps(config)))
| pculture/unisubs | apps/subtitles/shims.py | Python | agpl-3.0 | 1,976 |
class ZillowError(Exception):
def __init__(self, response):
super(ZillowError, self).__init__(
"There was a problem with your request. Status code {}, Content {}".
format(response.status_code, response.content)
)
| ahlusar1989/flowzillow | flowzillow/exceptions.py | Python | gpl-2.0 | 259 |
import os
import pytest
from pkgstack.profile import Profile
TESTS_PATH = os.path.realpath(os.path.dirname(__file__))
def test_profile_create():
config = Profile(os.path.join(TESTS_PATH, 'resources/sample.yml')).config
assert config == [
{'install': 'pytest', 'stage': 'test'},
{'name': 'Install pytest-cov', 'install': 'pytest-cov', 'stage': 'test'},
{'name': 'Install codecov', 'install': 'codecov', 'alternatives': ['test1', 'test2'], 'stage': 'test'},
{'name': 'Install dtguess', 'install': 'dtguess==0.1.3'},
{'install': 'dtguess==0.1.3',
'alternatives': ['https://github.com/ownport/dtguess/releases/download/v0.1.3/dtguess-0.1.3.tar.gz'],
}
]
def test_process():
assert Profile(os.path.join(TESTS_PATH, 'resources/sample.yml')).process() == {
'packages.successed': 1,
'packages.failed': 1,
'packages.total': 5
}
def test_profile_process_via_stage():
assert Profile(os.path.join(TESTS_PATH, 'resources/sample.yml'), stages=['test',]).process() == {
'packages.successed': 5,
'packages.failed': 0,
'packages.total': 5
}
def test_profile_incorrect_stage_type():
with pytest.raises(RuntimeError):
p = Profile(os.path.join(TESTS_PATH, 'resources/sample.yml'), stages='test')
def test_profile_no_deps():
config = Profile(os.path.join(TESTS_PATH, 'resources/sample-no-deps.yml')).config
assert config == [
{'install': 'requests', 'name': 'install requests package w/o deps', 'no-deps': True}
]
def test_process_profile_no_deps():
assert Profile(os.path.join(TESTS_PATH, 'resources/sample-no-deps.yml')).process() == {
'packages.successed': 1,
'packages.failed': 0,
'packages.total': 1
}
| ownport/pkgstack | tests/test_profile.py | Python | mit | 1,807 |
# import pydrakeik first. This is a workaround for the issue:
# https://github.com/RobotLocomotion/director/issues/467
from director import pydrakeik
import director
from director import robotsystem
from director.consoleapp import ConsoleApp
from director import transformUtils
from director import robotstate
from director import ikplanner
from director import visualization as vis
from director import objectmodel as om
import numpy as np
import time
import itertools
def onMatlabStartup(ikServer, startSuccess):
assert startSuccess
runTest()
def computeIk(goalFrame, constraints, ikParameters, seedPoseName, nominalPoseName):
constraints[-2].referenceFrame = goalFrame
constraints[-1].quaternion = goalFrame
cs = ikplanner.ConstraintSet(robotSystem.ikPlanner, constraints, '', '')
cs.seedPoseName = seedPoseName
cs.nominalPoseName = nominalPoseName
return cs.runIk()
def runTest():
side = 'left'
testTolerances = False
renderAllSamples = True
randomizeSamples = True
samplesPerJoint = 10
jointLimitPadding = np.radians(5)
ikPlanner = robotSystem.ikPlanner
jointController = robotSystem.robotStateJointController
robotModel = robotSystem.robotStateModel
if app.getTestingEnabled():
samplesPerJoint = 2
jointGroup = str('%s Arm' % side).title()
jointNames = ikPlanner.getJointGroup(jointGroup)
jointIndices = [robotstate.getDrakePoseJointNames().index(name) for name in jointNames]
jointLimits = np.array([robotModel.model.getJointLimits(jointName) for jointName in jointNames])
otherJoints = [name for name in robotstate.getDrakePoseJointNames() if name not in jointNames]
jointSamples = []
for name, limit in zip(jointNames, jointLimits):
jointMin = limit[0] + jointLimitPadding
jointMax = limit[1] - jointLimitPadding
samples, spacing = np.linspace(jointMin, jointMax, samplesPerJoint, retstep=True)
jointSamples.append(samples)
print 'joint name:', name
print 'joint range: [%.4f, %.4f]' % (limit[0], limit[1])
print 'joint number of samples:', samplesPerJoint
print 'joint sample spacing: %.4f' % spacing
totalSamples = np.product([len(x) for x in jointSamples])
print 'total number of samples:', totalSamples
allSamples = list(itertools.product(*jointSamples))
if randomizeSamples:
np.random.shuffle(allSamples)
if 'endEffectorConfig' in robotSystem.directorConfig:
linkName = robotSystem.directorConfig['endEffectorConfig']['endEffectorLinkNames'][0]
else:
linkName = ikPlanner.getHandLink(side)
linkFrame = robotModel.getLinkFrame(linkName)
constraints = []
constraints.append(ikPlanner.createPostureConstraint('q_nom', otherJoints))
constraints.extend(ikPlanner.createSixDofLinkConstraints(jointController.q, linkName))
def setTolerance(distance, angleInDegrees):
constraints[-1].angleToleranceInDegrees = angleInDegrees
constraints[-2].upperBound = np.ones(3)*distance
constraints[-2].lowerBound = np.ones(3)*-distance
setTolerance(0.005, 0.5)
ikParameters = ikplanner.IkParameters()
ikParameters.setToDefaults()
ikParameters.majorIterationsLimit = 10000
ikParameters.majorOptimalityTolerance = 1e-4
ikParameters.majorFeasibilityTolerance = 1e-6
#seedPoseName = 'q_nom'
#nominalPoseName = 'q_nom'
seedPoseName = 'sample_pose'
nominalPoseName = 'sample_pose'
print
print 'constraints:'
print
print constraints[-2]
print
print constraints[-1]
print
print ikParameters
print
print 'seed pose name:', seedPoseName
print 'nominal pose name:', nominalPoseName
print
ikPlanner.addPose(jointController.q, 'sample_pose')
endPose, info = computeIk(linkFrame, constraints, ikParameters, seedPoseName, nominalPoseName)
assert info == 1
assert np.allclose(endPose, jointController.q)
q = jointController.q.copy()
nom_sample = q[jointIndices].copy()
sampleCount = 0
totalSampleCount = 0
badSampleCount = 0
sampleL2NormAccum = 0.0
startTime = time.time()
for sample in allSamples:
sampleCount += 1
totalSampleCount += 1
dist = np.linalg.norm(sample - nom_sample)
sampleL2NormAccum += dist
q[jointIndices] = sample
jointController.setPose('sample_pose', q)
ikPlanner.addPose(q, 'sample_pose')
if renderAllSamples:
view.forceRender()
targetFrame = robotModel.getLinkFrame(linkName)
#pos, quat = transformUtils.poseFromTransform(frame)
endPose, info = computeIk(targetFrame, constraints, ikParameters, seedPoseName, nominalPoseName)
if info >= 10:
print
print 'bad info:', info
jointController.addPose('bad', endPose)
print 'sample num:', totalSampleCount
print 'sample:', sample
print
badSampleCount += 1
errorRate = badSampleCount/float(totalSampleCount)
print 'error rate: %.2f' % errorRate
print 'avg pose l2 norm:', sampleL2NormAccum/totalSampleCount
if testTolerances:
succeeded = False
for tol in [(0.01, 1), (0.01, 2), (0.02, 2), (0.02, 3), (0.03, 3), (0.03, 5), (0.04, 5), (0.05, 5), (0.1, 10), (0.2, 20)]:
print 'retry tolerance:', tol
setTolerance(tol[0], tol[1])
                endPose, info = computeIk(targetFrame, constraints, ikParameters, seedPoseName, nominalPoseName)
if info < 10:
succeeded = True
print 'Worked!'
break
setTolerance(0.005, 0.5)
if not succeeded:
print 'Giving up after retries.'
continue
timeNow = time.time()
elapsed = timeNow - startTime
if elapsed > 1.0:
view.forceRender()
print '%d samples/sec' % (sampleCount / elapsed), '%d total samples' % totalSampleCount
startTime = timeNow
sampleCount = 0
if app.getTestingEnabled():
assert badSampleCount == 0
app.quit()
app = ConsoleApp()
app.setupGlobals(globals())
view = app.createView()
view.show()
robotSystem = robotsystem.create(view, planningOnly=True)
view.resetCamera()
if robotSystem.ikPlanner.planningMode == 'pydrake':
robotSystem.ikPlanner.plannerPub._setupLocalServer()
runTest()
elif robotSystem.ikPlanner.planningMode == 'matlabdrake':
robotSystem.ikServer.connectStartupCompleted(onMatlabStartup)
robotSystem.startIkServer()
app.start(enableAutomaticQuit=False)
# after the app starts, runTest() will be called by onMatlabStartup
| patmarion/director | src/python/tests/testEndEffectorIk.py | Python | bsd-3-clause | 6,875 |
from __future__ import absolute_import
from typing import Any, Dict, Iterable, List, Mapping, Optional, Set, Tuple
from six import text_type
from zerver.lib.initial_password import initial_password
from zerver.models import Realm, Stream, UserProfile, Huddle, \
Subscription, Recipient, Client, get_huddle_hash, email_to_domain
from zerver.lib.create_user import create_user_profile
def bulk_create_realms(realm_list):
# type: (Iterable[text_type]) -> None
existing_realms = set(r.domain for r in Realm.objects.select_related().all())
realms_to_create = [] # type: List[Realm]
for domain in realm_list:
if domain not in existing_realms:
realms_to_create.append(Realm(domain=domain, name=domain))
existing_realms.add(domain)
Realm.objects.bulk_create(realms_to_create)
def bulk_create_users(realm, users_raw, bot_type=None, tos_version=None):
# type: (Realm, Set[Tuple[text_type, text_type, text_type, bool]], Optional[int], Optional[text_type]) -> None
"""
Creates and saves a UserProfile with the given email.
    Has some code based off of UserManager.create_user, but doesn't .save()
"""
existing_users = frozenset(UserProfile.objects.values_list('email', flat=True))
users = sorted([user_raw for user_raw in users_raw if user_raw[0] not in existing_users])
# Now create user_profiles
profiles_to_create = [] # type: List[UserProfile]
for (email, full_name, short_name, active) in users:
profile = create_user_profile(realm, email,
initial_password(email), active, bot_type,
full_name, short_name, None, False, tos_version)
profiles_to_create.append(profile)
UserProfile.objects.bulk_create(profiles_to_create)
profiles_by_email = {} # type: Dict[text_type, UserProfile]
profiles_by_id = {} # type: Dict[int, UserProfile]
for profile in UserProfile.objects.select_related().all():
profiles_by_email[profile.email] = profile
profiles_by_id[profile.id] = profile
recipients_to_create = [] # type: List[Recipient]
for (email, full_name, short_name, active) in users:
recipients_to_create.append(Recipient(type_id=profiles_by_email[email].id,
type=Recipient.PERSONAL))
Recipient.objects.bulk_create(recipients_to_create)
recipients_by_email = {} # type: Dict[text_type, Recipient]
for recipient in Recipient.objects.filter(type=Recipient.PERSONAL):
recipients_by_email[profiles_by_id[recipient.type_id].email] = recipient
subscriptions_to_create = [] # type: List[Subscription]
for (email, full_name, short_name, active) in users:
subscriptions_to_create.append(
Subscription(user_profile_id=profiles_by_email[email].id,
recipient=recipients_by_email[email]))
Subscription.objects.bulk_create(subscriptions_to_create)
def bulk_create_streams(realm, stream_dict):
# type: (Realm, Dict[text_type, Dict[text_type, Any]]) -> None
existing_streams = frozenset([name.lower() for name in
Stream.objects.filter(realm=realm)
.values_list('name', flat=True)])
streams_to_create = [] # type: List[Stream]
for name, options in stream_dict.items():
if name.lower() not in existing_streams:
streams_to_create.append(
Stream(
realm=realm, name=name, description=options["description"],
invite_only=options["invite_only"]
)
)
Stream.objects.bulk_create(streams_to_create)
recipients_to_create = [] # type: List[Recipient]
for stream in Stream.objects.filter(realm=realm).values('id', 'name'):
if stream['name'].lower() not in existing_streams:
recipients_to_create.append(Recipient(type_id=stream['id'],
type=Recipient.STREAM))
Recipient.objects.bulk_create(recipients_to_create)
def bulk_create_clients(client_list):
# type: (Iterable[text_type]) -> None
existing_clients = set(client.name for client in Client.objects.select_related().all()) # type: Set[text_type]
clients_to_create = [] # type: List[Client]
for name in client_list:
if name not in existing_clients:
clients_to_create.append(Client(name=name))
existing_clients.add(name)
Client.objects.bulk_create(clients_to_create)
def bulk_create_huddles(users, huddle_user_list):
# type: (Dict[text_type, UserProfile], Iterable[Iterable[text_type]]) -> None
huddles = {} # type: Dict[text_type, Huddle]
huddles_by_id = {} # type: Dict[int, Huddle]
huddle_set = set() # type: Set[Tuple[text_type, Tuple[int, ...]]]
existing_huddles = set() # type: Set[text_type]
for huddle in Huddle.objects.all():
existing_huddles.add(huddle.huddle_hash)
for huddle_users in huddle_user_list:
user_ids = [users[email].id for email in huddle_users] # type: List[int]
huddle_hash = get_huddle_hash(user_ids)
if huddle_hash in existing_huddles:
continue
huddle_set.add((huddle_hash, tuple(sorted(user_ids))))
huddles_to_create = [] # type: List[Huddle]
for (huddle_hash, _) in huddle_set:
huddles_to_create.append(Huddle(huddle_hash=huddle_hash))
Huddle.objects.bulk_create(huddles_to_create)
for huddle in Huddle.objects.all():
huddles[huddle.huddle_hash] = huddle
huddles_by_id[huddle.id] = huddle
recipients_to_create = [] # type: List[Recipient]
for (huddle_hash, _) in huddle_set:
recipients_to_create.append(Recipient(type_id=huddles[huddle_hash].id, type=Recipient.HUDDLE))
Recipient.objects.bulk_create(recipients_to_create)
huddle_recipients = {} # type: Dict[text_type, Recipient]
for recipient in Recipient.objects.filter(type=Recipient.HUDDLE):
huddle_recipients[huddles_by_id[recipient.type_id].huddle_hash] = recipient
subscriptions_to_create = [] # type: List[Subscription]
for (huddle_hash, huddle_user_ids) in huddle_set:
for user_id in huddle_user_ids:
subscriptions_to_create.append(Subscription(active=True, user_profile_id=user_id,
recipient=huddle_recipients[huddle_hash]))
Subscription.objects.bulk_create(subscriptions_to_create)
| joyhchen/zulip | zerver/lib/bulk_create.py | Python | apache-2.0 | 6,505 |
import json
import random
import subprocess
import sys
import opt_bug_reducer
import swift_tools
def random_bug_finder(args):
"""Given a path to a sib file with canonical sil, attempt to find a perturbed
list of passes that the perf pipeline"""
tools = swift_tools.SwiftTools(args.swift_build_dir)
config = swift_tools.SILToolInvokerConfig(args)
json_data = json.loads(subprocess.check_output(
[tools.sil_passpipeline_dumper, '-Performance']))
passes = sum((p[1:] for p in json_data), [])
passes = ['-' + x[1] for x in passes]
extra_args = []
if args.extra_args is not None:
extra_args.extend(args.extra_args)
sil_opt_invoker = swift_tools.SILOptInvoker(config, tools,
args.input_file,
extra_args)
# Make sure that the base case /does/ crash.
max_count = args.max_count
for count in range(max_count):
print("Running round %i/%i" % (count, max_count))
random.shuffle(passes)
filename = sil_opt_invoker.get_suffixed_filename(str(count))
result = sil_opt_invoker.invoke_with_passlist(passes, filename)
if result['exit_code'] == 0:
print("*** Success with PassList: %s" % (' '.join(passes)))
continue
cmdline = sil_opt_invoker.cmdline_with_passlist(passes)
print("*** Fail with PassList: %s" % (' '.join(passes)))
print("*** Output File: %s" % filename)
print("*** Reproducing commandline: %s" % ' '.join(cmdline))
print("*** Trying to reduce pass list and function list")
result = opt_bug_reducer.pass_bug_reducer(tools, config, passes,
sil_opt_invoker, True)
if not result:
sys.exit(-1)
def add_parser_arguments(parser):
"""Add parser arguments for random_bug_reducer"""
parser.set_defaults(func=random_bug_finder)
parser.add_argument('input_file', help='The input file to optimize')
parser.add_argument('--module-cache', help='The module cache to use')
parser.add_argument('--sdk', help='The sdk to pass to sil-opt')
parser.add_argument('--target', help='The target to pass to sil-opt')
parser.add_argument('--resource-dir',
help='The resource-dir to pass to sil-opt')
parser.add_argument('--work-dir',
help='Working directory to use for temp files',
default='bug_reducer')
parser.add_argument('--module-name',
help='The name of the module we are optimizing')
parser.add_argument('--max-count',
help='Maximum number of permutations to try before'
' exiting',
default=100)
parser.add_argument('--extra-silopt-arg', help='extra argument to pass to '
'sil-opt',
dest='extra_args', action='append')
| tardieu/swift | utils/bug_reducer/bug_reducer/random_bug_finder.py | Python | apache-2.0 | 3,006 |
import sideboard
| migetman9/ubersystem | conftest.py | Python | agpl-3.0 | 17 |
import os
import bpy
from bpy.props import *
bl_info = {
"name": "PMK PBR Materials",
"author": "Karol Wajs",
"version": (0, 0, 1),
"blender": (2, 7, 6),
"location": "Viewport",
"description": "Adds panel in material properties that allows editing material PBR properties.",
"category": "Material"
}
def register():
bpy.utils.register_class(OBJECT_PT_MaterialProps)
bpy.types.Material.pmk = PointerProperty(type=OBJECT_PT_MaterialProps)
bpy.utils.register_class(OBJECT_PT_MaterialPropsUI)
def unregister():
bpy.utils.unregister_class(OBJECT_PT_MaterialPropsUI)
del bpy.types.Material.pmk
bpy.utils.unregister_class(OBJECT_PT_MaterialProps)
if __name__ == "__main__":
register()
class OBJECT_PT_MaterialPropsUI(bpy.types.Panel):
bl_label = "PMK"
bl_idname = "OBJECT_PT_MaterialPropsUI"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_options = {'DEFAULT_CLOSED'}
bl_context = "material"
def draw(self, context):
layout = self.layout
obj = context.object
layout.row().prop(obj.active_material.pmk, "roughness")
layout.row().prop(obj.active_material.pmk, "metallic")
layout.row().prop(obj.active_material.pmk, "reflectance")
layout.row().prop(obj.active_material.pmk, "clearCoat")
layout.row().prop(obj.active_material.pmk, "clearCoatRoughness")
layout.row().prop(obj.active_material.pmk, "anisotropy")
layout.row().prop(obj.active_material.pmk, "emissive")
class OBJECT_PT_MaterialProps(bpy.types.PropertyGroup):
'''Common module properties '''
roughness = FloatProperty(default = 0.6, name = 'Roughness', min = 0.0, max = 1.0)
metallic = FloatProperty(default = 0, name = 'Metallic', min = 0.0, max = 1.0)
reflectance = FloatProperty(default = 0.5, name = 'Reflectance', min = 0.0, max = 1.0)
clearCoat = FloatProperty(default = 0, name = 'ClearCoat', min = 0.0, max = 1.0)
clearCoatRoughness = FloatProperty(default = 0.1, name = 'ClearCoat Roughness', min = 0.0, max = 1.0)
anisotropy = FloatProperty(default = 0.0, name = 'Anisotropy', min = 0.0, max = 1.0)
emissive = FloatProperty(default = 0.0, name = 'Emissive', min = 0.0, max = 1.0)
| DezerteR/PMK-Blender-Scripts | PMK_PBRMaterialProperties.py | Python | mit | 2,250 |
#!/usr/bin/env python
"""
Select a source file, then view it with syntax highlighting.
"""
import argparse
import subprocess
import sys
try:
import tkFileDialog
except ImportError:
try:
from tkinter import filedialog as tkFileDialog
except:
pass
from cjh.cli import Fileman
from cjh.config import Config
from cjh.lists import PlainList
__author__ = 'Chris Horn <[email protected]>'
def _parse_args():
"""
Scan for --shell option.
"""
parser = argparse.ArgumentParser(description=__doc__)
if __name__ == '__main__':
parser.parse_args()
def view_file(file_):
"""
View file with syntax-highlighting and paging.
"""
proc = subprocess.Popen(
'terminator -x highlighter.py {}'.format(file_), shell=True)
proc.wait()
# If help flag found, print help and exit.
_parse_args()
# Get default UI name and load
CONFIG = Config()
SHELL = CONFIG.start_user_profile()
def main():
"""
Display a greeting, view file if filename given, otherwise display
zenity file dialog.
"""
# Greeting
SHELL.welcome('Simple Source Viewer', description=__doc__)
# If filename given, view that file.
if len(sys.argv[1:]) > 0:
filename = sys.argv[1]
view_file(filename)
# Otherwise, use zenity to display a file dialog.
else:
while True:
if SHELL.interface == 'zenity':
filename = subprocess.check_output(
'zenity --file-selection', shell=True)
filename = filename.strip()
elif SHELL.interface == 'Tk':
filename = str(tkFileDialog.askopenfilename(
parent=SHELL.main_window,
filetypes=[('Python files', '*.py')],
title='Choose a file')).split('/')[-1]
if len(filename) == 0:
return 0
elif SHELL.interface == 'bash':
plain_list = PlainList([name for name in Fileman.ls(
opts='BF', get_list=True) if not name.endswith('/')])
filename = plain_list[SHELL.list_menu(plain_list) - 1]
if filename[-1] == '*':
filename = filename[:-1]
view_file(filename)
if __name__ == '__main__':
main()
| hammerhorn/hammerhorn-jive | cloop/cats/view.py | Python | gpl-2.0 | 2,314 |
"""Standard typecasts for Nengo on SpiNNaker."""
import rig.type_casts as tp
value_to_fix = tp.float_to_fix(True, 32, 15) # Float -> S16.15
fix_to_value = tp.fix_to_float(True, 32, 15) # S16.15 -> Float
np_to_fix = tp.NumpyFloatToFixConverter(True, 32, 15) # Float -> S16.15
fix_to_np = tp.NumpyFixToFloatConverter(15) # xX.15 -> Float
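# Illustrative sketch (not part of the original module), assuming the rig
# converters above are callables as their names suggest: 0.5 in S16.15 is
# 0.5 * 2**15 == 16384, and the fix-to-float converters map it back.
def _example_round_trip():
    import numpy as np
    fixed = value_to_fix(0.5)       # expected: 16384
    restored = fix_to_value(fixed)  # expected: 0.5
    return restored, fix_to_np(np_to_fix(np.array([0.5, -1.0])))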
| project-rig/nengo_spinnaker | nengo_spinnaker/utils/type_casts.py | Python | mit | 343 |
#!/usr/bin/python -u
import subprocess
from time import sleep, strftime
import os, sys, fcntl, signal
import json, urllib2, tempfile
from datetime import datetime, timedelta, date
import optparse
import ConfigParser
import errno
done = False
run_now = False
STATS_STR = {'NEW DB ENTRIES':'new_entries', 'SUCCESSFUL MATCH':'found_match', 'FAILED REGEX':'failed_match',
'FAILED DOWNLOAD':'failed_download', 'ALL URLs':'all_urls'}
STATS = {'new_entries':[], 'found_match':[], 'failed_match':[], 'failed_download':[], 'all_urls':[]}
STAT_FILE = 'webcrawler_run_stats'
START_DATE = date.today()
DAY = timedelta(days=1)
SCRAPY = 'scrapy' # command to run scrapy, if Scrapy was not installed from repositories,
# add whole path to 'scrapy' script, located in Scrapy/build
def parse_argv():
opts = {'user':'jozefceluch', 'hour':None, 'minute':'00', 'reset':None, 'db':None,
'search_id':None, 'api_key':None, 'folder':None, 'reset_now':False}
opt_parser = optparse.OptionParser("%prog [options] config.ini") # 1st argument is usage, %prog is replaced with sys.argv[0]
conf_parser = ConfigParser.SafeConfigParser()
opt_parser.add_option(
"-d", "--db", # short and long option
# dest="delay", # not needed in this case, because default dest name is derived from long option
# type="int", # "string" is default, other types: "int", "long", "choice", "float" and "complex"
# action="store", # "store" is default, other actions: "store_true", "store_false" and "append"
# default=0, # set default value here, None is used otherwise
help="Prepared database file",
)
opt_parser.add_option(
"-u", "--user",
help="User registered in database",
)
opt_parser.add_option(
"--hour",
help="Hour when to run crawler",
)
opt_parser.add_option(
"--minute",
help="Minute when to run crawler",
)
opt_parser.epilog = "To quit send SIGINT, SIGQUIT or SIGTERM. To show statistics send SIGUSR1."
options, args = opt_parser.parse_args()
if not args:
args = ['config.ini']
for ini_file in args:
try:
if not conf_parser.read(ini_file):
print '%s is not valid config file' %ini_file
sys.exit(1)
except ConfigParser.Error:
print '%s is not valid config file' %ini_file
sys.exit(1)
if conf_parser.has_section('database'):
if conf_parser.has_option('database','file'):
opts['db'] = conf_parser.get('database', 'file')
if conf_parser.has_option('database','user'):
opts['user'] = conf_parser.get('database', 'user')
if conf_parser.has_section('time'):
if conf_parser.has_option('time','hour'):
opts['hour'] = conf_parser.get('time', 'hour')
if conf_parser.has_option('time','minute'):
opts['minute'] = conf_parser.get('time', 'minute')
if conf_parser.has_section('scrapy'):
if conf_parser.has_option('scrapy','reset'):
opts['reset'] = conf_parser.get('scrapy', 'reset')
if conf_parser.has_option('scrapy','api_key'):
opts['api_key'] = conf_parser.get('scrapy', 'api_key')
if conf_parser.has_option('scrapy','search_id'):
opts['search_id'] = conf_parser.get('scrapy', 'search_id')
if options.db:
opts['db'] = options.db
if options.user:
opts['user'] = options.user
if options.hour:
opts['hour'] = options.hour
if options.minute:
opts['minute'] = options.minute
return opts
# use parser.error to report missing options or args:
#parser.error("Option X is not set")
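# Illustrative sketch (not in the original script): a sample config.ini covering
# the sections read by parse_argv() above; every value is a placeholder.
EXAMPLE_CONFIG_INI = """
[database]
file = crawler.db
user = jozefceluch
[time]
hour = 03
minute = 30
[scrapy]
reset = 30
api_key = YOUR_GOOGLE_API_KEY
search_id = YOUR_CUSTOM_SEARCH_ID
"""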
def save_stats():
try:
fd = open(STAT_FILE, 'a+b')
except (IOError, OSError):
print 'Unable to open file'
print STATS
fd.write('%s %s\n' %(str(date.today()), json.dumps(STATS)))
    fd.close()
def print_stats():
print 'CURRENT RUN STATS\n'
for entry in STATS.keys():
result = 0
for i in STATS[entry]:
result = result + int(i)
print '\t%s %d' %(entry, result)
print "\n"
def handler(signum, frame):
global done
global run_now
if (signum == signal.SIGUSR1):
print_stats()
elif (signum == signal.SIGUSR2):
run_now = True
else:
print "Program finished"
done = 1
save_stats()
print_stats()
def reg_signals():
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGQUIT, handler)
signal.signal(signal.SIGUSR1, handler)
signal.signal(signal.SIGUSR2, handler)
def run_parser(spider, db, user):
s = None
ret = None
try:
s = subprocess.Popen(['./parser', '-f','%s.item' %spider, '-d', db, '-u', user], stdout=subprocess.PIPE)
except OSError, (e, msg):
if e == errno.ENOENT:
print "ERROR: Parser program is not in current directory"
print msg
sys.exit(1)
# while s.poll() == None:
try:
out = s.communicate()[0]
for line in out.strip().split('\n'):
try:
name, value = line.strip().split(':')
print name, value
for i in STATS_STR.keys():
if name.strip() == i:
STATS[STATS_STR[name.strip()]].append(value.strip())
except (ValueError, NameError):
print "Error processing parser response"
continue
ret = s.wait()
except (AttributeError):
print "Error parsing results"
return ret
def run_process(opts):
spider_pid = {'bugzilla': None, 'google':None}
pids=set()
for spider in spiders:
args=['%s runspider spiders/%s_spider.py --set reset=%s' %(SCRAPY, spider, opts['reset_now'])]
if spider == 'google':
args[0] += ' --set id=%s --set key=%s' %(opts['search_id'], opts['api_key'])
p = subprocess.Popen(args, shell=True)
spider_pid[spider]=p.pid
pids.add(p.pid)
while pids:
spider=None
pid,retval=os.wait()
print '%s finished' %pid
for i in spider_pid.keys():
if spider_pid[i] == pid:
spider = i
pids.remove(pid)
run_parser(spider = spider, db = opts['db'], user = opts['user'])
if __name__ == "__main__":
reg_signals()
opts = parse_argv()
print opts
reset_period = None
try:
reset_period = int(opts['reset']) * DAY
except (ValueError):
reset_period = 30 * DAY
reset_date = START_DATE + reset_period
spiders = ['bugzilla', 'google']
hrs = opts['hour']
mins = opts['minute']
if hrs == None:
hrs = strftime('%H')
if mins == None:
mins = '00'
if len(hrs) == 1:
hrs = '0%s'%hrs
if len(mins) == 1:
mins = '0%s' % mins
try:
int(hrs)
int(mins)
except (ValueError):
print 'Input error, insert valid numbers'
sys.exit(1)
print "PID: %s" %os.getpid()
curr_time = datetime.now().replace(microsecond=0)
usr_time = curr_time.replace(hour=int(hrs), minute=int(mins), microsecond=0)
print "Run program at: %s:%s" %(hrs, mins)
while True:
if done:
break
print "Time: %s" %strftime('%H:%M:%S')
if curr_time.date() == reset_date:
opts['reset_now'] = True
reset_date = reset_date + reset_period
print 'Search reset'
else:
opts['reset'] = False
if run_now or (datetime.now().replace(second=0, microsecond=0) == usr_time.replace(second=0)):
run_now = False
run_start = datetime.now()
print "process started: %s" %run_start
run_process(opts)
run_end = datetime.now()
print "process finished: %s" %run_end
runtime = run_end - run_start
if ((runtime.microseconds + (runtime.seconds + runtime.days * 24 * 3600) * 10**6) / 10**6) < 60:
print 'Process ran less than a minute'
sleep(60 - ((runtime.microseconds + (runtime.seconds + runtime.days * 24 * 3600) * 10**6) / 10**6))
usr_time = usr_time.replace(second=0) + DAY
else:
curr_time = datetime.now().replace(microsecond=0)
sleep_time = usr_time - curr_time - timedelta(seconds=usr_time.second-1)
if ((sleep_time.microseconds + (sleep_time.seconds + sleep_time.days * 24 * 3600) * 10**6) / 10**6) < 0:
sleep_time = DAY + sleep_time
usr_time = usr_time.replace(second=0) + DAY
print "Sleeping until %s" %(sleep_time + curr_time)
sleep((sleep_time.microseconds + (sleep_time.seconds + sleep_time.days * 24 * 3600) * 10**6) / 10**6)
#def run_spider(spider, options):
# """ Unused method to run spider
# Method to run spider through scrapyd daemon. This is not used by default
# because all spiders run in local folder and are not managed by the daemon.
# Should be usable with minor updates.
#
# """
# url = 'http://%s:%s/schedule.json' %(options['server'], options['port'])
# proj = 'project=%s' %options['project']
# spid = 'spider=%s' %spider
# fldr = 'folder=%s' %options['folder']
# raise Exception
# s = subprocess.Popen(['curl','-s', url, '-d', proj, '-d', spid, '-d', fldr], stdout=subprocess.PIPE)
# (out, err) = s.communicate()
# print out, err
# out = out.strip('{}\n')
# out = out.split(',')
# ok = False
# for item in out:
# (key, value) = item.split(':')
# key = key.strip(' \"')
# value = value.strip(' \"')
# print key, value
# if ok:
# return value
# if key == 'status' and value == 'ok':
# ok = True
# if not ok:
## raise exception
# print "Spider did not start successfully"
#def get_items(opts, job_id):
# """ Unused method to retrieve items from spider
# """
# url = 'http://%s:%s/listjobs.json?project=%s' %(opts['server'], opts['port'], opts['project'])
# done = False
# spider_name = None
# while not done:
# s = subprocess.Popen(['curl','-s', url], stdout = subprocess.PIPE)
# (out, err) = s.communicate()
# out = json.loads(out)
# for i in out['finished']:
# if i['id'] == job_id:
# spider_name = i['spider']
# done = True
# if not done:
# print 'Waiting for crawl to finish'
# sleep(10)
# url = 'http://localhost:6800/items/tutorial/%s/%s.jl' %(spider_name, job_id)
# f = tempfile.TemporaryFile()
# g = open("%s"%job_id, "wb+")
# data = urllib2.urlopen(url).read()
# f.write(data)
# f.seek(0)
# lines = []
# for line in f.readlines():
# line = json.loads(line)
# g.write('%s\n' %line['url'])
# g.close()
# f.close()
#
| dodkoC/webCrawler | bin/webCrawler_sched.py | Python | gpl-3.0 | 11,112 |
def extractTsuigeki(item):
"""
# Tsuigeki Translations
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'Seiju no Kuni no Kinju Tsukai' in item['tags'] and (chp or vol):
return buildReleaseMessageWithType(item, 'Seiju no Kuni no Kinju Tsukai', vol, chp, frag=frag, postfix=postfix)
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractTsuigeki.py | Python | bsd-3-clause | 410 |
# module_interface.py, part of Rubber building system for LaTeX documents..
# Copyright (C) 2015-2015 Nicolas Boulenguez <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module defines the common interface for all rubber modules.
Most of them are implemented as Python modules under
rubber.latex_modules. Each one contains a subclass of
rubber.module_interface.Module that will be instantiated once when the
plugin is loaded by rubber.converters.latex.
rubber.converters.latex also declares a subclass that is instantiated
each time a .rub file is read.
"""
import abc
from rubber import msg, _
import rubber.util
class Module:
# This class may not be instantiated directly, only subclassed.
__metaclass__ = abc.ABCMeta
"""
This is the base class for modules. Each module should define a class
named 'Module' that derives from this one. The default implementation
provides all required methods with no effects.
The constructor is mandatory. Its profile must be
def __init__ (self, document, context):
'document' is the compiling environment (an instance of
converters.latex.LaTeXDep)
'context' is a dictionary that describes the command that caused
the module to load.
"""
def pre_compile (self):
"""
This method is called before the first LaTeX compilation. It is
supposed to build any file that LaTeX would require to compile the
document correctly. The method must return true on success.
"""
return True
def post_compile (self):
"""
This method is called after each LaTeX compilation. It is supposed to
process the compilation results and possibly request a new
compilation. The method must return true on success.
"""
return True
def clean (self):
"""
This method is called when cleaning the compiled files. It is supposed
        to remove all the files that this module generates.
"""
def command (self, cmd, args):
"""
This is called when a directive for the module is found in the source.
We treat syntax errors in the directive as fatal, aborting the run.
"""
try:
handler = getattr (self, "do_" + cmd)
except AttributeError:
# there is no do_ method for this directive, which means there
# is no such directive.
msg.error (_("no such directive '%s'") % cmd, pkg=self.__module__)
rubber.util.abort_rubber_syntax_error ()
try:
return handler (*args)
except TypeError:
# Python failed to coerce the arguments given into whatever
# the handler would like to see. report a generic failure.
msg.error (_("invalid syntax for directive '%s'") % cmd, pkg=self.__module__)
rubber.util.abort_rubber_syntax_error ()
def get_errors (self):
"""
This is called if something has failed during an operation performed
by this module. The method returns a generator with items of the same
form as in LaTeXDep.get_errors.
"""
# TODO: what does this mean (copied from rubber.converters.latex)?
if None:
yield None
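# Illustrative sketch (not part of the original file): the smallest possible
# module following the interface above. A real plugin would live under
# rubber.latex_modules and register its dependencies with 'document'.
class _ExampleModule (Module):
    def __init__ (self, document, context):
        self.doc = document
    def do_option (self, value):
        # Handles a hypothetical directive "option <value>" from the source.
        self.value = value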
| oracleyue/rubber-for-latex | src/module_interface.py | Python | gpl-2.0 | 3,888 |
from contextlib import suppress
from .controller import Controller
from ..protocol.proto.main_pb2 import Robot as RobotMessage, BaseStation
from ..util import get_config
import asyncio
import websockets
import logging
from sys import stdout
logger = logging.getLogger(__name__)
class Component(object):
def __init__(self, **kwargs):
self.parts = kwargs
self.state = {k: None for k in kwargs}
def check_updates(self, msg):
needs_update = False
for k, v in self.parts.items():
new_value = v()
old_value = self.state[k]
if new_value != old_value:
needs_update = True
setattr(msg, k, new_value)
msg.update = True
self.state[k] = new_value
return needs_update
class RobotState(object):
def __init__(self, controller):
self.controller = controller
self.headlights = Component(on=controller.get_y)
self.motor_right = Component(
# speed=lambda: int(controller.get_left_y() * -120),
speed=lambda: self.calculate_motor_speed()[0],
breaks=controller.get_b)
self.motor_left = Component(
# speed=lambda: int(controller.get_left_y() * -120),
speed=lambda: self.calculate_motor_speed()[1],
breaks=controller.get_b)
def _neg(self, num):
if num < 0:
return -1
return 1
def calculate_motor_speed(self):
x = self.controller.get_left_x()
y = self.controller.get_left_y()
r, l = -y, -y
# r_neg = self._neg()
# l_neg = self._neg()
# forward_value = abs(int((abs(y) - abs(x)) * 100))
#
# if abs(x) > abs(y) or y > 0:
# if x > 0:
# forward_value *= -1
# # 100 + forward_value
#
# return int(x)
# if x < 0:
# r += x
# else:
# l -= x
x *= 1
r += -x/4
l += x/4
modifier = 120
if self.controller.get_x():
modifier = 60
r = int(r*modifier)
l = int(l*modifier)
return r, l
async def run(url):
logger.info('Connecting to {}'.format(url))
Controller.init()
controller = Controller(0)
robot_state = RobotState(controller)
async with websockets.connect(url) as websocket:
headlights_state = False
headlights_btn_state = False
while True:
controller.update()
robot_msg = RobotMessage()
# robot_msg.motor_left_rpm.update = True
# robot_msg.motor_left_rpm.speed = 120
#
# robot_msg.motor_right_rpm.update = True
# robot_msg.motor_right_rpm.speed = 120
# robot_msg.arm.update = True
# robot_msg.arm.degree = 10
# robot_msg.wrist.update = True
# robot_msg.wrist.degree = 2
# robot_msg.camera.update = True
# robot_msg.camera.degree = 0
robot_msg.claw.update = True
robot_msg.claw.degree = controller.get_a() * 90
robot_msg.arm.update = True
if controller.get_right_trigger() > 0.9:
robot_msg.arm.degree = 5304
else:
robot_msg.arm.degree = 3120
robot_msg.camera.update = True
robot_msg.camera.degree = 190 - int((controller.get_right_x()) * 190)
if headlights_btn_state ^ controller.get_y():
headlights_btn_state = controller.get_y()
if headlights_btn_state == True:
headlights_state = not headlights_state
robot_msg.headlights.update = True
robot_msg.headlights.on = headlights_state
# robot_state.headlights.check_updates(robot_msg.headlights)
robot_state.motor_right.check_updates(robot_msg.motor_right_rpm)
robot_state.motor_left.check_updates(robot_msg.motor_left_rpm)
ser_msg = robot_msg.SerializeToString()
await websocket.send(ser_msg)
with suppress(asyncio.TimeoutError):
msg = await asyncio.wait_for(websocket.recv(), .1)
# print(msg)
base_msg = BaseStation()
base_msg.ParseFromString(msg)
print("SD left ", base_msg.sensor_data.front_left)
print("SD right ", base_msg.sensor_data.front_right)
def main():
config = get_config()
loop = asyncio.get_event_loop()
loop.run_until_complete(run("ws://{0.addr}:{0.port}".format(config)))
| ksurct/MercuryRoboticsEmbedded2016 | ksurobot/basestation/__init__.py | Python | apache-2.0 | 4,654 |
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Centralize knowledge about how to create standardized Google Storage paths.
This includes definitions for various build flags:
SKIP - means a given build is bad and should not have payloads generated.
FINISHED - means that the payloads have been fully generated.
LOCK - means that payload processing is in progress on the host which
owns the locks. Locks have a timeout associated with them in
case of error, but are not 100% atomic when a lock is timing out.
Example file paths:
gs://chromeos-releases/blah-channel/board-name/1.2.3/payloads/SKIP_flag
gs://chromeos-releases/blah-channel/board-name/1.2.3/payloads/FINISHED_flag
gs://chromeos-releases/blah-channel/board-name/1.2.3/payloads/LOCK_flag
"""
# pylint: disable=bad-continuation
# pylint: disable=bad-whitespace
from __future__ import print_function
import hashlib
import os
import random
import re
import fixup_path
fixup_path.FixupPath()
from chromite.lib.paygen import utils
class Build(utils.RestrictedAttrDict):
"""Define a ChromeOS Build.
The order of attributes in self._slots dictates the order attributes
are printed in by __str__ method of super class. Keep the attributes
that are more helpful in identifying this build earlier in the list,
because this string ends up cut off in email subjects.
Fields:
board: The board of the image "x86-mario", etc.
bucket: The bucket of the image. "chromeos-releases" as default.
channel: The channel of the image "stable-channel", "nplusone", etc.
uri: The URI of the build directory.
version: The version of the image. "0.14.23.2", "3401.0.0", etc.
"""
_slots = ('board', 'version', 'channel', 'bucket', 'uri')
_name = 'Build definition'
def __init__(self, *args, **kwargs):
super(Build, self).__init__(*args, **kwargs)
# If these match defaults, set to None.
self._clear_if_default('bucket', ChromeosReleases.BUCKET)
class Image(utils.RestrictedAttrDict):
"""Define a ChromeOS Image.
Fields:
board: The board of the image "x86-mario", etc.
bucket: The bucket of the image. "chromeos-releases" as default.
channel: The channel of the image "stable-channel", "nplusone", etc.
image_channel: Sometimes an image has a different channel than the build
directory it's in. (ie: nplusone). None otherwise.
image_version: Sometimes an image has a different version than the build
directory it's in. (ie: nplusone). None otherwise.
key: The key the image was signed with. "premp", "mp", "mp-v2"
This is not the board specific key name, but the general value used
in image/payload names.
uri: The URI of the image. This URI can be any format understood by
urilib.
version: The version of the image. "0.14.23.2", "3401.0.0", etc.
"""
_name = 'Image definition'
_slots = ('board', 'version', 'channel', 'key',
'image_channel', 'image_version', 'bucket',
'uri')
def __init__(self, *args, **kwargs):
super(Image, self).__init__(*args, **kwargs)
# If these match defaults, set to None.
self._clear_if_default('bucket', ChromeosReleases.BUCKET)
self._clear_if_default('image_channel', self['channel'])
self._clear_if_default('image_version', self['version'])
def __str__(self):
if self.uri:
return '%s' % self.uri.split('/')[-1]
else:
return ('Image: %s:%s/%s%s/%s%s/%s (no uri)' %
(self.bucket, self.board, self.channel,
'(%s)' % self.image_channel if self.image_channel else '',
self.version,
'(%s)' % self.image_version if self.image_version else '',
self.key))
class UnsignedImageArchive(utils.RestrictedAttrDict):
"""Define a unsigned ChromeOS image archive.
Fields:
bucket: The bucket of the image. "chromeos-releases" as default.
channel: The channel of the image "stable-channel", "nplusone", etc.
board: The board of the image "x86-mario", etc.
version: The version of the image. "0.14.23.2", "3401.0.0", etc.
milestone: the most recent branch corresponding to the version; "R19" etc
image_type: "test" or "recovery"
uri: The URI of the image. This URI can be any format understood by
urilib.
"""
_name = 'Unsigned image archive definition'
_slots = ('bucket', 'channel', 'board', 'version', 'milestone', 'image_type',
'uri')
def __str__(self):
if self.uri:
return '%s' % self.uri.split('/')[-1]
else:
return ('Unsigned image archive: %s:%s/%s/%s-%s/%s (no uri)' %
(self.bucket, self.board, self.channel,
self.milestone, self.version,
self.image_type))
class Payload(utils.RestrictedAttrDict):
"""Define a ChromeOS Payload.
Fields:
tgt_image: An instance of Image saying what the payload updates to.
src_image: An instance of Image showing what it updates from. None for
Full updates.
uri: The URI of the payload. This can be any format understood by urilib.
"""
_name = 'Payload definition'
_slots = ('tgt_image', 'src_image', 'uri')
def __str__(self):
if self.uri:
return self.uri.split('/')[-1]
else:
return '%s -> %s (no uri)' % (self.src_image or 'any', self.tgt_image)
class ChromeosReleases(object):
"""Name space class for static methods for URIs in chromeos-releases."""
BUCKET = 'chromeos-releases'
# Build flags
SKIP = 'SKIP'
FINISHED = 'FINISHED'
LOCK = 'LOCK'
FLAGS = (SKIP, FINISHED, LOCK)
UNSIGNED_IMAGE_TYPES = ('test', 'recovery')
@staticmethod
def BuildUri(channel, board, version, bucket=None):
"""Creates the gspath for a given build.
Args:
      channel: What channel does the build belong to. Usually "xxx-channel".
      board: What board is the build for? "x86-alex", "lumpy", etc.
      version: What is the build version. "3015.0.0", "1945.76.3", etc
bucket: What bucket is the build in? (None means ChromeosReleases.BUCKET)
Returns:
The url for the specified build artifacts. Should be of the form:
gs://chromeos-releases/blah-channel/board-name/1.2.3
"""
if not bucket:
bucket = ChromeosReleases.BUCKET
return 'gs://%s/%s/%s/%s' % (bucket, channel, board, version)
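  # Illustration (added; not part of the original chromite source, sample
  # values are made up): with the default bucket,
  # BuildUri('stable-channel', 'x86-alex', '3015.0.0') returns
  # 'gs://chromeos-releases/stable-channel/x86-alex/3015.0.0'.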
@staticmethod
def GeneratorUri(channel, board, version, bucket=None):
"""Creates the gspath for a given build image.
Args:
      channel: What channel does the build belong to. Usually "xxx-channel".
board: What board is the build for? "x86-alex", "lumpy", etc.
version: What is the build version. "3015.0.0", "1945.76.3", etc
bucket: What bucket is the build in? Usually "chromeos-releases".
Returns:
The url for the specified build's delta generator zip file.
"""
return os.path.join(ChromeosReleases.BuildUri(channel,
board,
version,
bucket=bucket),
'au-generator.zip')
@staticmethod
def BuildPayloadsUri(channel, board, version, bucket=None):
"""Creates the gspath for the payloads of a given build.
Args:
      channel: What channel does the build belong to. Usually "xxx-channel".
      board: What board is the build for? "x86-alex", "lumpy", etc.
      version: What is the build version. "3015.0.0", "1945.76.3", etc
bucket: What bucket is the build in? (None means ChromeosReleases.BUCKET)
Returns:
The url for the specified build's payloads. Should be of the form:
gs://chromeos-releases/blah-channel/board-name/1.2.3/payloads
"""
return os.path.join(ChromeosReleases.BuildUri(channel,
board,
version,
bucket=bucket),
'payloads')
@staticmethod
def BuildPayloadsSigningUri(channel, board, version, bucket=None):
"""Creates the base gspath for payload signing files.
We create a number of files during signer interaction. This method creates
the base path for all such files associated with a given build. There
should still be subdirectories per-payload to avoid collisions, but by
    using this uniform base path, clean up can be more reliable.
Args:
channel: What channel does the build belong to. Usually "xxx-channel".
board: What board is the build for? "x86-alex", "lumpy", etc.
version: What is the build version. "3015.0.0", "1945.76.3", etc
bucket: What bucket is the build in? (None means ChromeosReleases.BUCKET)
Returns:
The url for the specified build's payloads. Should be of the form:
gs://chromeos-releases/blah-channel/board-name/1.2.3/payloads/signing
"""
return os.path.join(ChromeosReleases.BuildPayloadsUri(channel,
board,
version,
bucket=bucket),
'signing')
@staticmethod
def BuildPayloadsFlagUri(channel, board, version, flag, bucket=None):
"""Creates the gspath for a given build flag.
SKIP - means a given build is bad and should not have payloads generated.
FINISHED - means that the payloads have been fully generated.
LOCK - means that payload processing is in progress on the host which
owns the locks. Locks have a timeout associated with them in
case of error, but are not 100% atomic when a lock is timing out.
Args:
      channel: What channel does the build belong to. Usually "xxx-channel".
board: What board is the build for? "x86-alex", "lumpy", etc.
version: What is the build version. "3015.0.0", "1945.76.3", etc
flag: gs_paths.SKIP, gs_paths.FINISHED, or gs_paths.LOCK
bucket: What bucket is the build in? (None means ChromeosReleases.BUCKET)
Returns:
The url for the specified build's payloads. Should be of the form:
gs://chromeos-releases/blah-channel/board-name/1.2.3/payloads/SKIP_FLAG
"""
assert flag in ChromeosReleases.FLAGS
return os.path.join(ChromeosReleases.BuildPayloadsUri(channel,
board,
version,
bucket=bucket),
'%s_flag' % flag)
@staticmethod
def ImageName(channel, board, version, key):
"""Creates the base file name for a given build image.
Args:
      channel: What channel does the build belong to. Usually xxx-channel.
      board: What board is the build for? "x86-alex", "lumpy", etc.
      version: What is the build version. "3015.0.0", "1945.76.3", etc
      key: What is the signing key. "premp", "mp", "mp-v2", etc
Returns:
The name of the specified image. Should be of the form:
chromeos_1.2.3_board-name_recovery_blah-channel_key.bin
"""
template = 'chromeos_%(version)s_%(board)s_recovery_%(channel)s_%(key)s.bin'
return template % { 'channel': channel,
'board': board,
'version': version,
'key': key }
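  # Illustration (added; sample values are made up):
  #   ImageName('stable-channel', 'x86-alex', '3015.0.0', 'mp-v2')
  #   -> 'chromeos_3015.0.0_x86-alex_recovery_stable-channel_mp-v2.bin'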
@staticmethod
def UnsignedImageArchiveName(board, version, milestone, image_type):
"""The base name for the tarball containing an unsigned build image.
Args:
board: What board is the build for? "x86-alex", "lumpy", etc.
version: What is the build version? "3015.0.0", "1945.76.3", etc
milestone: the most recent branch corresponding to the version; "R19" etc
image_type: either "recovery" or "test", currently
Returns:
The name of the specified image archive. Should be of the form:
ChromeOS-type-R19-1.2.3-board-name.tar.xz
"""
template = (
'ChromeOS-%(image_type)s-%(milestone)s-%(version)s-%(board)s.tar.xz')
return template % { 'board': board,
'version': version,
'milestone': milestone,
'image_type': image_type }
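  # Illustration (added; sample values are made up):
  #   UnsignedImageArchiveName('x86-alex', '3015.0.0', 'R23', 'test')
  #   -> 'ChromeOS-test-R23-3015.0.0-x86-alex.tar.xz'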
@staticmethod
def ImageUri(channel, board, version, key,
image_channel=None, image_version=None,
bucket=None):
"""Creates the gspath for a given build image.
Args:
      channel: What channel does the build belong to? Usually "xxx-channel"
board: What board is the build for? "x86-alex", "lumpy", etc
version: What is the build version? "3015.0.0", "1945.76.3", etc
key: What is the signing key? "premp", "mp", "mp-v2", etc
image_channel: Sometimes an image has a different channel than the build
directory it's in. (ie: nplusone).
image_version: Sometimes an image has a different version than the build
directory it's in. (ie: nplusone).
bucket: What bucket is the build in? (None means ChromeosReleases.BUCKET)
Returns:
The url for the specified build's image. Should be of the form:
gs://chromeos-releases/blah-channel/board-name/1.2.3/
chromeos_1.2.3_board-name_recovery_blah-channel_key.bin
"""
if not image_channel:
image_channel = channel
if not image_version:
image_version = version
return os.path.join(
ChromeosReleases.BuildUri(channel, board, version, bucket=bucket),
ChromeosReleases.ImageName(image_channel, board, image_version, key))
@staticmethod
def UnsignedImageArchiveUri(channel, board, version, milestone, image_type,
bucket=None):
"""Creates the gspath for a given unsigned build image archive.
Args:
      channel: What channel does the build belong to? Usually "xxx-channel"
board: What board is the build for? "x86-alex", "lumpy", etc
version: What is the build version? "3015.0.0", "1945.76.3", etc
milestone: the most recent branch corresponding to the version; "R19" etc
image_type: either "recovery" or "test", currently
bucket: What bucket is the build in? (None means ChromeosReleases.BUCKET)
Returns:
The url for the specified build's image. Should be of the form:
gs://chromeos-releases/blah-channel/board-name/1.2.3/
ChromeOS-type-R19-1.2.3-board-name.tar.xz
"""
return os.path.join(
ChromeosReleases.BuildUri(channel, board, version, bucket=bucket),
ChromeosReleases.UnsignedImageArchiveName(board, version,
milestone, image_type))
@classmethod
def ParseImageUri(cls, image_uri):
"""Parse the URI of an image into an Image object."""
# The named values in this regex must match the arguments to gspaths.Image.
exp = (r'^gs://(?P<bucket>.*)/(?P<channel>.*)/(?P<board>.*)/'
'(?P<version>.*)/chromeos_(?P<image_version>[^_]+)_'
'(?P=board)_recovery_(?P<image_channel>[^_]+)_(?P<key>[^_]+).bin$')
m = re.match(exp, image_uri)
if not m:
return None
values = m.groupdict()
# Insert the URI
values['uri'] = image_uri
# Create an Image object using the values we parsed out.
return Image(values)
@classmethod
def ParseUnsignedImageArchiveUri(cls, image_uri):
"""Parse the URI of an image into an UnsignedImageArchive object."""
# The named values in this regex must match the arguments to gspaths.Image.
exp = (r'gs://(?P<bucket>[^/]+)/(?P<channel>[^/]+)/'
'(?P<board>[^/]+)/(?P<version>[^/]+)/'
'ChromeOS-(?P<image_type>%s)-(?P<milestone>R[0-9]+)-'
'(?P=version)-(?P=board).tar.xz' %
'|'.join(cls.UNSIGNED_IMAGE_TYPES))
m = re.match(exp, image_uri)
if not m:
return None
values = m.groupdict()
# Insert the URI
values['uri'] = image_uri
# Reset values if they match their defaults.
if values['bucket'] == cls.BUCKET:
values['bucket'] = None
# Create an Image object using the values we parsed out.
return UnsignedImageArchive(values)
@staticmethod
def PayloadName(channel, board, version, key=None, random_str=None,
src_version=None, unsigned_image_type='test'):
"""Creates the gspath for a payload associated with a given build.
Args:
channel: What channel does the build belong to? Usually "xxx-channel".
board: What board is the build for? "x86-alex", "lumpy", etc.
version: What is the build version? "3015.0.0", "1945.76.3", etc
key: What is the signing key? "premp", "mp", "mp-v2", etc; None (default)
indicates that the image is not signed, e.g. a test image
random_str: Force a given random string. None means generate one.
src_version: If this payload is a delta, this is the version of the image
it updates from.
unsigned_image_type: the type descriptor (string) of an unsigned image;
significant iff key is None (default: "test")
Returns:
The name for the specified build's payloads. Should be of the form:
chromeos_0.12.433.257-2913.377.0_x86-alex_stable-channel_
delta_mp-v3.bin-b334762d0f6b80f471069153bbe8b97a.signed
chromeos_2913.377.0_x86-alex_stable-channel_full_mp-v3.
bin-610c97c30fae8561bde01a6116d65cb9.signed
"""
if random_str is None:
random.seed()
# pylint: disable=E1101
random_str = hashlib.md5(str(random.getrandbits(128))).hexdigest()
if key is None:
signed_ext = ''
key = unsigned_image_type
else:
signed_ext = '.signed'
if src_version:
template = ('chromeos_%(src_version)s-%(version)s_%(board)s_%(channel)s_'
'delta_%(key)s.bin-%(random_str)s%(signed_ext)s')
return template % { 'channel': channel,
'board': board,
'version': version,
'key': key,
'random_str': random_str,
'src_version': src_version,
'signed_ext': signed_ext,
}
else:
template = ('chromeos_%(version)s_%(board)s_%(channel)s_'
'full_%(key)s.bin-%(random_str)s%(signed_ext)s')
return template % { 'channel': channel,
'board': board,
'version': version,
'key': key,
'random_str': random_str,
'signed_ext': signed_ext,
}
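  # Illustration (added; sample values are made up). A full payload name:
  #   PayloadName('stable-channel', 'x86-alex', '2913.377.0', key='mp-v3',
  #               random_str='0123456789abcdef')
  #   -> 'chromeos_2913.377.0_x86-alex_stable-channel_full_mp-v3'
  #      '.bin-0123456789abcdef.signed'
  # A delta payload name additionally embeds src_version before the version.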
@staticmethod
def PayloadUri(channel, board, version, random_str, key=None,
image_channel=None, image_version=None,
src_version=None,
bucket=None):
"""Creates the gspath for a payload associated with a given build.
Args:
channel: What channel does the build belong to? Usually "xxx-channel"
board: What board is the build for? "x86-alex", "lumpy", etc.
version: What is the build version? "3015.0.0", "1945.76.3", etc
key: What is the signing key? "premp", "mp", "mp-v2", etc; None means
that the image is unsigned (e.g. a test image)
image_channel: Sometimes an image has a different channel than the build
directory it's in. (ie: nplusone).
image_version: Sometimes an image has a different version than the build
directory it's in. (ie: nplusone).
random_str: Force a given random string. None means generate one.
src_version: If this payload is a delta, this is the version of the image
it updates from.
bucket: What bucket is the build in? (None means ChromeosReleases.BUCKET)
Returns:
The url for the specified build's payloads. Should be of the form:
gs://chromeos-releases/stable-channel/x86-alex/2913.377.0/payloads/
chromeos_0.12.433.257-2913.377.0_x86-alex_stable-channel_
delta_mp-v3.bin-b334762d0f6b80f471069153bbe8b97a.signed
gs://chromeos-releases/stable-channel/x86-alex/2913.377.0/payloads/
chromeos_2913.377.0_x86-alex_stable-channel_full_mp-v3.
bin-610c97c30fae8561bde01a6116d65cb9.signed
"""
if image_channel is None:
image_channel = channel
if image_version is None:
image_version = version
return os.path.join(ChromeosReleases.BuildPayloadsUri(channel,
board,
version,
bucket=bucket),
ChromeosReleases.PayloadName(image_channel,
board,
image_version,
key,
random_str,
src_version))
@classmethod
def ParsePayloadUri(cls, payload_uri):
"""Parse the URI of an image into an Image object."""
# Sample Delta URI:
# gs://chromeos-releases/stable-channel/x86-mario/4731.72.0/payloads/
# chromeos_4537.147.0-4731.72.0_x86-mario_stable-channel_delta_mp-v3.bin-
# 3a90d8666d1d42b7a7367660b897e8c9.signed
# Sample Full URI:
# gs://chromeos-releases/stable-channel/x86-mario/4731.72.0/payloads/
# chromeos_4731.72.0_x86-mario_stable-channel_full_mp-v3.bin-
# 969f24ba8cbf2096ebe3c57d5f0253b7.signed
# Handle FULL payload URIs.
full_exp = (r'^gs://(?P<bucket>.*)/(?P<channel>.*)/(?P<board>.*)/'
r'(?P<version>.*)/payloads/chromeos_(?P<image_version>[^_]+)_'
r'(?P=board)_(?P<image_channel>[^_]+)_full_(?P<key>[^_]+)\.bin'
r'-[0-9A-Fa-f]+\.signed$')
m = re.match(full_exp, payload_uri)
if m:
image_values = m.groupdict()
# The image URIs can't be discovered from the payload URI.
image_values['uri'] = None
# Create the Payload.
tgt_image = Image(image_values)
return Payload(tgt_image=tgt_image, uri=payload_uri)
# Handle DELTA payload URIs.
delta_exp = (r'^gs://(?P<bucket>.*)/(?P<channel>.*)/(?P<board>.*)/'
r'(?P<version>.*)/payloads/chromeos_(?P<src_version>[^_]+)-'
r'(?P<image_version>[^_]+)_(?P=board)_'
r'(?P<image_channel>[^_]+)_delta_(?P<key>[^_]+)\.bin'
r'-[0-9A-Fa-f]+\.signed$')
m = re.match(delta_exp, payload_uri)
if m:
image_values = m.groupdict()
# The image URIs can't be discovered from the payload URI.
image_values['uri'] = None
# Remember the src_version for the src_image.
src_version = image_values['src_version']
del image_values['src_version']
# Create the payload.
tgt_image = Image(image_values)
# Set the values which are different for src versions.
image_values['version'] = src_version
# The payload URI doesn't tell us any of these values. However, it's
# a mostly safe bet that the src version has no
# image_version/image_channel.
# Not knowing the source key is problematic.
image_values['image_version'] = None
image_values['image_channel'] = None
image_values['key'] = None
src_image = Image(image_values)
return Payload(src_image=src_image, tgt_image=tgt_image, uri=payload_uri)
# The URI didn't match.
return None
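  # Illustration (added; the URI below is a made-up sample): calling
  # ParsePayloadUri on
  #   gs://chromeos-releases/stable-channel/x86-alex/2913.377.0/payloads/
  #   chromeos_2913.377.0_x86-alex_stable-channel_full_mp-v3.bin-<hex>.signed
  # yields a Payload whose tgt_image has board 'x86-alex', channel
  # 'stable-channel', version '2913.377.0' and key 'mp-v3', and whose
  # src_image is None (full payloads have no source image).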
class ChromeosImageArchive(object):
"""Name space class for static methods for URIs in chromeos-image-archive."""
BUCKET = 'chromeos-image-archive'
@classmethod
def BuildUri(cls, board, milestone, version, bucket=None):
"""Creates the gspath for a given build.
Args:
board: What board is the build for? "x86-alex", "lumpy", etc.
milestone: a number that defines the milestone mark, e.g. 19 for R19
version: "What is the build version. "3015.0.0", "1945.76.3", etc
bucket: the bucket the build in (None means cls.BUCKET)
Returns:
The url for the specified build artifacts. Should be of the form:
gs://chromeos-image-archive/board-release/R23-4.5.6
"""
bucket = bucket or cls.BUCKET
return 'gs://%s/%s-release/R%s-%s' % (bucket, board, milestone, version)
def VersionKey(version):
"""Convert a version string to a comparable value.
All old style values are considered older than all new style values.
The actual values returned should only be used for comparison against
other VersionKey results.
Args:
version: String with a build version "1.2.3" or "0.12.3.4"
Returns:
A value comparable against other version strings.
"""
key = [int(n) for n in version.split('.')]
# 3 number versions are new style.
# 4 number versions are old style.
assert len(key) in (3, 4)
if len(key) == 3:
# 1.2.3 -> (1, 0, 1, 2, 3)
return [1, 0] + key
else:
# 0.12.3.4 -> (0, 0, 12, 3, 4)
return [0] + key
def VersionGreater(left, right):
"""Compare two version strings. left > right
Args:
left: String with lefthand version string "1.2.3" or "0.12.3.4"
right: String with righthand version string "1.2.3" or "0.12.3.4"
Returns:
left > right taking into account new style versions versus old style.
"""
return VersionKey(left) > VersionKey(right)
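# Minimal usage sketch added for illustration; it is not part of the original
# chromite module and only runs when the file is executed directly. The
# channel/board/version literals below are made-up sample inputs.
if __name__ == '__main__':
  print(ChromeosReleases.BuildUri('stable-channel', 'x86-alex', '3015.0.0'))
  # Old-style 4-part versions always compare below new-style 3-part versions.
  print(VersionGreater('1.2.3', '0.12.3.4'))  # True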
| mxOBS/deb-pkg_trusty_chromium-browser | third_party/chromite/lib/paygen/gspaths.py | Python | bsd-3-clause | 26,101 |
"""Map Comprehensions"""
def inverse_filter_dict(dictionary, keys):
"""Filter a dictionary by any keys not given.
Args:
dictionary (dict): Dictionary.
keys (iterable): Iterable containing data type(s) for valid dict key.
Return:
dict: Filtered dictionary.
"""
return {key: val for key, val in dictionary.items() if key not in keys}
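# Illustrative example (added, not part of the original module):
#   inverse_filter_dict({'a': 1, 'b': 2, 'c': 3}, {'b'}) -> {'a': 1, 'c': 3}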
def ne_dict(dictionary):
"""Prune dictionary of empty key-value pairs.
Aliases: pruned()
"""
return {k: v for k, v in dictionary.items() if v}
def pruned(dictionary):
"""Prune dictionary of empty key-value pairs.
Alias of ne_dict().
"""
return ne_dict(dictionary)
def prune_by_n_required_children(dictionary, n=1):
"""Return with only key value pairs that meet required n children."""
return {key: val for key, val in dictionary.items() if len(val) >= n}
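if __name__ == '__main__':
    # Small self-check added for illustration; not part of the original module.
    sample = {'a': 1, 'b': '', 'c': None, 'd': [1, 2]}
    assert pruned(sample) == {'a': 1, 'd': [1, 2]}
    assert inverse_filter_dict(sample, {'a', 'b'}) == {'c': None, 'd': [1, 2]}
    assert prune_by_n_required_children({'x': [1, 2], 'y': [3]}, n=2) == {'x': [1, 2]}
    print('joeutils map comprehension examples passed')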
| joeflack4/jflack | joeutils/data_structures/comprehensions/maps/__init__.py | Python | mit | 883 |
# Copyright 2021 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFR-BERT experiment configurations."""
# pylint: disable=g-doc-return-or-yield,line-too-long
from tensorflow_ranking.extension import premade
# pylint: disable=g-import-not-at-top
try:
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
except ModuleNotFoundError:
raise ModuleNotFoundError(
'tf-models-official needs to be installed. Run command: '
'`pip install tf-models-official`.') from None
# pylint: enable=g-import-not-at-top
@exp_factory.register_config_factory('tfr_bert')
def tfrbert_exp() -> cfg.ExperimentConfig:
"""Defines a TFR-BERT experiment."""
config = cfg.ExperimentConfig(
task=premade.TFRBertConfig(
train_data=premade.TFRBertDataConfig(),
validation_data=premade.TFRBertDataConfig(
is_training=False, drop_remainder=False)),
trainer=cfg.TrainerConfig(
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'weight_decay_rate':
0.01,
'exclude_from_weight_decay':
['LayerNorm', 'layer_norm', 'bias'],
}
},
'learning_rate': {
'type': 'polynomial',
'polynomial': {
'initial_learning_rate': 3e-5,
'end_learning_rate': 0.0,
}
},
'warmup': {
'type': 'polynomial'
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
config.task.model.encoder.type = 'bert'
return config
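# Usage sketch (added for illustration; not part of the original file): once
# this factory is registered, the experiment config can typically be looked up
# by name, e.g. exp_factory.get_exp_config('tfr_bert') in tf-models-official.
# The exact retrieval helper depends on the installed Model Garden version.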
| tensorflow/ranking | tensorflow_ranking/examples/keras/tfrbert_task_experiments.py | Python | apache-2.0 | 2,440 |
import urllib, urllib2, cookielib, urlparse
import os, random, re
from contextlib import closing
from BeautifulSoup import BeautifulSoup
import json
BASE_PATH = 'http://www.italiansubs.net/index.php'
class Itasa(object):
"""
rss: http://www.italiansubs.net/index.php?option=com_rsssub... #myitasa or itasa subtitle feed
accept_all: yes #accept all from myitasa
itasa:
username: itasaUsername
password: itasaPassword
path: ~/subtitle/download/folder # absolute or starting from $HOME
messages:
- Grazie
- Grazie mille!!!
- Mitici
"""
def getToken(self, contentHtml):
reg = re.compile('<input type="hidden" name="([a-zA-z0-9]{32})" value="1"')
value = reg.search(contentHtml).group(1)
return value
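    # Note (added for clarity): getToken() extracts the 32-character hidden
    # form token that the Joomla-style login form embeds as
    # <input type="hidden" name="<32 chars>" value="1">; that token name is
    # later posted back with value '1' during login.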
def validator(self):
'''validator'''
from flexget import validator
d = validator.factory('dict')
d.accept('text', key='username')
d.accept('text', key='password')
d.accept('text', key='path')
d.accept('list', key='messages').accept('text')
return d
def on_process_start(self, feed):
'''Itasa login, storing cookie'''
self.config = feed.config['itasa']
cj = cookielib.CookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
self.opener.addheaders = [('User-agent', 'Mozilla/5.0'), ('Referer', BASE_PATH.rstrip('index.php'))]
response = self.opener.open(BASE_PATH)
content = response.read()
token = self.getToken(content)
login_data = urllib.urlencode({'username' : self.config['username']
, 'passwd' : self.config['password']
, 'Submit' :'Login'
, 'silent' : 'true'
, 'option' : 'com_user'
, 'task' : 'login'
, token : '1'
, 'remember':'yes'})
with closing(self.opener.open(BASE_PATH, login_data)) as page:
if page.read().find('Nome utente e password non sono corrette') != -1:
raise Exception("Wrong user or password")
def on_feed_download(self,feed):
self.on_task_download(feed)
def on_task_download(self,feed):
'''download zip file'''
for entry in feed.entries:
if entry.get('urls'):
urls = entry.get('urls')
else:
urls = [entry['url']]
for url in urls:
with closing(self.opener.open(url)) as page:
try:
content = page.read()
z = self._zip(content)
filename = z.headers.dict['content-disposition'].split('=')[1]
filename = os.path.join(self.config['path'],filename)
filename = os.path.expanduser(filename)
soup = BeautifulSoup(content)
with open(filename,'wb') as f:
f.write(z.read())
entry['output'] = filename
if 'messages' in self.config :
self._post_comment(soup,page.geturl())
self._fill_fields(entry,soup)
except ValueError:
print("Missing subtitle link in page: %s" % page.geturl())
def _fill_fields(self,entry,soup):
title = soup.find(id='remositoryfileinfo').find('center').string
m = re.search("(.*?)[\s-]+(\d+)x(\d+)", title, re.UNICODE)
if m:
show_data = m.groups()
entry['title'] = title.strip()
entry['series_name'] = show_data[0].strip()
entry['series_season'] = int(show_data[1].strip())
entry['series_episode'] = int(show_data[2].strip())
def _zip(self,content):
'''extract zip subtitle link from page, open download zip link'''
start = content.index('<center><a href="')
end = content.index('" rel',start)
url = content[start+17:end]
return self.opener.open(url)
def _post_comment(self,soup,url):
form = soup.find(id='jc_commentForm')
arg2_dict = []
for inputTag in form.findAll('input'):
if not inputTag['name'] == 'jc_name':
arg2_dict.append([inputTag['name'],inputTag['value'] if inputTag.has_key('value') else None])
m = self.config['messages']
arg2_dict.append(['jc_comment',m[random.randint(0,len(m)-1)] ])
arg2_dict.append(['jc_name',self.config['username']])
data = { 'arg2': json.dumps(arg2_dict)
, 'func' : "jcxAddComment"
, 'task' : "azrul_ajax"
, 'no_html': 1
, 'option' : "jomcomment"}
return self.opener.open(url,urllib.urlencode(data))
try:
from flexget.plugin import register_plugin
register_plugin(Itasa, 'itasa')
except:
pass
| carlo-colombo/ItasaFlexget | ItasaFlexGet.py | Python | mit | 5,083 |
# Example: monitors events and logs them into a log file.
#
import pyinotify
class Log(pyinotify.ProcessEvent):
def my_init(self, fileobj):
"""
Method automatically called from ProcessEvent.__init__(). Additional
keyworded arguments passed to ProcessEvent.__init__() are then
delegated to my_init(). This is the case for fileobj.
"""
self._fileobj = fileobj
def process_default(self, event):
self._fileobj.write(str(event) + '\n')
self._fileobj.flush()
class TrackModifications(pyinotify.ProcessEvent):
def process_IN_MODIFY(self, event):
print 'IN_MODIFY'
class Empty(pyinotify.ProcessEvent):
def my_init(self, msg):
self._msg = msg
def process_default(self, event):
print self._msg
# pyinotify.log.setLevel(10)
fo = file('/var/log/pyinotify_log', 'w')
try:
wm = pyinotify.WatchManager()
# It is important to pass named extra arguments like 'fileobj'.
handler = Empty(TrackModifications(Log(fileobj=fo)), msg='Outer chained method')
notifier = pyinotify.Notifier(wm, default_proc_fun=handler)
wm.add_watch('/tmp', pyinotify.ALL_EVENTS)
notifier.loop()
finally:
fo.close()
| seb-m/pyinotify | python2/examples/chain.py | Python | mit | 1,216 |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'dumma_af.views.home', name='home'),
# url(r'^dumma_af/', include('dumma_af.foo.urls')),
url(r'^browserid/', include('django_browserid.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('af.urls', namespace="af")),
)
| Spindel/dumma_af | dumma_af/urls.py | Python | gpl-3.0 | 667 |
# -*- coding: utf-8 -*-
from django.contrib import admin
from core.models import List, Item, Comment
from favit.models import Favorite
class ListAdmin(admin.ModelAdmin):
list_display = ["ListName","slug","ListOwner","ListOwnerState", "ListPubDate"]
search_fields = ("ListName","slug","ListOwner")
class ItemAdmin(admin.ModelAdmin):
list_display = ["list_ListName","list_slug","ItemOwner"]
search_fields = ("ItemOwner","list__ListName","list__slug",)
def list_ListName(self, instance):
return instance.list.ListName
def list_slug(self, instance):
return instance.list.slug
class CommentAdmin(admin.ModelAdmin):
list_display = ["NewComment_ListName","NewComment_slug","ComOwner","ComContent"]
search_fields = ("ComOwner","NewComment__ListName","NewComment__slug",)
def NewComment_ListName(self, instance):
return instance.NewComment.ListName
def NewComment_slug(self, instance):
return instance.NewComment.slug
class FavoriteAdmin(admin.ModelAdmin):
list_display = ["user", "target_content_type", "target_object_id", "timestamp"]
list_select_related = True
search_fields = ("user__username", )
raw_id_fields = ("user", )
admin.site.register(List, ListAdmin)
admin.site.register(Item, ItemAdmin)
admin.site.register(Comment, CommentAdmin)
admin.site.register(Favorite, FavoriteAdmin)
| soplerproject/sopler | core/admin.py | Python | agpl-3.0 | 1,431 |
"""Unit tests for the bytes and bytearray types.
XXX This is a mess. Common tests should be moved to buffer_tests.py,
which itself ought to be unified with string_tests.py (and the latter
should be modernized).
"""
import os
import re
import sys
import copy
import operator
import pickle
import tempfile
import unittest
import warnings
import test.support
import test.string_tests
import test.buffer_tests
class Indexable:
def __init__(self, value=0):
self.value = value
def __index__(self):
return self.value
class BaseBytesTest(unittest.TestCase):
def setUp(self):
self.warning_filters = warnings.filters[:]
def tearDown(self):
warnings.filters = self.warning_filters
def test_basics(self):
b = self.type2test()
self.assertEqual(type(b), self.type2test)
self.assertEqual(b.__class__, self.type2test)
def test_empty_sequence(self):
b = self.type2test()
self.assertEqual(len(b), 0)
self.assertRaises(IndexError, lambda: b[0])
self.assertRaises(IndexError, lambda: b[1])
self.assertRaises(IndexError, lambda: b[sys.maxsize])
self.assertRaises(IndexError, lambda: b[sys.maxsize+1])
self.assertRaises(IndexError, lambda: b[10**100])
self.assertRaises(IndexError, lambda: b[-1])
self.assertRaises(IndexError, lambda: b[-2])
self.assertRaises(IndexError, lambda: b[-sys.maxsize])
self.assertRaises(IndexError, lambda: b[-sys.maxsize-1])
self.assertRaises(IndexError, lambda: b[-sys.maxsize-2])
self.assertRaises(IndexError, lambda: b[-10**100])
def test_from_list(self):
ints = list(range(256))
b = self.type2test(i for i in ints)
self.assertEqual(len(b), 256)
self.assertEqual(list(b), ints)
def test_from_index(self):
b = self.type2test([Indexable(), Indexable(1), Indexable(254),
Indexable(255)])
self.assertEqual(list(b), [0, 1, 254, 255])
self.assertRaises(ValueError, bytearray, [Indexable(-1)])
self.assertRaises(ValueError, bytearray, [Indexable(256)])
def test_from_ssize(self):
self.assertEqual(bytearray(0), b'')
self.assertEqual(bytearray(1), b'\x00')
self.assertEqual(bytearray(5), b'\x00\x00\x00\x00\x00')
self.assertRaises(ValueError, bytearray, -1)
self.assertEqual(bytearray('0', 'ascii'), b'0')
self.assertEqual(bytearray(b'0'), b'0')
def test_constructor_type_errors(self):
self.assertRaises(TypeError, self.type2test, 0.0)
class C:
pass
self.assertRaises(TypeError, self.type2test, ["0"])
self.assertRaises(TypeError, self.type2test, [0.0])
self.assertRaises(TypeError, self.type2test, [None])
self.assertRaises(TypeError, self.type2test, [C()])
def test_constructor_value_errors(self):
self.assertRaises(ValueError, self.type2test, [-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize-2])
self.assertRaises(ValueError, self.type2test, [-10**100])
self.assertRaises(ValueError, self.type2test, [256])
self.assertRaises(ValueError, self.type2test, [257])
self.assertRaises(ValueError, self.type2test, [sys.maxsize])
self.assertRaises(ValueError, self.type2test, [sys.maxsize+1])
self.assertRaises(ValueError, self.type2test, [10**100])
def test_compare(self):
b1 = self.type2test([1, 2, 3])
b2 = self.type2test([1, 2, 3])
b3 = self.type2test([1, 3])
self.assertEqual(b1, b2)
self.failUnless(b2 != b3)
self.failUnless(b1 <= b2)
self.failUnless(b1 <= b3)
self.failUnless(b1 < b3)
self.failUnless(b1 >= b2)
self.failUnless(b3 >= b2)
self.failUnless(b3 > b2)
self.failIf(b1 != b2)
self.failIf(b2 == b3)
self.failIf(b1 > b2)
self.failIf(b1 > b3)
self.failIf(b1 >= b3)
self.failIf(b1 < b2)
self.failIf(b3 < b2)
self.failIf(b3 <= b2)
def test_compare_to_str(self):
warnings.simplefilter('ignore', BytesWarning)
# Byte comparisons with unicode should always fail!
# Test this for all expected byte orders and Unicode character sizes
self.assertEqual(self.type2test(b"\0a\0b\0c") == "abc", False)
self.assertEqual(self.type2test(b"\0\0\0a\0\0\0b\0\0\0c") == "abc", False)
self.assertEqual(self.type2test(b"a\0b\0c\0") == "abc", False)
self.assertEqual(self.type2test(b"a\0\0\0b\0\0\0c\0\0\0") == "abc", False)
self.assertEqual(self.type2test() == str(), False)
self.assertEqual(self.type2test() != str(), True)
def test_reversed(self):
input = list(map(ord, "Hello"))
b = self.type2test(input)
output = list(reversed(b))
input.reverse()
self.assertEqual(output, input)
def test_getslice(self):
def by(s):
return self.type2test(map(ord, s))
b = by("Hello, world")
self.assertEqual(b[:5], by("Hello"))
self.assertEqual(b[1:5], by("ello"))
self.assertEqual(b[5:7], by(", "))
self.assertEqual(b[7:], by("world"))
self.assertEqual(b[7:12], by("world"))
self.assertEqual(b[7:100], by("world"))
self.assertEqual(b[:-7], by("Hello"))
self.assertEqual(b[-11:-7], by("ello"))
self.assertEqual(b[-7:-5], by(", "))
self.assertEqual(b[-5:], by("world"))
self.assertEqual(b[-5:12], by("world"))
self.assertEqual(b[-5:100], by("world"))
self.assertEqual(b[-100:5], by("Hello"))
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
L = list(range(255))
b = self.type2test(L)
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(b[start:stop:step], self.type2test(L[start:stop:step]))
def test_encoding(self):
sample = "Hello world\n\u1234\u5678\u9abc"
for enc in ("utf8", "utf16"):
b = self.type2test(sample, enc)
self.assertEqual(b, self.type2test(sample.encode(enc)))
self.assertRaises(UnicodeEncodeError, self.type2test, sample, "latin1")
b = self.type2test(sample, "latin1", "ignore")
self.assertEqual(b, self.type2test(sample[:-3], "utf-8"))
def test_decode(self):
sample = "Hello world\n\u1234\u5678\u9abc\def0\def0"
for enc in ("utf8", "utf16"):
b = self.type2test(sample, enc)
self.assertEqual(b.decode(enc), sample)
sample = "Hello world\n\x80\x81\xfe\xff"
b = self.type2test(sample, "latin1")
self.assertRaises(UnicodeDecodeError, b.decode, "utf8")
self.assertEqual(b.decode("utf8", "ignore"), "Hello world\n")
def test_from_int(self):
b = self.type2test(0)
self.assertEqual(b, self.type2test())
b = self.type2test(10)
self.assertEqual(b, self.type2test([0]*10))
b = self.type2test(10000)
self.assertEqual(b, self.type2test([0]*10000))
def test_concat(self):
b1 = self.type2test(b"abc")
b2 = self.type2test(b"def")
self.assertEqual(b1 + b2, b"abcdef")
self.assertEqual(b1 + bytes(b"def"), b"abcdef")
self.assertEqual(bytes(b"def") + b1, b"defabc")
self.assertRaises(TypeError, lambda: b1 + "def")
self.assertRaises(TypeError, lambda: "abc" + b2)
def test_repeat(self):
for b in b"abc", self.type2test(b"abc"):
self.assertEqual(b * 3, b"abcabcabc")
self.assertEqual(b * 0, b"")
self.assertEqual(b * -1, b"")
self.assertRaises(TypeError, lambda: b * 3.14)
self.assertRaises(TypeError, lambda: 3.14 * b)
# XXX Shouldn't bytes and bytearray agree on what to raise?
self.assertRaises((OverflowError, MemoryError),
lambda: b * sys.maxsize)
def test_repeat_1char(self):
self.assertEqual(self.type2test(b'x')*100, self.type2test([ord('x')]*100))
def test_contains(self):
b = self.type2test(b"abc")
self.failUnless(ord('a') in b)
self.failUnless(int(ord('a')) in b)
self.failIf(200 in b)
self.failIf(200 in b)
self.assertRaises(ValueError, lambda: 300 in b)
self.assertRaises(ValueError, lambda: -1 in b)
self.assertRaises(TypeError, lambda: None in b)
self.assertRaises(TypeError, lambda: float(ord('a')) in b)
self.assertRaises(TypeError, lambda: "a" in b)
for f in bytes, bytearray:
self.failUnless(f(b"") in b)
self.failUnless(f(b"a") in b)
self.failUnless(f(b"b") in b)
self.failUnless(f(b"c") in b)
self.failUnless(f(b"ab") in b)
self.failUnless(f(b"bc") in b)
self.failUnless(f(b"abc") in b)
self.failIf(f(b"ac") in b)
self.failIf(f(b"d") in b)
self.failIf(f(b"dab") in b)
self.failIf(f(b"abd") in b)
def test_fromhex(self):
self.assertRaises(TypeError, self.type2test.fromhex)
self.assertRaises(TypeError, self.type2test.fromhex, 1)
self.assertEquals(self.type2test.fromhex(''), self.type2test())
b = bytearray([0x1a, 0x2b, 0x30])
self.assertEquals(self.type2test.fromhex('1a2B30'), b)
self.assertEquals(self.type2test.fromhex(' 1A 2B 30 '), b)
self.assertEquals(self.type2test.fromhex('0000'), b'\0\0')
self.assertRaises(TypeError, self.type2test.fromhex, b'1B')
self.assertRaises(ValueError, self.type2test.fromhex, 'a')
self.assertRaises(ValueError, self.type2test.fromhex, 'rt')
self.assertRaises(ValueError, self.type2test.fromhex, '1a b cd')
self.assertRaises(ValueError, self.type2test.fromhex, '\x00')
self.assertRaises(ValueError, self.type2test.fromhex, '12 \x00 34')
def test_join(self):
self.assertEqual(self.type2test(b"").join([]), b"")
self.assertEqual(self.type2test(b"").join([b""]), b"")
for lst in [[b"abc"], [b"a", b"bc"], [b"ab", b"c"], [b"a", b"b", b"c"]]:
lst = list(map(self.type2test, lst))
self.assertEqual(self.type2test(b"").join(lst), b"abc")
self.assertEqual(self.type2test(b"").join(tuple(lst)), b"abc")
self.assertEqual(self.type2test(b"").join(iter(lst)), b"abc")
self.assertEqual(self.type2test(b".").join([b"ab", b"cd"]), b"ab.cd")
# XXX more...
def test_index(self):
b = self.type2test(b'parrot')
self.assertEqual(b.index('p'), 0)
self.assertEqual(b.index('rr'), 2)
self.assertEqual(b.index('t'), 5)
self.assertRaises(ValueError, lambda: b.index('w'))
def test_count(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.count(b'i'), 4)
self.assertEqual(b.count(b'ss'), 2)
self.assertEqual(b.count(b'w'), 0)
def test_startswith(self):
b = self.type2test(b'hello')
self.assertFalse(self.type2test().startswith(b"anything"))
self.assertTrue(b.startswith(b"hello"))
self.assertTrue(b.startswith(b"hel"))
self.assertTrue(b.startswith(b"h"))
self.assertFalse(b.startswith(b"hellow"))
self.assertFalse(b.startswith(b"ha"))
def test_endswith(self):
b = self.type2test(b'hello')
self.assertFalse(bytearray().endswith(b"anything"))
self.assertTrue(b.endswith(b"hello"))
self.assertTrue(b.endswith(b"llo"))
self.assertTrue(b.endswith(b"o"))
self.assertFalse(b.endswith(b"whello"))
self.assertFalse(b.endswith(b"no"))
def test_find(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.find(b'ss'), 2)
self.assertEqual(b.find(b'ss', 3), 5)
self.assertEqual(b.find(b'ss', 1, 7), 2)
self.assertEqual(b.find(b'ss', 1, 3), -1)
self.assertEqual(b.find(b'w'), -1)
self.assertEqual(b.find(b'mississippian'), -1)
def test_rfind(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rfind(b'ss'), 5)
self.assertEqual(b.rfind(b'ss', 3), 5)
self.assertEqual(b.rfind(b'ss', 0, 6), 2)
self.assertEqual(b.rfind(b'w'), -1)
self.assertEqual(b.rfind(b'mississippian'), -1)
def test_index(self):
b = self.type2test(b'world')
self.assertEqual(b.index(b'w'), 0)
self.assertEqual(b.index(b'orl'), 1)
self.assertRaises(ValueError, b.index, b'worm')
self.assertRaises(ValueError, b.index, b'ldo')
def test_rindex(self):
# XXX could be more rigorous
b = self.type2test(b'world')
self.assertEqual(b.rindex(b'w'), 0)
self.assertEqual(b.rindex(b'orl'), 1)
self.assertRaises(ValueError, b.rindex, b'worm')
self.assertRaises(ValueError, b.rindex, b'ldo')
def test_replace(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.replace(b'i', b'a'), b'massassappa')
self.assertEqual(b.replace(b'ss', b'x'), b'mixixippi')
def test_split(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.split(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.split(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.split(b'w'), [b])
def test_split_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.split(), [b'arf', b'barf'])
self.assertEqual(b.split(None), [b'arf', b'barf'])
self.assertEqual(b.split(None, 2), [b'arf', b'barf'])
for b in (b'a\x1Cb', b'a\x1Db', b'a\x1Eb', b'a\x1Fb'):
b = self.type2test(b)
self.assertEqual(b.split(), [b])
self.assertEqual(self.type2test(b' a bb c ').split(None, 0), [b'a bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 1), [b'a', b'bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 2), [b'a', b'bb', b'c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 3), [b'a', b'bb', b'c'])
def test_split_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').split, ' ')
def test_rsplit(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rsplit(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.rsplit(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.rsplit(b'w'), [b])
def test_rsplit_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.rsplit(), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None, 2), [b'arf', b'barf'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 0), [b' a bb c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 1), [b' a bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 2), [b' a', b'bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 3), [b'a', b'bb', b'c'])
def test_rsplit_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').rsplit, ' ')
def test_rsplit_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.split(), [b'\x1c\x1d\x1e\x1f'])
self.assertEqual(b.rsplit(), [b'\x1c\x1d\x1e\x1f'])
def test_partition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.partition(b'ss'), (b'mi', b'ss', b'issippi'))
self.assertEqual(b.rpartition(b'w'), (b'', b'', b'mississippi'))
def test_rpartition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rpartition(b'ss'), (b'missi', b'ss', b'ippi'))
self.assertEqual(b.rpartition(b'i'), (b'mississipp', b'i', b''))
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0":
b = self.type2test(b)
ps = pickle.dumps(b, proto)
q = pickle.loads(ps)
self.assertEqual(b, q)
def test_strip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.strip(b'i'), b'mississipp')
self.assertEqual(b.strip(b'm'), b'ississippi')
self.assertEqual(b.strip(b'pi'), b'mississ')
self.assertEqual(b.strip(b'im'), b'ssissipp')
self.assertEqual(b.strip(b'pim'), b'ssiss')
self.assertEqual(b.strip(b), b'')
def test_lstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.lstrip(b'i'), b'mississippi')
self.assertEqual(b.lstrip(b'm'), b'ississippi')
self.assertEqual(b.lstrip(b'pi'), b'mississippi')
self.assertEqual(b.lstrip(b'im'), b'ssissippi')
self.assertEqual(b.lstrip(b'pim'), b'ssissippi')
def test_rstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rstrip(b'i'), b'mississipp')
self.assertEqual(b.rstrip(b'm'), b'mississippi')
self.assertEqual(b.rstrip(b'pi'), b'mississ')
self.assertEqual(b.rstrip(b'im'), b'mississipp')
self.assertEqual(b.rstrip(b'pim'), b'mississ')
def test_strip_whitespace(self):
b = self.type2test(b' \t\n\r\f\vabc \t\n\r\f\v')
self.assertEqual(b.strip(), b'abc')
self.assertEqual(b.lstrip(), b'abc \t\n\r\f\v')
self.assertEqual(b.rstrip(), b' \t\n\r\f\vabc')
def test_strip_bytearray(self):
self.assertEqual(self.type2test(b'abc').strip(memoryview(b'ac')), b'b')
self.assertEqual(self.type2test(b'abc').lstrip(memoryview(b'ac')), b'bc')
self.assertEqual(self.type2test(b'abc').rstrip(memoryview(b'ac')), b'ab')
def test_strip_string_error(self):
self.assertRaises(TypeError, self.type2test(b'abc').strip, 'b')
self.assertRaises(TypeError, self.type2test(b'abc').lstrip, 'b')
self.assertRaises(TypeError, self.type2test(b'abc').rstrip, 'b')
def test_ord(self):
b = self.type2test(b'\0A\x7f\x80\xff')
self.assertEqual([ord(b[i:i+1]) for i in range(len(b))],
[0, 65, 127, 128, 255])
def test_maketrans(self):
transtable = b'\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
self.assertEqual(self.type2test.maketrans(b'abc', b'xyz'), transtable)
self.assertRaises(ValueError, self.type2test.maketrans, b'abc', b'xyzq')
self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 'def')
class BytesTest(BaseBytesTest):
type2test = bytes
def test_buffer_is_readonly(self):
fd = os.dup(sys.stdin.fileno())
with open(fd, "rb", buffering=0) as f:
self.assertRaises(TypeError, f.readinto, b"")
def test_custom(self):
class A:
def __bytes__(self):
return b'abc'
self.assertEqual(bytes(A()), b'abc')
class A: pass
self.assertRaises(TypeError, bytes, A())
class A:
def __bytes__(self):
return None
self.assertRaises(TypeError, bytes, A())
class ByteArrayTest(BaseBytesTest):
type2test = bytearray
def test_nohash(self):
self.assertRaises(TypeError, hash, bytearray())
def test_bytearray_api(self):
short_sample = b"Hello world\n"
sample = short_sample + b"\0"*(20 - len(short_sample))
tfn = tempfile.mktemp()
try:
# Prepare
with open(tfn, "wb") as f:
f.write(short_sample)
# Test readinto
with open(tfn, "rb") as f:
b = bytearray(20)
n = f.readinto(b)
self.assertEqual(n, len(short_sample))
self.assertEqual(list(b), list(sample))
# Test writing in binary mode
with open(tfn, "wb") as f:
f.write(b)
with open(tfn, "rb") as f:
self.assertEqual(f.read(), sample)
# Text mode is ambiguous; don't test
finally:
try:
os.remove(tfn)
except os.error:
pass
def test_reverse(self):
b = bytearray(b'hello')
self.assertEqual(b.reverse(), None)
self.assertEqual(b, b'olleh')
b = bytearray(b'hello1') # test even number of items
b.reverse()
self.assertEqual(b, b'1olleh')
b = bytearray()
b.reverse()
self.assertFalse(b)
def test_regexps(self):
def by(s):
return bytearray(map(ord, s))
b = by("Hello, world")
self.assertEqual(re.findall(br"\w+", b), [by("Hello"), by("world")])
def test_setitem(self):
b = bytearray([1, 2, 3])
b[1] = 100
self.assertEqual(b, bytearray([1, 100, 3]))
b[-1] = 200
self.assertEqual(b, bytearray([1, 100, 200]))
b[0] = Indexable(10)
self.assertEqual(b, bytearray([10, 100, 200]))
try:
b[3] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[-10] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[0] = 256
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = Indexable(-1)
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = None
self.fail("Didn't raise TypeError")
except TypeError:
pass
def test_delitem(self):
b = bytearray(range(10))
del b[0]
self.assertEqual(b, bytearray(range(1, 10)))
del b[-1]
self.assertEqual(b, bytearray(range(1, 9)))
del b[4]
self.assertEqual(b, bytearray([1, 2, 3, 4, 6, 7, 8]))
def test_setslice(self):
b = bytearray(range(10))
self.assertEqual(list(b), list(range(10)))
b[0:5] = bytearray([1, 1, 1, 1, 1])
self.assertEqual(b, bytearray([1, 1, 1, 1, 1, 5, 6, 7, 8, 9]))
del b[0:-5]
self.assertEqual(b, bytearray([5, 6, 7, 8, 9]))
b[0:0] = bytearray([0, 1, 2, 3, 4])
self.assertEqual(b, bytearray(range(10)))
b[-7:-3] = bytearray([100, 101])
self.assertEqual(b, bytearray([0, 1, 2, 100, 101, 7, 8, 9]))
b[3:5] = [3, 4, 5, 6]
self.assertEqual(b, bytearray(range(10)))
b[3:0] = [42, 42, 42]
self.assertEqual(b, bytearray([0, 1, 2, 42, 42, 42, 3, 4, 5, 6, 7, 8, 9]))
def test_extended_set_del_slice(self):
indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip invalid step 0
for step in indices[1:]:
L = list(range(255))
b = bytearray(L)
# Make sure we have a slice of exactly the right length,
# but with different data.
data = L[start:stop:step]
data.reverse()
L[start:stop:step] = data
b[start:stop:step] = data
self.assertEquals(b, bytearray(L))
del L[start:stop:step]
del b[start:stop:step]
self.assertEquals(b, bytearray(L))
def test_setslice_trap(self):
# This test verifies that we correctly handle assigning self
# to a slice of self (the old Lambert Meertens trap).
b = bytearray(range(256))
b[8:] = b
self.assertEqual(b, bytearray(list(range(8)) + list(range(256))))
def test_iconcat(self):
b = bytearray(b"abc")
b1 = b
b += b"def"
self.assertEqual(b, b"abcdef")
self.assertEqual(b, b1)
self.failUnless(b is b1)
b += b"xyz"
self.assertEqual(b, b"abcdefxyz")
try:
b += ""
except TypeError:
pass
else:
self.fail("bytes += unicode didn't raise TypeError")
def test_irepeat(self):
b = bytearray(b"abc")
b1 = b
b *= 3
self.assertEqual(b, b"abcabcabc")
self.assertEqual(b, b1)
self.failUnless(b is b1)
def test_irepeat_1char(self):
b = bytearray(b"x")
b1 = b
b *= 100
self.assertEqual(b, b"x"*100)
self.assertEqual(b, b1)
self.failUnless(b is b1)
def test_alloc(self):
b = bytearray()
alloc = b.__alloc__()
self.assert_(alloc >= 0)
seq = [alloc]
for i in range(100):
b += b"x"
alloc = b.__alloc__()
self.assert_(alloc >= len(b))
if alloc not in seq:
seq.append(alloc)
def test_extend(self):
orig = b'hello'
a = bytearray(orig)
a.extend(a)
self.assertEqual(a, orig + orig)
self.assertEqual(a[5:], orig)
a = bytearray(b'')
# Test iterators that don't have a __length_hint__
a.extend(map(int, orig * 25))
a.extend(int(x) for x in orig * 25)
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
a.extend(iter(map(int, orig * 50)))
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
a.extend(list(map(int, orig * 50)))
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
self.assertRaises(ValueError, a.extend, [0, 1, 2, 256])
self.assertRaises(ValueError, a.extend, [0, 1, 2, -1])
self.assertEqual(len(a), 0)
a = bytearray(b'')
a.extend([Indexable(ord('a'))])
self.assertEqual(a, b'a')
def test_remove(self):
b = bytearray(b'hello')
b.remove(ord('l'))
self.assertEqual(b, b'helo')
b.remove(ord('l'))
self.assertEqual(b, b'heo')
self.assertRaises(ValueError, lambda: b.remove(ord('l')))
self.assertRaises(ValueError, lambda: b.remove(400))
self.assertRaises(TypeError, lambda: b.remove('e'))
# remove first and last
b.remove(ord('o'))
b.remove(ord('h'))
self.assertEqual(b, b'e')
self.assertRaises(TypeError, lambda: b.remove(b'e'))
b.remove(Indexable(ord('e')))
self.assertEqual(b, b'')
def test_pop(self):
b = bytearray(b'world')
self.assertEqual(b.pop(), ord('d'))
self.assertEqual(b.pop(0), ord('w'))
self.assertEqual(b.pop(-2), ord('r'))
self.assertRaises(IndexError, lambda: b.pop(10))
self.assertRaises(OverflowError, lambda: bytearray().pop())
def test_nosort(self):
self.assertRaises(AttributeError, lambda: bytearray().sort())
def test_append(self):
b = bytearray(b'hell')
b.append(ord('o'))
self.assertEqual(b, b'hello')
self.assertEqual(b.append(100), None)
b = bytearray()
b.append(ord('A'))
self.assertEqual(len(b), 1)
self.assertRaises(TypeError, lambda: b.append(b'o'))
b = bytearray()
b.append(Indexable(ord('A')))
self.assertEqual(b, b'A')
def test_insert(self):
b = bytearray(b'msssspp')
b.insert(1, ord('i'))
b.insert(4, ord('i'))
b.insert(-2, ord('i'))
b.insert(1000, ord('i'))
self.assertEqual(b, b'mississippi')
self.assertRaises(TypeError, lambda: b.insert(0, b'1'))
b = bytearray()
b.insert(0, Indexable(ord('A')))
self.assertEqual(b, b'A')
def test_copied(self):
# Issue 4348. Make sure that operations that don't mutate the array
# copy the bytes.
b = bytearray(b'abc')
self.assertFalse(b is b.replace(b'abc', b'cde', 0))
t = bytearray([i for i in range(256)])
x = bytearray(b'')
self.assertFalse(x is x.translate(t))
def test_partition_bytearray_doesnt_share_nullstring(self):
a, b, c = bytearray(b"x").partition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
self.assert_(b is not c)
b += b"!"
self.assertEqual(c, b"")
a, b, c = bytearray(b"x").partition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
# Same for rpartition
b, c, a = bytearray(b"x").rpartition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
self.assert_(b is not c)
b += b"!"
self.assertEqual(c, b"")
c, b, a = bytearray(b"x").rpartition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
def test_resize_forbidden(self):
# #4509: can't resize a bytearray when there are buffer exports, even
# if it wouldn't reallocate the underlying buffer.
# Furthermore, no destructive changes to the buffer may be applied
# before raising the error.
b = bytearray(range(10))
v = memoryview(b)
def resize(n):
b[1:-1] = range(n + 1, 2*n - 1)
resize(10)
orig = b[:]
self.assertRaises(BufferError, resize, 11)
self.assertEquals(b, orig)
self.assertRaises(BufferError, resize, 9)
self.assertEquals(b, orig)
self.assertRaises(BufferError, resize, 0)
self.assertEquals(b, orig)
# Other operations implying resize
self.assertRaises(BufferError, b.pop, 0)
self.assertEquals(b, orig)
self.assertRaises(BufferError, b.remove, b[1])
self.assertEquals(b, orig)
def delitem():
del b[1]
self.assertRaises(BufferError, delitem)
self.assertEquals(b, orig)
# deleting a non-contiguous slice
def delslice():
b[1:-1:2] = b""
self.assertRaises(BufferError, delslice)
self.assertEquals(b, orig)
class AssortedBytesTest(unittest.TestCase):
#
# Test various combinations of bytes and bytearray
#
def setUp(self):
self.warning_filters = warnings.filters[:]
def tearDown(self):
warnings.filters = self.warning_filters
def test_repr_str(self):
warnings.simplefilter('ignore', BytesWarning)
for f in str, repr:
self.assertEqual(f(bytearray()), "bytearray(b'')")
self.assertEqual(f(bytearray([0])), "bytearray(b'\\x00')")
self.assertEqual(f(bytearray([0, 1, 254, 255])),
"bytearray(b'\\x00\\x01\\xfe\\xff')")
self.assertEqual(f(b"abc"), "b'abc'")
self.assertEqual(f(b"'"), '''b"'"''') # '''
self.assertEqual(f(b"'\""), r"""b'\'"'""") # '
def test_compare_bytes_to_bytearray(self):
self.assertEqual(b"abc" == bytes(b"abc"), True)
self.assertEqual(b"ab" != bytes(b"abc"), True)
self.assertEqual(b"ab" <= bytes(b"abc"), True)
self.assertEqual(b"ab" < bytes(b"abc"), True)
self.assertEqual(b"abc" >= bytes(b"ab"), True)
self.assertEqual(b"abc" > bytes(b"ab"), True)
self.assertEqual(b"abc" != bytes(b"abc"), False)
self.assertEqual(b"ab" == bytes(b"abc"), False)
self.assertEqual(b"ab" > bytes(b"abc"), False)
self.assertEqual(b"ab" >= bytes(b"abc"), False)
self.assertEqual(b"abc" < bytes(b"ab"), False)
self.assertEqual(b"abc" <= bytes(b"ab"), False)
self.assertEqual(bytes(b"abc") == b"abc", True)
self.assertEqual(bytes(b"ab") != b"abc", True)
self.assertEqual(bytes(b"ab") <= b"abc", True)
self.assertEqual(bytes(b"ab") < b"abc", True)
self.assertEqual(bytes(b"abc") >= b"ab", True)
self.assertEqual(bytes(b"abc") > b"ab", True)
self.assertEqual(bytes(b"abc") != b"abc", False)
self.assertEqual(bytes(b"ab") == b"abc", False)
self.assertEqual(bytes(b"ab") > b"abc", False)
self.assertEqual(bytes(b"ab") >= b"abc", False)
self.assertEqual(bytes(b"abc") < b"ab", False)
self.assertEqual(bytes(b"abc") <= b"ab", False)
def test_doc(self):
self.failUnless(bytearray.__doc__ != None)
self.failUnless(bytearray.__doc__.startswith("bytearray("), bytearray.__doc__)
self.failUnless(bytes.__doc__ != None)
self.failUnless(bytes.__doc__.startswith("bytes("), bytes.__doc__)
def test_from_bytearray(self):
sample = bytes(b"Hello world\n\x80\x81\xfe\xff")
buf = memoryview(sample)
b = bytearray(buf)
self.assertEqual(b, bytearray(sample))
def test_to_str(self):
warnings.simplefilter('ignore', BytesWarning)
self.assertEqual(str(b''), "b''")
self.assertEqual(str(b'x'), "b'x'")
self.assertEqual(str(b'\x80'), "b'\\x80'")
self.assertEqual(str(bytearray(b'')), "bytearray(b'')")
self.assertEqual(str(bytearray(b'x')), "bytearray(b'x')")
self.assertEqual(str(bytearray(b'\x80')), "bytearray(b'\\x80')")
def test_literal(self):
tests = [
(b"Wonderful spam", "Wonderful spam"),
(br"Wonderful spam too", "Wonderful spam too"),
(b"\xaa\x00\000\200", "\xaa\x00\000\200"),
(br"\xaa\x00\000\200", r"\xaa\x00\000\200"),
]
for b, s in tests:
self.assertEqual(b, bytearray(s, 'latin-1'))
for c in range(128, 256):
self.assertRaises(SyntaxError, eval,
'b"%s"' % chr(c))
def test_translate(self):
b = b'hello'
ba = bytearray(b)
rosetta = bytearray(range(0, 256))
rosetta[ord('o')] = ord('e')
c = b.translate(rosetta, b'l')
self.assertEqual(b, b'hello')
self.assertEqual(c, b'hee')
c = ba.translate(rosetta, b'l')
self.assertEqual(ba, b'hello')
self.assertEqual(c, b'hee')
c = b.translate(None, b'e')
self.assertEqual(c, b'hllo')
c = ba.translate(None, b'e')
self.assertEqual(c, b'hllo')
self.assertRaises(TypeError, b.translate, None, None)
self.assertRaises(TypeError, ba.translate, None, None)
def test_split_bytearray(self):
self.assertEqual(b'a b'.split(memoryview(b' ')), [b'a', b'b'])
def test_rsplit_bytearray(self):
self.assertEqual(b'a b'.rsplit(memoryview(b' ')), [b'a', b'b'])
def test_return_self(self):
# bytearray.replace must always return a new bytearray
b = bytearray()
self.failIf(b.replace(b'', b'') is b)
def test_compare(self):
if sys.flags.bytes_warning:
warnings.simplefilter('error', BytesWarning)
self.assertRaises(BytesWarning, operator.eq, b'', '')
self.assertRaises(BytesWarning, operator.ne, b'', '')
self.assertRaises(BytesWarning, operator.eq, bytearray(b''), '')
self.assertRaises(BytesWarning, operator.ne, bytearray(b''), '')
else:
# raise test.support.TestSkipped("BytesWarning is needed for this test: use -bb option")
pass
# Optimizations:
# __iter__? (optimization)
# __reversed__? (optimization)
# XXX More string methods? (Those that don't use character properties)
# There are tests in string_tests.py that are more
# comprehensive for things like split, partition, etc.
# Unfortunately they are all bundled with tests that
# are not appropriate for bytes
# I've started porting some of those into bytearray_tests.py, we should port
# the rest that make sense (the code can be cleaned up to use modern
# unittest methods at the same time).
class BytearrayPEP3137Test(unittest.TestCase,
test.buffer_tests.MixinBytesBufferCommonTests):
def marshal(self, x):
return bytearray(x)
def test_returns_new_copy(self):
val = self.marshal(b'1234')
# On immutable types these MAY return a reference to themselves
# but on mutable types like bytearray they MUST return a new copy.
for methname in ('zfill', 'rjust', 'ljust', 'center'):
method = getattr(val, methname)
newval = method(3)
self.assertEqual(val, newval)
self.assertTrue(val is not newval,
methname+' returned self on a mutable object')
class FixedStringTest(test.string_tests.BaseTest):
def fixtype(self, obj):
if isinstance(obj, str):
return obj.encode("utf-8")
return super().fixtype(obj)
# Currently the bytes containment testing uses a single integer
# value. This may not be the final design, but until then the
    # str-style containment tests are not valid for bytes
def test_contains(self):
pass
def test_expandtabs(self):
pass
def test_upper(self):
pass
def test_lower(self):
pass
class ByteArrayAsStringTest(FixedStringTest):
type2test = bytearray
class BytesAsStringTest(FixedStringTest):
type2test = bytes
class ByteArraySubclass(bytearray):
pass
class ByteArraySubclassTest(unittest.TestCase):
def test_basic(self):
self.assert_(issubclass(ByteArraySubclass, bytearray))
self.assert_(isinstance(ByteArraySubclass(), bytearray))
a, b = b"abcd", b"efgh"
_a, _b = ByteArraySubclass(a), ByteArraySubclass(b)
# test comparison operators with subclass instances
self.assert_(_a == _a)
self.assert_(_a != _b)
self.assert_(_a < _b)
self.assert_(_a <= _b)
self.assert_(_b >= _a)
self.assert_(_b > _a)
self.assert_(_a is not a)
# test concat of subclass instances
self.assertEqual(a + b, _a + _b)
self.assertEqual(a + b, a + _b)
self.assertEqual(a + b, _a + b)
# test repeat
self.assert_(a*5 == _a*5)
def test_join(self):
# Make sure join returns a NEW object for single item sequences
# involving a subclass.
# Make sure that it is of the appropriate type.
s1 = ByteArraySubclass(b"abcd")
s2 = bytearray().join([s1])
self.assert_(s1 is not s2)
self.assert_(type(s2) is bytearray, type(s2))
# Test reverse, calling join on subclass
s3 = s1.join([b"abcd"])
self.assert_(type(s3) is bytearray)
def test_pickle(self):
a = ByteArraySubclass(b"abcd")
a.x = 10
a.y = ByteArraySubclass(b"efgh")
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
b = pickle.loads(pickle.dumps(a, proto))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(a.y, b.y)
self.assertEqual(type(a), type(b))
self.assertEqual(type(a.y), type(b.y))
def test_copy(self):
a = ByteArraySubclass(b"abcd")
a.x = 10
a.y = ByteArraySubclass(b"efgh")
for copy_method in (copy.copy, copy.deepcopy):
b = copy_method(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(a.y, b.y)
self.assertEqual(type(a), type(b))
self.assertEqual(type(a.y), type(b.y))
def test_init_override(self):
class subclass(bytearray):
def __init__(self, newarg=1, *args, **kwargs):
bytearray.__init__(self, *args, **kwargs)
x = subclass(4, source=b"abcd")
self.assertEqual(x, b"abcd")
x = subclass(newarg=4, source=b"abcd")
self.assertEqual(x, b"abcd")
def test_main():
test.support.run_unittest(BytesTest)
test.support.run_unittest(ByteArrayTest)
test.support.run_unittest(AssortedBytesTest)
test.support.run_unittest(BytesAsStringTest)
test.support.run_unittest(ByteArrayAsStringTest)
test.support.run_unittest(ByteArraySubclassTest)
test.support.run_unittest(BytearrayPEP3137Test)
if __name__ == "__main__":
test_main()
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.1/Lib/test/test_bytes.py | Python | mit | 41,679 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Management Solution
# Copyright (C) 2015-Today Litex Service Sp. z o.o.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_variant_creation | mwegrzynek/product_manual_variants | tests/__init__.py | Python | agpl-3.0 | 1,007 |
from django.conf.urls import url, include
from django.contrib.auth.decorators import login_required
from .views import *
urlpatterns = [
    # Listing
url(r'^evaluacion-lista/', login_required(evaluacion_list), name='listar_evaluacion'),
    # Step-by-step evaluation
url(r'^generar/step1/$', login_required(evaluacion_step1), name='evaluacion_step1'),
url(r'^generar/step1/(?P<evaluacion_id>\d+)/$', login_required(evaluacion_step1_back), name='evaluacion_step1_back'),
url(r'^generar/step2/(?P<evaluacion_id>\d+)/$', login_required(evaluacion_step2), name='evaluacion_step2'),
url(r'^generar/step3/(?P<evaluacion_id>\d+)/$', login_required(evaluacion_step3), name='evaluacion_step3'),
url(r'^generar/step4/(?P<evaluacion_id>\d+)/$', login_required(evaluacion_step4), name='evaluacion_step4'),
    # Automatic (quick) evaluation
url(r'^automatica/step1/$', login_required(evaluacion_rapida_step1), name='evaluacion_rapida_step1'),
url(r'^automatica/step2/(?P<evaluacion_id>\d+)/$', login_required(evaluacion_rapida_step2), name='evaluacion_rapida_step2'),
    # Evaluation detail
url(r'^detalle/(?P<evaluacion_id>\d+)/$', login_required(evaluacion_detail), name='evaluacion_detail'),
url(r'^descartar/(?P<evaluacion_id>\d+)/$', login_required(descartar_evaluacion), name='evaluacion_descartar'),
    # Student grades
url(r'^estudiantes-notas/(?P<evaluacion_id>\d+)/$', login_required(estudiante_calificacion), name='estudiante_calificacion'),
    # Excel
url(r'^calificacion/(?P<evaluacion_id>\d+)/$', login_required(calificaciones), name='calificacion'),
url(r'^agregar/calificaciones/(?P<evaluacion_id>\d+)/$', login_required(get_calificaciones), name='calificacionExcel'),
    # PDF
url(r'^evaluacion-pdf/(?P<evaluacion_id>\d+)/$', login_required(evaluacion_pdf), name='evaluacionPDF'),
url(r'^solucion-pdf/(?P<evaluacion_id>\d+)/$', login_required(solucion_pdf), name='solucionPDF'),
# AJAX
url(r'^ContenidoFiltroAjax/$', login_required(ContenidoFiltroAjax.as_view()), name='ContenidoFiltroAjax'),
url(r'^PreguntaObjAjax/$', login_required(PreguntaObjAjax.as_view()), name='PreguntaObjAjax'),
url(r'^filtro/palabras/$', login_required(busqueda), name='busqueda_palabra'),
url(r'^PreguntaBusquedaAjax/$', login_required(PreguntaBusquedaAjax.as_view()), name='PreguntaBusquedaAjax'),
] | Mansilla1/Sistema-SEC | apps/evaluacion/urls.py | Python | apache-2.0 | 2,293 |
import requests
url = "http://186.249.34.34/api/consulta"
payload = "{\n \"codigoProduto\": \"11\",\n \"tipoConsumidor\": \"F\",\n \"documentoConsumidor\": \"42924057191\",\n \"telefoneConsultar\": {\n \"ddd\": \"16\",\n \"numero\": \"999999999\"\n },\n \"cepConsumidor\": \"14401-360\",\n \"cepOrigem\": \"14401-360\",\n \"codigoEstacaoConsultante\": \"123\"\n}"
headers = {
'content-type': "application/json",
'authorization': "Bearer eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiJjb25zdWx0YUBlbnRlcnBsdWcuY29tLmJyIiwiZXhwIjoxNDcwODU3ODQwfQ.g7E2ABhES7yQXoIfpBTv30yDjal07EEI9i9Tu-d1Jjksxpv1UseaZpbjfqeNKF3pi_-xeX5ihN2EITPg184oaA",
'cache-control': "no-cache",
}
response = requests.request("POST", url, data=payload, headers=headers)
print(response.text)
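# Note: the host, Bearer token and consumer document above are sample values from
# the original example and are not expected to work against a live endpoint. An
# equivalent, arguably cleaner way to send the same request is to build the payload
# as a dict and let requests serialize it, e.g.:
#
#     payload = {
#         "codigoProduto": "11",
#         "tipoConsumidor": "F",
#         "documentoConsumidor": "42924057191",
#         "telefoneConsultar": {"ddd": "16", "numero": "999999999"},
#         "cepConsumidor": "14401-360",
#         "cepOrigem": "14401-360",
#         "codigoEstacaoConsultante": "123",
#     }
#     response = requests.post(url, json=payload, headers=headers)
#
# requests then sets the JSON Content-Type header automatically.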
| enterplug/consulta-enterplug | consulta-11/consulta-11-python-example/consulta-11-python-requests.py | Python | mit | 792 |
# --------------------------------------------------------
# Fully Convolutional Instance-aware Semantic Segmentation
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by Haozhi Qi
# --------------------------------------------------------
# Based on:
# MX-RCNN
# Copyright (c) 2016 by Contributors
# Licence under The Apache 2.0 License
# https://github.com/ijkguo/mx-rcnn/
# --------------------------------------------------------
import logging
import numpy as np
from mxnet import context as ctx
from mxnet import ndarray as nd
from mxnet.io import DataDesc
from mxnet.executor_manager import _split_input_slice
def _load_general(data, targets, major_axis):
"""Load a list of arrays into a list of arrays specified by slices"""
# data = data[0]
for d_src, d_targets in zip(data, targets):
if isinstance(d_targets, nd.NDArray):
d_src.copyto(d_targets)
elif isinstance(d_src, (list, tuple)):
for src, dst in zip(d_src, d_targets):
src.copyto(dst)
else:
raise NotImplementedError
def _load_data(batch, targets, major_axis):
"""Load data into sliced arrays"""
_load_general(batch.data, targets, major_axis)
def _load_label(batch, targets, major_axis):
"""Load label into sliced arrays"""
_load_general(batch.label, targets, major_axis)
def _merge_multi_context(outputs, major_axis):
"""Merge outputs that lives on multiple context into one, so that they look
like living on one context.
"""
rets = []
for tensors, axis in zip(outputs, major_axis):
if axis >= 0:
rets.append(nd.concatenate(tensors, axis=axis, always_copy=False))
else:
            # a negative axis means there is no batch_size axis, and all the
# results should be the same on each device. We simply take the
# first one, without checking they are actually the same
rets.append(tensors[0])
return rets
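# Illustrative example (shapes assumed, not part of the original module): with two
# devices each producing a (2, 4) output and an output layout of [0],
#
#     merged = _merge_multi_context([[out_dev1, out_dev2]], [0])
#
# returns a list containing one (4, 4) NDArray concatenated along the batch axis,
# while an axis of -1 simply reuses the copy from the first device.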
class DataParallelExecutorGroup(object):
"""DataParallelExecutorGroup is a group of executors that lives on a group of devices.
This is a helper class used to implement data parallelization. Each mini-batch will
be split and run on the devices.
Parameters
----------
symbol : Symbol
The common symbolic computation graph for all executors.
contexts : list
A list of contexts.
workload : list
If not `None`, could be a list of numbers that specify the workload to be assigned
to different context. Larger number indicate heavier workload.
data_shapes : list
Should be a list of (name, shape) tuples, for the shapes of data. Note the order is
important and should be the same as the order that the `DataIter` provide the data.
label_shapes : list
Should be a list of (name, shape) tuples, for the shapes of label. Note the order is
important and should be the same as the order that the `DataIter` provide the label.
param_names : list
A list of strings, indicating the names of parameters (e.g. weights, filters, etc.)
in the computation graph.
for_training : bool
        Indicate whether the executors should be bound for training. When not doing training,
the memory for gradients will not be allocated.
inputs_need_grad : bool
Indicate whether the gradients for the input data should be computed. This is currently
not used. It will be useful for implementing composition of modules.
shared_group : DataParallelExecutorGroup
        Default is `None`. This is used in bucketing. When not `None`, it should be an executor
        group corresponding to a different bucket. In other words, it will correspond to a different
        symbol but with the same set of parameters (e.g. unrolled RNNs with different lengths).
        In this case, much of the memory will be shared.
logger : Logger
Default is `logging`.
fixed_param_names: list of str
Indicate parameters to be fixed during training. Parameters in this list will not allocate
space for gradient, nor do gradient calculation.
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
"""
def __init__(self, symbol, contexts, workload, data_shapes, label_shapes, param_names,
for_training, inputs_need_grad, shared_group=None, logger=logging,
fixed_param_names=None, grad_req='write', state_names=None):
self.param_names = param_names
self.arg_names = symbol.list_arguments()
self.aux_names = symbol.list_auxiliary_states()
self.symbol = symbol
self.contexts = contexts
self.workload = workload
self.for_training = for_training
self.inputs_need_grad = inputs_need_grad
self.logger = logger
# self._total_exec_bytes = 0
self.fixed_param_names = fixed_param_names
if self.fixed_param_names is None:
self.fixed_param_names = []
self.state_names = state_names
if self.state_names is None:
self.state_names = []
if not for_training:
grad_req = 'null'
data_names = [x.name for x in data_shapes[0]]
if isinstance(grad_req, str):
self.grad_req = {}
for k in self.arg_names:
if k in self.param_names:
self.grad_req[k] = 'null' if k in self.fixed_param_names else grad_req
elif k in data_names:
self.grad_req[k] = grad_req if self.inputs_need_grad else 'null'
else:
self.grad_req[k] = 'null'
elif isinstance(grad_req, (list, tuple)):
assert len(grad_req) == len(self.arg_names)
self.grad_req = dict(zip(self.arg_names, grad_req))
elif isinstance(grad_req, dict):
self.grad_req = {}
for k in self.arg_names:
if k in self.param_names:
self.grad_req[k] = 'null' if k in self.fixed_param_names else 'write'
elif k in data_names:
self.grad_req[k] = 'write' if self.inputs_need_grad else 'null'
else:
self.grad_req[k] = 'null'
self.grad_req.update(grad_req)
else:
raise ValueError("grad_req must be one of str, list, tuple, or dict.")
if shared_group is not None:
self.shared_data_arrays = shared_group.shared_data_arrays
else:
self.shared_data_arrays = [{} for _ in contexts]
# initialize some instance variables
self.batch_size = len(data_shapes)
self.slices = None
self.execs = []
self._default_execs = None
self.data_arrays = None
self.label_arrays = None
self.param_arrays = None
self.state_arrays = None
self.grad_arrays = None
self.aux_arrays = None
self.input_grad_arrays = None
self.data_shapes = None
self.label_shapes = None
self.data_layouts = None
self.label_layouts = None
self.output_layouts = [DataDesc.get_batch_axis(self.symbol[name].attr('__layout__'))
for name in self.symbol.list_outputs()]
self.bind_exec(data_shapes, label_shapes, shared_group)
def decide_slices(self, data_shapes):
"""Decide the slices for each context according to the workload.
Parameters
----------
data_shapes : list
list of (name, shape) specifying the shapes for the input data or label.
"""
assert len(data_shapes) > 0
major_axis = [DataDesc.get_batch_axis(x.layout) for x in data_shapes]
for (name, shape), axis in zip(data_shapes, major_axis):
if axis == -1:
continue
batch_size = shape[axis]
if self.batch_size is not None:
assert batch_size == self.batch_size, ("all data must have the same batch size: "
+ ("batch_size = %d, but " % self.batch_size)
+ ("%s has shape %s" % (name, shape)))
else:
self.batch_size = batch_size
self.slices = _split_input_slice(self.batch_size, self.workload)
return major_axis
def _collect_arrays(self):
"""Collect internal arrays from executors."""
# convenient data structures
# self.data_arrays = [[(self.slices[i], e.arg_dict[name]) for i, e in enumerate(self.execs)]
# for name, _ in self.data_shapes]
self.data_arrays = [[e.arg_dict[name] for name, _ in self.data_shapes[0]] for e in self.execs]
self.state_arrays = [[e.arg_dict[name] for e in self.execs]
for name in self.state_names]
if self.label_shapes is not None:
# self.label_arrays = [[(self.slices[i], e.arg_dict[name])
# for i, e in enumerate(self.execs)]
# for name, _ in self.label_shapes]
self.label_arrays = [[e.arg_dict[name] for name, _ in self.label_shapes[0]] for e in self.execs]
else:
self.label_arrays = None
self.param_arrays = [[exec_.arg_arrays[i] for exec_ in self.execs]
for i, name in enumerate(self.arg_names)
if name in self.param_names]
if self.for_training:
self.grad_arrays = [[exec_.grad_arrays[i] for exec_ in self.execs]
for i, name in enumerate(self.arg_names)
if name in self.param_names]
else:
self.grad_arrays = None
data_names = [x[0] for x in self.data_shapes]
if self.inputs_need_grad:
self.input_grad_arrays = [[exec_.grad_arrays[i] for exec_ in self.execs]
for i, name in enumerate(self.arg_names)
if name in data_names]
else:
self.input_grad_arrays = None
self.aux_arrays = [[exec_.aux_arrays[i] for exec_ in self.execs]
for i in range(len(self.aux_names))]
def bind_exec(self, data_shapes, label_shapes, shared_group=None, reshape=False):
"""Bind executors on their respective devices.
Parameters
----------
data_shapes : list
label_shapes : list
shared_group : DataParallelExecutorGroup
reshape : bool
"""
assert reshape or not self.execs
# self.batch_size = None
# calculate workload and bind executors
# self.data_layouts = self.decide_slices(data_shapes)
# if label_shapes is not None:
# # call it to make sure labels has the same batch size as data
# self.label_layouts = self.decide_slices(label_shapes)
for i in range(len(self.contexts)):
# data_shapes_i = self._sliced_shape(data_shapes, i, self.data_layouts)
data_shapes_i = data_shapes[i]
if label_shapes is not None:
label_shapes_i = label_shapes[i]
# label_shapes_i = self._sliced_shape(label_shapes, i, self.label_layouts)
else:
label_shapes_i = []
if reshape:
self.execs[i] = self._default_execs[i].reshape(
allow_up_sizing=True, **dict(data_shapes_i + label_shapes_i))
else:
self.execs.append(self._bind_ith_exec(i, data_shapes_i, label_shapes_i,
shared_group))
self.data_shapes = data_shapes
self.label_shapes = label_shapes
self._collect_arrays()
def reshape(self, data_shapes, label_shapes):
"""Reshape executors.
Parameters
----------
data_shapes : list
label_shapes : list
"""
# if data_shapes == self.data_shapes and label_shapes == self.label_shapes:
# return
# if self._default_execs is None:
# self._default_execs = [i for i in self.execs]
# self.bind_exec(data_shapes, label_shapes, reshape=True)
if self._default_execs is None:
self._default_execs = [i for i in self.execs]
for i in range(len(self.contexts)):
self.execs[i] = self._default_execs[i].reshape(
allow_up_sizing=True, **dict(data_shapes[i] + (label_shapes[i] if label_shapes is not None else []))
)
self.data_shapes = data_shapes
self.label_shapes = label_shapes
self._collect_arrays()
def set_params(self, arg_params, aux_params):
"""Assign, i.e. copy parameters to all the executors.
Parameters
----------
arg_params : dict
A dictionary of name to `NDArray` parameter mapping.
aux_params : dict
A dictionary of name to `NDArray` auxiliary variable mapping.
"""
for exec_ in self.execs:
exec_.copy_params_from(arg_params, aux_params)
def get_params(self, arg_params, aux_params):
""" Copy data from each executor to `arg_params` and `aux_params`.
Parameters
----------
arg_params : list of NDArray
target parameter arrays
aux_params : list of NDArray
target aux arrays
Notes
-----
- This function will inplace update the NDArrays in arg_params and aux_params.
"""
for name, block in zip(self.param_names, self.param_arrays):
weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
weight.astype(arg_params[name].dtype).copyto(arg_params[name])
for name, block in zip(self.aux_names, self.aux_arrays):
weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
weight.astype(aux_params[name].dtype).copyto(aux_params[name])
def forward(self, data_batch, is_train=None):
"""Split `data_batch` according to workload and run forward on each devices.
Parameters
----------
data_batch : DataBatch
Or could be any object implementing similar interface.
is_train : bool
The hint for the backend, indicating whether we are during training phase.
Default is `None`, then the value `self.for_training` will be used.
Returns
-------
"""
_load_data(data_batch, self.data_arrays, self.data_layouts)
if is_train is None:
is_train = self.for_training
if self.label_arrays is not None:
assert not is_train or data_batch.label
if data_batch.label:
_load_label(data_batch, self.label_arrays, self.label_layouts)
for exec_ in self.execs:
exec_.forward(is_train=is_train)
# def get_output_shapes(self):
# """Get the shapes of the outputs."""
# outputs = self.execs[0].outputs
# shapes = [out.shape for out in outputs]
#
# concat_shapes = []
# for key, the_shape, axis in zip(self.symbol.list_outputs(), shapes, self.output_layouts):
# the_shape = list(the_shape)
# if axis >= 0:
# the_shape[axis] = self.batch_size
# concat_shapes.append((key, tuple(the_shape)))
# return concat_shapes
def get_outputs(self, merge_multi_context=True):
"""Get outputs of the previous forward computation.
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the outputs
            will be collected from multiple devices. A `True` value indicates that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
If `merge_multi_context` is `True`, it is like `[out1, out2]`. Otherwise, it
is like `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`. All the output
elements are `NDArray`.
"""
outputs = [[exec_.outputs[i] for exec_ in self.execs]
for i in range(len(self.execs[0].outputs))]
if merge_multi_context:
outputs = _merge_multi_context(outputs, self.output_layouts)
return outputs
def get_states(self, merge_multi_context=True):
"""Get states from all devices
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the states
            will be collected from multiple devices. A `True` value indicates that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
If `merge_multi_context` is `True`, it is like `[out1, out2]`. Otherwise, it
is like `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`. All the output
elements are `NDArray`.
"""
assert not merge_multi_context, \
"merge_multi_context=True is not supported for get_states yet."
return self.state_arrays
def set_states(self, states=None, value=None):
"""Set value for states. Only one of states & value can be specified.
Parameters
----------
states : list of list of NDArrays
source states arrays formatted like [[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]].
value : number
a single scalar value for all state arrays.
"""
if states is not None:
assert value is None, "Only one of states & value can be specified."
_load_general(states, self.state_arrays, (0,)*len(states))
else:
assert value is not None, "At least one of states & value must be specified."
assert states is None, "Only one of states & value can be specified."
for d_dst in self.state_arrays:
for dst in d_dst:
dst[:] = value
def get_input_grads(self, merge_multi_context=True):
"""Get the gradients with respect to the inputs of the module.
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the outputs
            will be collected from multiple devices. A `True` value indicates that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
If `merge_multi_context` is `True`, it is like `[grad1, grad2]`. Otherwise, it
is like `[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]`. All the output
elements are `NDArray`.
"""
assert self.inputs_need_grad
if merge_multi_context:
return _merge_multi_context(self.input_grad_arrays, self.data_layouts)
return self.input_grad_arrays
def backward(self, out_grads=None):
"""Run backward on all devices. A backward should be called after
a call to the forward function. Backward cannot be called unless
`self.for_training` is `True`.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
"""
assert self.for_training, 're-bind with for_training=True to run backward'
if out_grads is None:
out_grads = []
# for i, (exec_, islice) in enumerate(zip(self.execs, self.slices)):
for i, exec_ in enumerate(self.execs):
out_grads_slice = []
# for grad, axis in zip(out_grads, self.output_layouts):
# if axis >= 0:
# # pylint: disable=no-member
# og_my_slice = nd.slice_axis(grad, axis=axis, begin=islice.start,
# end=islice.stop)
# # pylint: enable=no-member
# out_grads_slice.append(og_my_slice.as_in_context(self.contexts[i]))
# else:
# out_grads_slice.append(grad.copyto(self.contexts[i]))
#
exec_.backward(out_grads=out_grads_slice)
def update_metric(self, eval_metric, labels):
"""Accumulate the performance according to `eval_metric` on all devices.
Parameters
----------
eval_metric : EvalMetric
The metric used for evaluation.
labels : list of NDArray
Typically comes from `label` of a `DataBatch`.
"""
for texec, labels in zip(self.execs, labels):
# labels_slice = []
# for label, axis in zip(labels, self.label_layouts):
# if axis == 0:
# # slicing NDArray along axis 0 can avoid copying
# labels_slice.append(label[islice])
# elif axis > 0:
# # pylint: disable=no-member
# label_my_slice = nd.slice_axis(label, axis=axis, begin=islice.start,
# end=islice.stop).as_in_context(label.context)
# # pylint: enable=no-member
# labels_slice.append(label_my_slice)
# else:
# labels_slice.append(label)
eval_metric.update(labels, texec.outputs)
def _bind_ith_exec(self, i, data_shapes, label_shapes, shared_group):
"""Internal utility function to bind the i-th executor.
"""
shared_exec = None if shared_group is None else shared_group.execs[i]
context = self.contexts[i]
shared_data_arrays = self.shared_data_arrays[i]
input_shapes = dict(data_shapes)
if label_shapes is not None:
input_shapes.update(dict(label_shapes))
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None, "shape inference failed"
input_types = {x.name: x.dtype for x in data_shapes}
if label_shapes is not None:
input_types.update({x.name: x.dtype for x in label_shapes})
arg_types, _, aux_types = self.symbol.infer_type(**input_types)
assert arg_types is not None, "type inference failed"
arg_arrays = []
grad_arrays = {} if self.for_training else None
def _get_or_reshape(name, shared_data_arrays, arg_shape, arg_type, context, logger):
"""Internal helper to get a memory block or re-use by re-shaping"""
if name in shared_data_arrays:
arg_arr = shared_data_arrays[name]
if np.prod(arg_arr.shape) >= np.prod(arg_shape):
# nice, we can directly re-use this data blob
assert arg_arr.dtype == arg_type
arg_arr = arg_arr.reshape(arg_shape)
else:
logger.warning(('bucketing: data "%s" has a shape %s' % (name, arg_shape)) +
(', which is larger than already allocated ') +
('shape %s' % (arg_arr.shape,)) +
('. Need to re-allocate. Consider putting ') +
('default_bucket_key to') +
(' be the bucket taking the largest input for better ') +
('memory sharing.'))
arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)
# replace existing shared array because the new one is bigger
shared_data_arrays[name] = arg_arr
else:
arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)
shared_data_arrays[name] = arg_arr
return arg_arr
# create or borrow arguments and gradients
for j in range(len(self.arg_names)):
name = self.arg_names[j]
if name in self.param_names: # model parameters
if shared_exec is None:
arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
if self.grad_req[name] != 'null':
grad_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
grad_arrays[name] = grad_arr
else:
arg_arr = shared_exec.arg_dict[name]
assert arg_arr.shape == arg_shapes[j]
assert arg_arr.dtype == arg_types[j]
if self.grad_req[name] != 'null':
grad_arrays[name] = shared_exec.grad_dict[name]
else: # data, label, or states
arg_arr = _get_or_reshape(name, shared_data_arrays, arg_shapes[j], arg_types[j],
context, self.logger)
# data might also need grad if inputs_need_grad is True
if self.grad_req[name] != 'null':
grad_arrays[name] = _get_or_reshape('grad of ' + name, shared_data_arrays,
arg_shapes[j], arg_types[j], context,
self.logger)
arg_arrays.append(arg_arr)
# create or borrow aux variables
if shared_exec is None:
aux_arrays = [nd.zeros(s, context, dtype=t) for s, t in zip(aux_shapes, aux_types)]
else:
for j, arr in enumerate(shared_exec.aux_arrays):
assert aux_shapes[j] == arr.shape
assert aux_types[j] == arr.dtype
aux_arrays = shared_exec.aux_arrays[:]
executor = self.symbol.bind(ctx=context, args=arg_arrays,
args_grad=grad_arrays, aux_states=aux_arrays,
grad_req=self.grad_req, shared_exec=shared_exec)
# Get the total bytes allocated for this executor
# self._total_exec_bytes += int(executor.debug_str().split('\n')[-3].split()[1])
return executor
def _sliced_shape(self, shapes, i, major_axis):
"""Get the sliced shapes for the i-th executor.
Parameters
----------
shapes : list of (str, tuple)
The original (name, shape) pairs.
i : int
Which executor we are dealing with.
"""
sliced_shapes = []
for desc, axis in zip(shapes, major_axis):
shape = list(desc.shape)
if axis >= 0:
shape[axis] = self.slices[i].stop - self.slices[i].start
sliced_shapes.append(DataDesc(desc.name, tuple(shape), desc.dtype, desc.layout))
return sliced_shapes
def install_monitor(self, mon):
"""Install monitor on all executors"""
for exe in self.execs:
mon.install(exe)
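# Rough usage sketch (names and shapes assumed, not taken from this repository):
# the group is normally driven by a Module-like wrapper along these lines.
#
#     group = DataParallelExecutorGroup(symbol, contexts=[mx.gpu(0), mx.gpu(1)],
#                                       workload=[1, 1],
#                                       data_shapes=per_device_data_shapes,
#                                       label_shapes=per_device_label_shapes,
#                                       param_names=param_names,
#                                       for_training=True, inputs_need_grad=False)
#     group.set_params(arg_params, aux_params)
#     group.forward(data_batch, is_train=True)
#     group.backward()
#     group.update_metric(metric, data_batch.label)
#
# Unlike upstream MXNet, this modified version expects data_shapes/label_shapes to
# already be split per device, and forward() loads each device's shard directly.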
| msracver/FCIS | fcis/core/DataParallelExecutorGroup.py | Python | mit | 27,490 |
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from cm_api.endpoints.clusters import ApiCluster
from cm_api.endpoints.services import *
from cm_api.endpoints.types import *
from cm_api_tests import utils
class TestService(unittest.TestCase):
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
self.resource = utils.MockResource(self)
self.service = ApiService(self.resource, 'hdfs1', 'HDFS')
self.service.__dict__['clusterRef'] = \
ApiClusterRef(self.resource, clusterName='cluster1')
def test_create_hdfs_tmp(self):
self.resource.expect("POST", "/clusters/cluster1/services/hdfs1/commands/hdfsCreateTmpDir",
retdata=ApiCommand(self.resource).to_json_dict())
self.service.create_hdfs_tmp()
def test_role_cmd(self):
args = ['role1', 'role2']
expected = ApiBulkCommandList([ApiCommand(self.resource)])
expected.__dict__['errors'] = [ 'err1', 'err2' ]
self.resource.expect("POST", "/clusters/cluster1/services/hdfs1/roleCommands/start",
data=ApiList(args),
retdata=expected.to_json_dict(True))
ret = self.service.start_roles(*args)
self.assertEqual(1, len(ret))
self.assertEqual(expected.errors, ret.errors)
| kostin88/cm_api | python/src/cm_api_tests/test_services.py | Python | apache-2.0 | 1,965 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file tests that we can learn and predict the particularly vexing case of a
single constant signal!
"""
import numpy as np
import unittest2 as unittest
from nupic.algorithms import fdrutilities as fdrutils
from nupic.algorithms.backtracking_tm import BacktrackingTM
from nupic.algorithms.backtracking_tm_cpp import BacktrackingTMCPP
_SEED = 42
VERBOSITY = 1
np.random.seed(_SEED)
def _printOneTrainingVector(x):
"Print a single vector succinctly."
print ''.join('1' if k != 0 else '.' for k in x)
def _getSimplePatterns(numOnes, numPatterns):
"""Very simple patterns. Each pattern has numOnes consecutive
bits on. There are numPatterns*numOnes bits in the vector. These patterns
are used as elements of sequences when building up a training set."""
numCols = numOnes * numPatterns
p = []
for i in xrange(numPatterns):
x = np.zeros(numCols, dtype='float32')
x[i*numOnes:(i + 1)*numOnes] = 1
p.append(x)
return p
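# For example (illustrative only), _getSimplePatterns(3, 4) produces four 12-bit
# vectors that _printOneTrainingVector would render as:
#   111.........
#   ...111......
#   ......111...
#   .........111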
def _createTms(numCols):
"""Create two instances of temporal poolers (backtracking_tm.py
and backtracking_tm_cpp.py) with identical parameter settings."""
# Keep these fixed:
minThreshold = 4
activationThreshold = 5
newSynapseCount = 7
initialPerm = 0.3
connectedPerm = 0.5
permanenceInc = 0.1
permanenceDec = 0.05
globalDecay = 0
cellsPerColumn = 1
cppTm = BacktrackingTMCPP(numberOfCols=numCols,
cellsPerColumn=cellsPerColumn,
initialPerm=initialPerm,
connectedPerm=connectedPerm,
minThreshold=minThreshold,
newSynapseCount=newSynapseCount,
permanenceInc=permanenceInc,
permanenceDec=permanenceDec,
activationThreshold=activationThreshold,
globalDecay=globalDecay, burnIn=1,
seed=_SEED, verbosity=VERBOSITY,
checkSynapseConsistency=True,
pamLength=1000)
# Ensure we are copying over learning states for TMDiff
cppTm.retrieveLearningStates = True
pyTm = BacktrackingTM(numberOfCols=numCols,
cellsPerColumn=cellsPerColumn,
initialPerm=initialPerm,
connectedPerm=connectedPerm,
minThreshold=minThreshold,
newSynapseCount=newSynapseCount,
permanenceInc=permanenceInc,
permanenceDec=permanenceDec,
activationThreshold=activationThreshold,
globalDecay=globalDecay, burnIn=1,
seed=_SEED, verbosity=VERBOSITY,
pamLength=1000)
return cppTm, pyTm
class TMConstantTest(unittest.TestCase):
def setUp(self):
self.cppTm, self.pyTm = _createTms(100)
def _basicTest(self, tm=None):
"""Test creation, pickling, and basic run of learning and inference."""
trainingSet = _getSimplePatterns(10, 10)
# Learn on several constant sequences, with a reset in between
for _ in range(2):
for seq in trainingSet[0:5]:
for _ in range(10):
tm.learn(seq)
tm.reset()
print "Learning completed"
# Infer
print "Running inference"
tm.collectStats = True
for seq in trainingSet[0:5]:
tm.reset()
tm.resetStats()
for _ in range(10):
tm.infer(seq)
if VERBOSITY > 1 :
print
_printOneTrainingVector(seq)
tm.printStates(False, False)
print
print
if VERBOSITY > 1:
print tm.getStats()
# Ensure our predictions are accurate for each sequence
self.assertGreater(tm.getStats()['predictionScoreAvg2'], 0.8)
print ("tm.getStats()['predictionScoreAvg2'] = ",
tm.getStats()['predictionScoreAvg2'])
print "TMConstant basicTest ok"
def testCppTmBasic(self):
self._basicTest(self.cppTm)
def testPyTmBasic(self):
self._basicTest(self.pyTm)
def testIdenticalTms(self):
self.assertTrue(fdrutils.tmDiff2(self.cppTm, self.pyTm))
if __name__=="__main__":
unittest.main()
| ywcui1990/nupic | tests/unit/nupic/algorithms/backtracking_tm_constant_test.py | Python | agpl-3.0 | 5,242 |
# importing libraries:
from maya import cmds
from . import dpBaseControlClass
from importlib import reload
reload(dpBaseControlClass)
# global variables to this module:
CLASS_NAME = "LocatorFlat"
TITLE = "m133_locatorFlat"
DESCRIPTION = "m099_cvControlDesc"
ICON = "/Icons/dp_locatorFlat.png"
dpLocatorFlatVersion = 1.2
class LocatorFlat(dpBaseControlClass.ControlStartClass):
def __init__(self, *args, **kwargs):
#Add the needed parameter to the kwargs dict to be able to maintain the parameter order
kwargs["CLASS_NAME"] = CLASS_NAME
kwargs["TITLE"] = TITLE
kwargs["DESCRIPTION"] = DESCRIPTION
kwargs["ICON"] = ICON
dpBaseControlClass.ControlStartClass.__init__(self, *args, **kwargs)
def cvMain(self, useUI, cvID=None, cvName=CLASS_NAME+'_Ctrl', cvSize=1.0, cvDegree=1, cvDirection='+Y', cvRot=(0, 0, 0), cvAction=1, dpGuide=False, *args):
""" The principal method to call all other methods in order to build the cvControl curve.
Return the result: new control curve or the destination list depending of action.
"""
result = self.cvCreate(useUI, cvID, cvName, cvSize, cvDegree, cvDirection, cvRot, cvAction, dpGuide)
return result
def getLinearPoints(self, *args):
""" Get a list of linear points for this kind of control curve.
Set class object variables cvPointList, cvKnotList and cvPeriodic.
"""
r = self.cvSize
self.cvPointList = [(r, 0, 0), (-r, 0, 0), (0, 0, 0), (0, r, 0), (0, -r, 0)]
self.cvKnotList = [1, 2, 3, 4, 5]
self.cvPeriodic = False #open
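        # The five points above draw a flat "plus" in the XY plane: one stroke
        # along X from (+r, 0, 0) to (-r, 0, 0) and one along Y from (0, +r, 0)
        # to (0, -r, 0), with the open curve retracing through the origin to
        # connect them -- the flat locator shape referenced by ICON.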
def getCubicPoints(self, *args):
""" Get a list of cubic points for this kind of control curve.
Set class object variables cvPointList, cvKnotList and cvPeriodic.
"""
r = self.cvSize
self.cvPointList = [(r, 0, 0), (r, 0, 0), (0.5*r, 0, 0), (0, 0, 0), (0, 0.5*r, 0),
(0, r, 0), (0, r, 0), (0, 0.5*r, 0), (0, 0, 0), (-0.5*r, 0, 0),
(-r, 0, 0), (-r, 0, 0), (-0.5*r, 0, 0), (0, 0, 0), (0, -0.5*r, 0),
(0, -r, 0), (0, -r, 0), (0, -0.5*r, 0), (0, 0, 0), (0.5*r, 0, 0),
(r, 0, 0), (r, 0, 0)]
self.cvKnotList = [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
self.cvPeriodic = False #open | nilouco/dpAutoRigSystem | dpAutoRigSystem/Controls/dpLocatorFlat.py | Python | gpl-2.0 | 2,457 |
#!/usr/bin/env python
import os
from tempfile import NamedTemporaryFile
from snakemake.shell import shell
# All wrappers must be able to handle an optional params.extra.
extra = snakemake.params.get('extra', '')
# This lets us handle whether to write to a log file or to write to stdout.
# See snakemake.script.log_fmt_shell for details.
if snakemake.log:
snakemake.log = os.path.realpath(str(snakemake.log))
log = snakemake.log_fmt_shell(stdout=False)
# Get directories that I need to move between
cwd = os.getcwd()
tmpdir = os.getenv('TMPDIR')
# Copy files over to ease I/O on filesystem.
bam = NamedTemporaryFile(suffix='.bam').name
bed = NamedTemporaryFile(suffix='.bed').name
# Strip the '.bam' suffix by slicing; str.rstrip would remove a character set, not a suffix.
name = bam[:-len('.bam')]
shell(
'cp {snakemake.input.bam} {bam} '
'&& cp {snakemake.input.bed} {bed}')
os.chdir(tmpdir)
shell(
'infer_experiment.py '
'-i {bam} '
'-r {bed} '
'{extra} '
'> {name}.txt '
'{log}')
# Clean up temporary input copies
shell(
'rm {bam} '
'&& rm {bed}')
# Move outputs
os.chdir(cwd)
shell(
'cp {name}.txt {snakemake.output.txt} '
'&& rm {name}.txt')
| lcdb/lcdb-wrapper-tests | wrappers/rseqc/infer_experiment/wrapper.py | Python | mit | 1,096 |
"""IDLE Configuration Dialog: support user customization of IDLE by GUI
Customize font faces, sizes, and colorization attributes. Set indentation
defaults. Customize keybindings. Colorization and keybindings can be
saved as user defined sets. Select startup options including shell/editor
and default window size. Define additional help sources.
Note that tab width in IDLE is currently fixed at eight due to Tk issues.
Refer to comments in EditorWindow autoindent code for details.
"""
from tkinter import *
import tkinter.messagebox as tkMessageBox
import tkinter.colorchooser as tkColorChooser
import tkinter.font as tkFont
import copy
from idlelib.configHandler import idleConf
from idlelib.dynOptionMenuWidget import DynOptionMenu
from idlelib.tabbedpages import TabbedPageSet
from idlelib.keybindingDialog import GetKeysDialog
from idlelib.configSectionNameDialog import GetCfgSectionNameDialog
from idlelib.configHelpSourceEdit import GetHelpSourceDialog
from idlelib import macosxSupport
class ConfigDialog(Toplevel):
def __init__(self,parent,title):
Toplevel.__init__(self, parent)
self.wm_withdraw()
self.configure(borderwidth=5)
self.title('IDLE Preferences')
self.geometry("+%d+%d" % (parent.winfo_rootx()+20,
parent.winfo_rooty()+30))
#Theme Elements. Each theme element key is its display name.
#The first value of the tuple is the sample area tag name.
#The second value is the display name list sort index.
self.themeElements={'Normal Text':('normal','00'),
'Python Keywords':('keyword','01'),
'Python Definitions':('definition','02'),
'Python Builtins':('builtin', '03'),
'Python Comments':('comment','04'),
'Python Strings':('string','05'),
'Selected Text':('hilite','06'),
'Found Text':('hit','07'),
'Cursor':('cursor','08'),
'Error Text':('error','09'),
'Shell Normal Text':('console','10'),
'Shell Stdout Text':('stdout','11'),
'Shell Stderr Text':('stderr','12'),
}
self.ResetChangedItems() #load initial values in changed items dict
self.CreateWidgets()
self.resizable(height=FALSE,width=FALSE)
self.transient(parent)
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.Cancel)
self.parent = parent
self.tabPages.focus_set()
#key bindings for this dialog
#self.bind('<Escape>',self.Cancel) #dismiss dialog, no save
#self.bind('<Alt-a>',self.Apply) #apply changes, save
#self.bind('<F1>',self.Help) #context help
self.LoadConfigs()
self.AttachVarCallbacks() #avoid callbacks during LoadConfigs
self.wm_deiconify()
self.wait_window()
def CreateWidgets(self):
self.tabPages = TabbedPageSet(self,
page_names=['Fonts/Tabs','Highlighting','Keys','General'])
frameActionButtons = Frame(self,pady=2)
#action buttons
if macosxSupport.isAquaTk():
            # Suppress the padx and pady arguments when
# running as IDLE.app, otherwise the text
# on these buttons will not be readable.
extraKwds={}
else:
extraKwds=dict(padx=6, pady=3)
        # Comment out button creation and packing until self.Help is implemented
## self.buttonHelp = Button(frameActionButtons,text='Help',
## command=self.Help,takefocus=FALSE,
## **extraKwds)
self.buttonOk = Button(frameActionButtons,text='Ok',
command=self.Ok,takefocus=FALSE,
**extraKwds)
self.buttonApply = Button(frameActionButtons,text='Apply',
command=self.Apply,takefocus=FALSE,
**extraKwds)
self.buttonCancel = Button(frameActionButtons,text='Cancel',
command=self.Cancel,takefocus=FALSE,
**extraKwds)
self.CreatePageFontTab()
self.CreatePageHighlight()
self.CreatePageKeys()
self.CreatePageGeneral()
## self.buttonHelp.pack(side=RIGHT,padx=5)
self.buttonOk.pack(side=LEFT,padx=5)
self.buttonApply.pack(side=LEFT,padx=5)
self.buttonCancel.pack(side=LEFT,padx=5)
frameActionButtons.pack(side=BOTTOM)
Frame(self, height=2, borderwidth=0).pack(side=BOTTOM)
self.tabPages.pack(side=TOP,expand=TRUE,fill=BOTH)
def CreatePageFontTab(self):
#tkVars
self.fontSize=StringVar(self)
self.fontBold=BooleanVar(self)
self.fontName=StringVar(self)
self.spaceNum=IntVar(self)
self.editFont=tkFont.Font(self,('courier',10,'normal'))
##widget creation
#body frame
frame=self.tabPages.pages['Fonts/Tabs'].frame
#body section frames
frameFont=LabelFrame(frame,borderwidth=2,relief=GROOVE,
text=' Base Editor Font ')
frameIndent=LabelFrame(frame,borderwidth=2,relief=GROOVE,
text=' Indentation Width ')
#frameFont
frameFontName=Frame(frameFont)
frameFontParam=Frame(frameFont)
labelFontNameTitle=Label(frameFontName,justify=LEFT,
text='Font Face :')
self.listFontName=Listbox(frameFontName,height=5,takefocus=FALSE,
exportselection=FALSE)
self.listFontName.bind('<ButtonRelease-1>',self.OnListFontButtonRelease)
scrollFont=Scrollbar(frameFontName)
scrollFont.config(command=self.listFontName.yview)
self.listFontName.config(yscrollcommand=scrollFont.set)
labelFontSizeTitle=Label(frameFontParam,text='Size :')
self.optMenuFontSize=DynOptionMenu(frameFontParam,self.fontSize,None,
command=self.SetFontSample)
checkFontBold=Checkbutton(frameFontParam,variable=self.fontBold,
onvalue=1,offvalue=0,text='Bold',command=self.SetFontSample)
frameFontSample=Frame(frameFont,relief=SOLID,borderwidth=1)
self.labelFontSample=Label(frameFontSample,
text='AaBbCcDdEe\nFfGgHhIiJjK\n1234567890\n#:+=(){}[]',
justify=LEFT,font=self.editFont)
#frameIndent
frameIndentSize=Frame(frameIndent)
labelSpaceNumTitle=Label(frameIndentSize, justify=LEFT,
text='Python Standard: 4 Spaces!')
self.scaleSpaceNum=Scale(frameIndentSize, variable=self.spaceNum,
orient='horizontal',
tickinterval=2, from_=2, to=16)
#widget packing
#body
frameFont.pack(side=LEFT,padx=5,pady=5,expand=TRUE,fill=BOTH)
frameIndent.pack(side=LEFT,padx=5,pady=5,fill=Y)
#frameFont
frameFontName.pack(side=TOP,padx=5,pady=5,fill=X)
frameFontParam.pack(side=TOP,padx=5,pady=5,fill=X)
labelFontNameTitle.pack(side=TOP,anchor=W)
self.listFontName.pack(side=LEFT,expand=TRUE,fill=X)
scrollFont.pack(side=LEFT,fill=Y)
labelFontSizeTitle.pack(side=LEFT,anchor=W)
self.optMenuFontSize.pack(side=LEFT,anchor=W)
checkFontBold.pack(side=LEFT,anchor=W,padx=20)
frameFontSample.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH)
self.labelFontSample.pack(expand=TRUE,fill=BOTH)
#frameIndent
frameIndentSize.pack(side=TOP,fill=X)
labelSpaceNumTitle.pack(side=TOP,anchor=W,padx=5)
self.scaleSpaceNum.pack(side=TOP,padx=5,fill=X)
return frame
def CreatePageHighlight(self):
self.builtinTheme=StringVar(self)
self.customTheme=StringVar(self)
self.fgHilite=BooleanVar(self)
self.colour=StringVar(self)
self.fontName=StringVar(self)
self.themeIsBuiltin=BooleanVar(self)
self.highlightTarget=StringVar(self)
##widget creation
#body frame
frame=self.tabPages.pages['Highlighting'].frame
#body section frames
frameCustom=LabelFrame(frame,borderwidth=2,relief=GROOVE,
text=' Custom Highlighting ')
frameTheme=LabelFrame(frame,borderwidth=2,relief=GROOVE,
text=' Highlighting Theme ')
#frameCustom
self.textHighlightSample=Text(frameCustom,relief=SOLID,borderwidth=1,
font=('courier',12,''),cursor='hand2',width=21,height=11,
takefocus=FALSE,highlightthickness=0,wrap=NONE)
text=self.textHighlightSample
text.bind('<Double-Button-1>',lambda e: 'break')
text.bind('<B1-Motion>',lambda e: 'break')
textAndTags=(('#you can click here','comment'),('\n','normal'),
('#to choose items','comment'),('\n','normal'),('def','keyword'),
(' ','normal'),('func','definition'),('(param):','normal'),
('\n ','normal'),('"""string"""','string'),('\n var0 = ','normal'),
("'string'",'string'),('\n var1 = ','normal'),("'selected'",'hilite'),
('\n var2 = ','normal'),("'found'",'hit'),
('\n var3 = ','normal'),('list', 'builtin'), ('(','normal'),
('None', 'keyword'),(')\n\n','normal'),
(' error ','error'),(' ','normal'),('cursor |','cursor'),
('\n ','normal'),('shell','console'),(' ','normal'),('stdout','stdout'),
(' ','normal'),('stderr','stderr'),('\n','normal'))
for txTa in textAndTags:
text.insert(END,txTa[0],txTa[1])
for element in self.themeElements:
text.tag_bind(self.themeElements[element][0],'<ButtonPress-1>',
lambda event,elem=element: event.widget.winfo_toplevel()
.highlightTarget.set(elem))
text.config(state=DISABLED)
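        # Note: the lambda above passes the element name through a default argument
        # (elem=element) so each tag binding captures its own value instead of the
        # loop variable's final value -- the usual idiom for closures made in a loop.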
self.frameColourSet=Frame(frameCustom,relief=SOLID,borderwidth=1)
frameFgBg=Frame(frameCustom)
buttonSetColour=Button(self.frameColourSet,text='Choose Colour for :',
command=self.GetColour,highlightthickness=0)
self.optMenuHighlightTarget=DynOptionMenu(self.frameColourSet,
self.highlightTarget,None,highlightthickness=0)#,command=self.SetHighlightTargetBinding
self.radioFg=Radiobutton(frameFgBg,variable=self.fgHilite,
value=1,text='Foreground',command=self.SetColourSampleBinding)
self.radioBg=Radiobutton(frameFgBg,variable=self.fgHilite,
value=0,text='Background',command=self.SetColourSampleBinding)
self.fgHilite.set(1)
buttonSaveCustomTheme=Button(frameCustom,
text='Save as New Custom Theme',command=self.SaveAsNewTheme)
#frameTheme
labelTypeTitle=Label(frameTheme,text='Select : ')
self.radioThemeBuiltin=Radiobutton(frameTheme,variable=self.themeIsBuiltin,
value=1,command=self.SetThemeType,text='a Built-in Theme')
self.radioThemeCustom=Radiobutton(frameTheme,variable=self.themeIsBuiltin,
value=0,command=self.SetThemeType,text='a Custom Theme')
self.optMenuThemeBuiltin=DynOptionMenu(frameTheme,
self.builtinTheme,None,command=None)
self.optMenuThemeCustom=DynOptionMenu(frameTheme,
self.customTheme,None,command=None)
self.buttonDeleteCustomTheme=Button(frameTheme,text='Delete Custom Theme',
command=self.DeleteCustomTheme)
##widget packing
#body
frameCustom.pack(side=LEFT,padx=5,pady=5,expand=TRUE,fill=BOTH)
frameTheme.pack(side=LEFT,padx=5,pady=5,fill=Y)
#frameCustom
self.frameColourSet.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=X)
frameFgBg.pack(side=TOP,padx=5,pady=0)
self.textHighlightSample.pack(side=TOP,padx=5,pady=5,expand=TRUE,
fill=BOTH)
buttonSetColour.pack(side=TOP,expand=TRUE,fill=X,padx=8,pady=4)
self.optMenuHighlightTarget.pack(side=TOP,expand=TRUE,fill=X,padx=8,pady=3)
self.radioFg.pack(side=LEFT,anchor=E)
self.radioBg.pack(side=RIGHT,anchor=W)
buttonSaveCustomTheme.pack(side=BOTTOM,fill=X,padx=5,pady=5)
#frameTheme
labelTypeTitle.pack(side=TOP,anchor=W,padx=5,pady=5)
self.radioThemeBuiltin.pack(side=TOP,anchor=W,padx=5)
self.radioThemeCustom.pack(side=TOP,anchor=W,padx=5,pady=2)
self.optMenuThemeBuiltin.pack(side=TOP,fill=X,padx=5,pady=5)
self.optMenuThemeCustom.pack(side=TOP,fill=X,anchor=W,padx=5,pady=5)
self.buttonDeleteCustomTheme.pack(side=TOP,fill=X,padx=5,pady=5)
return frame
def CreatePageKeys(self):
#tkVars
self.bindingTarget=StringVar(self)
self.builtinKeys=StringVar(self)
self.customKeys=StringVar(self)
self.keysAreBuiltin=BooleanVar(self)
self.keyBinding=StringVar(self)
##widget creation
#body frame
frame=self.tabPages.pages['Keys'].frame
#body section frames
frameCustom=LabelFrame(frame,borderwidth=2,relief=GROOVE,
text=' Custom Key Bindings ')
frameKeySets=LabelFrame(frame,borderwidth=2,relief=GROOVE,
text=' Key Set ')
#frameCustom
frameTarget=Frame(frameCustom)
labelTargetTitle=Label(frameTarget,text='Action - Key(s)')
scrollTargetY=Scrollbar(frameTarget)
scrollTargetX=Scrollbar(frameTarget,orient=HORIZONTAL)
self.listBindings=Listbox(frameTarget,takefocus=FALSE,
exportselection=FALSE)
self.listBindings.bind('<ButtonRelease-1>',self.KeyBindingSelected)
scrollTargetY.config(command=self.listBindings.yview)
scrollTargetX.config(command=self.listBindings.xview)
self.listBindings.config(yscrollcommand=scrollTargetY.set)
self.listBindings.config(xscrollcommand=scrollTargetX.set)
self.buttonNewKeys=Button(frameCustom,text='Get New Keys for Selection',
command=self.GetNewKeys,state=DISABLED)
#frameKeySets
frames = [Frame(frameKeySets, padx=2, pady=2, borderwidth=0)
for i in range(2)]
self.radioKeysBuiltin=Radiobutton(frames[0],variable=self.keysAreBuiltin,
value=1,command=self.SetKeysType,text='Use a Built-in Key Set')
self.radioKeysCustom=Radiobutton(frames[0],variable=self.keysAreBuiltin,
value=0,command=self.SetKeysType,text='Use a Custom Key Set')
self.optMenuKeysBuiltin=DynOptionMenu(frames[0],
self.builtinKeys,None,command=None)
self.optMenuKeysCustom=DynOptionMenu(frames[0],
self.customKeys,None,command=None)
self.buttonDeleteCustomKeys=Button(frames[1],text='Delete Custom Key Set',
command=self.DeleteCustomKeys)
buttonSaveCustomKeys=Button(frames[1],
text='Save as New Custom Key Set',command=self.SaveAsNewKeySet)
##widget packing
#body
frameCustom.pack(side=BOTTOM,padx=5,pady=5,expand=TRUE,fill=BOTH)
frameKeySets.pack(side=BOTTOM,padx=5,pady=5,fill=BOTH)
#frameCustom
self.buttonNewKeys.pack(side=BOTTOM,fill=X,padx=5,pady=5)
frameTarget.pack(side=LEFT,padx=5,pady=5,expand=TRUE,fill=BOTH)
#frame target
frameTarget.columnconfigure(0,weight=1)
frameTarget.rowconfigure(1,weight=1)
labelTargetTitle.grid(row=0,column=0,columnspan=2,sticky=W)
self.listBindings.grid(row=1,column=0,sticky=NSEW)
scrollTargetY.grid(row=1,column=1,sticky=NS)
scrollTargetX.grid(row=2,column=0,sticky=EW)
#frameKeySets
self.radioKeysBuiltin.grid(row=0, column=0, sticky=W+NS)
self.radioKeysCustom.grid(row=1, column=0, sticky=W+NS)
self.optMenuKeysBuiltin.grid(row=0, column=1, sticky=NSEW)
self.optMenuKeysCustom.grid(row=1, column=1, sticky=NSEW)
self.buttonDeleteCustomKeys.pack(side=LEFT,fill=X,expand=True,padx=2)
buttonSaveCustomKeys.pack(side=LEFT,fill=X,expand=True,padx=2)
frames[0].pack(side=TOP, fill=BOTH, expand=True)
frames[1].pack(side=TOP, fill=X, expand=True, pady=2)
return frame
def CreatePageGeneral(self):
#tkVars
self.winWidth=StringVar(self)
self.winHeight=StringVar(self)
self.paraWidth=StringVar(self)
self.startupEdit=IntVar(self)
self.autoSave=IntVar(self)
self.encoding=StringVar(self)
self.userHelpBrowser=BooleanVar(self)
self.helpBrowser=StringVar(self)
#widget creation
#body
frame=self.tabPages.pages['General'].frame
#body section frames
frameRun=LabelFrame(frame,borderwidth=2,relief=GROOVE,
text=' Startup Preferences ')
frameSave=LabelFrame(frame,borderwidth=2,relief=GROOVE,
text=' Autosave Preferences ')
frameWinSize=Frame(frame,borderwidth=2,relief=GROOVE)
frameParaSize=Frame(frame,borderwidth=2,relief=GROOVE)
frameHelp=LabelFrame(frame,borderwidth=2,relief=GROOVE,
text=' Additional Help Sources ')
#frameRun
labelRunChoiceTitle=Label(frameRun,text='At Startup')
radioStartupEdit=Radiobutton(frameRun,variable=self.startupEdit,
value=1,command=self.SetKeysType,text="Open Edit Window")
radioStartupShell=Radiobutton(frameRun,variable=self.startupEdit,
value=0,command=self.SetKeysType,text='Open Shell Window')
#frameSave
labelRunSaveTitle=Label(frameSave,text='At Start of Run (F5) ')
radioSaveAsk=Radiobutton(frameSave,variable=self.autoSave,
value=0,command=self.SetKeysType,text="Prompt to Save")
radioSaveAuto=Radiobutton(frameSave,variable=self.autoSave,
value=1,command=self.SetKeysType,text='No Prompt')
#frameWinSize
labelWinSizeTitle=Label(frameWinSize,text='Initial Window Size'+
' (in characters)')
labelWinWidthTitle=Label(frameWinSize,text='Width')
entryWinWidth=Entry(frameWinSize,textvariable=self.winWidth,
width=3)
labelWinHeightTitle=Label(frameWinSize,text='Height')
entryWinHeight=Entry(frameWinSize,textvariable=self.winHeight,
width=3)
#paragraphFormatWidth
labelParaWidthTitle=Label(frameParaSize,text='Paragraph reformat'+
' width (in characters)')
entryParaWidth=Entry(frameParaSize,textvariable=self.paraWidth,
width=3)
#frameHelp
frameHelpList=Frame(frameHelp)
frameHelpListButtons=Frame(frameHelpList)
scrollHelpList=Scrollbar(frameHelpList)
self.listHelp=Listbox(frameHelpList,height=5,takefocus=FALSE,
exportselection=FALSE)
scrollHelpList.config(command=self.listHelp.yview)
self.listHelp.config(yscrollcommand=scrollHelpList.set)
self.listHelp.bind('<ButtonRelease-1>',self.HelpSourceSelected)
self.buttonHelpListEdit=Button(frameHelpListButtons,text='Edit',
state=DISABLED,width=8,command=self.HelpListItemEdit)
self.buttonHelpListAdd=Button(frameHelpListButtons,text='Add',
width=8,command=self.HelpListItemAdd)
self.buttonHelpListRemove=Button(frameHelpListButtons,text='Remove',
state=DISABLED,width=8,command=self.HelpListItemRemove)
#widget packing
#body
frameRun.pack(side=TOP,padx=5,pady=5,fill=X)
frameSave.pack(side=TOP,padx=5,pady=5,fill=X)
frameWinSize.pack(side=TOP,padx=5,pady=5,fill=X)
frameParaSize.pack(side=TOP,padx=5,pady=5,fill=X)
frameHelp.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH)
#frameRun
labelRunChoiceTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
radioStartupShell.pack(side=RIGHT,anchor=W,padx=5,pady=5)
radioStartupEdit.pack(side=RIGHT,anchor=W,padx=5,pady=5)
#frameSave
labelRunSaveTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
radioSaveAuto.pack(side=RIGHT,anchor=W,padx=5,pady=5)
radioSaveAsk.pack(side=RIGHT,anchor=W,padx=5,pady=5)
#frameWinSize
labelWinSizeTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
entryWinHeight.pack(side=RIGHT,anchor=E,padx=10,pady=5)
labelWinHeightTitle.pack(side=RIGHT,anchor=E,pady=5)
entryWinWidth.pack(side=RIGHT,anchor=E,padx=10,pady=5)
labelWinWidthTitle.pack(side=RIGHT,anchor=E,pady=5)
#paragraphFormatWidth
labelParaWidthTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
entryParaWidth.pack(side=RIGHT,anchor=E,padx=10,pady=5)
#frameHelp
frameHelpListButtons.pack(side=RIGHT,padx=5,pady=5,fill=Y)
frameHelpList.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH)
scrollHelpList.pack(side=RIGHT,anchor=W,fill=Y)
self.listHelp.pack(side=LEFT,anchor=E,expand=TRUE,fill=BOTH)
self.buttonHelpListEdit.pack(side=TOP,anchor=W,pady=5)
self.buttonHelpListAdd.pack(side=TOP,anchor=W)
self.buttonHelpListRemove.pack(side=TOP,anchor=W,pady=5)
return frame
def AttachVarCallbacks(self):
self.fontSize.trace_variable('w',self.VarChanged_fontSize)
self.fontName.trace_variable('w',self.VarChanged_fontName)
self.fontBold.trace_variable('w',self.VarChanged_fontBold)
self.spaceNum.trace_variable('w',self.VarChanged_spaceNum)
self.colour.trace_variable('w',self.VarChanged_colour)
self.builtinTheme.trace_variable('w',self.VarChanged_builtinTheme)
self.customTheme.trace_variable('w',self.VarChanged_customTheme)
self.themeIsBuiltin.trace_variable('w',self.VarChanged_themeIsBuiltin)
self.highlightTarget.trace_variable('w',self.VarChanged_highlightTarget)
self.keyBinding.trace_variable('w',self.VarChanged_keyBinding)
self.builtinKeys.trace_variable('w',self.VarChanged_builtinKeys)
self.customKeys.trace_variable('w',self.VarChanged_customKeys)
self.keysAreBuiltin.trace_variable('w',self.VarChanged_keysAreBuiltin)
self.winWidth.trace_variable('w',self.VarChanged_winWidth)
self.winHeight.trace_variable('w',self.VarChanged_winHeight)
self.paraWidth.trace_variable('w',self.VarChanged_paraWidth)
self.startupEdit.trace_variable('w',self.VarChanged_startupEdit)
self.autoSave.trace_variable('w',self.VarChanged_autoSave)
self.encoding.trace_variable('w',self.VarChanged_encoding)
def VarChanged_fontSize(self,*params):
value=self.fontSize.get()
self.AddChangedItem('main','EditorWindow','font-size',value)
def VarChanged_fontName(self,*params):
value=self.fontName.get()
self.AddChangedItem('main','EditorWindow','font',value)
def VarChanged_fontBold(self,*params):
value=self.fontBold.get()
self.AddChangedItem('main','EditorWindow','font-bold',value)
def VarChanged_spaceNum(self,*params):
value=self.spaceNum.get()
self.AddChangedItem('main','Indent','num-spaces',value)
def VarChanged_colour(self,*params):
self.OnNewColourSet()
def VarChanged_builtinTheme(self,*params):
value=self.builtinTheme.get()
self.AddChangedItem('main','Theme','name',value)
self.PaintThemeSample()
def VarChanged_customTheme(self,*params):
value=self.customTheme.get()
if value != '- no custom themes -':
self.AddChangedItem('main','Theme','name',value)
self.PaintThemeSample()
def VarChanged_themeIsBuiltin(self,*params):
value=self.themeIsBuiltin.get()
self.AddChangedItem('main','Theme','default',value)
if value:
self.VarChanged_builtinTheme()
else:
self.VarChanged_customTheme()
def VarChanged_highlightTarget(self,*params):
self.SetHighlightTarget()
def VarChanged_keyBinding(self,*params):
value=self.keyBinding.get()
keySet=self.customKeys.get()
event=self.listBindings.get(ANCHOR).split()[0]
if idleConf.IsCoreBinding(event):
#this is a core keybinding
self.AddChangedItem('keys',keySet,event,value)
else: #this is an extension key binding
extName=idleConf.GetExtnNameForEvent(event)
extKeybindSection=extName+'_cfgBindings'
self.AddChangedItem('extensions',extKeybindSection,event,value)
def VarChanged_builtinKeys(self,*params):
value=self.builtinKeys.get()
self.AddChangedItem('main','Keys','name',value)
self.LoadKeysList(value)
def VarChanged_customKeys(self,*params):
value=self.customKeys.get()
if value != '- no custom keys -':
self.AddChangedItem('main','Keys','name',value)
self.LoadKeysList(value)
def VarChanged_keysAreBuiltin(self,*params):
value=self.keysAreBuiltin.get()
self.AddChangedItem('main','Keys','default',value)
if value:
self.VarChanged_builtinKeys()
else:
self.VarChanged_customKeys()
def VarChanged_winWidth(self,*params):
value=self.winWidth.get()
self.AddChangedItem('main','EditorWindow','width',value)
def VarChanged_winHeight(self,*params):
value=self.winHeight.get()
self.AddChangedItem('main','EditorWindow','height',value)
def VarChanged_paraWidth(self,*params):
value=self.paraWidth.get()
self.AddChangedItem('main','FormatParagraph','paragraph',value)
def VarChanged_startupEdit(self,*params):
value=self.startupEdit.get()
self.AddChangedItem('main','General','editor-on-startup',value)
def VarChanged_autoSave(self,*params):
value=self.autoSave.get()
self.AddChangedItem('main','General','autosave',value)
def VarChanged_encoding(self,*params):
value=self.encoding.get()
self.AddChangedItem('main','EditorWindow','encoding',value)
def ResetChangedItems(self):
#When any config item is changed in this dialog, an entry
#should be made in the relevant section (config type) of this
#dictionary. The key should be the config file section name and the
#value a dictionary, whose key:value pairs are item=value pairs for
#that config file section.
self.changedItems={'main':{},'highlight':{},'keys':{},'extensions':{}}
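        # Illustrative shape of self.changedItems after a couple of edits
        # (section and value strings hypothetical); AddChangedItem() below
        # fills it in exactly this form:
        #   {'main': {'EditorWindow': {'font-size': '12'}},
        #    'highlight': {},
        #    'keys': {'My Custom Keys': {'copy': '<Control-Key-c>'}},
        #    'extensions': {}}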
def AddChangedItem(self,type,section,item,value):
value=str(value) #make sure we use a string
if section not in self.changedItems[type]:
self.changedItems[type][section]={}
self.changedItems[type][section][item]=value
def GetDefaultItems(self):
dItems={'main':{},'highlight':{},'keys':{},'extensions':{}}
for configType in dItems:
sections=idleConf.GetSectionList('default',configType)
for section in sections:
dItems[configType][section]={}
options=idleConf.defaultCfg[configType].GetOptionList(section)
for option in options:
dItems[configType][section][option]=(
idleConf.defaultCfg[configType].Get(section,option))
return dItems
def SetThemeType(self):
if self.themeIsBuiltin.get():
self.optMenuThemeBuiltin.config(state=NORMAL)
self.optMenuThemeCustom.config(state=DISABLED)
self.buttonDeleteCustomTheme.config(state=DISABLED)
else:
self.optMenuThemeBuiltin.config(state=DISABLED)
self.radioThemeCustom.config(state=NORMAL)
self.optMenuThemeCustom.config(state=NORMAL)
self.buttonDeleteCustomTheme.config(state=NORMAL)
def SetKeysType(self):
if self.keysAreBuiltin.get():
self.optMenuKeysBuiltin.config(state=NORMAL)
self.optMenuKeysCustom.config(state=DISABLED)
self.buttonDeleteCustomKeys.config(state=DISABLED)
else:
self.optMenuKeysBuiltin.config(state=DISABLED)
self.radioKeysCustom.config(state=NORMAL)
self.optMenuKeysCustom.config(state=NORMAL)
self.buttonDeleteCustomKeys.config(state=NORMAL)
def GetNewKeys(self):
listIndex=self.listBindings.index(ANCHOR)
binding=self.listBindings.get(listIndex)
bindName=binding.split()[0] #first part, up to first space
if self.keysAreBuiltin.get():
currentKeySetName=self.builtinKeys.get()
else:
currentKeySetName=self.customKeys.get()
currentBindings=idleConf.GetCurrentKeySet()
if currentKeySetName in self.changedItems['keys']: #unsaved changes
keySetChanges=self.changedItems['keys'][currentKeySetName]
for event in keySetChanges:
currentBindings[event]=keySetChanges[event].split()
currentKeySequences = list(currentBindings.values())
newKeys=GetKeysDialog(self,'Get New Keys',bindName,
currentKeySequences).result
if newKeys: #new keys were specified
if self.keysAreBuiltin.get(): #current key set is a built-in
message=('Your changes will be saved as a new Custom Key Set. '+
'Enter a name for your new Custom Key Set below.')
newKeySet=self.GetNewKeysName(message)
if not newKeySet: #user cancelled custom key set creation
self.listBindings.select_set(listIndex)
self.listBindings.select_anchor(listIndex)
return
else: #create new custom key set based on previously active key set
self.CreateNewKeySet(newKeySet)
self.listBindings.delete(listIndex)
self.listBindings.insert(listIndex,bindName+' - '+newKeys)
self.listBindings.select_set(listIndex)
self.listBindings.select_anchor(listIndex)
self.keyBinding.set(newKeys)
else:
self.listBindings.select_set(listIndex)
self.listBindings.select_anchor(listIndex)
def GetNewKeysName(self,message):
usedNames=(idleConf.GetSectionList('user','keys')+
idleConf.GetSectionList('default','keys'))
newKeySet=GetCfgSectionNameDialog(self,'New Custom Key Set',
message,usedNames).result
return newKeySet
def SaveAsNewKeySet(self):
newKeysName=self.GetNewKeysName('New Key Set Name:')
if newKeysName:
self.CreateNewKeySet(newKeysName)
def KeyBindingSelected(self,event):
self.buttonNewKeys.config(state=NORMAL)
def CreateNewKeySet(self,newKeySetName):
#creates new custom key set based on the previously active key set,
#and makes the new key set active
if self.keysAreBuiltin.get():
prevKeySetName=self.builtinKeys.get()
else:
prevKeySetName=self.customKeys.get()
prevKeys=idleConf.GetCoreKeys(prevKeySetName)
newKeys={}
for event in prevKeys: #add key set to changed items
eventName=event[2:-2] #trim off the angle brackets
binding=' '.join(prevKeys[event])
newKeys[eventName]=binding
#handle any unsaved changes to prev key set
if prevKeySetName in self.changedItems['keys']:
keySetChanges=self.changedItems['keys'][prevKeySetName]
for event in keySetChanges:
newKeys[event]=keySetChanges[event]
#save the new theme
self.SaveNewKeySet(newKeySetName,newKeys)
#change gui over to the new key set
customKeyList=idleConf.GetSectionList('user','keys')
customKeyList.sort()
self.optMenuKeysCustom.SetMenu(customKeyList,newKeySetName)
self.keysAreBuiltin.set(0)
self.SetKeysType()
def LoadKeysList(self,keySetName):
reselect=0
newKeySet=0
if self.listBindings.curselection():
reselect=1
listIndex=self.listBindings.index(ANCHOR)
keySet=idleConf.GetKeySet(keySetName)
bindNames = list(keySet.keys())
bindNames.sort()
self.listBindings.delete(0,END)
for bindName in bindNames:
key=' '.join(keySet[bindName]) #make key(s) into a string
bindName=bindName[2:-2] #trim off the angle brackets
if keySetName in self.changedItems['keys']:
#handle any unsaved changes to this key set
if bindName in self.changedItems['keys'][keySetName]:
key=self.changedItems['keys'][keySetName][bindName]
self.listBindings.insert(END, bindName+' - '+key)
if reselect:
self.listBindings.see(listIndex)
self.listBindings.select_set(listIndex)
self.listBindings.select_anchor(listIndex)
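        # Illustrative listbox row built above (binding hypothetical):
        #   'copy - <Control-Key-c> <Control-Key-C>'
        # i.e. the trimmed event name, ' - ', then the space-joined key sequence,
        # with any unsaved edits from self.changedItems overriding the saved key.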
def DeleteCustomKeys(self):
keySetName=self.customKeys.get()
if not tkMessageBox.askyesno('Delete Key Set','Are you sure you wish '+
'to delete the key set %r ?' % (keySetName),
parent=self):
return
#remove key set from config
idleConf.userCfg['keys'].remove_section(keySetName)
if keySetName in self.changedItems['keys']:
del(self.changedItems['keys'][keySetName])
#write changes
idleConf.userCfg['keys'].Save()
#reload user key set list
itemList=idleConf.GetSectionList('user','keys')
itemList.sort()
if not itemList:
self.radioKeysCustom.config(state=DISABLED)
self.optMenuKeysCustom.SetMenu(itemList,'- no custom keys -')
else:
self.optMenuKeysCustom.SetMenu(itemList,itemList[0])
#revert to default key set
self.keysAreBuiltin.set(idleConf.defaultCfg['main'].Get('Keys','default'))
self.builtinKeys.set(idleConf.defaultCfg['main'].Get('Keys','name'))
#user can't back out of these changes, they must be applied now
self.Apply()
self.SetKeysType()
def DeleteCustomTheme(self):
themeName=self.customTheme.get()
if not tkMessageBox.askyesno('Delete Theme','Are you sure you wish '+
'to delete the theme %r ?' % (themeName,),
parent=self):
return
#remove theme from config
idleConf.userCfg['highlight'].remove_section(themeName)
if themeName in self.changedItems['highlight']:
del(self.changedItems['highlight'][themeName])
#write changes
idleConf.userCfg['highlight'].Save()
#reload user theme list
itemList=idleConf.GetSectionList('user','highlight')
itemList.sort()
if not itemList:
self.radioThemeCustom.config(state=DISABLED)
self.optMenuThemeCustom.SetMenu(itemList,'- no custom themes -')
else:
self.optMenuThemeCustom.SetMenu(itemList,itemList[0])
#revert to default theme
self.themeIsBuiltin.set(idleConf.defaultCfg['main'].Get('Theme','default'))
self.builtinTheme.set(idleConf.defaultCfg['main'].Get('Theme','name'))
#user can't back out of these changes, they must be applied now
self.Apply()
self.SetThemeType()
def GetColour(self):
target=self.highlightTarget.get()
prevColour=self.frameColourSet.cget('bg')
rgbTuplet, colourString = tkColorChooser.askcolor(parent=self,
title='Pick new colour for : '+target,initialcolor=prevColour)
if colourString and (colourString!=prevColour):
#user didn't cancel, and they chose a new colour
if self.themeIsBuiltin.get(): #current theme is a built-in
message=('Your changes will be saved as a new Custom Theme. '+
'Enter a name for your new Custom Theme below.')
newTheme=self.GetNewThemeName(message)
if not newTheme: #user cancelled custom theme creation
return
else: #create new custom theme based on previously active theme
self.CreateNewTheme(newTheme)
self.colour.set(colourString)
else: #current theme is user defined
self.colour.set(colourString)
def OnNewColourSet(self):
newColour=self.colour.get()
self.frameColourSet.config(bg=newColour)#set sample
if self.fgHilite.get(): plane='foreground'
else: plane='background'
sampleElement=self.themeElements[self.highlightTarget.get()][0]
self.textHighlightSample.tag_config(sampleElement, **{plane:newColour})
theme=self.customTheme.get()
themeElement=sampleElement+'-'+plane
self.AddChangedItem('highlight',theme,themeElement,newColour)
def GetNewThemeName(self,message):
usedNames=(idleConf.GetSectionList('user','highlight')+
idleConf.GetSectionList('default','highlight'))
newTheme=GetCfgSectionNameDialog(self,'New Custom Theme',
message,usedNames).result
return newTheme
def SaveAsNewTheme(self):
newThemeName=self.GetNewThemeName('New Theme Name:')
if newThemeName:
self.CreateNewTheme(newThemeName)
def CreateNewTheme(self,newThemeName):
#creates new custom theme based on the previously active theme,
#and makes the new theme active
if self.themeIsBuiltin.get():
themeType='default'
themeName=self.builtinTheme.get()
else:
themeType='user'
themeName=self.customTheme.get()
newTheme=idleConf.GetThemeDict(themeType,themeName)
#apply any of the old theme's unsaved changes to the new theme
if themeName in self.changedItems['highlight']:
themeChanges=self.changedItems['highlight'][themeName]
for element in themeChanges:
newTheme[element]=themeChanges[element]
#save the new theme
self.SaveNewTheme(newThemeName,newTheme)
#change gui over to the new theme
customThemeList=idleConf.GetSectionList('user','highlight')
customThemeList.sort()
self.optMenuThemeCustom.SetMenu(customThemeList,newThemeName)
self.themeIsBuiltin.set(0)
self.SetThemeType()
def OnListFontButtonRelease(self,event):
font = self.listFontName.get(ANCHOR)
self.fontName.set(font.lower())
self.SetFontSample()
def SetFontSample(self,event=None):
fontName=self.fontName.get()
if self.fontBold.get():
fontWeight=tkFont.BOLD
else:
fontWeight=tkFont.NORMAL
newFont = (fontName, self.fontSize.get(), fontWeight)
self.labelFontSample.config(font=newFont)
self.textHighlightSample.configure(font=newFont)
def SetHighlightTarget(self):
if self.highlightTarget.get()=='Cursor': #bg not possible
self.radioFg.config(state=DISABLED)
self.radioBg.config(state=DISABLED)
self.fgHilite.set(1)
else: #both fg and bg can be set
self.radioFg.config(state=NORMAL)
self.radioBg.config(state=NORMAL)
self.fgHilite.set(1)
self.SetColourSample()
def SetColourSampleBinding(self,*args):
self.SetColourSample()
def SetColourSample(self):
        #set the colour sample area
tag=self.themeElements[self.highlightTarget.get()][0]
if self.fgHilite.get(): plane='foreground'
else: plane='background'
colour=self.textHighlightSample.tag_cget(tag,plane)
self.frameColourSet.config(bg=colour)
def PaintThemeSample(self):
if self.themeIsBuiltin.get(): #a default theme
theme=self.builtinTheme.get()
else: #a user theme
theme=self.customTheme.get()
for elementTitle in self.themeElements:
element=self.themeElements[elementTitle][0]
colours=idleConf.GetHighlight(theme,element)
if element=='cursor': #cursor sample needs special painting
colours['background']=idleConf.GetHighlight(theme,
'normal', fgBg='bg')
#handle any unsaved changes to this theme
if theme in self.changedItems['highlight']:
themeDict=self.changedItems['highlight'][theme]
if element+'-foreground' in themeDict:
colours['foreground']=themeDict[element+'-foreground']
if element+'-background' in themeDict:
colours['background']=themeDict[element+'-background']
self.textHighlightSample.tag_config(element, **colours)
self.SetColourSample()
def HelpSourceSelected(self,event):
self.SetHelpListButtonStates()
def SetHelpListButtonStates(self):
if self.listHelp.size()<1: #no entries in list
self.buttonHelpListEdit.config(state=DISABLED)
self.buttonHelpListRemove.config(state=DISABLED)
else: #there are some entries
if self.listHelp.curselection(): #there currently is a selection
self.buttonHelpListEdit.config(state=NORMAL)
self.buttonHelpListRemove.config(state=NORMAL)
else: #there currently is not a selection
self.buttonHelpListEdit.config(state=DISABLED)
self.buttonHelpListRemove.config(state=DISABLED)
def HelpListItemAdd(self):
helpSource=GetHelpSourceDialog(self,'New Help Source').result
if helpSource:
self.userHelpList.append( (helpSource[0],helpSource[1]) )
self.listHelp.insert(END,helpSource[0])
self.UpdateUserHelpChangedItems()
self.SetHelpListButtonStates()
def HelpListItemEdit(self):
itemIndex=self.listHelp.index(ANCHOR)
helpSource=self.userHelpList[itemIndex]
newHelpSource=GetHelpSourceDialog(self,'Edit Help Source',
menuItem=helpSource[0],filePath=helpSource[1]).result
if (not newHelpSource) or (newHelpSource==helpSource):
return #no changes
self.userHelpList[itemIndex]=newHelpSource
self.listHelp.delete(itemIndex)
self.listHelp.insert(itemIndex,newHelpSource[0])
self.UpdateUserHelpChangedItems()
self.SetHelpListButtonStates()
def HelpListItemRemove(self):
itemIndex=self.listHelp.index(ANCHOR)
del(self.userHelpList[itemIndex])
self.listHelp.delete(itemIndex)
self.UpdateUserHelpChangedItems()
self.SetHelpListButtonStates()
def UpdateUserHelpChangedItems(self):
"Clear and rebuild the HelpFiles section in self.changedItems"
self.changedItems['main']['HelpFiles'] = {}
for num in range(1,len(self.userHelpList)+1):
self.AddChangedItem('main','HelpFiles',str(num),
';'.join(self.userHelpList[num-1][:2]))
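        # Illustrative result (menu name and path hypothetical): with a single
        # user help source ('Python Docs', '/usr/share/doc/python3/index.html')
        # the rebuilt section becomes
        #   {'1': 'Python Docs;/usr/share/doc/python3/index.html'}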
def LoadFontCfg(self):
##base editor font selection list
fonts=list(tkFont.families(self))
fonts.sort()
for font in fonts:
self.listFontName.insert(END,font)
configuredFont=idleConf.GetOption('main','EditorWindow','font',
default='courier')
lc_configuredFont = configuredFont.lower()
self.fontName.set(lc_configuredFont)
lc_fonts = [s.lower() for s in fonts]
if lc_configuredFont in lc_fonts:
currentFontIndex = lc_fonts.index(lc_configuredFont)
self.listFontName.see(currentFontIndex)
self.listFontName.select_set(currentFontIndex)
self.listFontName.select_anchor(currentFontIndex)
##font size dropdown
fontSize=idleConf.GetOption('main', 'EditorWindow', 'font-size',
type='int', default='10')
self.optMenuFontSize.SetMenu(('7','8','9','10','11','12','13','14',
'16','18','20','22'), fontSize )
##fontWeight
self.fontBold.set(idleConf.GetOption('main','EditorWindow',
'font-bold',default=0,type='bool'))
##font sample
self.SetFontSample()
def LoadTabCfg(self):
##indent sizes
spaceNum=idleConf.GetOption('main','Indent','num-spaces',
default=4,type='int')
self.spaceNum.set(spaceNum)
def LoadThemeCfg(self):
##current theme type radiobutton
self.themeIsBuiltin.set(idleConf.GetOption('main','Theme','default',
type='bool',default=1))
##currently set theme
currentOption=idleConf.CurrentTheme()
##load available theme option menus
if self.themeIsBuiltin.get(): #default theme selected
itemList=idleConf.GetSectionList('default','highlight')
itemList.sort()
self.optMenuThemeBuiltin.SetMenu(itemList,currentOption)
itemList=idleConf.GetSectionList('user','highlight')
itemList.sort()
if not itemList:
self.radioThemeCustom.config(state=DISABLED)
self.customTheme.set('- no custom themes -')
else:
self.optMenuThemeCustom.SetMenu(itemList,itemList[0])
else: #user theme selected
itemList=idleConf.GetSectionList('user','highlight')
itemList.sort()
self.optMenuThemeCustom.SetMenu(itemList,currentOption)
itemList=idleConf.GetSectionList('default','highlight')
itemList.sort()
self.optMenuThemeBuiltin.SetMenu(itemList,itemList[0])
self.SetThemeType()
##load theme element option menu
themeNames = list(self.themeElements.keys())
themeNames.sort(key=lambda x: self.themeElements[x][1])
self.optMenuHighlightTarget.SetMenu(themeNames,themeNames[0])
self.PaintThemeSample()
self.SetHighlightTarget()
def LoadKeyCfg(self):
##current keys type radiobutton
self.keysAreBuiltin.set(idleConf.GetOption('main','Keys','default',
type='bool',default=1))
##currently set keys
currentOption=idleConf.CurrentKeys()
##load available keyset option menus
if self.keysAreBuiltin.get(): #default theme selected
itemList=idleConf.GetSectionList('default','keys')
itemList.sort()
self.optMenuKeysBuiltin.SetMenu(itemList,currentOption)
itemList=idleConf.GetSectionList('user','keys')
itemList.sort()
if not itemList:
self.radioKeysCustom.config(state=DISABLED)
self.customKeys.set('- no custom keys -')
else:
self.optMenuKeysCustom.SetMenu(itemList,itemList[0])
else: #user key set selected
itemList=idleConf.GetSectionList('user','keys')
itemList.sort()
self.optMenuKeysCustom.SetMenu(itemList,currentOption)
itemList=idleConf.GetSectionList('default','keys')
itemList.sort()
self.optMenuKeysBuiltin.SetMenu(itemList,itemList[0])
self.SetKeysType()
##load keyset element list
keySetName=idleConf.CurrentKeys()
self.LoadKeysList(keySetName)
def LoadGeneralCfg(self):
#startup state
self.startupEdit.set(idleConf.GetOption('main','General',
'editor-on-startup',default=1,type='bool'))
#autosave state
self.autoSave.set(idleConf.GetOption('main', 'General', 'autosave',
default=0, type='bool'))
#initial window size
self.winWidth.set(idleConf.GetOption('main','EditorWindow','width',
type='int'))
self.winHeight.set(idleConf.GetOption('main','EditorWindow','height',
type='int'))
#initial paragraph reformat size
self.paraWidth.set(idleConf.GetOption('main','FormatParagraph','paragraph',
type='int'))
# default source encoding
self.encoding.set(idleConf.GetOption('main', 'EditorWindow',
'encoding', default='none'))
# additional help sources
self.userHelpList = idleConf.GetAllExtraHelpSourcesList()
for helpItem in self.userHelpList:
self.listHelp.insert(END,helpItem[0])
self.SetHelpListButtonStates()
def LoadConfigs(self):
"""
load configuration from default and user config files and populate
the widgets on the config dialog pages.
"""
### fonts / tabs page
self.LoadFontCfg()
self.LoadTabCfg()
### highlighting page
self.LoadThemeCfg()
### keys page
self.LoadKeyCfg()
### general page
self.LoadGeneralCfg()
def SaveNewKeySet(self,keySetName,keySet):
"""
save a newly created core key set.
keySetName - string, the name of the new key set
keySet - dictionary containing the new key set
"""
if not idleConf.userCfg['keys'].has_section(keySetName):
idleConf.userCfg['keys'].add_section(keySetName)
for event in keySet:
value=keySet[event]
idleConf.userCfg['keys'].SetOption(keySetName,event,value)
def SaveNewTheme(self,themeName,theme):
"""
save a newly created theme.
themeName - string, the name of the new theme
theme - dictionary containing the new theme
"""
if not idleConf.userCfg['highlight'].has_section(themeName):
idleConf.userCfg['highlight'].add_section(themeName)
for element in theme:
value=theme[element]
idleConf.userCfg['highlight'].SetOption(themeName,element,value)
def SetUserValue(self,configType,section,item,value):
if idleConf.defaultCfg[configType].has_option(section,item):
if idleConf.defaultCfg[configType].Get(section,item)==value:
#the setting equals a default setting, remove it from user cfg
return idleConf.userCfg[configType].RemoveOption(section,item)
#if we got here set the option
return idleConf.userCfg[configType].SetOption(section,item,value)
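        # Illustrative behaviour (values hypothetical):
        #   SetUserValue('main', 'EditorWindow', 'font-size', '10')
        # removes any user override when '10' matches the default config,
        # while a non-default value such as '14' is written to the user config.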
def SaveAllChangedConfigs(self):
"Save configuration changes to the user config file."
idleConf.userCfg['main'].Save()
for configType in self.changedItems:
cfgTypeHasChanges = False
for section in self.changedItems[configType]:
if section == 'HelpFiles':
#this section gets completely replaced
idleConf.userCfg['main'].remove_section('HelpFiles')
cfgTypeHasChanges = True
for item in self.changedItems[configType][section]:
value = self.changedItems[configType][section][item]
if self.SetUserValue(configType,section,item,value):
cfgTypeHasChanges = True
if cfgTypeHasChanges:
idleConf.userCfg[configType].Save()
for configType in ['keys', 'highlight']:
# save these even if unchanged!
idleConf.userCfg[configType].Save()
self.ResetChangedItems() #clear the changed items dict
def DeactivateCurrentConfig(self):
#Before a config is saved, some cleanup of current
#config must be done - remove the previous keybindings
winInstances = self.parent.instance_dict.keys()
for instance in winInstances:
instance.RemoveKeybindings()
def ActivateConfigChanges(self):
"Dynamically apply configuration changes"
winInstances = self.parent.instance_dict.keys()
for instance in winInstances:
instance.ResetColorizer()
instance.ResetFont()
instance.set_notabs_indentwidth()
instance.ApplyKeybindings()
instance.reset_help_menu_entries()
def Cancel(self):
self.destroy()
def Ok(self):
self.Apply()
self.destroy()
def Apply(self):
self.DeactivateCurrentConfig()
self.SaveAllChangedConfigs()
self.ActivateConfigChanges()
def Help(self):
pass
if __name__ == '__main__':
#test the dialog
root=Tk()
Button(root,text='Dialog',
command=lambda:ConfigDialog(root,'Settings')).pack()
root.instance_dict={}
root.mainloop()
| PennartLoettring/Poettrix | rootfs/usr/lib/python3.4/idlelib/configDialog.py | Python | gpl-2.0 | 52,843 |
# Copyright (C) 2010-2012 Red Hat, Inc.
# This work is licensed under the GNU GPLv2 or later.
# To test "virsh hostname" command
from libvirttestapi.utils import process
required_params = ()
optional_params = {}
VIRSH_HOSTNAME = "virsh hostname"
def hostname(params):
"""check virsh hostname command
"""
logger = params['logger']
ret = process.run(VIRSH_HOSTNAME, shell=True, ignore_status=True)
if ret.exit_status:
logger.error("executing " + "\"" + VIRSH_HOSTNAME + "\"" + " failed")
return 1
virsh_ret = ret.stdout
logger.info("the output of " + "\"" + VIRSH_HOSTNAME + "\"" + " is %s" % virsh_ret)
ret = process.run("hostname", shell=True, ignore_status=True)
if ret.exit_status:
logger.error("executing " + "\"" + "hostname" + "\"" + " failed")
return 1
host_ret = ret.stdout
    if virsh_ret[:-1] != host_ret:  # drop the trailing newline from virsh output before comparing
logger.error("the output of " + VIRSH_HOSTNAME + " is not right")
return 1
else:
logger.info(VIRSH_HOSTNAME + " testing succeeded")
return 0
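
if __name__ == '__main__':
    # Minimal standalone sketch, not part of the libvirt-test-API runner:
    # the framework normally builds `params` (including 'logger') itself, so
    # this ad-hoc logger and the exit-code handling are illustrative only.
    import logging
    import sys
    logging.basicConfig(level=logging.INFO)
    sys.exit(hostname({'logger': logging.getLogger('virsh-hostname-test')}))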
| libvirt/libvirt-test-API | libvirttestapi/repos/domain/hostname.py | Python | gpl-2.0 | 1,070 |
# import_export_batches/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
import codecs
import csv
from datetime import date, timedelta
from django.db import models
from django.db.models import Q
from django.utils.http import urlquote
from django.utils.timezone import localtime, now
from election.models import ElectionManager
from electoral_district.controllers import electoral_district_import_from_xml_data
from exception.models import handle_exception
import json
import magic
from organization.models import ORGANIZATION_TYPE_CHOICES, UNKNOWN, alphanumeric
from party.controllers import retrieve_all_party_names_and_ids_api, party_import_from_xml_data
from politician.models import GENDER_CHOICES, UNKNOWN
import urllib
from urllib.request import Request, urlopen
from voter_guide.models import ORGANIZATION_WORD
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, positive_value_exists, \
LANGUAGE_CODE_ENGLISH, LANGUAGE_CODE_SPANISH
import xml.etree.ElementTree as ElementTree
POSITION = 'POSITION'
ANY_STANCE = 'ANY_STANCE' # This is a way to indicate when we want to return any stance (support, oppose, no_stance)
SUPPORT = 'SUPPORT'
STILL_DECIDING = 'STILL_DECIDING'
NO_STANCE = 'NO_STANCE' # DALE 2016-8-29 We will want to deprecate NO_STANCE and replace with INFORMATION_ONLY
INFORMATION_ONLY = 'INFO_ONLY'
OPPOSE = 'OPPOSE'
PERCENT_RATING = 'PERCENT_RATING'
POSITION_CHOICES = (
# ('SUPPORT_STRONG', 'Strong Supports'), # I do not believe we will be offering 'SUPPORT_STRONG' as an option
(SUPPORT, 'Supports'),
(STILL_DECIDING, 'Still deciding'), # Still undecided
(NO_STANCE, 'No stance'), # We don't know the stance
(INFORMATION_ONLY, 'Information only'), # This entry is meant as food-for-thought and is not advocating
(OPPOSE, 'Opposes'),
(PERCENT_RATING, 'Percentage point rating'),
# ('OPPOSE_STRONG', 'Strongly Opposes'), # I do not believe we will be offering 'OPPOSE_STRONG' as an option
)
NO_STANCE = 'NO_STANCE'
CANDIDATE = 'CANDIDATE'
CONTEST_OFFICE = 'CONTEST_OFFICE'
ELECTED_OFFICE = 'ELECTED_OFFICE'
IMPORT_BALLOT_ITEM = 'IMPORT_BALLOT_ITEM'
IMPORT_POLLING_LOCATION = 'IMPORT_POLLING_LOCATION'
IMPORT_VOTER = 'IMPORT_VOTER'
MEASURE = 'MEASURE'
POLITICIAN = 'POLITICIAN'
KIND_OF_BATCH_CHOICES = (
(MEASURE, 'Measure'),
(ELECTED_OFFICE, 'ElectedOffice'),
(CONTEST_OFFICE, 'ContestOffice'),
(CANDIDATE, 'Candidate'),
(IMPORT_BALLOT_ITEM, 'Ballot Returned'),
(IMPORT_POLLING_LOCATION, 'Map Point'),
(ORGANIZATION_WORD, 'Organization'),
(POSITION, 'Position'),
(POLITICIAN, 'Politician'),
)
IMPORT_TO_BE_DETERMINED = 'IMPORT_TO_BE_DETERMINED'
DO_NOT_PROCESS = 'DO_NOT_PROCESS'
CLEAN_DATA_MANUALLY = 'CLEAN_DATA_MANUALLY'
IMPORT_CREATE = 'IMPORT_CREATE' # kind_of_action
IMPORT_DELETE = 'IMPORT_DELETE' # kind_of_action
IMPORT_ALREADY_DELETED = 'IMPORT_ALREADY_DELETED' # kind_of_action
IMPORT_ADD_TO_EXISTING = 'IMPORT_ADD_TO_EXISTING' # kind_of_action
IMPORT_DATA_ALREADY_MATCHING = 'IMPORT_DATA_ALREADY_MATCHING'
IMPORT_QUERY_ERROR = 'IMPORT_QUERY_ERROR'
KIND_OF_ACTION_CHOICES = (
(IMPORT_TO_BE_DETERMINED, 'To Be Determined'),
(DO_NOT_PROCESS, 'Do not process'),
(IMPORT_CREATE, 'Create'),
(IMPORT_ADD_TO_EXISTING, 'Add to Existing'),
)
BATCH_SET_SOURCE_CTCL = 'CTCL'
BATCH_SET_SOURCE_IMPORT_EXPORT_ENDORSEMENTS = 'IMPORT_EXPORT_ENDORSEMENTS'
BATCH_SET_SOURCE_IMPORT_BALLOTPEDIA_BALLOT_ITEMS = 'IMPORT_BALLOTPEDIA_BALLOT_ITEMS'
BATCH_SET_SOURCE_IMPORT_CTCL_BALLOT_ITEMS = 'IMPORT_CTCL_BALLOT_ITEMS'
BATCH_SET_SOURCE_IMPORT_VOTE_USA_BALLOT_ITEMS = 'IMPORT_VOTE_USA_BALLOT_ITEMS'
# Match incoming headers (on left), and place the values in the variable name on the
# right in `create_batch_row_action_candidate` (This dict doesn't actually remap the values)
BATCH_IMPORT_KEYS_ACCEPTED_FOR_CANDIDATES = {
'ballotpedia_candidate_id': 'ballotpedia_candidate_id',
'ballotpedia_candidate_name': 'ballotpedia_candidate_name',
'ballotpedia_candidate_summary': 'ballotpedia_candidate_summary',
'ballotpedia_candidate_url': 'ballotpedia_candidate_url',
'ballotpedia_election_id': 'ballotpedia_election_id',
'ballotpedia_image_id': 'ballotpedia_image_id',
'ballotpedia_office_id': 'ballotpedia_office_id * (elected_office)', # For matching only
'ballotpedia_person_id': 'ballotpedia_person_id',
'ballotpedia_race_id': 'ballotpedia_race_id * (contest_office)', # For matching only
'ballotpedia url': 'vote_usa_ballotpedia_candidate_url',
'birth_day_text': 'birth_day_text',
'candidate': 'vote_usa_candidate_name',
'candidate_batch_id': 'candidate_batch_id',
'candidate_contact_form_url': 'candidate_contact_form_url',
'candidate_ctcl_uuid': 'candidate_ctcl_uuid',
'candidate_ctcl_person_id': 'candidate_ctcl_person_id',
'candidate_email': 'candidate_email',
'candidate_gender': 'candidate_gender',
'candidate_is_top_ticket': 'candidate_is_top_ticket',
'candidate_is_incumbent': 'candidate_is_incumbent',
'candidate_name': 'candidate_name',
'candidate_participation_status': 'candidate_participation_status',
'candidate_party_name': 'candidate_party_name',
'candidate_profile_image_url': 'candidate_profile_image_url',
'candidate_twitter_handle': 'candidate_twitter_handle',
'candidate_url': 'candidate_url (website)',
'contest_office_name': 'contest_office_name *', # For matching only
'contest_office_we_vote_id': 'contest_office_we_vote_id *', # For matching only
'crowdpac_candidate_id': 'crowdpac_candidate_id',
'election_day': 'election_day',
'email': 'vote_usa_candidate_email',
'facebook_url': 'facebook_url',
'facebook url': 'vote_usa_facebook_url',
'google_civic_election_id': 'google_civic_election_id',
'party': 'vote_usa_party_name',
'photo_url': 'photo_url',
'photo_url_from_ctcl': 'photo_url_from_ctcl',
'photo_url_from_vote_usa': 'photo_url_from_vote_usa',
'photo300 url': 'vote_usa_profile_image_url_https',
'state_code': 'state_code',
'state code': 'vote_usa_state_code',
'twitter url': 'vote_usa_candidate_twitter_url',
'voteusa office id': 'vote_usa_office_id',
'voteusa politician id': 'vote_usa_politician_id',
'website url': 'vote_usa_candidate_url',
}
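# Illustrative reading of the map above (header names hypothetical): an
# incoming header such as 'candidate_name' or 'photo_url' appears on the left,
# and the right-hand value names the variable it is read into inside
# create_batch_row_action_candidate; entries marked with '*' are used only for
# matching against existing records.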
# We Vote contest office key on the left, and Ballotpedia field name on right
# This gives us the option of putting the same field from a remote source into two We Vote fields
BATCH_HEADER_MAP_CANDIDATES_TO_BALLOTPEDIA_CANDIDATES = {
'ballotpedia_candidate_id': 'ballotpedia_candidate_id',
'ballotpedia_candidate_name': 'ballotpedia_candidate_name',
'ballotpedia_candidate_summary': 'ballotpedia_candidate_summary',
'ballotpedia_candidate_url': 'ballotpedia_candidate_url',
'ballotpedia_election_id': 'ballotpedia_election_id',
'ballotpedia_image_id': 'ballotpedia_image_id',
'ballotpedia_office_id': 'ballotpedia_office_id',
'ballotpedia_person_id': 'ballotpedia_person_id',
'ballotpedia_race_id': 'ballotpedia_race_id',
'birth_day_text': 'birth_day_text',
'candidate_email': 'candidate_email',
'candidate_gender': 'candidate_gender',
'candidate_is_incumbent': 'is_incumbent',
'candidate_participation_status': 'candidate_participation_status',
'candidate_party_name': 'candidate_party_name',
'candidate_twitter_handle': 'candidate_twitter_handle',
'candidate_url': 'candidate_url',
'candidate_contact_form_url': 'candidate_contact_form_url',
'crowdpac_candidate_id': 'crowdpac_candidate_id',
'facebook_url': 'facebook_url',
'state_code': 'state_code',
}
# TODO: Not updated from Ballotpedia yet
BATCH_HEADER_MAP_CANDIDATES_TO_CTCL_CANDIDATES = {
'ballotpedia_candidate_id': 'ballotpedia_candidate_id',
'ballotpedia_candidate_name': 'ballotpedia_candidate_name',
'ballotpedia_candidate_summary': 'ballotpedia_candidate_summary',
'ballotpedia_candidate_url': 'ballotpedia_candidate_url',
'ballotpedia_election_id': 'ballotpedia_election_id',
'ballotpedia_image_id': 'ballotpedia_image_id',
'ballotpedia_office_id': 'ballotpedia_office_id',
'ballotpedia_person_id': 'ballotpedia_person_id',
'ballotpedia_race_id': 'ballotpedia_race_id',
'birth_day_text': 'birth_day_text',
'candidate_email': 'candidate_email',
'candidate_gender': 'candidate_gender',
'candidate_is_incumbent': 'is_incumbent',
'candidate_participation_status': 'candidate_participation_status',
'candidate_party_name': 'candidate_party_name',
'candidate_twitter_handle': 'candidate_twitter_handle',
'candidate_url': 'candidate_url',
'candidate_contact_form_url': 'candidate_contact_form_url',
'crowdpac_candidate_id': 'crowdpac_candidate_id',
'facebook_url': 'facebook_url',
'state_code': 'state_code',
}
# TODO: Not updated from Ballotpedia yet
BATCH_HEADER_MAP_CANDIDATES_TO_VOTE_USA_CANDIDATES = {
'ballotpedia_candidate_id': 'ballotpedia_candidate_id',
'ballotpedia_candidate_name': 'ballotpedia_candidate_name',
'ballotpedia_candidate_summary': 'ballotpedia_candidate_summary',
'ballotpedia_candidate_url': 'ballotpedia_candidate_url',
'ballotpedia_election_id': 'ballotpedia_election_id',
'ballotpedia_image_id': 'ballotpedia_image_id',
'ballotpedia_office_id': 'ballotpedia_office_id',
'ballotpedia_person_id': 'ballotpedia_person_id',
'ballotpedia_race_id': 'ballotpedia_race_id',
'birth_day_text': 'birth_day_text',
'candidate_email': 'candidate_email',
'candidate_gender': 'candidate_gender',
'candidate_is_incumbent': 'is_incumbent',
'candidate_participation_status': 'candidate_participation_status',
'candidate_party_name': 'candidate_party_name',
'candidate_twitter_handle': 'candidate_twitter_handle',
'candidate_url': 'candidate_url',
'candidate_contact_form_url': 'candidate_contact_form_url',
'crowdpac_candidate_id': 'crowdpac_candidate_id',
'facebook_url': 'facebook_url',
'state_code': 'state_code',
}
# Match incoming headers (on left), and place the values in the variable name on the
# right in `create_batch_row_action_contest_office` (This dict doesn't actually remap the values)
BATCH_IMPORT_KEYS_ACCEPTED_FOR_CONTEST_OFFICES = {
'ballotpedia_candidate_id': 'ballotpedia_candidate_id *', # For matching only
'ballotpedia_district_id': 'ballotpedia_district_id',
'ballotpedia_election_id': 'ballotpedia_election_id',
'ballotpedia_is_marquee': 'ballotpedia_is_marquee',
'ballotpedia_office_id': 'ballotpedia_office_id',
'ballotpedia_office_name': 'ballotpedia_office_name',
'ballotpedia_office_url': 'ballotpedia_office_url',
'ballotpedia_person_id': 'ballotpedia_person_id *', # For matching only
'ballotpedia_race_id': 'ballotpedia_race_id',
'ballotpedia_race_office_level': 'ballotpedia_race_office_level',
'candidate_name': 'candidate_name *', # For matching only
'candidate_selection_id1': 'candidate_selection_id1 *', # For matching only
'candidate_selection_id2': 'candidate_selection_id2 *', # For matching only
'candidate_selection_id3': 'candidate_selection_id3 *', # For matching only
'candidate_selection_id4': 'candidate_selection_id4 *', # For matching only
'candidate_selection_id5': 'candidate_selection_id5 *', # For matching only
'candidate_selection_id6': 'candidate_selection_id6 *', # For matching only
'candidate_selection_id7': 'candidate_selection_id7 *', # For matching only
'candidate_selection_id8': 'candidate_selection_id8 *', # For matching only
'candidate_selection_id9': 'candidate_selection_id9 *', # For matching only
'candidate_selection_id10': 'candidate_selection_id10 *', # For matching only
'contest_office_name': 'contest_office_name',
'race_office_level': 'race_office_level',
'contest_office_batch_id': 'contest_office_batch_id',
'contest_office_ctcl_uuid': 'contest_office_ctcl_uuid',
'contest_office_votes_allowed': 'contest_office_votes_allowed',
'contest_office_number_elected': 'contest_office_number_elected',
'contest_office_district_name': 'contest_office_district_name',
'district_id': 'district_id',
'elected_office_id': 'elected_office_id',
'election_day': 'election_day',
'electoral_district_id': 'electoral_district_id',
'google_civic_election_id': 'google_civic_election_id',
'is_ballotpedia_general_election': 'is_ballotpedia_general_election',
'is_ballotpedia_general_runoff_election': 'is_ballotpedia_general_runoff_election',
'is_ballotpedia_primary_election': 'is_ballotpedia_primary_election',
'is_ballotpedia_primary_runoff_election': 'is_ballotpedia_primary_runoff_election',
'state_code': 'state_code',
'voteusa office id': 'vote_usa_office_id',
'office': 'vote_usa_office_name',
'district': 'vote_usa_district_number', # The district number should be in 'district_id'
'state code': 'vote_usa_state_code',
}
# We Vote contest office key on the left, and Ballotpedia field name on right
# This gives us the option of putting the same field from a remote source into two We Vote fields
BATCH_HEADER_MAP_CONTEST_OFFICES_TO_BALLOTPEDIA_RACES = {
'ballotpedia_district_id': 'ballotpedia_district_id',
'ballotpedia_election_id': 'ballotpedia_election_id',
'ballotpedia_is_marquee': 'ballotpedia_is_marquee',
'ballotpedia_office_id': 'ballotpedia_office_id',
'ballotpedia_office_name': 'office_name',
'ballotpedia_race_id': 'ballotpedia_race_id',
'ballotpedia_race_office_level': 'office_level',
'ballotpedia_office_url': 'url',
'contest_office_number_elected': 'number_of_seats',
'contest_office_district_name': 'office_district_name',
'election_day': 'election_date',
'is_ballotpedia_general_election': 'is_ballotpedia_general_election',
'is_ballotpedia_general_runoff_election': 'is_ballotpedia_general_runoff_election',
'is_ballotpedia_primary_election': 'is_ballotpedia_primary_election',
'is_ballotpedia_primary_runoff_election': 'is_ballotpedia_primary_runoff_election',
'state_code': 'office_district_state',
}
# TODO: Not updated from Ballotpedia yet
BATCH_HEADER_MAP_CONTEST_OFFICES_TO_CTCL_OFFICES = {
'ballotpedia_district_id': 'ballotpedia_district_id',
'ballotpedia_election_id': 'ballotpedia_election_id',
'ballotpedia_is_marquee': 'ballotpedia_is_marquee',
'ballotpedia_office_id': 'ballotpedia_office_id',
'ballotpedia_office_name': 'office_name',
'ballotpedia_race_id': 'ballotpedia_race_id',
'ballotpedia_race_office_level': 'office_level',
'ballotpedia_office_url': 'url',
'contest_office_number_elected': 'number_of_seats',
'contest_office_district_name': 'office_district_name',
'election_day': 'election_date',
'is_ballotpedia_general_election': 'is_ballotpedia_general_election',
'is_ballotpedia_general_runoff_election': 'is_ballotpedia_general_runoff_election',
'is_ballotpedia_primary_election': 'is_ballotpedia_primary_election',
'is_ballotpedia_primary_runoff_election': 'is_ballotpedia_primary_runoff_election',
'state_code': 'office_district_state',
}
# TODO: Not updated from Ballotpedia yet
BATCH_HEADER_MAP_CONTEST_OFFICES_TO_VOTE_USA_OFFICES = {
'ballotpedia_district_id': 'ballotpedia_district_id',
'ballotpedia_election_id': 'ballotpedia_election_id',
'ballotpedia_is_marquee': 'ballotpedia_is_marquee',
'ballotpedia_office_id': 'ballotpedia_office_id',
'ballotpedia_office_name': 'office_name',
'ballotpedia_race_id': 'ballotpedia_race_id',
'ballotpedia_race_office_level': 'office_level',
'ballotpedia_office_url': 'url',
'contest_office_number_elected': 'number_of_seats',
'contest_office_district_name': 'office_district_name',
'election_day': 'election_date',
'is_ballotpedia_general_election': 'is_ballotpedia_general_election',
'is_ballotpedia_general_runoff_election': 'is_ballotpedia_general_runoff_election',
'is_ballotpedia_primary_election': 'is_ballotpedia_primary_election',
'is_ballotpedia_primary_runoff_election': 'is_ballotpedia_primary_runoff_election',
'state_code': 'office_district_state',
}
BATCH_IMPORT_KEYS_ACCEPTED_FOR_ELECTED_OFFICES = {
'elected_office_name': 'elected_office_name',
'electoral_district_id': 'electoral_district_id',
'state_code': 'state_code',
'elected_office_ctcl_uuid': 'elected_office_ctcl_uuid',
'elected_office_description': 'elected_office_description',
'elected_office_is_partisan': 'elected_office_is_partisan',
'elected_office_name_es': 'elected_office_name_es',
'elected_office_description_es': 'elected_office_description_es',
'elected_office_batch_id': 'elected_office_batch_id',
}
BATCH_IMPORT_KEYS_ACCEPTED_FOR_MEASURES = {
'ballotpedia_district_id': 'ballotpedia_district_id',
'ballotpedia_election_id': 'ballotpedia_election_id',
'ballotpedia_measure_id': 'ballotpedia_measure_id',
'ballotpedia_measure_name': 'ballotpedia_measure_name',
'ballotpedia_measure_status': 'ballotpedia_measure_status',
'ballotpedia_measure_summary': 'ballotpedia_measure_summary',
'ballotpedia_measure_text': 'ballotpedia_measure_text',
'ballotpedia_measure_url': 'ballotpedia_measure_url',
'ballotpedia_yes_vote_description': 'ballotpedia_yes_vote_description',
'ballotpedia_no_vote_description': 'ballotpedia_no_vote_description',
'ctcl_uuid': 'ctcl_uuid',
'election_day_text': 'election_day_text',
'electoral_district_id': 'electoral_district_id',
'measure_title': 'measure_title',
'measure_name': 'measure_name',
'measure_text': 'measure_text',
'measure_subtitle': 'measure_subtitle',
'state_code': 'state_code',
}
# We Vote contest office key on the left, and Ballotpedia field name on right
# This gives us the option of putting the same field from a remote source into two We Vote fields
BATCH_HEADER_MAP_MEASURES_TO_BALLOTPEDIA_MEASURES = {
'ballotpedia_district_id': 'ballotpedia_district_id',
'ballotpedia_election_id': 'ballotpedia_election_id',
'ballotpedia_measure_id': 'ballotpedia_measure_id',
'ballotpedia_measure_name': 'name',
'ballotpedia_measure_status': 'status',
'ballotpedia_measure_summary': 'summary',
'ballotpedia_measure_text': 'text',
'ballotpedia_measure_url': 'ballotpedia_measure_url',
'ballotpedia_yes_vote_description': 'ballotpedia_yes_vote_description',
'ballotpedia_no_vote_description': 'ballotpedia_no_vote_description',
'election_day_text': 'election_day_text',
'state_code': 'state_code',
}
BATCH_HEADER_MAP_MEASURES_TO_CTCL_MEASURES = {
'contest_measure_we_vote_id': 'contest_measure_we_vote_id',
'contest_measure_id': 'contest_measure_id',
'contest_measure_name': 'contest_measure_name',
'contest_measure_text': 'measure_text',
'contest_measure_url': 'measure_url',
'ctcl_uuid': 'ctcl_uuid',
'election_day_text': 'election_day_text',
'local_ballot_order': 'local_ballot_order',
'no_vote_description': 'no_vote_description',
'yes_vote_description': 'yes_vote_description',
'polling_location_we_vote_id': 'polling_location_we_vote_id',
'state_code': 'state_code',
'voter_id': 'voter_id',
}
BATCH_HEADER_MAP_MEASURES_TO_VOTE_USA_MEASURES = {
'contest_measure_we_vote_id': 'contest_measure_we_vote_id',
'contest_measure_id': 'contest_measure_id',
'contest_measure_name': 'contest_measure_name',
'contest_measure_text': 'measure_text',
'contest_measure_url': 'measure_url',
'election_day_text': 'election_day_text',
'local_ballot_order': 'local_ballot_order',
'no_vote_description': 'no_vote_description',
'yes_vote_description': 'yes_vote_description',
'polling_location_we_vote_id': 'polling_location_we_vote_id',
'state_code': 'state_code',
'voter_id': 'voter_id',
}
BATCH_IMPORT_KEYS_ACCEPTED_FOR_ORGANIZATIONS = {
'organization_address': 'organization_address',
'organization_city': 'organization_city',
'organization_contact_form_url': 'organization_contact_form_url',
'organization_contact_name': 'organization_contact_name',
'organization_facebook': 'organization_facebook',
'organization_instagram': 'organization_instagram',
'organization_name': 'organization_name',
'organization_phone1': 'organization_phone1',
'organization_phone2': 'organization_phone2',
'organization_state': 'organization_state',
'organization_twitter_handle': 'organization_twitter_handle',
'organization_website': 'organization_website',
'organization_we_vote_id': 'organization_we_vote_id',
'organization_zip': 'organization_zip',
'organization_type': 'organization_type',
'state_served_code': 'state_served_code',
}
BATCH_IMPORT_KEYS_ACCEPTED_FOR_POLLING_LOCATIONS = {
'city': 'city',
'county_name': 'county_name',
'full_address': 'full_address',
'latitude': 'latitude',
'longitude': 'longitude',
'line1': 'line1',
'line2': 'line2',
'location_name': 'location_name',
'polling_location_deleted': 'polling_location_deleted',
'polling_location_we_vote_id': 'polling_location_we_vote_id',
'precinct_name': 'precinct_name',
'source_code': 'source_code',
'state': 'state',
'use_for_bulk_retrieve': 'use_for_bulk_retrieve',
'zip_long': 'zip_long',
}
BATCH_HEADER_MAP_FOR_POLLING_LOCATIONS = {
'city': 'city',
'county_name': 'county_name',
'full_address': 'full_address',
'latitude': 'latitude',
'longitude': 'longitude',
'line1': 'line1',
'line2': 'line2',
'location_name': 'location_name',
'polling_location_deleted': 'polling_location_deleted',
'polling_location_we_vote_id': 'polling_location_we_vote_id',
'precinct_name': 'precinct_name',
'source_code': 'source_code',
'state': 'state',
'use_for_bulk_retrieve': 'use_for_bulk_retrieve',
'zip_long': 'zip_long',
}
BATCH_IMPORT_KEYS_ACCEPTED_FOR_POLITICIANS = {
'politician_full_name': 'politician_full_name',
'politician_ctcl_uuid': 'politician_ctcl_uuid',
'politician_twitter_url': 'politician_twitter_url',
'politician_facebook_id': 'politician_facebook_id',
'politician_party_name': 'politician_party_name',
'politician_first_name': 'politician_first_name',
'politician_middle_name': 'politician_middle_name',
'politician_last_name': 'politician_last_name',
'politician_website_url': 'politician_website_url',
'politician_email_address': 'politician_email_address',
'politician_youtube_id': 'politician_youtube_id',
'politician_googleplus_id': 'politician_googleplus_id',
'politician_phone_number': 'politician_phone_number',
'politician_batch_id': 'politician_batch_id',
}
BATCH_IMPORT_KEYS_ACCEPTED_FOR_POSITIONS = {
'position_we_vote_id': 'position_we_vote_id',
'candidate_name': 'candidate_name',
'candidate_twitter_handle': 'candidate_twitter_handle',
'candidate_we_vote_id': 'candidate_we_vote_id',
'contest_office_name': 'contest_office_name',
'race_office_level': 'race_office_level',
'contest_measure_title': 'contest_measure_title',
'election_day': 'election_day',
'grade_rating': 'grade_rating',
'google_civic_election_id': 'google_civic_election_id',
'more_info_url': 'more_info_url',
'stance': 'stance (SUPPORT or OPPOSE)',
'support': 'support (TRUE or FALSE)',
'oppose': 'oppose (TRUE or FALSE)',
'percent_rating': 'percent_rating',
'statement_text': 'statement_text',
'state_code': 'state_code',
'organization_name': 'organization_name',
'organization_we_vote_id': 'organization_we_vote_id',
'organization_twitter_handle': 'organization_twitter_handle (position owner)',
}
BATCH_HEADER_MAP_FOR_POSITIONS = {
'position_we_vote_id': 'position_we_vote_id',
'candidate_name': 'candidate_name',
'candidate_twitter_handle': 'candidate_twitter_handle',
'candidate_we_vote_id': 'candidate_we_vote_id',
'contest_office_name': 'contest_office_name',
'race_office_level': 'race_office_level',
'contest_measure_title': 'contest_measure_title',
'election_day': 'election_day',
'grade_rating': 'grade_rating',
'google_civic_election_id': 'google_civic_election_id',
'measure_title': 'measure_title',
'measure_we_vote_id': 'measure_we_vote_id',
'more_info_url': 'more_info_url',
'stance': 'stance',
'support': 'support',
'oppose': 'oppose',
'percent_rating': 'percent_rating',
'statement_text': 'statement_text',
'state_code': 'state_code',
'organization_name': 'organization_name',
'organization_we_vote_id': 'organization_we_vote_id',
'organization_twitter_handle': 'organization_twitter_handle',
}
BATCH_IMPORT_KEYS_ACCEPTED_FOR_BALLOT_ITEMS = {
'contest_office_we_vote_id': 'contest_office_we_vote_id',
'contest_office_id': 'contest_office_id',
'contest_office_name': 'contest_office_name',
'candidate_name': 'candidate_name',
'candidate_twitter_handle': 'candidate_twitter_handle',
'contest_measure_we_vote_id': 'contest_measure_we_vote_id',
'contest_measure_id': 'contest_measure_id',
'contest_measure_name': 'contest_measure_name',
'contest_measure_text': 'contest_measure_text',
'contest_measure_url': 'contest_measure_url',
'election_day_text': 'election_day_text',
'local_ballot_order': 'local_ballot_order',
'no_vote_description': 'no_vote_description',
'yes_vote_description': 'yes_vote_description',
'polling_location_we_vote_id': 'polling_location_we_vote_id',
'state_code': 'state_code',
'voter_id': 'voter_id',
}
BATCH_HEADER_MAP_BALLOT_ITEMS_TO_BALLOTPEDIA_BALLOT_ITEMS = BATCH_IMPORT_KEYS_ACCEPTED_FOR_BALLOT_ITEMS
BATCH_HEADER_MAP_BALLOT_ITEMS_GOOGLE_CIVIC_EMULATION = {
'contest_office_we_vote_id': 'contest_office_we_vote_id',
'contest_office_id': 'contest_office_id',
'contest_office_name': 'contest_office_name',
'contest_measure_we_vote_id': 'contest_measure_we_vote_id',
'contest_measure_id': 'contest_measure_id',
'contest_measure_name': 'contest_measure_name',
'contest_measure_text': 'contest_measure_text',
'contest_measure_url': 'contest_measure_url',
'election_day_text': 'election_day_text',
'local_ballot_order': 'local_ballot_order',
'no_vote_description': 'no_vote_description',
'yes_vote_description': 'yes_vote_description',
'polling_location_we_vote_id': 'polling_location_we_vote_id',
'state_code': 'state_code',
'voter_id': 'voter_id',
}
BATCH_HEADER_MAP_BALLOT_ITEMS_TO_CTCL_BALLOT_ITEMS = BATCH_HEADER_MAP_BALLOT_ITEMS_GOOGLE_CIVIC_EMULATION
BATCH_HEADER_MAP_BALLOT_ITEMS_TO_VOTE_USA_BALLOT_ITEMS = BATCH_HEADER_MAP_BALLOT_ITEMS_GOOGLE_CIVIC_EMULATION
# We Vote contest office key on the left, and Ballotpedia field name on right
# This gives us the option of putting the same field from a remote source into two We Vote fields
BATCH_HEADER_MAP_BALLOT_ITEMS_TO_BALLOTPEDIA_VOTER_DISTRICTS = {
'ballotpedia_district_id': 'ballotpedia_district_id',
'ballotpedia_district_name': 'ballotpedia_district_name',
'contest_measure_id': 'contest_measure_id',
'contest_measure_we_vote_id': 'contest_measure_we_vote_id',
'contest_office_we_vote_id': 'contest_office_we_vote_id',
'contest_office_id': 'contest_office_id',
'election_day_text': 'election_day_text',
'local_ballot_order': 'local_ballot_order',
'polling_location_we_vote_id': 'polling_location_we_vote_id',
'state_code': 'state_code',
}
BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS = {
'first_name': 'first_name',
'middle_name': 'middle_name',
'last_name': 'last_name',
'email': 'email',
'newsletter_opt_in': 'newsletter_opt_in',
'we_vote_id': 'we_vote_id',
'twitter_screen_name': 'twitter_screen_name',
'date_joined': 'date_joined',
'date_last_changed': 'date_last_changed',
}
# BatchProcess constants
ACTIVITY_NOTICE_PROCESS = "ACTIVITY_NOTICE_PROCESS"
API_REFRESH_REQUEST = "API_REFRESH_REQUEST"
AUGMENT_ANALYTICS_ACTION_WITH_ELECTION_ID = "AUGMENT_ANALYTICS_ACTION_WITH_ELECTION_ID"
AUGMENT_ANALYTICS_ACTION_WITH_FIRST_VISIT = "AUGMENT_ANALYTICS_ACTION_WITH_FIRST_VISIT"
CALCULATE_ORGANIZATION_DAILY_METRICS = "CALCULATE_ORGANIZATION_DAILY_METRICS"
CALCULATE_ORGANIZATION_ELECTION_METRICS = "CALCULATE_ORGANIZATION_ELECTION_METRICS"
CALCULATE_SITEWIDE_DAILY_METRICS = "CALCULATE_SITEWIDE_DAILY_METRICS"
CALCULATE_SITEWIDE_ELECTION_METRICS = "CALCULATE_SITEWIDE_ELECTION_METRICS"
CALCULATE_SITEWIDE_VOTER_METRICS = "CALCULATE_SITEWIDE_VOTER_METRICS"
RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS = "RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS"
REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS = "REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS"
REFRESH_BALLOT_ITEMS_FROM_VOTERS = "REFRESH_BALLOT_ITEMS_FROM_VOTERS"
SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE = "SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE"
UPDATE_TWITTER_DATA_FROM_TWITTER = "UPDATE_TWITTER_DATA_FROM_TWITTER"
KIND_OF_PROCESS_CHOICES = (
(ACTIVITY_NOTICE_PROCESS, 'Create, update, or schedule to send Activity Notices'),
(API_REFRESH_REQUEST, 'Make sure we have cached a recent return from a specific API'),
(AUGMENT_ANALYTICS_ACTION_WITH_ELECTION_ID, 'Add election id to AnalyticsAction'),
(AUGMENT_ANALYTICS_ACTION_WITH_FIRST_VISIT, 'Mark first AnalyticsAction per day'),
(CALCULATE_SITEWIDE_VOTER_METRICS, 'Sitewide voter metrics for all time'),
(CALCULATE_SITEWIDE_DAILY_METRICS, 'Sitewide daily metrics'),
(CALCULATE_SITEWIDE_ELECTION_METRICS, 'Sitewide election metrics'),
(CALCULATE_ORGANIZATION_DAILY_METRICS, 'Organization specific daily metrics'),
(CALCULATE_ORGANIZATION_ELECTION_METRICS, 'Organization specific election metrics'),
(RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS, 'Retrieve Ballot Items from Map Points'),
(REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS, 'Refresh Ballot Items from BallotReturned Map Points'),
(REFRESH_BALLOT_ITEMS_FROM_VOTERS, 'Refresh Ballot Items from Voter Custom Addresses'),
(SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE, 'Search for Candidate Twitter Handles'),
)
logger = wevote_functions.admin.get_logger(__name__)
def get_value_if_index_in_list(incoming_list, index):
try:
return incoming_list[index]
except IndexError:
return ""
def get_value_from_dict(structured_json, field_name):
try:
return structured_json[field_name]
except KeyError:
return ""
except IndexError:
return ""
def get_header_map_value_if_index_in_list(incoming_list, index, kind_of_batch=""):
try:
# The header_value is a value like "Organization Name" or "Street Address"
original_header_value = incoming_list[index]
original_header_value_str = str(original_header_value)
original_header_value_str = original_header_value_str.lower()
# We want to check to see if there is a suggested We Vote header for this value
batch_manager = BatchManager()
header_value_recognized_by_we_vote = batch_manager.fetch_batch_header_translation_suggestion(
kind_of_batch, original_header_value_str)
if positive_value_exists(header_value_recognized_by_we_vote):
return header_value_recognized_by_we_vote
else:
return original_header_value_str
except IndexError:
return ""
class BatchManager(models.Manager):
"""Helpers for creating import/export batches and for retrieving their rows and row actions."""
def __unicode__(self):
return "BatchManager"
def create_batch_from_uri(self, batch_uri, kind_of_batch, google_civic_election_id, organization_we_vote_id):
# Retrieve the CSV
response = urllib.request.urlopen(batch_uri)
csv_data = csv.reader(codecs.iterdecode(response, 'utf-8'))
batch_file_name = ""
return self.create_batch_from_csv_data(
batch_file_name, csv_data, kind_of_batch, google_civic_election_id, organization_we_vote_id)
def create_batch_from_local_file_upload(
self, batch_file, kind_of_batch, google_civic_election_id, organization_we_vote_id,
polling_location_we_vote_id=""):
status = ''
if (batch_file.content_type == 'text/csv') or (batch_file.content_type == 'application/octet-stream') \
or (batch_file.content_type == 'application/vnd.ms-excel'):
csv_data = csv.reader(codecs.iterdecode(batch_file, 'utf-8'), delimiter=',')
batch_file_name = batch_file.name
return self.create_batch_from_csv_data(
batch_file_name, csv_data, kind_of_batch, google_civic_election_id, organization_we_vote_id,
polling_location_we_vote_id)
else:
status += "CONTENT_TYPE: " + str(batch_file.content_type) + " "
status += "CREATE_BATCH_FILE_TYPE_NOT_RECOGNIZED "
results = {
'success': False,
'status': status,
'batch_header_id': 0,
'batch_saved': False,
'number_of_batch_rows': 0,
}
return results
def create_batch_from_voter_object_list(self, objects_list):
"""
Creates a batch from a list of voter objects
:param objects_list: list of voter objects
:return:
"""
status = ''
success = False
number_of_voters = 0
google_civic_election_id = 0
if not objects_list:
results = {
'success': False,
'status': "IMPORT_VOTERS_FAILED",
'number_of_voters': 0,
}
return results
first_line = True
batch_header_id = 0
batch_header_map_id = 0
for one_entry in objects_list:
first_name = one_entry.first_name
middle_name = one_entry.middle_name
last_name = one_entry.last_name
email = one_entry.email
we_vote_id = one_entry.we_vote_id
twitter_screen_name = one_entry.twitter_screen_name
newsletter_opt_in = one_entry.is_opt_in_newsletter()
date_joined = one_entry.date_joined
date_last_changed = one_entry.date_last_changed
if first_line:
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000=BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS['first_name'],
batch_header_column_001=BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS['middle_name'],
batch_header_column_002=BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS['last_name'],
batch_header_column_003=BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS['email'],
batch_header_column_004=BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS['newsletter_opt_in'],
batch_header_column_005=BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS['we_vote_id'],
batch_header_column_006=BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS['twitter_screen_name'],
batch_header_column_007=BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS['date_joined'],
batch_header_column_008=BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS['date_last_changed'],
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='first_name',
batch_header_map_001='middle_name',
batch_header_map_002='last_name',
batch_header_map_003='email',
batch_header_map_004='newsletter_opt_in',
batch_header_map_005='we_vote_id',
batch_header_map_006='twitter_screen_name',
batch_header_map_007='date_joined',
batch_header_map_008='date_last_changed',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "EXPORT_VOTERS batch_header_id-" + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
kind_of_batch=IMPORT_VOTER,
)
status += " BATCH_DESCRIPTION_SAVED "
success = True
except Exception as e:
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=first_name,
batch_row_001=middle_name,
batch_row_002=last_name,
batch_row_003=email,
batch_row_004=newsletter_opt_in,
batch_row_005=we_vote_id,
batch_row_006=twitter_screen_name,
batch_row_007=date_joined,
batch_row_008=date_last_changed,
)
number_of_voters += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_voters': number_of_voters,
'google_civic_election_id': google_civic_election_id,
}
return results
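# Illustrative usage (a sketch under assumptions; voter_list below is a hypothetical list of
# Voter objects exposing the attributes read in the loop above):
#
#   batch_manager = BatchManager()
#   results = batch_manager.create_batch_from_voter_object_list(voter_list)
#   if results['success']:
#       logger.info("Exported %d voters into batch header %d" %
#                   (results['number_of_voters'], results['batch_header_id']))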
def create_batch_from_csv_data(self, file_name, csv_data, kind_of_batch, google_civic_election_id=0,
organization_we_vote_id="", polling_location_we_vote_id=""):
first_line = True
success = False
status = ""
number_of_batch_rows = 0
# limit_for_testing = 5
# Retrieve from JSON
# request = Request(batch_uri, headers={'User-Agent': 'Mozilla/5.0'})
# url_processor = urlopen(request)
# data = url_processor.read()
# incoming_data = data.decode('utf-8')
# structured_json = json.loads(incoming_data)
# for one_entry in structured_json:
batch_header_id = 0
batch_header_map_id = 0
for line in csv_data:
if first_line:
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000=get_value_if_index_in_list(line, 0),
batch_header_column_001=get_value_if_index_in_list(line, 1),
batch_header_column_002=get_value_if_index_in_list(line, 2),
batch_header_column_003=get_value_if_index_in_list(line, 3),
batch_header_column_004=get_value_if_index_in_list(line, 4),
batch_header_column_005=get_value_if_index_in_list(line, 5),
batch_header_column_006=get_value_if_index_in_list(line, 6),
batch_header_column_007=get_value_if_index_in_list(line, 7),
batch_header_column_008=get_value_if_index_in_list(line, 8),
batch_header_column_009=get_value_if_index_in_list(line, 9),
batch_header_column_010=get_value_if_index_in_list(line, 10),
batch_header_column_011=get_value_if_index_in_list(line, 11),
batch_header_column_012=get_value_if_index_in_list(line, 12),
batch_header_column_013=get_value_if_index_in_list(line, 13),
batch_header_column_014=get_value_if_index_in_list(line, 14),
batch_header_column_015=get_value_if_index_in_list(line, 15),
batch_header_column_016=get_value_if_index_in_list(line, 16),
batch_header_column_017=get_value_if_index_in_list(line, 17),
batch_header_column_018=get_value_if_index_in_list(line, 18),
batch_header_column_019=get_value_if_index_in_list(line, 19),
batch_header_column_020=get_value_if_index_in_list(line, 20),
batch_header_column_021=get_value_if_index_in_list(line, 21),
batch_header_column_022=get_value_if_index_in_list(line, 22),
batch_header_column_023=get_value_if_index_in_list(line, 23),
batch_header_column_024=get_value_if_index_in_list(line, 24),
batch_header_column_025=get_value_if_index_in_list(line, 25),
batch_header_column_026=get_value_if_index_in_list(line, 26),
batch_header_column_027=get_value_if_index_in_list(line, 27),
batch_header_column_028=get_value_if_index_in_list(line, 28),
batch_header_column_029=get_value_if_index_in_list(line, 29),
batch_header_column_030=get_value_if_index_in_list(line, 30),
batch_header_column_031=get_value_if_index_in_list(line, 31),
batch_header_column_032=get_value_if_index_in_list(line, 32),
batch_header_column_033=get_value_if_index_in_list(line, 33),
batch_header_column_034=get_value_if_index_in_list(line, 34),
batch_header_column_035=get_value_if_index_in_list(line, 35),
batch_header_column_036=get_value_if_index_in_list(line, 36),
batch_header_column_037=get_value_if_index_in_list(line, 37),
batch_header_column_038=get_value_if_index_in_list(line, 38),
batch_header_column_039=get_value_if_index_in_list(line, 39),
batch_header_column_040=get_value_if_index_in_list(line, 40),
batch_header_column_041=get_value_if_index_in_list(line, 41),
batch_header_column_042=get_value_if_index_in_list(line, 42),
batch_header_column_043=get_value_if_index_in_list(line, 43),
batch_header_column_044=get_value_if_index_in_list(line, 44),
batch_header_column_045=get_value_if_index_in_list(line, 45),
batch_header_column_046=get_value_if_index_in_list(line, 46),
batch_header_column_047=get_value_if_index_in_list(line, 47),
batch_header_column_048=get_value_if_index_in_list(line, 48),
batch_header_column_049=get_value_if_index_in_list(line, 49),
batch_header_column_050=get_value_if_index_in_list(line, 50),
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
# For each line, check for translation suggestions
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000=get_header_map_value_if_index_in_list(line, 0, kind_of_batch),
batch_header_map_001=get_header_map_value_if_index_in_list(line, 1, kind_of_batch),
batch_header_map_002=get_header_map_value_if_index_in_list(line, 2, kind_of_batch),
batch_header_map_003=get_header_map_value_if_index_in_list(line, 3, kind_of_batch),
batch_header_map_004=get_header_map_value_if_index_in_list(line, 4, kind_of_batch),
batch_header_map_005=get_header_map_value_if_index_in_list(line, 5, kind_of_batch),
batch_header_map_006=get_header_map_value_if_index_in_list(line, 6, kind_of_batch),
batch_header_map_007=get_header_map_value_if_index_in_list(line, 7, kind_of_batch),
batch_header_map_008=get_header_map_value_if_index_in_list(line, 8, kind_of_batch),
batch_header_map_009=get_header_map_value_if_index_in_list(line, 9, kind_of_batch),
batch_header_map_010=get_header_map_value_if_index_in_list(line, 10, kind_of_batch),
batch_header_map_011=get_header_map_value_if_index_in_list(line, 11, kind_of_batch),
batch_header_map_012=get_header_map_value_if_index_in_list(line, 12, kind_of_batch),
batch_header_map_013=get_header_map_value_if_index_in_list(line, 13, kind_of_batch),
batch_header_map_014=get_header_map_value_if_index_in_list(line, 14, kind_of_batch),
batch_header_map_015=get_header_map_value_if_index_in_list(line, 15, kind_of_batch),
batch_header_map_016=get_header_map_value_if_index_in_list(line, 16, kind_of_batch),
batch_header_map_017=get_header_map_value_if_index_in_list(line, 17, kind_of_batch),
batch_header_map_018=get_header_map_value_if_index_in_list(line, 18, kind_of_batch),
batch_header_map_019=get_header_map_value_if_index_in_list(line, 19, kind_of_batch),
batch_header_map_020=get_header_map_value_if_index_in_list(line, 20, kind_of_batch),
batch_header_map_021=get_header_map_value_if_index_in_list(line, 21, kind_of_batch),
batch_header_map_022=get_header_map_value_if_index_in_list(line, 22, kind_of_batch),
batch_header_map_023=get_header_map_value_if_index_in_list(line, 23, kind_of_batch),
batch_header_map_024=get_header_map_value_if_index_in_list(line, 24, kind_of_batch),
batch_header_map_025=get_header_map_value_if_index_in_list(line, 25, kind_of_batch),
batch_header_map_026=get_header_map_value_if_index_in_list(line, 26, kind_of_batch),
batch_header_map_027=get_header_map_value_if_index_in_list(line, 27, kind_of_batch),
batch_header_map_028=get_header_map_value_if_index_in_list(line, 28, kind_of_batch),
batch_header_map_029=get_header_map_value_if_index_in_list(line, 29, kind_of_batch),
batch_header_map_030=get_header_map_value_if_index_in_list(line, 30, kind_of_batch),
batch_header_map_031=get_header_map_value_if_index_in_list(line, 31, kind_of_batch),
batch_header_map_032=get_header_map_value_if_index_in_list(line, 32, kind_of_batch),
batch_header_map_033=get_header_map_value_if_index_in_list(line, 33, kind_of_batch),
batch_header_map_034=get_header_map_value_if_index_in_list(line, 34, kind_of_batch),
batch_header_map_035=get_header_map_value_if_index_in_list(line, 35, kind_of_batch),
batch_header_map_036=get_header_map_value_if_index_in_list(line, 36, kind_of_batch),
batch_header_map_037=get_header_map_value_if_index_in_list(line, 37, kind_of_batch),
batch_header_map_038=get_header_map_value_if_index_in_list(line, 38, kind_of_batch),
batch_header_map_039=get_header_map_value_if_index_in_list(line, 39, kind_of_batch),
batch_header_map_040=get_header_map_value_if_index_in_list(line, 40, kind_of_batch),
batch_header_map_041=get_header_map_value_if_index_in_list(line, 41, kind_of_batch),
batch_header_map_042=get_header_map_value_if_index_in_list(line, 42, kind_of_batch),
batch_header_map_043=get_header_map_value_if_index_in_list(line, 43, kind_of_batch),
batch_header_map_044=get_header_map_value_if_index_in_list(line, 44, kind_of_batch),
batch_header_map_045=get_header_map_value_if_index_in_list(line, 45, kind_of_batch),
batch_header_map_046=get_header_map_value_if_index_in_list(line, 46, kind_of_batch),
batch_header_map_047=get_header_map_value_if_index_in_list(line, 47, kind_of_batch),
batch_header_map_048=get_header_map_value_if_index_in_list(line, 48, kind_of_batch),
batch_header_map_049=get_header_map_value_if_index_in_list(line, 49, kind_of_batch),
batch_header_map_050=get_header_map_value_if_index_in_list(line, 50, kind_of_batch),
)
batch_header_map_id = batch_header_map.id
status += "BATCH_HEADER_MAP_SAVED "
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = ""
if positive_value_exists(file_name):
batch_name = str(batch_header_id) + ": " + file_name
if not positive_value_exists(batch_name):
# Fall back to the kind_of_batch so batch_name is never left undefined
batch_name = str(batch_header_id) + ": " + kind_of_batch
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch=kind_of_batch,
organization_we_vote_id=organization_we_vote_id,
polling_location_we_vote_id=polling_location_we_vote_id,
# source_uri=batch_uri,
)
status += "BATCH_DESCRIPTION_SAVED "
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += "EXCEPTION_BATCH_HEADER: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
else:
# if number_of_batch_rows >= limit_for_testing:
# break
if positive_value_exists(batch_header_id):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=get_value_if_index_in_list(line, 0),
batch_row_001=get_value_if_index_in_list(line, 1),
batch_row_002=get_value_if_index_in_list(line, 2),
batch_row_003=get_value_if_index_in_list(line, 3),
batch_row_004=get_value_if_index_in_list(line, 4),
batch_row_005=get_value_if_index_in_list(line, 5),
batch_row_006=get_value_if_index_in_list(line, 6),
batch_row_007=get_value_if_index_in_list(line, 7),
batch_row_008=get_value_if_index_in_list(line, 8),
batch_row_009=get_value_if_index_in_list(line, 9),
batch_row_010=get_value_if_index_in_list(line, 10),
batch_row_011=get_value_if_index_in_list(line, 11),
batch_row_012=get_value_if_index_in_list(line, 12),
batch_row_013=get_value_if_index_in_list(line, 13),
batch_row_014=get_value_if_index_in_list(line, 14),
batch_row_015=get_value_if_index_in_list(line, 15),
batch_row_016=get_value_if_index_in_list(line, 16),
batch_row_017=get_value_if_index_in_list(line, 17),
batch_row_018=get_value_if_index_in_list(line, 18),
batch_row_019=get_value_if_index_in_list(line, 19),
batch_row_020=get_value_if_index_in_list(line, 20),
batch_row_021=get_value_if_index_in_list(line, 21),
batch_row_022=get_value_if_index_in_list(line, 22),
batch_row_023=get_value_if_index_in_list(line, 23),
batch_row_024=get_value_if_index_in_list(line, 24),
batch_row_025=get_value_if_index_in_list(line, 25),
batch_row_026=get_value_if_index_in_list(line, 26),
batch_row_027=get_value_if_index_in_list(line, 27),
batch_row_028=get_value_if_index_in_list(line, 28),
batch_row_029=get_value_if_index_in_list(line, 29),
batch_row_030=get_value_if_index_in_list(line, 30),
batch_row_031=get_value_if_index_in_list(line, 31),
batch_row_032=get_value_if_index_in_list(line, 32),
batch_row_033=get_value_if_index_in_list(line, 33),
batch_row_034=get_value_if_index_in_list(line, 34),
batch_row_035=get_value_if_index_in_list(line, 35),
batch_row_036=get_value_if_index_in_list(line, 36),
batch_row_037=get_value_if_index_in_list(line, 37),
batch_row_038=get_value_if_index_in_list(line, 38),
batch_row_039=get_value_if_index_in_list(line, 39),
batch_row_040=get_value_if_index_in_list(line, 40),
batch_row_041=get_value_if_index_in_list(line, 41),
batch_row_042=get_value_if_index_in_list(line, 42),
batch_row_043=get_value_if_index_in_list(line, 43),
batch_row_044=get_value_if_index_in_list(line, 44),
batch_row_045=get_value_if_index_in_list(line, 45),
batch_row_046=get_value_if_index_in_list(line, 46),
batch_row_047=get_value_if_index_in_list(line, 47),
batch_row_048=get_value_if_index_in_list(line, 48),
batch_row_049=get_value_if_index_in_list(line, 49),
batch_row_050=get_value_if_index_in_list(line, 50),
google_civic_election_id=google_civic_election_id,
polling_location_we_vote_id=polling_location_we_vote_id,
)
number_of_batch_rows += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += "EXCEPTION_BATCH_ROW: " + str(e) + " "
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
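# Illustrative entry points for the CSV path above (a sketch; the URI and election id are
# hypothetical). Both create_batch_from_uri and create_batch_from_local_file_upload funnel
# into create_batch_from_csv_data, where the first CSV row becomes the BatchHeader /
# BatchHeaderMap and each following row becomes a BatchRow.
#
#   batch_manager = BatchManager()
#   results = batch_manager.create_batch_from_uri(
#       "https://example.com/import/candidates.csv",  # hypothetical source file
#       CANDIDATE, 1000000, "")
#   number_of_rows = results['number_of_batch_rows']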
def create_batch_from_json(self, file_name, structured_json_list, mapping_dict, kind_of_batch,
google_civic_election_id=0, organization_we_vote_id="", polling_location_we_vote_id="",
batch_set_id=0, state_code=""):
success = False
status = ""
number_of_batch_rows = 0
# limit_for_testing = 5
batch_header_id = 0
batch_header_map_id = 0
batch_name = ""
if not len(structured_json_list):
# If there aren't any values, don't create a batch
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
# We want an array with integers 0 - n as the keys, and the field names as the values
we_vote_keys = list(mapping_dict.keys())
remote_source_keys = list(mapping_dict.values())
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000=get_value_if_index_in_list(remote_source_keys, 0),
batch_header_column_001=get_value_if_index_in_list(remote_source_keys, 1),
batch_header_column_002=get_value_if_index_in_list(remote_source_keys, 2),
batch_header_column_003=get_value_if_index_in_list(remote_source_keys, 3),
batch_header_column_004=get_value_if_index_in_list(remote_source_keys, 4),
batch_header_column_005=get_value_if_index_in_list(remote_source_keys, 5),
batch_header_column_006=get_value_if_index_in_list(remote_source_keys, 6),
batch_header_column_007=get_value_if_index_in_list(remote_source_keys, 7),
batch_header_column_008=get_value_if_index_in_list(remote_source_keys, 8),
batch_header_column_009=get_value_if_index_in_list(remote_source_keys, 9),
batch_header_column_010=get_value_if_index_in_list(remote_source_keys, 10),
batch_header_column_011=get_value_if_index_in_list(remote_source_keys, 11),
batch_header_column_012=get_value_if_index_in_list(remote_source_keys, 12),
batch_header_column_013=get_value_if_index_in_list(remote_source_keys, 13),
batch_header_column_014=get_value_if_index_in_list(remote_source_keys, 14),
batch_header_column_015=get_value_if_index_in_list(remote_source_keys, 15),
batch_header_column_016=get_value_if_index_in_list(remote_source_keys, 16),
batch_header_column_017=get_value_if_index_in_list(remote_source_keys, 17),
batch_header_column_018=get_value_if_index_in_list(remote_source_keys, 18),
batch_header_column_019=get_value_if_index_in_list(remote_source_keys, 19),
batch_header_column_020=get_value_if_index_in_list(remote_source_keys, 20),
batch_header_column_021=get_value_if_index_in_list(remote_source_keys, 21),
batch_header_column_022=get_value_if_index_in_list(remote_source_keys, 22),
batch_header_column_023=get_value_if_index_in_list(remote_source_keys, 23),
batch_header_column_024=get_value_if_index_in_list(remote_source_keys, 24),
batch_header_column_025=get_value_if_index_in_list(remote_source_keys, 25),
batch_header_column_026=get_value_if_index_in_list(remote_source_keys, 26),
batch_header_column_027=get_value_if_index_in_list(remote_source_keys, 27),
batch_header_column_028=get_value_if_index_in_list(remote_source_keys, 28),
batch_header_column_029=get_value_if_index_in_list(remote_source_keys, 29),
batch_header_column_030=get_value_if_index_in_list(remote_source_keys, 30),
batch_header_column_031=get_value_if_index_in_list(remote_source_keys, 31),
batch_header_column_032=get_value_if_index_in_list(remote_source_keys, 32),
batch_header_column_033=get_value_if_index_in_list(remote_source_keys, 33),
batch_header_column_034=get_value_if_index_in_list(remote_source_keys, 34),
batch_header_column_035=get_value_if_index_in_list(remote_source_keys, 35),
batch_header_column_036=get_value_if_index_in_list(remote_source_keys, 36),
batch_header_column_037=get_value_if_index_in_list(remote_source_keys, 37),
batch_header_column_038=get_value_if_index_in_list(remote_source_keys, 38),
batch_header_column_039=get_value_if_index_in_list(remote_source_keys, 39),
batch_header_column_040=get_value_if_index_in_list(remote_source_keys, 40),
batch_header_column_041=get_value_if_index_in_list(remote_source_keys, 41),
batch_header_column_042=get_value_if_index_in_list(remote_source_keys, 42),
batch_header_column_043=get_value_if_index_in_list(remote_source_keys, 43),
batch_header_column_044=get_value_if_index_in_list(remote_source_keys, 44),
batch_header_column_045=get_value_if_index_in_list(remote_source_keys, 45),
batch_header_column_046=get_value_if_index_in_list(remote_source_keys, 46),
batch_header_column_047=get_value_if_index_in_list(remote_source_keys, 47),
batch_header_column_048=get_value_if_index_in_list(remote_source_keys, 48),
batch_header_column_049=get_value_if_index_in_list(remote_source_keys, 49),
batch_header_column_050=get_value_if_index_in_list(remote_source_keys, 50),
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
# For each line, check for translation suggestions
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000=get_value_if_index_in_list(we_vote_keys, 0),
batch_header_map_001=get_value_if_index_in_list(we_vote_keys, 1),
batch_header_map_002=get_value_if_index_in_list(we_vote_keys, 2),
batch_header_map_003=get_value_if_index_in_list(we_vote_keys, 3),
batch_header_map_004=get_value_if_index_in_list(we_vote_keys, 4),
batch_header_map_005=get_value_if_index_in_list(we_vote_keys, 5),
batch_header_map_006=get_value_if_index_in_list(we_vote_keys, 6),
batch_header_map_007=get_value_if_index_in_list(we_vote_keys, 7),
batch_header_map_008=get_value_if_index_in_list(we_vote_keys, 8),
batch_header_map_009=get_value_if_index_in_list(we_vote_keys, 9),
batch_header_map_010=get_value_if_index_in_list(we_vote_keys, 10),
batch_header_map_011=get_value_if_index_in_list(we_vote_keys, 11),
batch_header_map_012=get_value_if_index_in_list(we_vote_keys, 12),
batch_header_map_013=get_value_if_index_in_list(we_vote_keys, 13),
batch_header_map_014=get_value_if_index_in_list(we_vote_keys, 14),
batch_header_map_015=get_value_if_index_in_list(we_vote_keys, 15),
batch_header_map_016=get_value_if_index_in_list(we_vote_keys, 16),
batch_header_map_017=get_value_if_index_in_list(we_vote_keys, 17),
batch_header_map_018=get_value_if_index_in_list(we_vote_keys, 18),
batch_header_map_019=get_value_if_index_in_list(we_vote_keys, 19),
batch_header_map_020=get_value_if_index_in_list(we_vote_keys, 20),
batch_header_map_021=get_value_if_index_in_list(we_vote_keys, 21),
batch_header_map_022=get_value_if_index_in_list(we_vote_keys, 22),
batch_header_map_023=get_value_if_index_in_list(we_vote_keys, 23),
batch_header_map_024=get_value_if_index_in_list(we_vote_keys, 24),
batch_header_map_025=get_value_if_index_in_list(we_vote_keys, 25),
batch_header_map_026=get_value_if_index_in_list(we_vote_keys, 26),
batch_header_map_027=get_value_if_index_in_list(we_vote_keys, 27),
batch_header_map_028=get_value_if_index_in_list(we_vote_keys, 28),
batch_header_map_029=get_value_if_index_in_list(we_vote_keys, 29),
batch_header_map_030=get_value_if_index_in_list(we_vote_keys, 30),
batch_header_map_031=get_value_if_index_in_list(we_vote_keys, 31),
batch_header_map_032=get_value_if_index_in_list(we_vote_keys, 32),
batch_header_map_033=get_value_if_index_in_list(we_vote_keys, 33),
batch_header_map_034=get_value_if_index_in_list(we_vote_keys, 34),
batch_header_map_035=get_value_if_index_in_list(we_vote_keys, 35),
batch_header_map_036=get_value_if_index_in_list(we_vote_keys, 36),
batch_header_map_037=get_value_if_index_in_list(we_vote_keys, 37),
batch_header_map_038=get_value_if_index_in_list(we_vote_keys, 38),
batch_header_map_039=get_value_if_index_in_list(we_vote_keys, 39),
batch_header_map_040=get_value_if_index_in_list(we_vote_keys, 40),
batch_header_map_041=get_value_if_index_in_list(we_vote_keys, 41),
batch_header_map_042=get_value_if_index_in_list(we_vote_keys, 42),
batch_header_map_043=get_value_if_index_in_list(we_vote_keys, 43),
batch_header_map_044=get_value_if_index_in_list(we_vote_keys, 44),
batch_header_map_045=get_value_if_index_in_list(we_vote_keys, 45),
batch_header_map_046=get_value_if_index_in_list(we_vote_keys, 46),
batch_header_map_047=get_value_if_index_in_list(we_vote_keys, 47),
batch_header_map_048=get_value_if_index_in_list(we_vote_keys, 48),
batch_header_map_049=get_value_if_index_in_list(we_vote_keys, 49),
batch_header_map_050=get_value_if_index_in_list(we_vote_keys, 50),
)
batch_header_map_id = batch_header_map.id
# status += "BATCH_HEADER_MAP_SAVED_FOR_JSON "
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
if positive_value_exists(file_name):
batch_name = str(batch_header_id) + ": " + file_name
if not positive_value_exists(batch_name):
batch_name = str(batch_header_id) + ": " + kind_of_batch
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
batch_set_id=batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_batch=kind_of_batch,
organization_we_vote_id=organization_we_vote_id,
polling_location_we_vote_id=polling_location_we_vote_id,
# source_uri=batch_uri,
)
# status += "BATCH_DESCRIPTION_SAVED_FOR_JSON "
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += "EXCEPTION_BATCH_HEADER_FOR_JSON: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
if positive_value_exists(batch_header_id):
for one_dict in structured_json_list:
# if number_of_batch_rows >= limit_for_testing:
# break
local_google_civic_election_id = google_civic_election_id  # Use it if it came into this function
if not positive_value_exists(google_civic_election_id):
local_google_civic_election_id = get_value_from_dict(one_dict, 'google_civic_election_id')
local_polling_location_we_vote_id = polling_location_we_vote_id  # Use it if it came into this function
if not positive_value_exists(polling_location_we_vote_id):
local_polling_location_we_vote_id = get_value_from_dict(one_dict, 'polling_location_we_vote_id')
local_state_code = state_code  # Use it if it came into this function
if not positive_value_exists(state_code):
local_state_code = get_value_from_dict(one_dict, 'state_code')
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 0)),
batch_row_001=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 1)),
batch_row_002=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 2)),
batch_row_003=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 3)),
batch_row_004=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 4)),
batch_row_005=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 5)),
batch_row_006=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 6)),
batch_row_007=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 7)),
batch_row_008=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 8)),
batch_row_009=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 9)),
batch_row_010=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 10)),
batch_row_011=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 11)),
batch_row_012=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 12)),
batch_row_013=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 13)),
batch_row_014=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 14)),
batch_row_015=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 15)),
batch_row_016=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 16)),
batch_row_017=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 17)),
batch_row_018=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 18)),
batch_row_019=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 19)),
batch_row_020=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 20)),
batch_row_021=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 21)),
batch_row_022=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 22)),
batch_row_023=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 23)),
batch_row_024=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 24)),
batch_row_025=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 25)),
batch_row_026=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 26)),
batch_row_027=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 27)),
batch_row_028=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 28)),
batch_row_029=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 29)),
batch_row_030=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 30)),
batch_row_031=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 31)),
batch_row_032=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 32)),
batch_row_033=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 33)),
batch_row_034=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 34)),
batch_row_035=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 35)),
batch_row_036=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 36)),
batch_row_037=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 37)),
batch_row_038=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 38)),
batch_row_039=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 39)),
batch_row_040=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 40)),
batch_row_041=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 41)),
batch_row_042=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 42)),
batch_row_043=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 43)),
batch_row_044=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 44)),
batch_row_045=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 45)),
batch_row_046=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 46)),
batch_row_047=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 47)),
batch_row_048=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 48)),
batch_row_049=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 49)),
batch_row_050=get_value_from_dict(one_dict, get_value_if_index_in_list(remote_source_keys, 50)),
google_civic_election_id=local_google_civic_election_id,
polling_location_we_vote_id=local_polling_location_we_vote_id,
state_code=local_state_code,
)
number_of_batch_rows += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += "EXCEPTION_BATCH_ROW_FOR_JSON: " + str(e) + " "
break
else:
status += "NO_BATCH_HEADER_ID "
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
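# Illustrative mapping_dict for the JSON path above (a sketch; the remote key names are
# hypothetical). Keys are the We Vote header names stored in BatchHeaderMap, values are the
# keys found in each incoming dict and stored in BatchHeader:
#
#   mapping_dict = {
#       'candidate_name': 'name',        # We Vote key -> remote source key
#       'state_code': 'state',
#   }
#   results = batch_manager.create_batch_from_json(
#       "", structured_json_list, mapping_dict, CANDIDATE,
#       google_civic_election_id=1000000)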
# I don't believe this is currently in use. There is also a function of this same name in controllers.py
def create_batch_header_translation_suggestion(
self, kind_of_batch, header_value_recognized_by_we_vote, incoming_alternate_header_value):
"""
:param kind_of_batch:
:param header_value_recognized_by_we_vote:
:param incoming_alternate_header_value:
:return:
"""
success = False
status = ""
suggestion_created = False
suggestion_updated = False
header_value_recognized_by_we_vote = header_value_recognized_by_we_vote.lower()
incoming_alternate_header_value = incoming_alternate_header_value.lower()
if not positive_value_exists(kind_of_batch) or not positive_value_exists(header_value_recognized_by_we_vote) \
or not positive_value_exists(incoming_alternate_header_value):
status += "CREATE_BATCH_HEADER_TRANSLATION_SUGGESTION-MISSING_REQUIRED_VARIABLE "
results = {
'success': success,
'status': status,
'suggestion_created': suggestion_created,
'suggestion_updated': suggestion_updated,
}
return results
if kind_of_batch == CANDIDATE:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_CANDIDATES
elif kind_of_batch == CONTEST_OFFICE:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_CONTEST_OFFICES
elif kind_of_batch == ELECTED_OFFICE:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_ELECTED_OFFICES
elif kind_of_batch == MEASURE:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_MEASURES
elif kind_of_batch == ORGANIZATION_WORD:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_ORGANIZATIONS
elif kind_of_batch == POLITICIAN:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_POLITICIANS
elif kind_of_batch == POSITION:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_POSITIONS
elif kind_of_batch == IMPORT_BALLOT_ITEM:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_BALLOT_ITEMS
elif kind_of_batch == IMPORT_POLLING_LOCATION:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_POLLING_LOCATIONS
elif kind_of_batch == IMPORT_VOTER:
batch_import_keys_accepted = BATCH_IMPORT_KEYS_ACCEPTED_FOR_VOTERS
else:
batch_import_keys_accepted = {}
if incoming_alternate_header_value in batch_import_keys_accepted:
success = True
status += "SUGGESTION_IS_BATCH_IMPORT_KEY "
results = {
'success': success,
'status': status,
'suggestion_created': suggestion_created,
'suggestion_updated': suggestion_updated,
}
return results
try:
batch_header_translation_suggestion, suggestion_created = \
BatchHeaderTranslationSuggestion.objects.update_or_create(
kind_of_batch=kind_of_batch,
header_value_recognized_by_we_vote=header_value_recognized_by_we_vote,
incoming_alternate_header_value=incoming_alternate_header_value)
success = True
status += "BATCH_HEADER_TRANSLATION_SUGGESTION_SAVED "
except Exception as e:
success = False
status += "BATCH_HEADER_TRANSLATION_SUGGESTION_SAVE_FAILED " + str(e) + " "
results = {
'success': success,
'status': status,
'suggestion_created': suggestion_created,
'suggestion_updated': suggestion_updated,
}
return results
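# Illustrative usage (a sketch; the header strings are hypothetical). This records that a
# spreadsheet column labelled "Candidate" should be read as the recognized We Vote header
# 'candidate_name' the next time a CANDIDATE batch is imported:
#
#   batch_manager.create_batch_header_translation_suggestion(
#       CANDIDATE, 'candidate_name', 'Candidate')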
def fetch_batch_row_count(self, batch_header_id):
"""
:param batch_header_id:
:return:
"""
try:
batch_row_query = BatchRow.objects.filter(batch_header_id=batch_header_id)
batch_row_count = batch_row_query.count()
except Exception:
# A queryset count() never raises DoesNotExist, so one broad except covers all failures
batch_row_count = 0
return batch_row_count
def fetch_batch_row_action_count(self, batch_header_id, kind_of_batch, kind_of_action=''):
"""
:param batch_header_id:
:param kind_of_batch:
:param kind_of_action:
:return:
"""
batch_row_action_count = 0
try:
if kind_of_batch == CANDIDATE:
batch_row_action_query = BatchRowActionCandidate.objects.filter(batch_header_id=batch_header_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == CONTEST_OFFICE:
batch_row_action_query = BatchRowActionContestOffice.objects.filter(batch_header_id=batch_header_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == ELECTED_OFFICE:
batch_row_action_query = BatchRowActionElectedOffice.objects.filter(batch_header_id=batch_header_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == IMPORT_BALLOT_ITEM:
batch_row_action_query = BatchRowActionBallotItem.objects.filter(batch_header_id=batch_header_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == IMPORT_POLLING_LOCATION:
batch_row_action_query = BatchRowActionPollingLocation.objects.filter(batch_header_id=batch_header_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == MEASURE:
batch_row_action_query = BatchRowActionMeasure.objects.filter(batch_header_id=batch_header_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == ORGANIZATION_WORD:
batch_row_action_query = BatchRowActionOrganization.objects.filter(batch_header_id=batch_header_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == POLITICIAN:
batch_row_action_query = BatchRowActionPolitician.objects.filter(batch_header_id=batch_header_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == POSITION:
batch_row_action_query = BatchRowActionPosition.objects.filter(batch_header_id=batch_header_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
except Exception as e:
batch_row_action_count = 0
return batch_row_action_count
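# Illustrative usage (a sketch): count the BatchRowAction entries of one kind created when a
# batch was analyzed, e.g. ballot items slated to update existing records.
# IMPORT_ADD_TO_EXISTING is the same constant used elsewhere in this module.
#
#   updates_planned = batch_manager.fetch_batch_row_action_count(
#       batch_header_id, IMPORT_BALLOT_ITEM, IMPORT_ADD_TO_EXISTING)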
def fetch_batch_row_action_count_in_batch_set(self, batch_set_id, kind_of_batch, kind_of_action=''):
"""
:param batch_set_id:
:param kind_of_batch:
:param kind_of_action:
:return:
"""
batch_row_action_count = 0
try:
if kind_of_batch == CANDIDATE:
batch_row_action_query = BatchRowActionCandidate.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == CONTEST_OFFICE:
batch_row_action_query = BatchRowActionContestOffice.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == ELECTED_OFFICE:
batch_row_action_query = BatchRowActionElectedOffice.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == IMPORT_BALLOT_ITEM:
batch_row_action_query = BatchRowActionBallotItem.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == IMPORT_POLLING_LOCATION:
batch_row_action_query = BatchRowActionPollingLocation.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == MEASURE:
batch_row_action_query = BatchRowActionMeasure.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == ORGANIZATION_WORD:
batch_row_action_query = BatchRowActionOrganization.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == POLITICIAN:
batch_row_action_query = BatchRowActionPolitician.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
elif kind_of_batch == POSITION:
batch_row_action_query = BatchRowActionPosition.objects.filter(batch_set_id=batch_set_id)
if positive_value_exists(kind_of_action):
batch_row_action_query = batch_row_action_query.filter(kind_of_action__iexact=kind_of_action)
batch_row_action_count = batch_row_action_query.count()
except Exception as e:
batch_row_action_count = 0
return batch_row_action_count
def retrieve_unprocessed_batch_set_info_by_election_and_set_source(
self, google_civic_election_id, batch_set_source, state_code=''):
batch_set_query = BatchSet.objects.using('readonly').all()
batch_set_query = batch_set_query.filter(google_civic_election_id=google_civic_election_id)
batch_set_query = batch_set_query.filter(batch_set_source__iexact=batch_set_source)
if positive_value_exists(state_code):
batch_set_query = batch_set_query.filter(state_code__iexact=state_code)
batch_set_query = batch_set_query.order_by('-id')
batch_set_list = list(batch_set_query)
batch_of_ballot_items_not_processed = 0
batch_set_id = 0
total_ballot_locations_count = 0
if positive_value_exists(len(batch_set_list)):
one_batch_set = batch_set_list[0]
batch_set_id = one_batch_set.id
batch_description_query = BatchDescription.objects.using('readonly').all()
batch_description_query = batch_description_query.filter(batch_set_id=one_batch_set.id)
total_ballot_locations_count = batch_description_query.count()
batch_description_list = list(batch_description_query)
for one_batch_description in batch_description_list:
# For each Batch Description, see if there are BatchRowActionBallotItem entries
batch_row_action_ballot_item_query = BatchRowActionBallotItem.objects.all()
batch_row_action_ballot_item_query = batch_row_action_ballot_item_query.filter(
batch_header_id=one_batch_description.batch_header_id)
batch_row_action_ballot_item_query = batch_row_action_ballot_item_query.filter(
kind_of_action=IMPORT_ADD_TO_EXISTING)
# If there aren't any "update" entries, count as unprocessed
if not positive_value_exists(batch_row_action_ballot_item_query.count()):
batch_of_ballot_items_not_processed += 1
results = {
'batches_not_processed': batch_of_ballot_items_not_processed,
'batch_set_id': batch_set_id,
}
return results
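# Illustrative reading of the result above (a sketch; the arguments are hypothetical): for the
# most recent BatchSet matching the election and source, 'batches_not_processed' counts the
# BatchDescription entries that do not yet have any IMPORT_ADD_TO_EXISTING
# BatchRowActionBallotItem rows, i.e. map points whose ballot items still need processing.
#
#   results = batch_manager.retrieve_unprocessed_batch_set_info_by_election_and_set_source(
#       1000000, 'IMPORT_BALLOTPEDIA_BALLOT_ITEMS')  # hypothetical batch_set_source value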
def retrieve_batch_header_translation_suggestion(self, kind_of_batch, incoming_alternate_header_value):
"""
We are looking at one header value from a file imported by an admin or volunteer. We want to see if
there are any suggestions for headers already recognized by We Vote. Ex/ "Organization" -> "organization_name"
:param kind_of_batch:
:param incoming_alternate_header_value:
:return:
"""
success = False
status = ""
batch_header_translation_suggestion_found = False
if not positive_value_exists(kind_of_batch) or not positive_value_exists(incoming_alternate_header_value):
status += "RETRIEVE_BATCH_HEADER_TRANSLATION_SUGGESTION-MISSING_REQUIRED_VARIABLE "
results = {
'success': success,
'status': status,
'batch_header_translation_suggestion': BatchHeaderTranslationSuggestion(),
'batch_header_translation_suggestion_found': batch_header_translation_suggestion_found,
}
return results
try:
# Note that we don't care about case sensitivity when we search for the alternate value
batch_header_translation_suggestion = BatchHeaderTranslationSuggestion.objects.get(
kind_of_batch=kind_of_batch,
incoming_alternate_header_value__iexact=incoming_alternate_header_value)
batch_header_translation_suggestion_found = True
success = True
status += "BATCH_HEADER_TRANSLATION_SUGGESTION_SAVED "
except Exception as e:
batch_header_translation_suggestion = None
batch_header_translation_suggestion_found = False
success = False
status += "BATCH_HEADER_TRANSLATION_SUGGESTION_SAVE_FAILED: " + str(e) + " "
results = {
'success': success,
'status': status,
'batch_header_translation_suggestion': batch_header_translation_suggestion,
'batch_header_translation_suggestion_found': batch_header_translation_suggestion_found,
}
return results
def create_batch_row_translation_map( # TODO This hasn't been built
self, kind_of_batch, header_value_recognized_by_we_vote, incoming_alternate_header_value):
success = False
status = ""
if not positive_value_exists(kind_of_batch) or not positive_value_exists(header_value_recognized_by_we_vote) \
or not positive_value_exists(incoming_alternate_header_value):
status += "CREATE_BATCH_HEADER_TRANSLATION_SUGGESTION-MISSING_REQUIRED_VARIABLE "
results = {
'success': success,
'status': status,
}
return results
try:
header_value_recognized_by_we_vote = header_value_recognized_by_we_vote.lower()
incoming_alternate_header_value = incoming_alternate_header_value.lower()
batch_header_translation_suggestion, created = BatchHeaderTranslationSuggestion.objects.update_or_create(
kind_of_batch=kind_of_batch,
header_value_recognized_by_we_vote=header_value_recognized_by_we_vote,
incoming_alternate_header_value=incoming_alternate_header_value)
success = True
status += "BATCH_HEADER_TRANSLATION_SUGGESTION_SAVED "
except Exception as e:
success = False
status += "BATCH_HEADER_TRANSLATION_SUGGESTION_SAVE_FAILED: " + str(e) + " "
results = {
'success': success,
'status': status,
}
return results
def retrieve_batch_row_translation_map(self, kind_of_batch, incoming_alternate_header_value):
# TODO This hasn't been built yet
success = False
status = ""
batch_header_translation_suggestion_found = False
if not positive_value_exists(kind_of_batch) or not positive_value_exists(incoming_alternate_header_value):
status += "RETRIEVE_BATCH_HEADER_TRANSLATION_SUGGESTION-MISSING_REQUIRED_VARIABLE "
results = {
'success': success,
'status': status,
'batch_header_translation_suggestion': BatchHeaderTranslationSuggestion(),
'batch_header_translation_suggestion_found': batch_header_translation_suggestion_found,
}
return results
try:
# Note that we don't care about case sensitivity when we search for the alternate value
batch_header_translation_suggestion = BatchHeaderTranslationSuggestion.objects.get(
kind_of_batch=kind_of_batch,
incoming_alternate_header_value__iexact=incoming_alternate_header_value)
batch_header_translation_suggestion_found = True
success = True
status += "BATCH_HEADER_TRANSLATION_SUGGESTION_SAVED "
except Exception as e:
batch_header_translation_suggestion = None
batch_header_translation_suggestion_found = False
success = False
status += "BATCH_HEADER_TRANSLATION_SUGGESTION_SAVE_FAILED: " + str(e) + " "
results = {
'success': success,
'status': status,
'batch_header_translation_suggestion': batch_header_translation_suggestion,
'batch_header_translation_suggestion_found': batch_header_translation_suggestion_found,
}
return results
def retrieve_batch_row_action_organization(self, batch_header_id, batch_row_id):
status = ""
try:
batch_row_action_organization = BatchRowActionOrganization.objects.get(batch_header_id=batch_header_id,
batch_row_id=batch_row_id)
batch_row_action_found = True
success = True
status += "BATCH_ROW_ACTION_ORGANIZATION_RETRIEVED "
except BatchRowActionOrganization.DoesNotExist:
batch_row_action_organization = None
batch_row_action_found = False
success = True
status += "BATCH_ROW_ACTION_ORGANIZATION_NOT_FOUND "
except Exception as e:
batch_row_action_organization = None
batch_row_action_found = False
success = False
status += "BATCH_ROW_ACTION_ORGANIZATION_RETRIEVE_ERROR: " + str(e) + " "
results = {
'success': success,
'status': status,
'batch_row_action_found': batch_row_action_found,
'batch_row_action_organization': batch_row_action_organization,
}
return results
def retrieve_batch_row_action_measure(self, batch_header_id, batch_row_id):
status = ""
try:
batch_row_action_measure = BatchRowActionMeasure.objects.get(batch_header_id=batch_header_id,
batch_row_id=batch_row_id)
batch_row_action_found = True
success = True
status += "BATCH_ROW_ACTION_MEASURE_RETRIEVED "
except BatchRowActionMeasure.DoesNotExist:
batch_row_action_measure = None
batch_row_action_found = False
success = True
status += "BATCH_ROW_ACTION_MEASURE_NOT_FOUND "
except Exception as e:
batch_row_action_measure = None
batch_row_action_found = False
success = False
status += "BATCH_ROW_ACTION_MEASURE_RETRIEVE_ERROR: " + str(e) + " "
results = {
'success': success,
'status': status,
'batch_row_action_found': batch_row_action_found,
'batch_row_action_measure': batch_row_action_measure,
}
return results
def retrieve_batch_row_action_elected_office(self, batch_header_id, batch_row_id):
"""
Retrieves data from BatchRowActionElectedOffice table
:param batch_header_id:
:param batch_row_id:
:return:
"""
status = ""
try:
batch_row_action_elected_office = BatchRowActionElectedOffice.objects.get(batch_header_id=batch_header_id,
batch_row_id=batch_row_id)
batch_row_action_found = True
success = True
status += "BATCH_ROW_ACTION_ELECTED_OFFICE_RETRIEVED "
except BatchRowActionElectedOffice.DoesNotExist:
batch_row_action_elected_office = None
batch_row_action_found = False
success = True
status += "BATCH_ROW_ACTION_ELECTED_OFFICE_NOT_FOUND "
except Exception as e:
batch_row_action_elected_office = None
batch_row_action_found = False
success = False
status += "BATCH_ROW_ACTION_ELECTED_OFFICE_RETRIEVE_ERROR: " + str(e) + " "
results = {
'success': success,
'status': status,
'batch_row_action_found': batch_row_action_found,
'batch_row_action_elected_office': batch_row_action_elected_office,
}
return results
def retrieve_batch_row_action_contest_office(self, batch_header_id, batch_row_id):
"""
Retrieves data from BatchRowActionContestOffice table
:param batch_header_id:
:param batch_row_id:
:return:
"""
status = ""
try:
batch_row_action_contest_office = BatchRowActionContestOffice.objects.get(batch_header_id=batch_header_id,
batch_row_id=batch_row_id)
batch_row_action_found = True
success = True
status += "BATCH_ROW_ACTION_CONTEST_OFFICE_RETRIEVED "
except BatchRowActionContestOffice.DoesNotExist:
batch_row_action_contest_office = None
batch_row_action_found = False
success = True
status += "BATCH_ROW_ACTION_CONTEST_OFFICE_NOT_FOUND "
except Exception as e:
batch_row_action_contest_office = None
batch_row_action_found = False
success = False
status += "BATCH_ROW_ACTION_CONTEST_OFFICE_RETRIEVE_ERROR: " + str(e) + " "
results = {
'success': success,
'status': status,
'batch_row_action_found': batch_row_action_found,
'batch_row_action_contest_office': batch_row_action_contest_office,
}
return results
def retrieve_batch_row_action_politician(self, batch_header_id, batch_row_id):
"""
Retrieves data from BatchRowActionPolitician table
:param batch_header_id:
:param batch_row_id:
:return:
"""
status = ""
try:
batch_row_action_politician = BatchRowActionPolitician.objects.get(batch_header_id=batch_header_id,
batch_row_id=batch_row_id)
batch_row_action_found = True
success = True
status += "BATCH_ROW_ACTION_POLITICIAN_RETRIEVED "
except BatchRowActionPolitician.DoesNotExist:
batch_row_action_politician = None
batch_row_action_found = False
success = True
status += "BATCH_ROW_ACTION_POLITICIAN_NOT_FOUND "
except Exception as e:
batch_row_action_politician = None
batch_row_action_found = False
success = False
status += "BATCH_ROW_ACTION_POLITICIAN_RETRIEVE_ERROR: " + str(e) + " "
results = {
'success': success,
'status': status,
'batch_row_action_found': batch_row_action_found,
'batch_row_action_politician': batch_row_action_politician,
}
return results
def retrieve_batch_row_action_polling_location(self, batch_header_id, batch_row_id):
"""
Retrieves data from BatchRowActionPollingLocation table
:param batch_header_id:
:param batch_row_id:
:return:
"""
status = ""
try:
batch_row_action_polling_location = \
BatchRowActionPollingLocation.objects.get(batch_header_id=batch_header_id, batch_row_id=batch_row_id)
batch_row_action_found = True
success = True
status += "BATCH_ROW_ACTION_POLLING_LOCATION_RETRIEVED "
except BatchRowActionPollingLocation.DoesNotExist:
batch_row_action_polling_location = None
batch_row_action_found = False
success = True
status += "BATCH_ROW_ACTION_POLLING_LOCATION_NOT_FOUND "
except Exception as e:
batch_row_action_polling_location = None
batch_row_action_found = False
success = False
status += "BATCH_ROW_ACTION_POLLING_LOCATION_RETRIEVE_ERROR: " + str(e) + " "
results = {
'success': success,
'status': status,
'batch_row_action_found': batch_row_action_found,
'batch_row_action_polling_location': batch_row_action_polling_location,
}
return results
def retrieve_batch_row_action_position(self, batch_header_id, batch_row_id):
"""
Retrieves data from BatchRowActionPosition table
:param batch_header_id:
:param batch_row_id:
:return:
"""
status = ""
try:
batch_row_action_position = BatchRowActionPosition.objects.get(batch_header_id=batch_header_id,
batch_row_id=batch_row_id)
batch_row_action_found = True
success = True
status += "BATCH_ROW_ACTION_POSITION_RETRIEVED "
except BatchRowActionPosition.DoesNotExist:
batch_row_action_position = None
batch_row_action_found = False
success = True
status += "BATCH_ROW_ACTION_POSITION_NOT_FOUND "
except Exception as e:
batch_row_action_position = None
batch_row_action_found = False
success = False
status += "BATCH_ROW_ACTION_POSITION_RETRIEVE_ERROR: " + str(e) + " "
results = {
'success': success,
'status': status,
'batch_row_action_found': batch_row_action_found,
'batch_row_action_position': batch_row_action_position,
}
return results
def retrieve_batch_row_action_ballot_item(self, batch_header_id, batch_row_id=0, ballot_item_id=0):
"""
Retrieves data from BatchRowActionBallotItem table
:param batch_header_id:
:param batch_row_id:
:param ballot_item_id:
:return:
"""
batch_row_action_ballot_item = None
status = ""
try:
if positive_value_exists(batch_row_id):
batch_row_action_ballot_item = BatchRowActionBallotItem.objects.get(batch_header_id=batch_header_id,
batch_row_id=batch_row_id)
batch_row_action_found = True
success = True
status += "BATCH_ROW_ACTION_BALLOT_ITEM_RETRIEVED_BY_BATCH_ROW_ID "
elif positive_value_exists(ballot_item_id):
batch_row_action_ballot_item = BatchRowActionBallotItem.objects.get(batch_header_id=batch_header_id,
ballot_item_id=ballot_item_id)
batch_row_action_found = True
success = True
status += "BATCH_ROW_ACTION_BALLOT_ITEM_RETRIEVED_BY_BALLOT_ITEM_ID "
else:
batch_row_action_found = False
success = False
status += "BATCH_ROW_ACTION_BALLOT_ITEM_NOT_RETRIEVED-MISSING_REQUIRED_VARIABLE "
except BatchRowActionBallotItem.DoesNotExist:
batch_row_action_ballot_item = None
batch_row_action_found = False
success = True
status += "BATCH_ROW_ACTION_BALLOT_ITEM_NOT_FOUND "
except Exception as e:
batch_row_action_ballot_item = None
batch_row_action_found = False
success = False
status += "BATCH_ROW_ACTION_BALLOT_ITEM_RETRIEVE_ERROR: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
results = {
'success': success,
'status': status,
'batch_row_action_found': batch_row_action_found,
'batch_row_action_ballot_item': batch_row_action_ballot_item,
}
return results
def retrieve_batch_row_action_ballot_item_list(self, batch_header_id, limit_to_kind_of_action_list=[]):
"""
Retrieves data from BatchRowActionBallotItem table
:param batch_header_id:
:param limit_to_kind_of_action_list:
:return:
"""
batch_row_action_list = []
status = ""
try:
batch_row_action_ballot_item_query = BatchRowActionBallotItem.objects.filter(
batch_header_id=batch_header_id)
if positive_value_exists(limit_to_kind_of_action_list):
batch_row_action_ballot_item_query = batch_row_action_ballot_item_query.filter(
kind_of_action__in=limit_to_kind_of_action_list)
batch_row_action_list = list(batch_row_action_ballot_item_query)
batch_row_action_list_found = True
success = True
status += "BATCH_ROW_ACTION_BALLOT_ITEM_LIST_RETRIEVED_BY_BATCH_HEADER_ID "
except Exception as e:
batch_row_action_list_found = False
success = False
status += "BATCH_ROW_ACTION_BALLOT_ITEM_LIST_RETRIEVE_ERROR: " + str(e) + " "
results = {
'success': success,
'status': status,
'batch_row_action_list_found': batch_row_action_list_found,
'batch_row_action_list': batch_row_action_list,
}
return results
def retrieve_batch_row_action_candidate(self, batch_header_id, batch_row_id):
"""
Retrieves data from BatchRowActionCandidate table
:param batch_header_id:
:param batch_row_id:
:return:
"""
status = ""
try:
batch_row_action_candidate = BatchRowActionCandidate.objects.get(batch_header_id=batch_header_id,
batch_row_id=batch_row_id)
batch_row_action_found = True
success = True
status += "BATCH_ROW_ACTION_CANDIDATE_RETRIEVED "
except BatchRowActionCandidate.DoesNotExist:
batch_row_action_candidate = None
batch_row_action_found = False
success = True
status += "BATCH_ROW_ACTION_CANDIDATE_NOT_FOUND "
except Exception as e:
batch_row_action_candidate = None
batch_row_action_found = False
success = False
status += "BATCH_ROW_ACTION_CANDIDATE_RETRIEVE_ERROR: " + str(e) + " "
results = {
'success': success,
'status': status,
'batch_row_action_found': batch_row_action_found,
'batch_row_action_candidate': batch_row_action_candidate,
}
return results
def retrieve_value_from_batch_row(self, batch_header_name_we_want, batch_header_map, one_batch_row):
index_number = 0
batch_header_name_we_want = batch_header_name_we_want.lower().strip()
number_of_columns = 50
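        # Assumes the batch header map and batch row models expose numbered columns 000 through 049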
while index_number < number_of_columns:
index_number_string = "00" + str(index_number)
index_number_string = index_number_string[-3:]
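            # Zero-pad the index to three digits to match the column suffixes, e.g. 7 -> "007", 23 -> "023"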
batch_header_map_attribute_name = "batch_header_map_" + index_number_string
# If this position in the batch_header_map matches the batch_header_name_we_want, then we know what column
# to look in within one_batch_row for the value
value_from_batch_header_map = getattr(batch_header_map, batch_header_map_attribute_name)
if value_from_batch_header_map is None:
# Break out when we stop getting batch_header_map values
return ""
            value_from_batch_header_map = value_from_batch_header_map.replace('"', '')
            # Strip byte-order-mark characters, if present (the character removed here is invisible; assumed to be a BOM)
            value_from_batch_header_map = value_from_batch_header_map.replace('\ufeff', '')
            value_from_batch_header_map = value_from_batch_header_map.lower().strip()
if batch_header_name_we_want == value_from_batch_header_map:
one_batch_row_attribute_name = "batch_row_" + index_number_string
value_from_batch_row = getattr(one_batch_row, one_batch_row_attribute_name)
if isinstance(value_from_batch_row, str):
return value_from_batch_row.strip()
else:
return value_from_batch_row
index_number += 1
return ""
def retrieve_column_name_from_batch_row(self, batch_header_name_we_want, batch_header_map):
"""
Given column name from batch_header_map, retrieve equivalent column name from batch row
:param batch_header_name_we_want:
:param batch_header_map:
:return:
"""
index_number = 0
batch_header_name_we_want = batch_header_name_we_want.lower().strip()
number_of_columns = 50
while index_number < number_of_columns:
index_number_string = "00" + str(index_number)
index_number_string = index_number_string[-3:]
batch_header_map_attribute_name = "batch_header_map_" + index_number_string
# If this position in the batch_header_map matches the batch_header_name_we_want, then we know what column
# to look in within one_batch_row for the value, eg: batch_header_map_000 --> measure_batch_id
value_from_batch_header_map = getattr(batch_header_map, batch_header_map_attribute_name)
if value_from_batch_header_map is None:
# Break out when we stop getting batch_header_map values
return ""
if batch_header_name_we_want == value_from_batch_header_map.lower().strip():
one_batch_row_attribute_name = "batch_row_" + index_number_string
return one_batch_row_attribute_name
index_number += 1
return ""
def find_file_type(self, batch_uri):
"""
        Determines the file type based on the file extension. If there is no known extension, the file type
        information is retrieved from file magic.
        :param batch_uri:
        :return: filetype - 'xml', 'json' or 'csv', or the file magic description when the extension is unknown
"""
# check for file extension
batch_uri = batch_uri.lower()
file_extension = batch_uri.split('.')
if 'xml' in file_extension:
filetype = 'xml'
elif 'json' in file_extension:
filetype = 'json'
elif 'csv' in file_extension:
filetype = 'csv'
else:
# if the filetype is neither xml, json nor csv, get the file type info from magic
file = urllib.request.urlopen(batch_uri)
filetype = magic.from_buffer(file.read())
file.close()
return filetype
def find_possible_matches(self, kind_of_batch, batch_row_name, incoming_batch_row_value,
google_civic_election_id, state_code):
if kind_of_batch == CONTEST_OFFICE:
# TODO DALE
pass
possible_matches = {
'New York City Mayor': 'New York City Mayor'
}
results = {
'possible_matches_found': True,
'possible_matches': possible_matches
}
return results
def create_batch_vip_xml(self, batch_uri, kind_of_batch, google_civic_election_id, organization_we_vote_id):
"""
Retrieves CTCL data from an xml file - Measure, Office, Candidate, Politician
:param batch_uri:
:param kind_of_batch:
:param google_civic_election_id:
:param organization_we_vote_id:
:return:
"""
# Retrieve from XML
request = urllib.request.urlopen(batch_uri)
# xml_data = request.read()
# xml_data = xmltodict.parse(xml_data)
# # xml_data_list_json = list(xml_data)
# structured_json = json.dumps(xml_data)
xml_tree = ElementTree.parse(request)
request.close()
xml_root = xml_tree.getroot()
if xml_root:
if kind_of_batch == MEASURE:
return self.store_measure_xml(batch_uri, google_civic_election_id, organization_we_vote_id, xml_root)
elif kind_of_batch == ELECTED_OFFICE:
return self.store_elected_office_xml(batch_uri, google_civic_election_id, organization_we_vote_id,
xml_root)
elif kind_of_batch == CONTEST_OFFICE:
return self.store_contest_office_xml(batch_uri, google_civic_election_id, organization_we_vote_id,
xml_root)
elif kind_of_batch == CANDIDATE:
return self.store_candidate_xml(batch_uri, google_civic_election_id, organization_we_vote_id, xml_root)
elif kind_of_batch == POLITICIAN:
return self.store_politician_xml(batch_uri, google_civic_election_id, organization_we_vote_id, xml_root)
else:
results = {
'success': False,
'status': '',
'batch_header_id': 0,
'batch_saved': False,
'number_of_batch_rows': 0,
}
return results
    def store_measure_xml(self, batch_uri, google_civic_election_id, organization_we_vote_id, xml_root,
                          batch_set_id=0):
"""
Retrieves Measure data from CTCL xml file
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:param xml_root:
:param batch_set_id:
:return:
"""
# Process BallotMeasureContest data
number_of_batch_rows = 0
first_line = True
success = True
status = ''
limit_for_testing = 0
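        # Set limit_for_testing to a positive value during development to cap how many measures are processed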
batch_header_id = 0
# Look for BallotMeasureContest and create the batch_header first. BallotMeasureContest is the direct child node
# of VipObject
ballot_measure_xml_node = xml_root.findall('BallotMeasureContest')
# if ballot_measure_xml_node is not None:
for one_ballot_measure in ballot_measure_xml_node:
if positive_value_exists(limit_for_testing) and number_of_batch_rows >= limit_for_testing:
break
# look for relevant child nodes under BallotMeasureContest: id, BallotTitle, BallotSubTitle,
# ElectoralDistrictId, other::ctcl-uid
ballot_measure_id = one_ballot_measure.attrib['id']
ballot_measure_subtitle_node = one_ballot_measure.find('BallotSubTitle/Text')
if ballot_measure_subtitle_node is not None:
ballot_measure_subtitle = ballot_measure_subtitle_node.text
else:
ballot_measure_subtitle = ''
ballot_measure_title_node = one_ballot_measure.find('BallotTitle')
if ballot_measure_title_node is not None:
ballot_measure_title = one_ballot_measure.find('BallotTitle/Text').text
else:
ballot_measure_title = ''
electoral_district_id_node = one_ballot_measure.find('ElectoralDistrictId')
if electoral_district_id_node is not None:
electoral_district_id = electoral_district_id_node.text
else:
electoral_district_id = ''
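            # Pull the CTCL unique id from the ExternalIdentifier entry whose OtherType is 'ctcl-uuid'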
ctcl_uuid_node = one_ballot_measure.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']")
if ctcl_uuid_node is not None:
ctcl_uuid = one_ballot_measure.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']/Value").text
else:
ctcl_uuid = ''
ballot_measure_name_node = one_ballot_measure.find('Name')
if ballot_measure_name_node is not None:
ballot_measure_name = ballot_measure_name_node.text
else:
ballot_measure_name = ''
if first_line:
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='BallotSubTitle',
batch_header_column_002='BallotTitle',
batch_header_column_003='ElectoralDistrictId',
batch_header_column_004='other::ctcl-uuid',
batch_header_column_005='Name',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='measure_batch_id',
batch_header_map_001='measure_subtitle',
batch_header_map_002='measure_title',
batch_header_map_003='electoral_district_id',
batch_header_map_004='measure_ctcl_uuid',
batch_header_map_005='measure_name'
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED "
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "MEASURE " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='MEASURE',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED "
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER "
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
            # Require ballot_measure_id and ctcl_uuid, plus at least one of subtitle, title, or name
if (positive_value_exists(ballot_measure_id) and positive_value_exists(ctcl_uuid) and
(positive_value_exists(ballot_measure_subtitle) or positive_value_exists(ballot_measure_title) or
positive_value_exists(ballot_measure_name))):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=ballot_measure_id,
batch_row_001=ballot_measure_subtitle,
batch_row_002=ballot_measure_title,
batch_row_003=electoral_district_id,
batch_row_004=ctcl_uuid,
batch_row_005=ballot_measure_name
)
number_of_batch_rows += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW: " + str(e) + " "
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
def store_elected_office_xml(self, batch_uri, google_civic_election_id, organization_we_vote_id, xml_root,
batch_set_id=0):
"""
Retrieves Office data from CTCL xml file
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:param xml_root:
:param batch_set_id
:return:
"""
# Process VIP Office data
number_of_batch_rows = 0
first_line = True
success = False
status = ''
limit_for_testing = 0
batch_header_id = 0
# Look for Office and create the batch_header first. Office is the direct child node
# of VipObject
elected_office_xml_node = xml_root.findall('Office')
# if ballot_measure_xml_node is not None:
for one_elected_office in elected_office_xml_node:
if positive_value_exists(limit_for_testing) and number_of_batch_rows >= limit_for_testing:
break
# look for relevant child nodes under Office: id, Name, Description, ElectoralDistrictId,
# IsPartisan, other::ctcl-uid
elected_office_id = one_elected_office.attrib['id']
elected_office_name_node = one_elected_office.find("./Name/Text/[@language='"+LANGUAGE_CODE_ENGLISH+"']")
if elected_office_name_node is not None:
elected_office_name = elected_office_name_node.text
else:
elected_office_name = ""
elected_office_name_es_node = one_elected_office.find("./Name/Text/[@language='"+LANGUAGE_CODE_SPANISH+"']")
if elected_office_name_es_node is not None:
elected_office_name_es = elected_office_name_es_node.text
else:
elected_office_name_es = ""
elected_office_description_node = one_elected_office.find(
"Description/Text/[@language='"+LANGUAGE_CODE_ENGLISH+"']")
if elected_office_description_node is not None:
elected_office_description = elected_office_description_node.text
else:
elected_office_description = ""
elected_office_description_es_node = one_elected_office.find(
"Description/Text/[@language='"+LANGUAGE_CODE_SPANISH+"']")
if elected_office_description_es_node is not None:
elected_office_description_es = elected_office_description_es_node.text
else:
elected_office_description_es = ""
electoral_district_id_node = one_elected_office.find('ElectoralDistrictId')
if electoral_district_id_node is not None:
electoral_district_id = electoral_district_id_node.text
else:
electoral_district_id = ""
elected_office_is_partisan_node = one_elected_office.find('IsPartisan')
if elected_office_is_partisan_node is not None:
elected_office_is_partisan = elected_office_is_partisan_node.text
else:
elected_office_is_partisan = ""
ctcl_uuid = ""
ctcl_uuid_node = one_elected_office.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']")
if ctcl_uuid_node is not None:
ctcl_uuid = one_elected_office.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']/Value").text
if first_line:
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='NameEnglish',
batch_header_column_002='NameSpanish',
batch_header_column_003='DescriptionEnglish',
batch_header_column_004='DescriptionSpanish',
batch_header_column_005='ElectoralDistrictId',
batch_header_column_006='IsPartisan',
batch_header_column_007='other::ctcl-uuid',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='elected_office_batch_id',
batch_header_map_001='elected_office_name',
batch_header_map_002='elected_office_name_es',
batch_header_map_003='elected_office_description',
batch_header_map_004='elected_office_description_es',
batch_header_map_005='electoral_district_id',
batch_header_map_006='elected_office_is_partisan',
batch_header_map_007='elected_office_ctcl_uuid',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "ELECTED_OFFICE " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='ELECTED_OFFICE',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED"
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
            # Require elected_office_id and ctcl_uuid, plus at least one of electoral_district_id, the English name,
            # or the Spanish name
            if positive_value_exists(elected_office_id) and positive_value_exists(ctcl_uuid) and \
                    (positive_value_exists(electoral_district_id) or positive_value_exists(elected_office_name) or
                     positive_value_exists(elected_office_name_es)):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=elected_office_id,
batch_row_001=elected_office_name,
batch_row_002=elected_office_name_es,
batch_row_003=elected_office_description,
batch_row_004=elected_office_description_es,
batch_row_005=electoral_district_id,
batch_row_006=elected_office_is_partisan,
batch_row_007=ctcl_uuid
)
number_of_batch_rows += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
def store_contest_office_xml(self, batch_uri, google_civic_election_id, organization_we_vote_id, xml_root,
batch_set_id=0):
"""
Retrieves ContestOffice data from CTCL xml file
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:param xml_root:
:param batch_set_id
:return:
"""
from import_export_ctcl.controllers import retrieve_candidate_from_candidate_selection
# Process VIP CandidateContest data
number_of_batch_rows = 0
first_line = True
success = True
status = ''
limit_for_testing = 0
batch_header_id = 0
candidate_selection_id_key_list = [
'candidate_selection_id_1', 'candidate_selection_id_2', 'candidate_selection_id_3',
'candidate_selection_id_4', 'candidate_selection_id_5', 'candidate_selection_id_6',
'candidate_selection_id_7', 'candidate_selection_id_8', 'candidate_selection_id_9',
'candidate_selection_id_10']
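        # Up to 10 candidate selections per contest office are mapped onto these keys below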
# Look for CandidateContest and create the batch_header first. CandidateContest is the direct child node
# of VipObject
contest_office_xml_node = xml_root.findall('CandidateContest')
# if contest_office_xml_node is not None:
for one_contest_office in contest_office_xml_node:
if positive_value_exists(limit_for_testing) and number_of_batch_rows >= limit_for_testing:
break
# look for relevant child nodes under CandidateContest: id, Name, OfficeId, ElectoralDistrictId,
# other::ctcl-uid, VotesAllowed, NumberElected
contest_office_id = one_contest_office.attrib['id']
contest_office_name_node = one_contest_office.find('Name')
if contest_office_name_node is not None:
contest_office_name = contest_office_name_node.text
else:
contest_office_name = ""
contest_office_number_elected_node = one_contest_office.find('NumberElected')
if contest_office_number_elected_node is not None:
contest_office_number_elected = contest_office_number_elected_node.text
else:
contest_office_number_elected = ""
electoral_district_id_node = one_contest_office.find('ElectoralDistrictId')
if electoral_district_id_node is not None:
electoral_district_id = electoral_district_id_node.text
else:
electoral_district_id = ""
contest_office_votes_allowed_node = one_contest_office.find('VotesAllowed')
if contest_office_votes_allowed_node is not None:
contest_office_votes_allowed = contest_office_votes_allowed_node.text
else:
contest_office_votes_allowed = ""
elected_office_id_node = one_contest_office.find('OfficeIds')
if elected_office_id_node is not None:
elected_office_id = elected_office_id_node.text
else:
elected_office_id = ""
ctcl_uuid = ""
ctcl_uuid_node = one_contest_office.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']")
if ctcl_uuid_node is not None:
ctcl_uuid = one_contest_office.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']/Value").text
candidate_selection_ids_dict = {}
ballot_selection_ids_node = one_contest_office.find('./BallotSelectionIds')
if ballot_selection_ids_node is not None:
ballot_selection_ids_str = ballot_selection_ids_node.text
if ballot_selection_ids_str:
ballot_selection_ids_value_list = ballot_selection_ids_str.split()
                    # Pair each parsed ballot selection id with a candidate_selection key,
                    # assuming a maximum of 10 ballot selection ids for a given contest office
ballot_selection_ids_dict = dict(
zip(candidate_selection_id_key_list, ballot_selection_ids_value_list))
# move this to batchrowactionContestOffice create if we run into performance/load issue
candidate_selection_list = []
for key, value in ballot_selection_ids_dict.items():
results = retrieve_candidate_from_candidate_selection(value, batch_set_id)
if results['candidate_id_found']:
candidate_selection_item = results['candidate_selection']
candidate_value = candidate_selection_item.contest_office_id
candidate_selection_list.append(candidate_value)
candidate_selection_ids_dict = dict(zip(candidate_selection_id_key_list, candidate_selection_list))
if first_line:
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='Name',
batch_header_column_002='OfficeIds',
batch_header_column_003='ElectoralDistrictId',
batch_header_column_004='VotesAllowed',
batch_header_column_005='NumberElected',
batch_header_column_006='other::ctcl-uuid',
batch_header_column_007='CandidateSelectionId1',
batch_header_column_008='CandidateSelectionId2',
batch_header_column_009='CandidateSelectionId3',
batch_header_column_010='CandidateSelectionId4',
batch_header_column_011='CandidateSelectionId5',
batch_header_column_012='CandidateSelectionId6',
batch_header_column_013='CandidateSelectionId7',
batch_header_column_014='CandidateSelectionId8',
batch_header_column_015='CandidateSelectionId9',
batch_header_column_016='CandidateSelectionId10',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='contest_office_batch_id',
batch_header_map_001='contest_office_name',
batch_header_map_002='elected_office_id',
batch_header_map_003='electoral_district_id',
batch_header_map_004='contest_office_votes_allowed',
batch_header_map_005='contest_office_number_elected',
batch_header_map_006='contest_office_ctcl_uuid',
batch_header_map_007='candidate_selection_id1',
batch_header_map_008='candidate_selection_id2',
batch_header_map_009='candidate_selection_id3',
batch_header_map_010='candidate_selection_id4',
batch_header_map_011='candidate_selection_id5',
batch_header_map_012='candidate_selection_id6',
batch_header_map_013='candidate_selection_id7',
batch_header_map_014='candidate_selection_id8',
batch_header_map_015='candidate_selection_id9',
batch_header_map_016='candidate_selection_id10',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "CONTEST_OFFICE " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='CONTEST_OFFICE',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED "
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
            # Require contest_office_id and ctcl_uuid, plus at least one of electoral_district_id or the office name
if positive_value_exists(contest_office_id) and positive_value_exists(ctcl_uuid) and \
(positive_value_exists(electoral_district_id) or positive_value_exists(contest_office_name)):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=contest_office_id,
batch_row_001=contest_office_name,
batch_row_002=elected_office_id,
batch_row_003=electoral_district_id,
batch_row_004=contest_office_votes_allowed,
batch_row_005=contest_office_number_elected,
batch_row_006=ctcl_uuid,
batch_row_007=candidate_selection_ids_dict.get('candidate_selection_id_1', ''),
batch_row_008=candidate_selection_ids_dict.get('candidate_selection_id_2', ''),
batch_row_009=candidate_selection_ids_dict.get('candidate_selection_id_3', ''),
batch_row_010=candidate_selection_ids_dict.get('candidate_selection_id_4', ''),
batch_row_011=candidate_selection_ids_dict.get('candidate_selection_id_5', ''),
batch_row_012=candidate_selection_ids_dict.get('candidate_selection_id_6', ''),
batch_row_013=candidate_selection_ids_dict.get('candidate_selection_id_7', ''),
batch_row_014=candidate_selection_ids_dict.get('candidate_selection_id_8', ''),
batch_row_015=candidate_selection_ids_dict.get('candidate_selection_id_9', ''),
batch_row_016=candidate_selection_ids_dict.get('candidate_selection_id_10', ''),
)
number_of_batch_rows += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
def store_politician_xml(self, batch_uri, google_civic_election_id, organization_we_vote_id, xml_root,
batch_set_id=0):
"""
Retrieves Politician data from CTCL xml file
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:param xml_root:
:param batch_set_id
:return:
"""
# Process VIP Person data
number_of_batch_rows = 0
first_line = True
success = True
status = ''
limit_for_testing = 0
batch_header_id = 0
# Get party names and their corresponding party ids
party_details_list = retrieve_all_party_names_and_ids_api()
# Look for Person and create the batch_header first. Person is the direct child node
# of VipObject
person_xml_node = xml_root.findall('Person')
for one_person in person_xml_node:
if positive_value_exists(limit_for_testing) and number_of_batch_rows >= limit_for_testing:
break
# look for relevant child nodes under Person: id, FullName, FirstName, LastName, MiddleName, PartyId, Email,
# PhoneNumber, Website, Twitter, ctcl-uuid
person_id = one_person.attrib['id']
person_full_name_node = one_person.find("./FullName/Text/[@language='"+LANGUAGE_CODE_ENGLISH+"']")
if person_full_name_node is not None:
person_full_name = person_full_name_node.text
else:
person_full_name = ''
person_first_name_node = one_person.find('FirstName')
if person_first_name_node is not None:
person_first_name = person_first_name_node.text
else:
person_first_name = ''
person_middle_name_node = one_person.find('MiddleName')
if person_middle_name_node is not None:
person_middle_name = person_middle_name_node.text
else:
person_middle_name = ''
person_last_name_node = one_person.find('LastName')
if person_last_name_node is not None:
person_last_name = person_last_name_node.text
else:
person_last_name = ''
person_party_name = ''
person_party_id_node = one_person.find('PartyId')
if person_party_id_node is not None:
person_party_id = person_party_id_node.text
                # get party name from person_party_id
if party_details_list is not None:
# party_details_dict = [entry for entry in party_details_list]
for one_party in party_details_list:
# get the party name matching person_party_id
try:
party_id_temp = one_party.get('party_id_temp')
if person_party_id == party_id_temp:
person_party_name = one_party.get('party_name')
break
except Exception as e:
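                            # Ignore malformed party entries and keep searching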
pass
person_email_id_node = one_person.find('./ContactInformation/Email')
if person_email_id_node is not None:
person_email_id = person_email_id_node.text
else:
person_email_id = ''
person_phone_number_node = one_person.find('./ContactInformation/Phone')
if person_phone_number_node is not None:
person_phone_number = person_phone_number_node.text
else:
person_phone_number = ''
person_website_url_node = one_person.find("./ContactInformation/Uri/[@annotation='website']")
if person_website_url_node is not None:
person_website_url = person_website_url_node.text
else:
person_website_url = ''
person_facebook_id_node = one_person.find("./ContactInformation/Uri/[@annotation='facebook']")
if person_facebook_id_node is not None:
person_facebook_id = person_facebook_id_node.text
else:
person_facebook_id = ''
person_twitter_id_node = one_person.find("./ContactInformation/Uri/[@annotation='twitter']")
if person_twitter_id_node is not None:
person_twitter_id = person_twitter_id_node.text
else:
person_twitter_id = ''
person_youtube_id_node = one_person.find("./ContactInformation/Uri/[@annotation='youtube']")
if person_youtube_id_node is not None:
person_youtube_id = person_youtube_id_node.text
else:
person_youtube_id = ''
person_googleplus_id_node = one_person.find("./ContactInformation/Uri/[@annotation='googleplus']")
if person_googleplus_id_node is not None:
person_googleplus_id = person_googleplus_id_node.text
else:
person_googleplus_id = ''
ctcl_uuid = ""
ctcl_uuid_node = one_person.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']")
if ctcl_uuid_node is not None:
ctcl_uuid = one_person.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']/Value").text
if first_line:
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='FullName',
batch_header_column_002='FirstName',
batch_header_column_003='MiddleName',
batch_header_column_004='LastName',
batch_header_column_005='PartyName',
batch_header_column_006='Email',
batch_header_column_007='Phone',
batch_header_column_008='uri::website',
batch_header_column_009='uri::facebook',
batch_header_column_010='uri::twitter',
batch_header_column_011='uri::youtube',
batch_header_column_012='uri::googleplus',
batch_header_column_013='other::ctcl-uuid',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='politician_batch_id',
batch_header_map_001='politician_full_name',
batch_header_map_002='politician_first_name',
batch_header_map_003='politician_middle_name',
batch_header_map_004='politician_last_name',
batch_header_map_005='politician_party_name',
batch_header_map_006='politician_email_address',
batch_header_map_007='politician_phone_number',
batch_header_map_008='politician_website_url',
batch_header_map_009='politician_facebook_id',
batch_header_map_010='politician_twitter_url',
batch_header_map_011='politician_youtube_id',
batch_header_map_012='politician_googleplus_id',
batch_header_map_013='politician_ctcl_uuid',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "POLITICIAN " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='POLITICIAN',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED "
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
            # Require person_id and ctcl_uuid, plus at least one of the full name or the first name
# if positive_value_exists(person_id) and ctcl_uuid is not None or person_full_name is not None or \
# person_first_name is not None:
if positive_value_exists(person_id) and positive_value_exists(ctcl_uuid) and \
(positive_value_exists(person_full_name) or positive_value_exists(person_first_name)):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=person_id,
batch_row_001=person_full_name,
batch_row_002=person_first_name,
batch_row_003=person_middle_name,
batch_row_004=person_last_name,
batch_row_005=person_party_name,
batch_row_006=person_email_id,
batch_row_007=person_phone_number,
batch_row_008=person_website_url,
batch_row_009=person_facebook_id,
batch_row_010=person_twitter_id,
batch_row_011=person_youtube_id,
batch_row_012=person_googleplus_id,
batch_row_013=ctcl_uuid,
)
number_of_batch_rows += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
def store_candidate_xml(self, batch_uri, google_civic_election_id, organization_we_vote_id, xml_root,
batch_set_id=0):
"""
Retrieves Candidate data from CTCL xml file
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:param xml_root:
:param batch_set_id
:return:
"""
# Process VIP Candidate data
number_of_batch_rows = 0
first_line = True
success = True
status = ''
limit_for_testing = 0
batch_header_id = 0
# Call party api to get corresponding party name from party id
party_details_list = retrieve_all_party_names_and_ids_api()
# Look for Candidate and create the batch_header first. Candidate is the direct child node
# of VipObject
candidate_xml_node = xml_root.findall('Candidate')
for one_candidate in candidate_xml_node:
if positive_value_exists(limit_for_testing) and number_of_batch_rows >= limit_for_testing:
break
candidate_name_english = None
candidate_ctcl_person_id = ""
candidate_party_name = ""
ctcl_uuid = ""
# look for relevant child nodes under Candidate: id, BallotName, personId, PartyId, isTopTicket,
# other::ctcl-uid
candidate_id = one_candidate.attrib['id']
candidate_selection_id = one_candidate.find("./BallotSelectionIds")
candidate_name_node_english = one_candidate.find("./BallotName/Text/[@language='"+LANGUAGE_CODE_ENGLISH+"']")
if candidate_name_node_english is not None:
candidate_name_english = candidate_name_node_english.text
candidate_ctcl_person_id_node = one_candidate.find('./PersonId')
if candidate_ctcl_person_id_node is not None:
candidate_ctcl_person_id = candidate_ctcl_person_id_node.text
candidate_party_id_node = one_candidate.find('./PartyId')
if candidate_party_id_node is not None:
candidate_party_id = candidate_party_id_node.text
# get party name from candidate_party_id
if party_details_list is not None:
# party_details_dict = [entry for entry in party_details_list]
for one_party in party_details_list:
# get the candidate party name matching candidate_party_id
if candidate_party_id == one_party.get('party_id_temp'):
candidate_party_name = one_party.get('party_name')
break
else:
candidate_party_name = ''
candidate_is_top_ticket_node = one_candidate.find('IsTopTicket')
if candidate_is_top_ticket_node is not None:
candidate_is_top_ticket = candidate_is_top_ticket_node.text
else:
candidate_is_top_ticket = ''
ctcl_uuid_node = one_candidate.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']")
if ctcl_uuid_node is not None:
ctcl_uuid = one_candidate.find(
"./ExternalIdentifiers/ExternalIdentifier/[OtherType='ctcl-uuid']/Value").text
if first_line:
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='PersonId',
batch_header_column_002='Name',
batch_header_column_003='PartyName',
batch_header_column_004='IsTopTicket',
batch_header_column_005='other::ctcl-uuid',
batch_header_column_006='other::CandidateSelectionId',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='candidate_batch_id',
batch_header_map_001='candidate_ctcl_person_id',
batch_header_map_002='candidate_name',
batch_header_map_003='candidate_party_name',
batch_header_map_004='candidate_is_top_ticket',
batch_header_map_005='candidate_ctcl_uuid',
batch_header_map_006='candidate_selection_id',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "CANDIDATE " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='CANDIDATE',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED"
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
            # Require candidate_id and ctcl_uuid, plus at least one of candidate_ctcl_person_id or the candidate name
if positive_value_exists(candidate_id) and positive_value_exists(ctcl_uuid) and \
(positive_value_exists(candidate_ctcl_person_id) or positive_value_exists(candidate_name_english)):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=candidate_id,
batch_row_001=candidate_ctcl_person_id,
batch_row_002=candidate_name_english,
batch_row_003=candidate_party_name,
batch_row_004=candidate_is_top_ticket,
batch_row_005=ctcl_uuid,
batch_row_006=candidate_selection_id
)
number_of_batch_rows += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
success = False
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
def store_state_data_from_xml(self, batch_uri, google_civic_election_id, organization_we_vote_id, xml_root,
batch_set_id=0):
"""
Retrieves state data from CTCL xml file
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:param xml_root:
:param batch_set_id
:return:
"""
        # This state data is not used right now; it is parsed for future reference
# Process VIP State data
number_of_batch_rows = 0
first_line = True
success = True
status = ''
limit_for_testing = 0
batch_header_id = 0
# Look for State and create the batch_header first. State is the direct child node of VipObject
# TODO Will this be a single node object or will there be multiple state nodes in a CTCL XML?
state_xml_node = xml_root.findall('State')
for one_state in state_xml_node:
state_name = None
if positive_value_exists(limit_for_testing) and number_of_batch_rows >= limit_for_testing:
break
# look for relevant child nodes under State: id, ocd-id, Name
state_id = one_state.attrib['id']
state_name_node = one_state.find('./Name')
if state_name_node is not None:
state_name = state_name_node.text
ocd_id_node = one_state.find("./ExternalIdentifiers/ExternalIdentifier/[Type='ocd-id']")
if ocd_id_node is not None:
ocd_id = one_state.find("./ExternalIdentifiers/ExternalIdentifier/[Type='ocd-id']/Value").text
else:
ocd_id = ''
if first_line:
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='Name',
batch_header_column_002='other::ocd-id',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='state_id',
batch_header_map_001='state_name',
batch_header_map_002='ocd_id',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "STATE " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='STATE',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED"
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
            # Require state_id and state_name before saving the row
if positive_value_exists(state_id) and (positive_value_exists(state_name)):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=state_id,
batch_row_001=state_name,
batch_row_002=ocd_id,
)
number_of_batch_rows += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
success = False
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
def store_election_metadata_from_xml(self, batch_uri, google_civic_election_id, organization_we_vote_id, xml_root,
batch_set_id=0):
"""
Retrieves election metadata from CTCL xml file
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:param xml_root:
:param batch_set_id
:return:
"""
        # This election metadata is not used right now; it is parsed for future reference
# Process VIP Election metadata
success = True
status = ''
batch_header_id = 0
# Look for Election and create the batch_header first. Election is the direct child node of VipObject
election_xml_node = xml_root.find('Election')
election_date_str = None
# look for relevant child nodes under Election: id, Date, StateId
        if election_xml_node is None:
results = {
'success': success,
'status': "STORE_ELECTION_METADATA_FROM_XML-ELECTION_NODE_NOT_FOUND",
'batch_header_id': batch_header_id,
'batch_saved': success,
}
return results
election_id = election_xml_node.attrib['id']
        election_date_xml_node = election_xml_node.find('./Date')
        if election_date_xml_node is not None:
            election_date = election_date_xml_node.text
        else:
            election_date = ''
state_id_node = election_xml_node.find("./StateId")
if state_id_node is not None:
state_id = state_id_node.text
else:
state_id = ''
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='Date',
batch_header_column_002='StateId',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='election_id',
batch_header_map_001='election_date',
batch_header_map_002='state_id',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "ELECTION " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='ELECTION',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED "
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
        # Require election_id, election_date and state_id before saving the row
if positive_value_exists(election_id) and positive_value_exists(election_date) and \
positive_value_exists(state_id):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=election_id,
batch_row_001=election_date,
batch_row_002=state_id,
)
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
success = False
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
}
return results
def store_source_metadata_from_xml(self, batch_uri, google_civic_election_id, organization_we_vote_id, xml_root,
batch_set_id=0):
"""
Retrieves source metadata from CTCL xml file
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:param xml_root:
:param batch_set_id
:return:
"""
        # This source data is not used right now; it is parsed for future reference
# Process VIP Source metadata
success = False
status = ''
batch_header_id = 0
# Look for Source and create the batch_header first. Election is the direct child node of VipObject
source_xml_node = xml_root.find('Source')
source_date_str = None
        if source_xml_node is None:
results = {
'success': success,
'status': "STORE_SOURCE_METADATA_FROM_XML-SOURCE_NODE_NOT_FOUND",
'batch_header_id': batch_header_id,
'batch_saved': success,
}
return results
# look for relevant child nodes under Source: id, DateTime, Name, OrganizationUri, VipId
source_id = source_xml_node.attrib['id']
        source_datetime_xml_node = source_xml_node.find('./DateTime')
        if source_datetime_xml_node is not None:
            source_datetime = source_datetime_xml_node.text
        else:
            source_datetime = ''
        source_name_node = source_xml_node.find("./Name")
        if source_name_node is not None:
            source_name = source_name_node.text
        else:
            source_name = ''
        organization_uri_node = source_xml_node.find("./OrganizationUri")
        if organization_uri_node is not None:
            organization_uri = organization_uri_node.text
        else:
            organization_uri = ''
        vip_id_node = source_xml_node.find("./VipId")
        if vip_id_node is not None:
            vip_id = vip_id_node.text
        else:
            vip_id = ''
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='DateTime',
batch_header_column_002='Name',
batch_header_column_003='OrganizationUri',
batch_header_column_004='VipId',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='source_id',
batch_header_map_001='source_datetime',
batch_header_map_002='source_name',
batch_header_map_003='organization_uri',
batch_header_map_004='vip_id'
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "SOURCE " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='SOURCE',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED "
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
        # Require source_id, source_datetime, source_name and organization_uri before saving the row
if positive_value_exists(source_id) and positive_value_exists(source_datetime) and \
positive_value_exists(source_name) and positive_value_exists(organization_uri):
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=source_id,
batch_row_001=source_datetime,
batch_row_002=source_name,
batch_row_003=organization_uri,
batch_row_004=vip_id
)
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
success = False
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
}
return results
def create_batch_set_vip_xml(self, batch_file, batch_uri, google_civic_election_id, organization_we_vote_id):
"""
Retrieves CTCL Batch Set data from an xml file - Measure, Office, Candidate, Politician
:param batch_file:
:param batch_uri:
:param google_civic_election_id:
:param organization_we_vote_id:
:return:
"""
from import_export_ctcl.controllers import create_candidate_selection_rows
import_date = date.today()
# Retrieve from XML
if batch_file:
xml_tree = ElementTree.parse(batch_file)
batch_set_name = batch_file.name + " - " + str(import_date)
else:
request = urllib.request.urlopen(batch_uri)
# xml_data = request.read()
# xml_data = xmltodict.parse(xml_data)
# # xml_data_list_json = list(xml_data)
# structured_json = json.dumps(xml_data)
xml_tree = ElementTree.parse(request)
request.close()
# set batch_set_name as file_name
batch_set_name_list = batch_uri.split('/')
batch_set_name = batch_set_name_list[len(batch_set_name_list) - 1] + " - " + str(import_date)
xml_root = xml_tree.getroot()
status = ''
success = False
number_of_batch_rows = 0
batch_set_id = 0
continue_batch_set_processing = True # Set to False if we run into a problem that requires we stop processing
if xml_root:
# create batch_set object
try:
batch_set = BatchSet.objects.create(batch_set_description_text="", batch_set_name=batch_set_name,
batch_set_source=BATCH_SET_SOURCE_CTCL,
google_civic_election_id=google_civic_election_id,
source_uri=batch_uri, import_date=import_date)
batch_set_id = batch_set.id
if positive_value_exists(batch_set_id):
status += " BATCH_SET_SAVED-VIP_XML "
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
continue_batch_set_processing = False
batch_set_id = 0
status += " EXCEPTION_BATCH_SET-VIP_XML: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
# import Electoral District
skip_electoral_district = False # We can set this to True during development to save time
if continue_batch_set_processing and not skip_electoral_district:
electoral_district_list_found = False
electoral_district_item_list = xml_root.findall('ElectoralDistrict')
if not len(electoral_district_item_list):
continue_batch_set_processing = False
else:
results = electoral_district_import_from_xml_data(electoral_district_item_list)
if results['success']:
status += "CREATE_BATCH_SET_ELECTORAL_DISTRICT_IMPORTED-VIP_XML "
number_of_batch_rows += results['saved']
# TODO check this whether it should be only saved or updated Electoral districts
number_of_batch_rows += results['updated']
electoral_district_list_found = True
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET_ELECTORAL_DISTRICT_ERRORS-VIP_XML "
# import Party
skip_party = False # We can set this to True during development to save time
if continue_batch_set_processing and not skip_party:
party_list_found = False
party_item_list = xml_root.findall('Party')
if not len(party_item_list):
continue_batch_set_processing = False
status += " CREATE_BATCH_SET-PARTY_IMPORT_ERRORS-NO_party_item_list "
else:
results = party_import_from_xml_data(party_item_list)
if results['success']:
status += "CREATE_BATCH_SET_PARTY_IMPORTED-VIP_XML "
number_of_batch_rows += results['saved']
number_of_batch_rows += results['updated']
# TODO check this whether it should be only saved or updated Electoral districts
party_list_found = True
                # A given data source may not always have electoral district and/or party data,
                # but the referenced electoral district id or party id might already be present
                # in the master database tables, hence the code below is commented out
# if not electoral_district_list_found or not party_list_found:
# results = {
# 'success': False,
# 'status': status,
# 'batch_header_id': 0,
# 'batch_saved': False,
# 'number_of_batch_rows': 0,
# }
# return results
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-PARTY_IMPORT_ERRORS-VIP_XML "
# look for different data sets in the XML - ElectedOffice, ContestOffice, Candidate, Politician, Measure
# Elected Office
skip_elected_office = False # We can set this to True during development to save time
if continue_batch_set_processing and not skip_elected_office:
results = self.store_elected_office_xml(batch_uri, google_civic_election_id, organization_we_vote_id,
xml_root, batch_set_id)
if results['success']:
# Elected Office data found
status += 'CREATE_BATCH_SET_ELECTED_OFFICE_DATA_FOUND'
number_of_batch_rows += results['number_of_batch_rows']
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-PARTY_IMPORT_ERRORS "
# Candidate-to-office-mappings
skip_candidate_mapping = False # We can set this to True during development to save time
if continue_batch_set_processing and not skip_candidate_mapping:
results = create_candidate_selection_rows(xml_root, batch_set_id)
if results['success']:
                # Candidate-to-office mapping data found
status += 'CREATE_BATCH_SET_CANDIDATE_SELECTION_DATA_FOUND'
number_of_batch_rows += results['number_of_batch_rows']
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-CANDIDATE_SELECTION_ERRORS "
# ContestOffice entries
skip_contest_office = False # We can set this to True during development to save time
if continue_batch_set_processing and not skip_contest_office:
results = self.store_contest_office_xml(
batch_uri, google_civic_election_id, organization_we_vote_id, xml_root, batch_set_id)
if results['success']:
# Contest Office data found
status += 'CREATE_BATCH_SET_CONTEST_OFFICE_DATA_FOUND'
number_of_batch_rows += results['number_of_batch_rows']
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-CONTEST_OFFICE_ERRORS "
# Politician entries
skip_politician = False # We can set this to True during development to save time
if continue_batch_set_processing and not skip_politician:
results = self.store_politician_xml(
batch_uri, google_civic_election_id, organization_we_vote_id, xml_root, batch_set_id)
if results['success']:
status += 'CREATE_BATCH_SET_POLITICIAN_DATA_FOUND'
number_of_batch_rows += results['number_of_batch_rows']
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-POLITICIAN_ERRORS "
# Candidate entries
skip_candidate = False # We can set this to True during development to save time
if continue_batch_set_processing and not skip_candidate:
results = self.store_candidate_xml(
batch_uri, google_civic_election_id, organization_we_vote_id, xml_root, batch_set_id)
if results['success']:
status += 'CREATE_BATCH_SET_CANDIDATE_DATA_FOUND'
number_of_batch_rows += results['number_of_batch_rows']
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-CANDIDATE_ERRORS "
# Measure entries
skip_measure = False # We can set this to True during development to save time
if continue_batch_set_processing and not skip_measure:
results = self.store_measure_xml(
batch_uri, google_civic_election_id, organization_we_vote_id, xml_root, batch_set_id)
if results['success']:
status += 'CREATE_BATCH_SET_MEASURE_DATA_FOUND'
number_of_batch_rows += results['number_of_batch_rows']
success = True
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-MEASURE_ERRORS "
# State data entries
if continue_batch_set_processing:
results = self.store_state_data_from_xml(batch_uri, google_civic_election_id, organization_we_vote_id,
xml_root, batch_set_id)
if results['success']:
status += 'CREATE_BATCH_SET_STATE_DATA_FOUND'
number_of_batch_rows += results['number_of_batch_rows']
success = True
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-STATE_DATA_ERRORS "
# Election metadata entries
if continue_batch_set_processing:
results = self.store_election_metadata_from_xml(
batch_uri, google_civic_election_id, organization_we_vote_id, xml_root, batch_set_id)
if results['success']:
status += ' CREATE_BATCH_SET_ELECTION_METADATA_FOUND '
number_of_batch_rows += 1
success = True
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-ELECTION_METADATA_ERRORS "
# Source metadata entries
if continue_batch_set_processing:
results = self.store_source_metadata_from_xml(
batch_uri, google_civic_election_id, organization_we_vote_id, xml_root, batch_set_id)
if results['success']:
status += ' CREATE_BATCH_SET_SOURCE_METADATA_FOUND '
number_of_batch_rows += 1
success = True
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-SOURCE_METADATA_ERRORS "
results = {
'success': success,
'status': status,
'batch_set_id': batch_set_id,
'batch_saved': success,
'number_of_batch_rows': number_of_batch_rows,
}
return results
def count_number_of_batch_action_rows(self, header_id, kind_of_batch):
"""
Return count of batch rows for a given header id
:param header_id:
:return:
"""
number_of_batch_action_rows = 0
if positive_value_exists(header_id):
if kind_of_batch == MEASURE:
number_of_batch_action_rows = BatchRowActionMeasure.objects.filter(batch_header_id=header_id).count()
elif kind_of_batch == ELECTED_OFFICE:
number_of_batch_action_rows = BatchRowActionElectedOffice.objects.filter(batch_header_id=header_id).\
count()
elif kind_of_batch == CONTEST_OFFICE:
number_of_batch_action_rows = BatchRowActionContestOffice.objects.filter(batch_header_id=header_id).\
count()
elif kind_of_batch == CANDIDATE:
number_of_batch_action_rows = BatchRowActionCandidate.objects.filter(batch_header_id=header_id).count()
elif kind_of_batch == POLITICIAN:
number_of_batch_action_rows = BatchRowActionPolitician.objects.filter(batch_header_id=header_id).count()
else:
number_of_batch_action_rows = 0
return number_of_batch_action_rows
def count_number_of_batches_in_batch_set(self, batch_set_id=0, batch_row_analyzed=None, batch_row_created=None):
number_of_batches = 0
batch_description_query = BatchDescription.objects.filter(batch_set_id=batch_set_id)
batch_description_list = list(batch_description_query)
for batch_description in batch_description_list:
batch_row_query = BatchRow.objects.filter(batch_header_id=batch_description.batch_header_id)
if batch_row_analyzed is not None:
batch_row_query = batch_row_query.filter(batch_row_analyzed=batch_row_analyzed)
if batch_row_created is not None:
batch_row_query = batch_row_query.filter(batch_row_created=batch_row_created)
batch_row_count = batch_row_query.count()
if positive_value_exists(batch_row_count):
number_of_batches += 1
return number_of_batches
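# Illustration only (not executed): a hypothetical progress check for a batch set, using the
# method above; the batch_set_id value is made up for the example.
#   batch_manager = BatchManager()
#   total = batch_manager.count_number_of_batches_in_batch_set(batch_set_id=123)
#   analyzed = batch_manager.count_number_of_batches_in_batch_set(batch_set_id=123, batch_row_analyzed=True)
#   created = batch_manager.count_number_of_batches_in_batch_set(batch_set_id=123, batch_row_created=True)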
def fetch_batch_header_translation_suggestion(self, kind_of_batch, alternate_header_value):
"""
We are looking at one header value from a file imported by an admin or volunteer. We want to see if
there are any suggestions for headers already recognized by We Vote.
:param kind_of_batch:
:param alternate_header_value:
:return:
"""
results = self.retrieve_batch_header_translation_suggestion(kind_of_batch, alternate_header_value)
if results['batch_header_translation_suggestion_found']:
batch_header_translation_suggestion = results['batch_header_translation_suggestion']
return batch_header_translation_suggestion.header_value_recognized_by_we_vote
return ""
# TODO This hasn't been built
def fetch_batch_row_translation_map(self, kind_of_batch, batch_row_name, incoming_alternate_row_value):
results = self.retrieve_batch_row_translation_map(kind_of_batch, incoming_alternate_row_value)
if results['batch_header_translation_suggestion_found']:
batch_header_translation_suggestion = results['batch_header_translation_suggestion']
return batch_header_translation_suggestion.header_value_recognized_by_we_vote
return ""
def fetch_elected_office_name_from_elected_office_ctcl_id(self, elected_office_ctcl_id, batch_set_id):
"""
Take in elected_office_ctcl_id and batch_set_id, look up BatchRow and return elected_office_name
:param elected_office_ctcl_id:
:param batch_set_id:
:return:
"""
elected_office_name = ''
batch_header_id = 0
# From batch_description, get the header_id using batch_set_id
# batch_header_id = get_batch_header_id_from_batch_description(batch_set_id, ELECTED_OFFICE)
try:
if positive_value_exists(batch_set_id):
batch_description_on_stage = BatchDescription.objects.get(batch_set_id=batch_set_id,
kind_of_batch=ELECTED_OFFICE)
if batch_description_on_stage:
batch_header_id = batch_description_on_stage.batch_header_id
except BatchDescription.DoesNotExist:
elected_office_name = ''
pass
# Lookup BatchRow with given header_id and elected_office_ctcl_id. But before doing that, we need to get batch
# row column name that matches 'elected_office_batch_id'
try:
batch_manager = BatchManager()
if positive_value_exists(batch_header_id) and elected_office_ctcl_id:
batch_header_map = BatchHeaderMap.objects.get(batch_header_id=batch_header_id)
# Get the column name in BatchRow that stores elected_office_batch_id - id taken from batch_header_map
# eg: batch_row_000 -> elected_office_batch_id
elected_office_id_column_name = batch_manager.retrieve_column_name_from_batch_row(
"elected_office_batch_id", batch_header_map)
# we found batch row column name corresponding to elected_office_batch_id, now look up batch_row table
# with given batch_header_id and elected_office_batch_id (batch_row_00)
batch_row_on_stage = BatchRow.objects.get(batch_header_id=batch_header_id,
**{elected_office_id_column_name: elected_office_ctcl_id})
# we know the batch row, next retrieve value for elected_office_name eg: off1 -> NC State Senator
elected_office_name = batch_manager.retrieve_value_from_batch_row('elected_office_name',
batch_header_map, batch_row_on_stage)
except BatchRow.DoesNotExist:
elected_office_name = ''
return elected_office_name
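# Note on the dynamic column lookup above: the physical BatchRow column that holds a given source
# field (e.g. 'batch_row_003') differs from import to import, so we first resolve the column name
# from the BatchHeaderMap and then filter with a dynamically built keyword argument. A minimal
# sketch of the same pattern (column name and value are placeholders):
#   column_name = 'batch_row_003'
#   row = BatchRow.objects.get(batch_header_id=batch_header_id, **{column_name: 'off1'})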
def fetch_state_code_from_person_id_in_candidate(self, person_id, batch_set_id):
"""
Take in person_id, batch_set_id, look up BatchRowActionCandidate and return state_code
:param person_id:
:param batch_set_id:
:return:
"""
state_code = ''
batch_header_id = 0
# From batch_description, get the header_id using batch_set_id
# batch_header_id = get_batch_header_id_from_batch_description(batch_set_id, CANDIDATE)
try:
if positive_value_exists(batch_set_id):
batch_description_on_stage = BatchDescription.objects.get(batch_set_id=batch_set_id,
kind_of_batch=CANDIDATE)
if batch_description_on_stage:
batch_header_id = batch_description_on_stage.batch_header_id
except BatchDescription.DoesNotExist:
pass
try:
if positive_value_exists(batch_header_id) and person_id is not None:
batchrowaction_candidate = BatchRowActionCandidate.objects.get(batch_header_id=batch_header_id,
candidate_ctcl_person_id=person_id)
if batchrowaction_candidate is not None:
state_code = batchrowaction_candidate.state_code
if state_code is None:
return ''
except BatchRowActionCandidate.DoesNotExist:
state_code = ''
return state_code
def retrieve_election_details_from_election_day_or_state_code(self, election_day='', state_code='', read_only=True):
"""
Retrieve election_name and google_civic_election_id from election_day and/or state_code
:param election_day:
:param state_code:
:param read_only:
:return:
"""
success = False
election_name = ''
google_civic_election_id = ''
# election lookup using state & election day, and fetch google_civic_election_id
election_manager = ElectionManager()
election_results = election_manager.retrieve_elections_by_election_date(
election_day_text=election_day, read_only=read_only)
if election_results['success']:
election_list = election_results['election_list']
if len(election_list) == 1:
[election] = election_list
election_name = election.election_name
google_civic_election_id = election.google_civic_election_id
success = True
else:
# use state_code & election_date for lookup. If multiple entries found, do not set
# google_civic_election_id
election_results = election_manager.retrieve_elections_by_state_and_election_date(
state_code=state_code, election_day_text=election_day, read_only=read_only)
if election_results['success']:
election_list = election_results['election_list']
if len(election_list) == 1:
[election] = election_list
election_name = election.election_name
google_civic_election_id = election.google_civic_election_id
success = True
results = {
'success': success,
'election_name': election_name,
'google_civic_election_id': google_civic_election_id,
}
return results
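# Illustration only (not executed): resolving an election while processing import data; the
# election day and state code are example values.
#   batch_manager = BatchManager()
#   election_results = batch_manager.retrieve_election_details_from_election_day_or_state_code(
#       election_day='2018-11-06', state_code='CA', read_only=True)
#   if positive_value_exists(election_results['google_civic_election_id']):
#       google_civic_election_id = election_results['google_civic_election_id']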
def create_batch_set_organization_endorsements(self, organization):
"""
Create batch set for organization endorsements
:param organization:
:return:
"""
batch_saved = False
status = ''
success = False
continue_batch_set_processing = True  # Set to False below when an import step fails
number_of_batch_rows = 0
batch_set_id = 0
election_name = ''
structured_organization_endorsement_json = ''
google_civic_election_id = 0
organization_endorsements_api_url = organization.organization_endorsements_api_url
if not organization_endorsements_api_url:
results = {
'success': False,
'status': "CREATE_BATCH_SET_ORGANIZATION_ENDORSEMENTS-INVALID_URL",
'batch_saved': batch_saved,
'number_of_batch_rows': 0,
'election_name': election_name,
'batch_set_id': batch_set_id,
'google_civic_election_id': google_civic_election_id,
}
return results
import_date = date.today()
try:
endorsement_req = urllib.request.Request(organization_endorsements_api_url,
headers={'User-Agent': 'Mozilla/5.0'})
endorsement_url = urlopen(endorsement_req)
# endorsement_url.close()
# structured_organization_endorsement_json = json.loads(endorsement_url)
organization_endorsement_url = endorsement_url.read()
organization_endorsement_json = organization_endorsement_url.decode('utf-8')
structured_organization_endorsement_json = json.loads(organization_endorsement_json)
batch_set_name_url = urlquote(organization_endorsements_api_url)
except Exception as e:
batch_set_id = 0
status += " EXCEPTION_BATCH_SET: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
if not structured_organization_endorsement_json:
results = {
'success': False,
'status': "CREATE_BATCH_SET_ORGANIZATION_ENDORSEMENT_FAILED",
'batch_saved': batch_saved,
'number_of_batch_rows': 0,
'election_name': election_name,
'batch_set_id': batch_set_id,
'google_civic_election_id': google_civic_election_id,
}
return results
# set batch_set_name as file_name
batch_set_name_list = batch_set_name_url.split('/')
batch_set_name = organization.organization_name + " - " + batch_set_name_list[len(batch_set_name_list) - 1] + \
" - " + str(import_date)
# create batch_set object
try:
batch_set = BatchSet.objects.create(batch_set_description_text="", batch_set_name=batch_set_name,
batch_set_source=BATCH_SET_SOURCE_IMPORT_EXPORT_ENDORSEMENTS,
source_uri=batch_set_name_url, import_date=import_date)
batch_set_id = batch_set.id
if positive_value_exists(batch_set_id):
status += " BATCH_SET_SAVED-ORG_ENDORSEMENTS "
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
batch_set_id = 0
status += " EXCEPTION_BATCH_SET-ORG_ENDORSEMENTS: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
# import metadata like organization name, url, endorsement url, twitter url, org image url, email
# organization_name = structured_organization_endorsement_json['organization_name']
# organization_url = structured_organization_endorsement_json['organization_url']
# organization_endorsements_url = structured_organization_endorsement_json['organization_endorsements_url']
# organization_twitter_url = structured_organization_endorsement_json['organization_twitter_url']
# organization_image_url = structured_organization_endorsement_json['organization_image_url']
# organization_image_url_https = structured_organization_endorsement_json['organization_image_url_https']
# organization_email = structured_organization_endorsement_json['organization_email']
candidate_positions_list = structured_organization_endorsement_json['candidate_positions']
# measure_positions_list = structured_organization_endorsement_json['measure_positions']
organization_we_vote_id = organization.we_vote_id
organization_twitter_handle = organization.organization_twitter_handle
# import Offices from json
results = self.import_offices_from_endorsement_json(batch_set_name_url, batch_set_id, organization_we_vote_id,
candidate_positions_list)
if results['success']:
status += 'CREATE_BATCH_SET_OFFICE_DATA_IMPORTED-ORG_ENDORSEMENTS '
number_of_batch_rows += results['number_of_offices']
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-OFFICE_ERRORS-ORG_ENDORSEMENTS "
# import Candidates from json
results = self.import_candidates_from_endorsement_json(batch_set_name_url, batch_set_id,
organization_we_vote_id, candidate_positions_list)
if results['success']:
status += 'CREATE_BATCH_SET_CANDIDATE_DATA_IMPORTED-ORG_ENDORSEMENTS '
number_of_batch_rows += results['number_of_candidates']
else:
continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET-CANDIDATE_ERRORS "
results = self.import_candidate_positions_from_endorsement_json(batch_set_name_url, batch_set_id,
organization_we_vote_id,
organization_twitter_handle,
candidate_positions_list)
if results['success']:
success = True
status += "CREATE_BATCH_SET_CANDIDATE_POSITIONS_IMPORTED "
number_of_batch_rows += results['number_of_candidate_positions']
# TODO Confirm whether this count should include only saved candidate positions, or updated ones as well
# number_of_batch_rows += results['updated']
batch_saved = True
election_name = results['election_name']
google_civic_election_id = results['google_civic_election_id']
else:
# continue_batch_set_processing = False
status += results['status']
status += " CREATE_BATCH_SET_CANDIDATE_POSITIONS_ERRORS "
results = {
'success': success,
'status': status,
'number_of_batch_rows': number_of_batch_rows,
'batch_saved': batch_saved,
'election_name': election_name,
'batch_set_id': batch_set_id,
'google_civic_election_id': google_civic_election_id,
}
return results
def import_offices_from_endorsement_json(self, batch_uri='', batch_set_id='', organization_we_vote_id='',
candidate_positions_list=''):
"""
Import Offices from organization endorsements json file
:param batch_uri:
:param batch_set_id:
:param organization_we_vote_id:
:param candidate_positions_list:
:return:
"""
status = ''
success = False
number_of_offices = 0
first_line = True
election_day = ''
google_civic_election_id = 0
if not candidate_positions_list:
results = {
'success': False,
'status': "IMPORT_OFFICES_FROM_ENDORSEMENT_JSON-INVALID_DATA",
'number_of_offices': 0,
'election_day': '',
'google_civic_election_id': google_civic_election_id,
}
return results
# else:
for one_entry in candidate_positions_list:
# read office details for each candidate position
office_name = one_entry['office_name']
state_code = one_entry['state_code']
candidate_name = one_entry['name']
election_day = one_entry['election_day']
google_civic_election_id = one_entry['google_civic_election_id']
party = one_entry['party']
office_ocd_division_id = one_entry['office_ocd_division_id']
if first_line:
# create batch_header and batch_header_map for candidate_positions
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='office_name',
batch_header_column_001='state_code',
batch_header_column_002='candidate_name',
batch_header_column_003='election_day',
batch_header_column_004='google_civic_election_id',
batch_header_column_005='party',
batch_header_column_006='office_ocd_division_id',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='contest_office_name',
batch_header_map_001='state_code',
batch_header_map_002='candidate_name',
batch_header_map_003='election_day',
batch_header_map_004='google_civic_election_id',
batch_header_map_005='party',
batch_header_map_006='office_ocd_division_id',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "ENDORSEMENTS_JSON_OFFICES " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='CONTEST_OFFICE',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED "
success = True
except Exception as e:
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=office_name,
batch_row_001=state_code,
batch_row_002=candidate_name,
batch_row_003=election_day,
batch_row_004=google_civic_election_id,
batch_row_005=party,
batch_row_006=office_ocd_division_id,
)
number_of_offices += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_offices': number_of_offices,
'election_day': election_day,
'google_civic_election_id': google_civic_election_id,
}
return results
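# The import_*_from_endorsement_json methods in this family all follow the same pattern: on the
# first JSON entry we create one BatchHeader (raw column names), one BatchHeaderMap (mapping those
# columns to We Vote field names) and one BatchDescription tied to the batch_set_id; every entry
# in the list then becomes one BatchRow whose batch_row_NNN columns line up positionally with the
# header columns.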
def import_candidates_from_endorsement_json(self, batch_uri='', batch_set_id='', organization_we_vote_id='',
candidate_positions_list=''):
"""
Import Candidates from organization endorsements json file
:param batch_uri:
:param batch_set_id:
:param organization_we_vote_id:
:param candidate_positions_list:
:return:
"""
status = ''
success = False
number_of_candidates = 0
first_line = True
election_day = ''
google_civic_election_id = 0
if not candidate_positions_list:
results = {
'success': False,
'status': "IMPORT_CANDIDATES_FROM_ENDORSEMENT_JSON-INVALID_DATA",
'number_of_candidates': 0,
'election_day': election_day,
'google_civic_election_id': google_civic_election_id,
}
return results
# else:
for one_entry in candidate_positions_list:
# read position details for each candidate
candidate_name = one_entry['name']
candidate_facebook_url = one_entry['facebook_url']
candidate_twitter_url = one_entry['twitter_url']
candidate_website_url = one_entry['website_url']
candidate_contact_form_url = one_entry['candidate_contact_form_url']
party = one_entry['party']
contest_office_name = one_entry['office_name']
candidate_profile_image_url_https = one_entry['profile_image_url_https']
state_code = one_entry['state_code']
election_day = one_entry['election_day']
google_civic_election_id = one_entry['google_civic_election_id']
candidate_ocd_division_id = one_entry['candidate_ocd_division_id']
if first_line:
# create batch_header and batch_header_map for candidate_positions
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='name',
batch_header_column_001='twitter_url',
batch_header_column_002='facebook_url',
batch_header_column_003='more_info_url',
batch_header_column_004='state_code',
batch_header_column_005='office_name',
batch_header_column_006='profile_image_url_https',
batch_header_column_007='party',
batch_header_column_008='election_day',
batch_header_column_009='google_civic_election_id',
batch_header_column_010='candidate_ocd_division_id',
batch_header_column_011='candidate_contact_form_url'
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='candidate_name',
batch_header_map_001='candidate_twitter_handle',
batch_header_map_002='facebook_url',
batch_header_map_003='candidate_url',
batch_header_map_004='state_code',
batch_header_map_005='contest_office_name',
batch_header_map_006='candidate_profile_image_url',
batch_header_map_007='candidate_party_name',
batch_header_map_008='election_day',
batch_header_map_009='google_civic_election_id',
batch_header_map_010='candidate_ocd_division_id',
batch_header_map_011='candidate_contact_form_url'
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "ENDORSEMENTS_JSON_CANDIDATES " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='CANDIDATE',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED "
success = True
except Exception as e:
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=candidate_name,
batch_row_001=candidate_twitter_url,
batch_row_002=candidate_facebook_url,
batch_row_003=candidate_website_url,
batch_row_004=state_code,
batch_row_005=contest_office_name,
batch_row_006=candidate_profile_image_url_https,
batch_row_007=party,
batch_row_008=election_day,
batch_row_009=google_civic_election_id,
batch_row_010=candidate_ocd_division_id,
batch_row_011=candidate_contact_form_url,
)
number_of_candidates += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_candidates': number_of_candidates,
'election_day': election_day,
'google_civic_election_id': google_civic_election_id,
}
return results
def import_candidate_positions_from_endorsement_json(self, batch_uri, batch_set_id, organization_we_vote_id,
organization_twitter_handle, candidate_positions_list):
"""
Import candidate positions from organization endorsements json file
:param batch_uri:
:param batch_set_id:
:param organization_we_vote_id:
:param organization_twitter_handle:
:param candidate_positions_list:
:return:
"""
status = ''
success = False
number_of_candidate_positions = 0
first_line = True
election_name = ''
google_civic_election_id = 0
if not candidate_positions_list:
results = {
'success': False,
'status': "IMPORT_CANDIDATE_POSITIONS_FROM_ENDORSEMENT_JSON-INVALID_DATA",
'candidate_positions_saved': False,
'number_of_candidate_positions': 0,
'election_name': election_name,
'google_civic_election_id': google_civic_election_id,
}
return results
# else:
for one_entry in candidate_positions_list:
# read position details for each candidate
candidate_name = one_entry['name']
stance = one_entry['stance']
percent_rating = one_entry['percent_rating']
grade_rating = one_entry['grade_rating']
candidate_twitter_url = one_entry['twitter_url']
candidate_website_url = one_entry['website_url']
candidate_contact_form_url = one_entry['candidate_contact_form_url']
candidate_position_description = one_entry['position_description']
office_name = one_entry['office_name']
state_code = one_entry['state_code']
election_day = one_entry['election_day']
google_civic_election_id = one_entry['google_civic_election_id']
organization_position_url = one_entry['organization_position_url']
if first_line:
# create batch_header and batch_header_map for candidate_positions
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='name',
batch_header_column_001='stance',
batch_header_column_002='percent_rating',
batch_header_column_003='grade_rating',
batch_header_column_004='organization_twitter_handle',
batch_header_column_005='twitter_url',
batch_header_column_006='more_info_url',
batch_header_column_007='position_description',
batch_header_column_008='office_name',
batch_header_column_009='state_code',
batch_header_column_010='election_day',
batch_header_column_011='google_civic_election_id',
batch_header_column_012='organization_position_url',
batch_header_column_013='candidate_contact_form_url',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='candidate_name',
batch_header_map_001='stance',
batch_header_map_002='percent_rating',
batch_header_map_003='grade_rating',
batch_header_map_004='organization_twitter_handle',
batch_header_map_005='candidate_twitter_handle',
batch_header_map_006='more_info_url',
batch_header_map_007='statement_text',
batch_header_map_008='contest_office_name',
batch_header_map_009='state_code',
batch_header_map_010='election_day',
batch_header_map_011='google_civic_election_id',
batch_header_map_012='organization_position_url',
batch_header_map_013='candidate_contact_form_url',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "ENDORSEMENTS_JSON_CANDIDATE_POSITIONS " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
google_civic_election_id=google_civic_election_id,
kind_of_batch='POSITION',
organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED "
success = True
except Exception as e:
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=candidate_name,
batch_row_001=stance,
batch_row_002=percent_rating,
batch_row_003=grade_rating,
batch_row_004=organization_twitter_handle,
batch_row_005=candidate_twitter_url,
batch_row_006=candidate_website_url,
batch_row_007=candidate_position_description,
batch_row_008=office_name,
batch_row_009=state_code,
batch_row_010=election_day,
batch_row_011=google_civic_election_id,
batch_row_012=organization_position_url,
batch_row_013=candidate_contact_form_url,
)
number_of_candidate_positions += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_candidate_positions': number_of_candidate_positions,
'election_name': election_name,
'google_civic_election_id': google_civic_election_id,
}
return results
def import_measure_positions_from_endorsement_json(self, batch_uri, batch_set_id, measure_positions_list):
"""
Import measure positions from organization endorsements json file
:param batch_uri:
:param batch_set_id:
:param measure_positions_list:
:return:
"""
status = ''
success = False
number_of_measure_positions = 0
first_line = True
election_name = ''
google_civic_election_id = 0
if not measure_positions_list:
results = {
'success': False,
'status': "IMPORT_MEASURE_POSITIONS_FROM_ENDORSEMENT_JSON-INVALID_DATA",
'measure_positions_saved': False,
'number_of_measure_positions': 0,
'election_name': election_name,
'google_civic_election_id': google_civic_election_id,
}
return results
# else:
for one_entry in measure_positions_list:
# read position details for each candidate
measure_name = one_entry['name']
stance = one_entry['stance']
measure_ocd_division_id = one_entry['measure_ocd_division_id']
organization_position_url = one_entry['organization_position_url']
measure_id = one_entry['id']
twitter_url = one_entry['twitter_url']
facebook_url = one_entry['facebook_url']
website_url = one_entry['website_url']
image_url = one_entry['image_url']
image_url_https = one_entry['image_url_https']
measure_position_description = one_entry['position_description']
state_code = one_entry['state_code']
election_day = one_entry['election_day']
google_civic_election_id = one_entry['google_civic_election_id']
if first_line:
# create batch_header and batch_header_map for candidate_positions
first_line = False
try:
batch_header = BatchHeader.objects.create(
batch_header_column_000='id',
batch_header_column_001='name',
batch_header_column_002='stance',
batch_header_column_003='measure_ocd_division_id',
batch_header_column_004='organization_position_url',
batch_header_column_005='twitter_url',
batch_header_column_006='facebook_url',
batch_header_column_007='website_url',
batch_header_column_008='image_url',
batch_header_column_009='image_url_https',
batch_header_column_010='position_description',
batch_header_column_011='state_code',
batch_header_column_012='election_day',
batch_header_column_013='google_civic_election_id',
)
batch_header_id = batch_header.id
if positive_value_exists(batch_header_id):
# Save an initial BatchHeaderMap
batch_header_map = BatchHeaderMap.objects.create(
batch_header_id=batch_header_id,
batch_header_map_000='measure_id',
batch_header_map_001='measure_title',
batch_header_map_002='stance',
batch_header_map_003='measure_ocd_division_id',
batch_header_map_004='organization_position_url',
batch_header_map_005='measure_twitter_handle',
batch_header_map_006='facebook_url',
batch_header_map_007='more_info_url',
batch_header_map_008='image_url',
batch_header_map_009='image_url_https',
batch_header_map_010='statement_text',
batch_header_map_011='state_code',
batch_header_map_012='election_day',
batch_header_map_013='google_civic_election_id',
)
batch_header_map_id = batch_header_map.id
status += " BATCH_HEADER_MAP_SAVED"
if positive_value_exists(batch_header_id) and positive_value_exists(batch_header_map_id):
# Now save the BatchDescription
batch_name = "ENDORSEMENTS_JSON_MEASURES " + " batch_header_id: " + str(batch_header_id)
batch_description_text = ""
batch_description = BatchDescription.objects.create(
batch_header_id=batch_header_id,
batch_header_map_id=batch_header_map_id,
batch_name=batch_name,
batch_description_text=batch_description_text,
# google_civic_election_id=google_civic_election_id,
kind_of_batch='POSITION',
# organization_we_vote_id=organization_we_vote_id,
source_uri=batch_uri,
batch_set_id=batch_set_id,
)
status += " BATCH_DESCRIPTION_SAVED "
success = True
except Exception as e:
batch_header_id = 0
status += " EXCEPTION_BATCH_HEADER: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
if not positive_value_exists(batch_header_id):
break
try:
batch_row = BatchRow.objects.create(
batch_header_id=batch_header_id,
batch_row_000=measure_id,
batch_row_001=measure_name,
batch_row_002=stance,
batch_row_003=measure_ocd_division_id,
batch_row_004=organization_position_url,
batch_row_005=twitter_url,
batch_row_006=facebook_url,
batch_row_007=website_url,
batch_row_008=image_url,
batch_row_009=image_url_https,
batch_row_010=measure_position_description,
batch_row_011=state_code,
batch_row_012=election_day,
batch_row_013=google_civic_election_id,
)
number_of_measure_positions += 1
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_ROW: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
break
results = {
'success': success,
'status': status,
'batch_header_id': batch_header_id,
'batch_saved': success,
'number_of_measure_positions': number_of_measure_positions,
'election_name': election_name,
'google_civic_election_id': google_civic_election_id,
}
return results
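# For reference, the endorsement JSON consumed by the import_*_from_endorsement_json methods above
# is expected to provide a 'candidate_positions' list (and, per the commented-out code, a
# 'measure_positions' list). Based on the keys read above, entries look roughly like the sketch
# below; all values are illustrative only.
#   {
#       "candidate_positions": [
#           {"name": "...", "stance": "SUPPORT", "percent_rating": "", "grade_rating": "",
#            "office_name": "...", "state_code": "CA", "election_day": "2018-11-06",
#            "google_civic_election_id": "", "party": "", "twitter_url": "", "facebook_url": "",
#            "website_url": "", "candidate_contact_form_url": "", "position_description": "",
#            "organization_position_url": "", "profile_image_url_https": "",
#            "office_ocd_division_id": "", "candidate_ocd_division_id": ""}
#       ],
#       "measure_positions": []
#   }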
class BatchSet(models.Model):
"""
A “batch set” groups the related batches created from one imported CSV or JSON source; we store basic information about the import in this table.
"""
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=True, blank=True, db_index=True)
state_code = models.CharField(
verbose_name="state code for this data", max_length=2, null=True, blank=True, db_index=True)
batch_set_name = models.CharField(max_length=255)
batch_set_description_text = models.CharField(max_length=255)
batch_set_source = models.CharField(max_length=255)
batch_process_id = models.PositiveIntegerField(default=0, null=True, blank=True, db_index=True)
batch_process_ballot_item_chunk_id = models.PositiveIntegerField(default=0, null=True, blank=True, db_index=True)
source_uri = models.URLField(max_length=255, blank=True, null=True, verbose_name='uri where data is coming from')
import_date = models.DateTimeField(verbose_name="date when batch set was imported", null=True, auto_now=True)
class BatchDescription(models.Model):
"""
We call each imported CSV or JSON a “batch”, and store basic information about it in this table.
"""
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=True, null=False, db_index=True)
batch_set_id = models.PositiveIntegerField(
verbose_name="unique id of batch set row", unique=False, null=True, db_index=True)
batch_header_map_id = models.PositiveIntegerField(
verbose_name="unique id of header map", unique=True, null=False)
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=True, blank=True, db_index=True)
batch_name = models.CharField(max_length=255)
kind_of_batch = models.CharField(max_length=32, choices=KIND_OF_BATCH_CHOICES, default=MEASURE, db_index=True)
organization_we_vote_id = models.CharField(
verbose_name="if for positions, the organization's we vote id", max_length=255, null=True, blank=True)
polling_location_we_vote_id = models.CharField(
verbose_name="if for ballot items, the map point we vote id", max_length=255, null=True, blank=True)
voter_id = models.IntegerField(null=True, blank=True)
batch_description_text = models.CharField(max_length=255)
# Have the batch rows under this description been analyzed?
batch_description_analyzed = models.BooleanField(default=False, db_index=True)
source_uri = models.URLField(max_length=255, blank=True, null=True, verbose_name='uri where data is coming from')
date_created = models.DateTimeField(verbose_name='date first saved', null=True, auto_now=True)
class BatchHeader(models.Model):
"""
When we get data, it will come with column headers. This table stores the headers from the import file.
"""
batch_header_column_000 = models.TextField(null=True, blank=True)
batch_header_column_001 = models.TextField(null=True, blank=True)
batch_header_column_002 = models.TextField(null=True, blank=True)
batch_header_column_003 = models.TextField(null=True, blank=True)
batch_header_column_004 = models.TextField(null=True, blank=True)
batch_header_column_005 = models.TextField(null=True, blank=True)
batch_header_column_006 = models.TextField(null=True, blank=True)
batch_header_column_007 = models.TextField(null=True, blank=True)
batch_header_column_008 = models.TextField(null=True, blank=True)
batch_header_column_009 = models.TextField(null=True, blank=True)
batch_header_column_010 = models.TextField(null=True, blank=True)
batch_header_column_011 = models.TextField(null=True, blank=True)
batch_header_column_012 = models.TextField(null=True, blank=True)
batch_header_column_013 = models.TextField(null=True, blank=True)
batch_header_column_014 = models.TextField(null=True, blank=True)
batch_header_column_015 = models.TextField(null=True, blank=True)
batch_header_column_016 = models.TextField(null=True, blank=True)
batch_header_column_017 = models.TextField(null=True, blank=True)
batch_header_column_018 = models.TextField(null=True, blank=True)
batch_header_column_019 = models.TextField(null=True, blank=True)
batch_header_column_020 = models.TextField(null=True, blank=True)
batch_header_column_021 = models.TextField(null=True, blank=True)
batch_header_column_022 = models.TextField(null=True, blank=True)
batch_header_column_023 = models.TextField(null=True, blank=True)
batch_header_column_024 = models.TextField(null=True, blank=True)
batch_header_column_025 = models.TextField(null=True, blank=True)
batch_header_column_026 = models.TextField(null=True, blank=True)
batch_header_column_027 = models.TextField(null=True, blank=True)
batch_header_column_028 = models.TextField(null=True, blank=True)
batch_header_column_029 = models.TextField(null=True, blank=True)
batch_header_column_030 = models.TextField(null=True, blank=True)
batch_header_column_031 = models.TextField(null=True, blank=True)
batch_header_column_032 = models.TextField(null=True, blank=True)
batch_header_column_033 = models.TextField(null=True, blank=True)
batch_header_column_034 = models.TextField(null=True, blank=True)
batch_header_column_035 = models.TextField(null=True, blank=True)
batch_header_column_036 = models.TextField(null=True, blank=True)
batch_header_column_037 = models.TextField(null=True, blank=True)
batch_header_column_038 = models.TextField(null=True, blank=True)
batch_header_column_039 = models.TextField(null=True, blank=True)
batch_header_column_040 = models.TextField(null=True, blank=True)
batch_header_column_041 = models.TextField(null=True, blank=True)
batch_header_column_042 = models.TextField(null=True, blank=True)
batch_header_column_043 = models.TextField(null=True, blank=True)
batch_header_column_044 = models.TextField(null=True, blank=True)
batch_header_column_045 = models.TextField(null=True, blank=True)
batch_header_column_046 = models.TextField(null=True, blank=True)
batch_header_column_047 = models.TextField(null=True, blank=True)
batch_header_column_048 = models.TextField(null=True, blank=True)
batch_header_column_049 = models.TextField(null=True, blank=True)
batch_header_column_050 = models.TextField(null=True, blank=True)
class BatchHeaderMap(models.Model):
"""
When we get data, it will come with column headers. This table stores the replacement header that matches
the We Vote internal field names.
"""
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=True, null=False, db_index=True)
batch_header_map_000 = models.TextField(null=True, blank=True)
batch_header_map_001 = models.TextField(null=True, blank=True)
batch_header_map_002 = models.TextField(null=True, blank=True)
batch_header_map_003 = models.TextField(null=True, blank=True)
batch_header_map_004 = models.TextField(null=True, blank=True)
batch_header_map_005 = models.TextField(null=True, blank=True)
batch_header_map_006 = models.TextField(null=True, blank=True)
batch_header_map_007 = models.TextField(null=True, blank=True)
batch_header_map_008 = models.TextField(null=True, blank=True)
batch_header_map_009 = models.TextField(null=True, blank=True)
batch_header_map_010 = models.TextField(null=True, blank=True)
batch_header_map_011 = models.TextField(null=True, blank=True)
batch_header_map_012 = models.TextField(null=True, blank=True)
batch_header_map_013 = models.TextField(null=True, blank=True)
batch_header_map_014 = models.TextField(null=True, blank=True)
batch_header_map_015 = models.TextField(null=True, blank=True)
batch_header_map_016 = models.TextField(null=True, blank=True)
batch_header_map_017 = models.TextField(null=True, blank=True)
batch_header_map_018 = models.TextField(null=True, blank=True)
batch_header_map_019 = models.TextField(null=True, blank=True)
batch_header_map_020 = models.TextField(null=True, blank=True)
batch_header_map_021 = models.TextField(null=True, blank=True)
batch_header_map_022 = models.TextField(null=True, blank=True)
batch_header_map_023 = models.TextField(null=True, blank=True)
batch_header_map_024 = models.TextField(null=True, blank=True)
batch_header_map_025 = models.TextField(null=True, blank=True)
batch_header_map_026 = models.TextField(null=True, blank=True)
batch_header_map_027 = models.TextField(null=True, blank=True)
batch_header_map_028 = models.TextField(null=True, blank=True)
batch_header_map_029 = models.TextField(null=True, blank=True)
batch_header_map_030 = models.TextField(null=True, blank=True)
batch_header_map_031 = models.TextField(null=True, blank=True)
batch_header_map_032 = models.TextField(null=True, blank=True)
batch_header_map_033 = models.TextField(null=True, blank=True)
batch_header_map_034 = models.TextField(null=True, blank=True)
batch_header_map_035 = models.TextField(null=True, blank=True)
batch_header_map_036 = models.TextField(null=True, blank=True)
batch_header_map_037 = models.TextField(null=True, blank=True)
batch_header_map_038 = models.TextField(null=True, blank=True)
batch_header_map_039 = models.TextField(null=True, blank=True)
batch_header_map_040 = models.TextField(null=True, blank=True)
batch_header_map_041 = models.TextField(null=True, blank=True)
batch_header_map_042 = models.TextField(null=True, blank=True)
batch_header_map_043 = models.TextField(null=True, blank=True)
batch_header_map_044 = models.TextField(null=True, blank=True)
batch_header_map_045 = models.TextField(null=True, blank=True)
batch_header_map_046 = models.TextField(null=True, blank=True)
batch_header_map_047 = models.TextField(null=True, blank=True)
batch_header_map_048 = models.TextField(null=True, blank=True)
batch_header_map_049 = models.TextField(null=True, blank=True)
batch_header_map_050 = models.TextField(null=True, blank=True)
class BatchRow(models.Model):
"""
Individual data rows
"""
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
# This is used when we have one batch_set that brings in election data for a variety of elections
google_civic_election_id = models.PositiveIntegerField(
verbose_name="election id", default=0, null=True, blank=True, db_index=True)
polling_location_we_vote_id = models.CharField(max_length=255, default=None, null=True, blank=True, unique=False)
voter_id = models.IntegerField(null=True, blank=True)
# This is useful for filtering while we are processing batch_rows
state_code = models.CharField(
verbose_name="state code for this data", max_length=2, null=True, blank=True, db_index=True)
batch_row_analyzed = models.BooleanField(default=False, db_index=True)
batch_row_created = models.BooleanField(default=False, db_index=True)
batch_row_000 = models.TextField(null=True, blank=True)
batch_row_001 = models.TextField(null=True, blank=True)
batch_row_002 = models.TextField(null=True, blank=True)
batch_row_003 = models.TextField(null=True, blank=True)
batch_row_004 = models.TextField(null=True, blank=True)
batch_row_005 = models.TextField(null=True, blank=True)
batch_row_006 = models.TextField(null=True, blank=True)
batch_row_007 = models.TextField(null=True, blank=True)
batch_row_008 = models.TextField(null=True, blank=True)
batch_row_009 = models.TextField(null=True, blank=True)
batch_row_010 = models.TextField(null=True, blank=True)
batch_row_011 = models.TextField(null=True, blank=True)
batch_row_012 = models.TextField(null=True, blank=True)
batch_row_013 = models.TextField(null=True, blank=True)
batch_row_014 = models.TextField(null=True, blank=True)
batch_row_015 = models.TextField(null=True, blank=True)
batch_row_016 = models.TextField(null=True, blank=True)
batch_row_017 = models.TextField(null=True, blank=True)
batch_row_018 = models.TextField(null=True, blank=True)
batch_row_019 = models.TextField(null=True, blank=True)
batch_row_020 = models.TextField(null=True, blank=True)
batch_row_021 = models.TextField(null=True, blank=True)
batch_row_022 = models.TextField(null=True, blank=True)
batch_row_023 = models.TextField(null=True, blank=True)
batch_row_024 = models.TextField(null=True, blank=True)
batch_row_025 = models.TextField(null=True, blank=True)
batch_row_026 = models.TextField(null=True, blank=True)
batch_row_027 = models.TextField(null=True, blank=True)
batch_row_028 = models.TextField(null=True, blank=True)
batch_row_029 = models.TextField(null=True, blank=True)
batch_row_030 = models.TextField(null=True, blank=True)
batch_row_031 = models.TextField(null=True, blank=True)
batch_row_032 = models.TextField(null=True, blank=True)
batch_row_033 = models.TextField(null=True, blank=True)
batch_row_034 = models.TextField(null=True, blank=True)
batch_row_035 = models.TextField(null=True, blank=True)
batch_row_036 = models.TextField(null=True, blank=True)
batch_row_037 = models.TextField(null=True, blank=True)
batch_row_038 = models.TextField(null=True, blank=True)
batch_row_039 = models.TextField(null=True, blank=True)
batch_row_040 = models.TextField(null=True, blank=True)
batch_row_041 = models.TextField(null=True, blank=True)
batch_row_042 = models.TextField(null=True, blank=True)
batch_row_043 = models.TextField(null=True, blank=True)
batch_row_044 = models.TextField(null=True, blank=True)
batch_row_045 = models.TextField(null=True, blank=True)
batch_row_046 = models.TextField(null=True, blank=True)
batch_row_047 = models.TextField(null=True, blank=True)
batch_row_048 = models.TextField(null=True, blank=True)
batch_row_049 = models.TextField(null=True, blank=True)
batch_row_050 = models.TextField(null=True, blank=True)
class BatchHeaderTranslationSuggestion(models.Model):
"""
When we bring in batches of data, we want to try to map non-standard headers to the We Vote recognized headers.
This table stores those mappings.
"""
kind_of_batch = models.CharField(max_length=32, choices=KIND_OF_BATCH_CHOICES, default=MEASURE)
header_value_recognized_by_we_vote = models.TextField(null=True, blank=True)
incoming_alternate_header_value = models.TextField(null=True, blank=True)
class BatchProcessManager(models.Manager):
def __unicode__(self):
return "BatchProcessManager"
def create_batch_process_analytics_chunk(self, batch_process_id=0, batch_process=None):
status = ""
success = True
batch_process_analytics_chunk = None
batch_process_analytics_chunk_created = False
if not batch_process:
results = self.retrieve_batch_process(batch_process_id=batch_process_id)
if not results['batch_process_found']:
status += results['status'] + "BATCH_PROCESS_ANALYTICS_CHUNK_NOT_FOUND "
results = {
'success': success,
'status': status,
'batch_process_analytics_chunk': batch_process_analytics_chunk,
'batch_process_analytics_chunk_created': batch_process_analytics_chunk_created,
}
return results
batch_process = results['batch_process']
try:
batch_process_analytics_chunk = BatchProcessAnalyticsChunk.objects.create(
batch_process_id=batch_process.id,
)
if batch_process_analytics_chunk:
status += 'BATCH_PROCESS_ANALYTICS_CHUNK_SAVED '
batch_process_analytics_chunk_created = True
else:
status += 'FAILED_TO_CREATE_BATCH_PROCESS_ANALYTICS_CHUNK '
except Exception as e:
success = False
status += 'COULD_NOT_SAVE_BATCH_PROCESS_ANALYTICS_CHUNK: ' + str(e) + ' '
results = {
'success': success,
'status': status,
'batch_process_analytics_chunk': batch_process_analytics_chunk,
'batch_process_analytics_chunk_created': batch_process_analytics_chunk_created,
}
return results
def create_batch_process_ballot_item_chunk(self, batch_process_id=0, batch_set_id=0):
status = ""
success = True
batch_process_ballot_item_chunk = None
batch_process_ballot_item_chunk_created = False
results = self.retrieve_batch_process(batch_process_id=batch_process_id)
if not results['batch_process_found']:
status += results['status'] + "BATCH_PROCESS_BALLOT_ITEM_CHUNK_NOT_FOUND "
results = {
'success': success,
'status': status,
'batch_process_ballot_item_chunk': batch_process_ballot_item_chunk,
'batch_process_ballot_item_chunk_created': batch_process_ballot_item_chunk_created,
}
return results
batch_process = results['batch_process']
try:
batch_process_ballot_item_chunk = BatchProcessBallotItemChunk.objects.create(
batch_process_id=batch_process.id,
batch_set_id=batch_set_id,
google_civic_election_id=batch_process.google_civic_election_id,
state_code=batch_process.state_code,
)
if batch_process_ballot_item_chunk:
status += 'BATCH_PROCESS_BALLOT_ITEM_CHUNK_SAVED '
batch_process_ballot_item_chunk_created = True
else:
status += 'FAILED_TO_CREATE_BATCH_PROCESS_BALLOT_ITEM_CHUNK '
except Exception as e:
success = False
status += 'COULD_NOT_SAVE_BATCH_PROCESS_BALLOT_ITEM_CHUNK: ' + str(e) + ' '
results = {
'success': success,
'status': status,
'batch_process_ballot_item_chunk': batch_process_ballot_item_chunk,
'batch_process_ballot_item_chunk_created': batch_process_ballot_item_chunk_created,
}
return results
def create_batch_process(
self,
google_civic_election_id=0,
kind_of_process=None,
polling_location_we_vote_id=None,
state_code="",
voter_id=None,
analytics_date_as_integer=None,
api_name=None,
election_id_list_serialized='',
use_ballotpedia=False,
use_ctcl=False,
use_vote_usa=False,
):
status = ""
success = True
batch_process = None
use_ballotpedia = positive_value_exists(use_ballotpedia)
use_ctcl = positive_value_exists(use_ctcl)
use_vote_usa = positive_value_exists(use_vote_usa)
if kind_of_process not in \
[
ACTIVITY_NOTICE_PROCESS,
API_REFRESH_REQUEST,
AUGMENT_ANALYTICS_ACTION_WITH_ELECTION_ID,
AUGMENT_ANALYTICS_ACTION_WITH_FIRST_VISIT,
CALCULATE_SITEWIDE_VOTER_METRICS,
CALCULATE_SITEWIDE_DAILY_METRICS,
CALCULATE_SITEWIDE_ELECTION_METRICS,
CALCULATE_ORGANIZATION_DAILY_METRICS,
CALCULATE_ORGANIZATION_ELECTION_METRICS,
REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS,
REFRESH_BALLOT_ITEMS_FROM_VOTERS,
RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS,
SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE,
UPDATE_TWITTER_DATA_FROM_TWITTER,
]:
status += "KIND_OF_PROCESS_NOT_FOUND: " + str(kind_of_process) + " "
success = False
results = {
'success': success,
'status': status,
'batch_process': batch_process,
'batch_process_saved': success,
}
return results
try:
google_civic_election_id = convert_to_int(google_civic_election_id)
if analytics_date_as_integer:
analytics_date_as_integer = convert_to_int(analytics_date_as_integer)
batch_process = BatchProcess.objects.create(
analytics_date_as_integer=analytics_date_as_integer,
api_name=api_name,
election_id_list_serialized=election_id_list_serialized,
date_added_to_queue=now(),
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
polling_location_we_vote_id=polling_location_we_vote_id,
state_code=state_code,
voter_id=voter_id,
use_ballotpedia=use_ballotpedia,
use_ctcl=use_ctcl,
use_vote_usa=use_vote_usa,
)
status += 'CREATE_BATCH_PROCESS_SAVED '
except Exception as e:
success = False
status += 'COULD_NOT_SAVE_BATCH_PROCESS: ' + str(e) + ' '
results = {
'success': success,
'status': status,
'batch_process': batch_process,
'batch_process_saved': success,
}
return results
def create_batch_process_log_entry(
self,
batch_process_id=0,
batch_process_ballot_item_chunk_id=0,
batch_set_id=0,
critical_failure=False,
google_civic_election_id=0,
kind_of_process="",
polling_location_we_vote_id=None,
state_code="",
status="",
voter_id=None,
analytics_date_as_integer=None):
success = True
batch_process_log_entry = None
batch_process_log_entry_saved = False
batch_process_id = convert_to_int(batch_process_id)
batch_process_ballot_item_chunk_id = convert_to_int(batch_process_ballot_item_chunk_id)
try:
batch_process_log_entry = BatchProcessLogEntry.objects.create(
batch_process_id=batch_process_id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk_id,
critical_failure=critical_failure,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
save_changes = False
if positive_value_exists(google_civic_election_id):
batch_process_log_entry.google_civic_election_id = convert_to_int(google_civic_election_id)
save_changes = True
if positive_value_exists(batch_set_id):
batch_process_log_entry.batch_set_id = convert_to_int(batch_set_id)
save_changes = True
if positive_value_exists(voter_id):
batch_process_log_entry.voter_id = convert_to_int(voter_id)
save_changes = True
if positive_value_exists(polling_location_we_vote_id):
batch_process_log_entry.polling_location_we_vote_id = polling_location_we_vote_id
save_changes = True
if positive_value_exists(analytics_date_as_integer):
batch_process_log_entry.analytics_date_as_integer = analytics_date_as_integer
save_changes = True
if save_changes:
batch_process_log_entry.save()
status += 'CREATE_BATCH_PROCESS_LOG_SAVED '
batch_process_log_entry_saved = True
except Exception as e:
success = False
status += 'COULD_NOT_SAVE_BATCH_PROCESS: ' + str(e) + ' '
results = {
'success': success,
'status': status,
'batch_process_log_entry': batch_process_log_entry,
'batch_process_log_entry_saved': batch_process_log_entry_saved,
}
return results
def retrieve_batch_process(
self,
batch_process_id=0,
google_civic_election_id=None,
kind_of_process='',
state_code='',
use_ctcl=False,
use_vote_usa=False):
status = ""
success = True
batch_process = None
batch_process_found = False
try:
if positive_value_exists(batch_process_id):
batch_process = BatchProcess.objects.get(id=batch_process_id)
if batch_process:
batch_process_found = True
status += 'BATCH_PROCESS_RETRIEVED_FROM_ID '
else:
status += 'BATCH_PROCESS_NOT_RETRIEVED_FROM_ID '
elif positive_value_exists(google_civic_election_id) and \
positive_value_exists(kind_of_process) and \
positive_value_exists(state_code):
query = BatchProcess.objects.all()
query = query.filter(google_civic_election_id=google_civic_election_id)
query = query.filter(state_code__iexact=state_code)
if positive_value_exists(use_ctcl):
query = query.filter(use_ctcl=True)
query = query.exclude(use_vote_usa=True)
elif positive_value_exists(use_vote_usa):
query = query.filter(use_vote_usa=True)
query = query.exclude(use_ctcl=True)
query = query.exclude(batch_process_paused=True)
# Order by id so that, when several match, we default to returning the oldest one
query = query.order_by('id')
batch_process_list = list(query)
if len(batch_process_list) > 0:
batch_process = batch_process_list[0]
batch_process_found = True
status += 'BATCH_PROCESS_RETRIEVED_FROM_MULTIPLE_VARIABLES '
else:
batch_process_found = False
status += 'BATCH_PROCESS_NOT_RETRIEVED_FROM_MULTIPLE_VARIABLES '
else:
status += 'RETRIEVE_BATCH_PROCESS_MISSING_REQUIRED_VARIABLES '
except BatchProcess.DoesNotExist:
# No batch_process found. Not a problem.
status += 'NO_BATCH_PROCESS_FOUND_DoesNotExist '
except Exception as e:
status += 'FAILED_BATCH_PROCESS_RETRIEVE: ' + str(e) + " "
success = False
results = {
'success': success,
'status': status,
'batch_process': batch_process,
'batch_process_found': batch_process_found,
}
return results
def count_active_batch_processes(self):
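        """
        Count BatchProcess entries for upcoming elections that have been started but not completed,
        excluding paused processes.
        """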
status = ""
batch_process_count = 0
election_manager = ElectionManager()
results = election_manager.retrieve_upcoming_elections()
election_list = results['election_list']
google_civic_election_id_list = []
for one_election in election_list:
google_civic_election_id_list.append(one_election.google_civic_election_id)
try:
batch_process_queryset = BatchProcess.objects.all()
batch_process_queryset = batch_process_queryset.filter(date_started__isnull=False)
batch_process_queryset = batch_process_queryset.filter(date_completed__isnull=True)
batch_process_queryset = batch_process_queryset.exclude(batch_process_paused=True)
batch_process_queryset = batch_process_queryset.filter(
google_civic_election_id__in=google_civic_election_id_list)
batch_process_count = batch_process_queryset.count()
except Exception as e:
status += 'FAILED_COUNT_ACTIVE_BATCH_PROCESSES: ' + str(e) + ' '
return batch_process_count
def count_checked_out_batch_processes(self):
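        """
        Count BatchProcess entries for upcoming elections that are started, not completed,
        and currently checked out.
        """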
status = ""
batch_process_count = 0
election_manager = ElectionManager()
results = election_manager.retrieve_upcoming_elections()
election_list = results['election_list']
google_civic_election_id_list = []
for one_election in election_list:
google_civic_election_id_list.append(one_election.google_civic_election_id)
try:
batch_process_queryset = BatchProcess.objects.all()
batch_process_queryset = batch_process_queryset.filter(date_started__isnull=False)
batch_process_queryset = batch_process_queryset.filter(date_completed__isnull=True)
batch_process_queryset = batch_process_queryset.filter(date_checked_out__isnull=False)
batch_process_queryset = batch_process_queryset.filter(
google_civic_election_id__in=google_civic_election_id_list)
batch_process_count = batch_process_queryset.count()
except Exception as e:
status += 'FAILED_COUNT_CHECKED_OUT_BATCH_PROCESSES: ' + str(e) + ' '
return batch_process_count
# ACTIVITY_NOTICE_PROCESS
# API_REFRESH_REQUEST
# AUGMENT_ANALYTICS_ACTION_WITH_ELECTION_ID
# AUGMENT_ANALYTICS_ACTION_WITH_FIRST_VISIT
# CALCULATE_SITEWIDE_VOTER_METRICS
# CALCULATE_SITEWIDE_DAILY_METRICS
# CALCULATE_SITEWIDE_ELECTION_METRICS
# CALCULATE_ORGANIZATION_DAILY_METRICS
# CALCULATE_ORGANIZATION_ELECTION_METRICS
# REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS
# REFRESH_BALLOT_ITEMS_FROM_VOTERS
# RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS
# SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE
def count_next_steps(
self,
kind_of_process_list=[],
is_active=False,
is_checked_out=False,
is_in_upcoming_queue=False):
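        """
        Count BatchProcess entries of the given kinds that are active, checked out, or waiting in the
        upcoming queue. Kinds tied to ballot item retrieval are limited to upcoming elections.
        """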
status = ""
success = True
batch_process_count = 0
google_civic_election_id_list = []
related_to_upcoming_election = \
REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS in kind_of_process_list or \
REFRESH_BALLOT_ITEMS_FROM_VOTERS in kind_of_process_list or \
RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS in kind_of_process_list
if related_to_upcoming_election:
election_manager = ElectionManager()
results = election_manager.retrieve_upcoming_elections()
election_list = results['election_list']
google_civic_election_id_list = []
for one_election in election_list:
google_civic_election_id_list.append(one_election.google_civic_election_id)
try:
batch_process_queryset = BatchProcess.objects.all()
batch_process_queryset = batch_process_queryset.filter(kind_of_process__in=kind_of_process_list)
batch_process_queryset = batch_process_queryset.exclude(batch_process_paused=True)
if positive_value_exists(is_active):
batch_process_queryset = batch_process_queryset.filter(date_started__isnull=False)
batch_process_queryset = batch_process_queryset.filter(date_completed__isnull=True)
elif positive_value_exists(is_checked_out):
batch_process_queryset = batch_process_queryset.filter(date_checked_out__isnull=False)
batch_process_queryset = batch_process_queryset.filter(date_completed__isnull=True)
elif positive_value_exists(is_in_upcoming_queue):
batch_process_queryset = batch_process_queryset.filter(date_completed__isnull=True)
if related_to_upcoming_election:
batch_process_queryset = batch_process_queryset.filter(
google_civic_election_id__in=google_civic_election_id_list)
batch_process_count = batch_process_queryset.count()
except Exception as e:
            status += 'FAILED_COUNT_NEXT_STEPS: ' + str(e) + ' '
success = False
return {
'status': status,
'success': success,
'batch_process_count': batch_process_count,
}
def is_batch_process_currently_scheduled(
self, google_civic_election_id=0, state_code="", kind_of_process=""):
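        """
        Return True if an unfinished, unpaused BatchProcess already exists for this election,
        state_code and kind_of_process. True is also returned if the count fails.
        """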
status = ""
try:
batch_process_queryset = BatchProcess.objects.all()
batch_process_queryset = batch_process_queryset.filter(google_civic_election_id=google_civic_election_id)
batch_process_queryset = batch_process_queryset.filter(state_code=state_code)
batch_process_queryset = batch_process_queryset.filter(kind_of_process=kind_of_process)
batch_process_queryset = batch_process_queryset.filter(date_completed__isnull=True)
batch_process_queryset = batch_process_queryset.filter(batch_process_paused=False)
batch_process_count = batch_process_queryset.count()
return positive_value_exists(batch_process_count)
except Exception as e:
status += 'FAILED_COUNT_IS_BATCH_PROCESS_CURRENTLY_SCHEDULED: ' + str(e) + ' '
return True
def is_activity_notice_process_currently_running(self):
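        """
        Return True if an ACTIVITY_NOTICE_PROCESS is started, checked out and not yet completed.
        Paused processes are not considered to be running. True is also returned if the count fails.
        """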
status = ""
        activity_notice_kind_of_process_list = [ACTIVITY_NOTICE_PROCESS]
try:
batch_process_queryset = BatchProcess.objects.all()
batch_process_queryset = batch_process_queryset.filter(date_started__isnull=False)
batch_process_queryset = batch_process_queryset.filter(date_completed__isnull=True)
batch_process_queryset = batch_process_queryset.filter(date_checked_out__isnull=False)
batch_process_queryset = batch_process_queryset.filter(
                kind_of_process__in=activity_notice_kind_of_process_list)
            # Don't consider paused batch_processes to be currently running
# Note: Paused processes might still be running, but for ACTIVITY_NOTICE_PROCESS, we will allow this
batch_process_queryset = batch_process_queryset.exclude(batch_process_paused=True)
batch_process_count = batch_process_queryset.count()
return positive_value_exists(batch_process_count)
except Exception as e:
status += 'FAILED_COUNT_CHECKED_OUT_BATCH_PROCESSES-ACTIVITY_NOTICE: ' + str(e) + ' '
return True
def is_analytics_process_currently_running(self):
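        """
        Return True if any analytics-related BatchProcess is started, checked out and not yet completed.
        True is also returned if the count fails.
        """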
status = ""
analytics_kind_of_process_list = [
AUGMENT_ANALYTICS_ACTION_WITH_ELECTION_ID, AUGMENT_ANALYTICS_ACTION_WITH_FIRST_VISIT,
CALCULATE_SITEWIDE_VOTER_METRICS, CALCULATE_SITEWIDE_DAILY_METRICS,
CALCULATE_SITEWIDE_ELECTION_METRICS, CALCULATE_ORGANIZATION_DAILY_METRICS,
CALCULATE_ORGANIZATION_ELECTION_METRICS]
try:
batch_process_queryset = BatchProcess.objects.all()
batch_process_queryset = batch_process_queryset.filter(date_started__isnull=False)
batch_process_queryset = batch_process_queryset.filter(date_completed__isnull=True)
batch_process_queryset = batch_process_queryset.filter(date_checked_out__isnull=False)
batch_process_queryset = batch_process_queryset.filter(
kind_of_process__in=analytics_kind_of_process_list)
batch_process_count = batch_process_queryset.count()
return positive_value_exists(batch_process_count)
except Exception as e:
            status += 'FAILED_COUNT_CHECKED_OUT_BATCH_PROCESSES-ANALYTICS: ' + str(e) + ' '
return True
def retrieve_batch_process_list(
self,
kind_of_process_list=[],
process_active=False,
process_needs_to_be_run=False,
process_queued=False,
for_upcoming_elections=True):
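        """
        Retrieve BatchProcess entries matching kind_of_process_list, limited to processes that are
        active, queued, or need to be run. Checked out processes are only treated as active while
        their kind-specific check-out window (checked_out_expiration_time below) has not expired.
        """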
status = ""
success = True
batch_process_list_found = False
filtered_batch_process_list = []
election_manager = ElectionManager()
if positive_value_exists(for_upcoming_elections):
results = election_manager.retrieve_upcoming_elections()
election_list = results['election_list']
else:
results = election_manager.retrieve_elections()
election_list = results['election_list']
try:
batch_process_queryset = BatchProcess.objects.all()
batch_process_queryset = batch_process_queryset.order_by("id")
if kind_of_process_list and len(kind_of_process_list) > 0:
batch_process_queryset = batch_process_queryset.filter(kind_of_process__in=kind_of_process_list)
if positive_value_exists(process_active):
batch_process_queryset = batch_process_queryset.filter(date_started__isnull=False) # Has date_started
batch_process_queryset = batch_process_queryset.filter(date_completed__isnull=True) # No date_completed
batch_process_queryset = batch_process_queryset.exclude(batch_process_paused=True) # Not paused
elif positive_value_exists(process_queued):
batch_process_queryset = batch_process_queryset.filter(date_started__isnull=True) # Not started
batch_process_queryset = batch_process_queryset.filter(date_completed__isnull=True) # Not completed
batch_process_queryset = batch_process_queryset.exclude(batch_process_paused=True) # Not paused
elif positive_value_exists(process_needs_to_be_run):
batch_process_queryset = batch_process_queryset.filter(date_completed__isnull=True) # Not completed
batch_process_queryset = batch_process_queryset.exclude(batch_process_paused=True) # Not paused
if positive_value_exists(for_upcoming_elections):
# Limit this search to upcoming_elections only, or no election specified
google_civic_election_id_list = [0]
for one_election in election_list:
google_civic_election_id_integer = convert_to_int(one_election.google_civic_election_id)
google_civic_election_id_list.append(google_civic_election_id_integer)
batch_process_queryset = batch_process_queryset.filter(
google_civic_election_id__in=google_civic_election_id_list)
else:
# Do not limit to upcoming elections
pass
# if positive_value_exists(state_code):
# batch_process_queryset = batch_process_queryset.filter(state_code__iexact=state_code)
batch_process_list = list(batch_process_queryset)
# Cycle through all processes retrieved and make sure they aren't being worked on by other processes
for batch_process in batch_process_list:
if batch_process.date_checked_out is None:
# If no date_checked_out, then process can be considered "active", "queued" or "needs_to_be_run"
filtered_batch_process_list.append(batch_process)
else:
# See also longest_activity_notice_processing_run_time_allowed
# If this kind_of_process has run longer than allowed (i.e. probably crashed or timed out)
# consider it to no longer be active
if batch_process.kind_of_process == ACTIVITY_NOTICE_PROCESS:
checked_out_expiration_time = 270 # 4.5 minutes * 60 seconds
elif batch_process.kind_of_process == API_REFRESH_REQUEST:
checked_out_expiration_time = 360 # 6 minutes * 60 seconds
elif batch_process.kind_of_process in [
REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS, REFRESH_BALLOT_ITEMS_FROM_VOTERS,
RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS]:
checked_out_expiration_time = 1800 # 30 minutes * 60 seconds
elif batch_process.kind_of_process in [
AUGMENT_ANALYTICS_ACTION_WITH_ELECTION_ID, AUGMENT_ANALYTICS_ACTION_WITH_FIRST_VISIT,
CALCULATE_ORGANIZATION_DAILY_METRICS, CALCULATE_ORGANIZATION_ELECTION_METRICS,
CALCULATE_SITEWIDE_ELECTION_METRICS, CALCULATE_SITEWIDE_VOTER_METRICS,
CALCULATE_SITEWIDE_DAILY_METRICS]:
checked_out_expiration_time = 600 # 10 minutes * 60 seconds
elif batch_process.kind_of_process == SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE:
checked_out_expiration_time = 300 # 5 minutes * 60 seconds - See SEARCH_TWITTER_TIMED_OUT
elif batch_process.kind_of_process == UPDATE_TWITTER_DATA_FROM_TWITTER:
checked_out_expiration_time = 600 # 10 minutes * 60 seconds - See UPDATE_TWITTER_TIMED_OUT
else:
checked_out_expiration_time = 1800 # 30 minutes * 60 seconds
date_checked_out_time_out = \
batch_process.date_checked_out + timedelta(seconds=checked_out_expiration_time)
status += "CHECKED_OUT_PROCESS_FOUND "
if positive_value_exists(process_active):
# When checking to see if the process is active, only consider it such before the timeout time
if now() < date_checked_out_time_out:
filtered_batch_process_list.append(batch_process)
status += "CHECKED_OUT_PROCESS_FOUND_CONSIDER_STILL_ACTIVE "
else:
                        # This is for "process_queued". If it has passed the timeout point, then we can consider it queued
# "needs_to_be_run" shouldn't be able to get here
if now() > date_checked_out_time_out:
filtered_batch_process_list.append(batch_process)
status += "CHECKED_OUT_PROCESS_FOUND_HAS_TIMED_OUT "
if len(filtered_batch_process_list):
batch_process_list_found = True
status += 'BATCH_PROCESS_LIST_RETRIEVED '
else:
status += 'BATCH_PROCESS_LIST_NONE_FOUND '
except BatchProcess.DoesNotExist:
# No batch_process found. Not a problem.
status += 'NO_BATCH_PROCESS_FOUND_DoesNotExist '
except Exception as e:
status += 'FAILED_BATCH_PROCESS_LIST_RETRIEVE: ' + str(e) + " "
success = False
results = {
'success': success,
'status': status,
'batch_process_list': filtered_batch_process_list,
'batch_process_list_found': batch_process_list_found,
}
return results
def retrieve_active_ballot_item_chunk_not_completed(self, batch_process_id):
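        """
        Return the first BatchProcessBallotItemChunk for this batch_process_id where the retrieve,
        analyze or create step has not been completed.
        """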
status = ""
success = True
batch_process_ballot_item_chunk = None
batch_process_ballot_item_chunk_found = False
try:
batch_process_queryset = BatchProcessBallotItemChunk.objects.all()
batch_process_queryset = batch_process_queryset.filter(batch_process_id=batch_process_id)
# Limit to chunks that have at least one completed_date == NULL
            filters = []  # Collect Q filters to OR together
new_filter = Q(retrieve_date_completed__isnull=True)
filters.append(new_filter)
new_filter = Q(analyze_date_completed__isnull=True)
filters.append(new_filter)
new_filter = Q(create_date_completed__isnull=True)
filters.append(new_filter)
# Add the first query
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
batch_process_queryset = batch_process_queryset.filter(final_filters)
batch_process_queryset = batch_process_queryset.order_by("id")
batch_process_ballot_item_chunk = batch_process_queryset.first()
if batch_process_ballot_item_chunk:
batch_process_ballot_item_chunk_found = True
status += 'BATCH_PROCESS_BALLOT_ITEM_CHUNK_RETRIEVED '
else:
status += 'BATCH_PROCESS_BALLOT_ITEM_CHUNK_NOT_FOUND '
except BatchProcessBallotItemChunk.DoesNotExist:
# No chunk found. Not a problem.
status += 'BATCH_PROCESS_BALLOT_ITEM_CHUNK_NOT_FOUND_DoesNotExist '
except Exception as e:
status += 'FAILED_BATCH_PROCESS_BALLOT_ITEM_CHUNK_RETRIEVE: ' + str(e) + " "
success = False
results = {
'success': success,
'status': status,
'batch_process_ballot_item_chunk': batch_process_ballot_item_chunk,
'batch_process_ballot_item_chunk_found': batch_process_ballot_item_chunk_found,
}
return results
def retrieve_analytics_action_chunk_not_completed(self, batch_process_id):
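        """
        Return the first BatchProcessAnalyticsChunk for this batch_process_id that has not been
        completed.
        """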
status = ""
success = True
batch_process_analytics_chunk = None
batch_process_analytics_chunk_found = False
try:
batch_process_queryset = BatchProcessAnalyticsChunk.objects.all()
batch_process_queryset = batch_process_queryset.filter(batch_process_id=batch_process_id)
            # Limit to chunks where date_completed is NULL
            filters = []  # Collect Q filters to OR together
new_filter = Q(date_completed__isnull=True)
filters.append(new_filter)
# Add the first query
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
batch_process_queryset = batch_process_queryset.filter(final_filters)
batch_process_queryset = batch_process_queryset.order_by("id")
batch_process_analytics_chunk = batch_process_queryset.first()
if batch_process_analytics_chunk:
batch_process_analytics_chunk_found = True
status += 'BATCH_PROCESS_ANALYTICS_CHUNK_RETRIEVED '
else:
status += 'BATCH_PROCESS_ANALYTICS_CHUNK_NOT_FOUND '
except BatchProcessAnalyticsChunk.DoesNotExist:
# No chunk found. Not a problem.
status += 'BATCH_PROCESS_ANALYTICS_CHUNK_NOT_FOUND_DoesNotExist '
except Exception as e:
status += 'FAILED_BATCH_PROCESS_ANALYTICS_CHUNK_RETRIEVE: ' + str(e) + " "
success = False
results = {
'success': success,
'status': status,
'batch_process_analytics_chunk': batch_process_analytics_chunk,
'batch_process_analytics_chunk_found': batch_process_analytics_chunk_found,
}
return results
def system_turned_off(self):
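        """
        Return True when the batch process system has been turned off in wevote_settings.
        """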
from wevote_settings.models import fetch_batch_process_system_on
return not fetch_batch_process_system_on()
class BatchProcess(models.Model):
"""
"""
kind_of_process = models.CharField(max_length=50, choices=KIND_OF_PROCESS_CHOICES,
default=RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS)
# The unique ID of this election. (Provided by Google Civic)
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=False, db_index=True)
state_code = models.CharField(verbose_name="state the ballot item is related to", max_length=2, null=True)
# This is used to identify the date of analytics we are processing
analytics_date_as_integer = models.PositiveIntegerField(null=True, blank=True)
# Either voter_id or polling_location_we_vote_id will be set, but not both.
# The unique id of the voter for which this ballot was retrieved.
voter_id = models.IntegerField(verbose_name="the voter unique id", null=True, blank=True)
# The map point for which this ballot was retrieved
polling_location_we_vote_id = models.CharField(
verbose_name="we vote permanent id of the map point", max_length=255, default=None, null=True,
blank=True, unique=False)
# API Refresh Request
api_name = models.CharField(max_length=255, null=True)
election_id_list_serialized = models.CharField(max_length=255, null=True)
    date_added_to_queue = models.DateTimeField(verbose_name='added to queue', null=True)
date_started = models.DateTimeField(verbose_name='start', null=True)
# When have all of the steps completed?
date_completed = models.DateTimeField(verbose_name='finished', null=True)
# When a batch_process is running, we mark when it was "taken off the shelf" to be worked on.
# When the process is complete, we should reset this to "NULL"
date_checked_out = models.DateTimeField(null=True)
batch_process_paused = models.BooleanField(default=False)
completion_summary = models.TextField(null=True, blank=True)
use_ballotpedia = models.BooleanField(default=False)
use_ctcl = models.BooleanField(default=False)
use_vote_usa = models.BooleanField(default=False)
class BatchProcessAnalyticsChunk(models.Model):
"""
"""
batch_process_id = models.PositiveIntegerField(default=0, null=False, db_index=True)
date_started = models.DateTimeField(default=None, null=True)
timed_out = models.BooleanField(default=None, null=True)
date_completed = models.DateTimeField(default=None, null=True)
number_of_rows_being_reviewed = models.PositiveIntegerField(default=0, null=True)
number_of_rows_successfully_reviewed = models.PositiveIntegerField(default=0, null=True)
class BatchProcessBallotItemChunk(models.Model):
"""
"""
batch_process_id = models.PositiveIntegerField(default=0, null=False, db_index=True)
batch_set_id = models.PositiveIntegerField(default=0, null=False, db_index=True)
google_civic_election_id = models.PositiveIntegerField(default=0, null=False, db_index=True)
state_code = models.CharField(max_length=2, null=True)
retrieve_date_started = models.DateTimeField(null=True)
retrieve_date_completed = models.DateTimeField(null=True)
retrieve_timed_out = models.BooleanField(default=None, null=True)
retrieve_row_count = models.PositiveIntegerField(default=0, null=False)
analyze_date_started = models.DateTimeField(null=True)
analyze_date_completed = models.DateTimeField(null=True)
analyze_timed_out = models.BooleanField(default=None, null=True)
analyze_row_count = models.PositiveIntegerField(default=0, null=False)
create_date_started = models.DateTimeField(null=True)
create_date_completed = models.DateTimeField(null=True)
create_timed_out = models.BooleanField(default=None, null=True)
create_row_count = models.PositiveIntegerField(default=0, null=False)
class BatchProcessLogEntry(models.Model):
"""
"""
batch_process_id = models.PositiveIntegerField(default=0, null=False, db_index=True)
batch_process_ballot_item_chunk_id = models.PositiveIntegerField(default=0, null=False, db_index=True)
batch_set_id = models.PositiveIntegerField(default=0, null=False, db_index=True)
# The unique ID of this election. (Provided by Google Civic)
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=False)
state_code = models.CharField(verbose_name="state the ballot item is related to", max_length=2, null=True)
# Either voter_id or polling_location_we_vote_id will be set, but not both.
# The unique id of the voter for which this ballot was retrieved.
voter_id = models.IntegerField(verbose_name="the voter unique id", null=True, blank=True)
# The map point for which this ballot was retrieved
polling_location_we_vote_id = models.CharField(
verbose_name="we vote permanent id of the map point", max_length=255, default=None, null=True,
blank=True, unique=False)
critical_failure = models.BooleanField(default=None, null=True)
date_added = models.DateTimeField(null=True, auto_now_add=True)
kind_of_process = models.CharField(max_length=50, default="")
analytics_date_as_integer = models.PositiveIntegerField(default=None, null=True)
status = models.TextField(null=True, blank=True)
class BatchRowTranslationMap(models.Model):
"""
When we bring in batches of data, we want to map different names (for measures, offices, candidates,
or organizations) to the We Vote recognized names. This table stores those mappings. So for example
if one batch uses "Prop A" we want to map it to "Proposition A".
"""
# Are we translating for a Measure, Office, Candidate, or Organization
kind_of_batch = models.CharField(max_length=32, choices=KIND_OF_BATCH_CHOICES, default=MEASURE)
# What is the name of the row? (ex/ contest_office_name)
    batch_row_name = models.CharField(verbose_name="name of the row", max_length=255, null=True, blank=True)
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=True, blank=True)
row_value_recognized_by_we_vote = models.TextField(null=True, blank=True)
incoming_alternate_row_value = models.TextField(null=True, blank=True)
class BatchRowActionMeasure(models.Model):
"""
The definition of the action for importing one Measure.
"""
batch_set_id = models.PositiveIntegerField(verbose_name="unique id of batch set", unique=False, null=True)
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
batch_row_id = models.PositiveIntegerField(
verbose_name="unique id of batch row", unique=True, null=False, db_index=True)
kind_of_action = models.CharField(
max_length=40, choices=KIND_OF_ACTION_CHOICES, default=IMPORT_TO_BE_DETERMINED, db_index=True)
# Fields from ContestMeasure
measure_we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, default=None, null=True, blank=True)
maplight_id = models.CharField(verbose_name="maplight unique identifier",
max_length=255, null=True, blank=True, unique=False)
vote_smart_id = models.CharField(verbose_name="votesmart unique identifier",
max_length=200, null=True, blank=True, unique=False)
# The title of the measure (e.g. 'Proposition 42').
measure_title = models.CharField(verbose_name="measure title", max_length=255, null=False, blank=False)
# The measure's title as passed over by Google Civic. We save this so we can match to this measure even
# if we edit the measure's name locally.
google_civic_measure_title = models.CharField(verbose_name="measure name exactly as received from google civic",
max_length=255, null=True, blank=True)
# A brief description of the referendum. This field is only populated for contests of type 'Referendum'.
measure_subtitle = models.TextField(verbose_name="google civic referendum subtitle",
null=True, blank=True, default="")
# The text of the measure. This field is only populated for contests of type 'Referendum'.
measure_text = models.TextField(verbose_name="measure text", null=True, blank=False)
# A link to the referendum. This field is only populated for contests of type 'Referendum'.
measure_url = models.CharField(verbose_name="measure details url", max_length=255, null=True, blank=False)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=False, blank=False, db_index=True)
ocd_division_id = models.CharField(verbose_name="ocd division id", max_length=255, null=True, blank=True)
# ballot_placement: We store ballot_placement in the BallotItem table instead because it is different for each voter
# If this is a partisan election, the name of the party it is for.
primary_party = models.CharField(verbose_name="primary party", max_length=255, null=True, blank=True)
# The name of the district.
district_name = models.CharField(verbose_name="district name", max_length=255, null=False, blank=False)
# The geographic scope of this district. If unspecified the district's geography is not known.
# One of: national, statewide, congressional, stateUpper, stateLower, countywide, judicial, schoolBoard,
# cityWide, township, countyCouncil, cityCouncil, ward, special
district_scope = models.CharField(verbose_name="district scope", max_length=255, null=False, blank=False)
# An identifier for this district, relative to its scope. For example, the 34th State Senate district
# would have id "34" and a scope of stateUpper.
district_id = models.CharField(verbose_name="google civic district id", max_length=255, null=True, blank=True)
# State code
state_code = models.CharField(verbose_name="state this measure affects", max_length=2, null=True, blank=True)
# Day of the election in YYYY-MM-DD format.
election_day_text = models.CharField(verbose_name="election day", max_length=255, null=True, blank=True)
wikipedia_page_id = models.BigIntegerField(verbose_name="pageid", null=True, blank=True)
wikipedia_page_title = models.CharField(
verbose_name="Page title on Wikipedia", max_length=255, null=True, blank=True)
wikipedia_photo_url = models.URLField(verbose_name='url of wikipedia logo', max_length=255, blank=True, null=True)
ballotpedia_district_id = models.PositiveIntegerField(
verbose_name="ballotpedia district id", default=0, null=False, blank=False)
ballotpedia_election_id = models.PositiveIntegerField(
verbose_name="ballotpedia election id", default=0, null=False, blank=False)
ballotpedia_measure_id = models.PositiveIntegerField(
verbose_name="ballotpedia measure id", default=0, null=False, blank=False)
ballotpedia_measure_name = models.CharField(
verbose_name="ballotpedia measure name", max_length=255, null=True, blank=True)
ballotpedia_measure_status = models.CharField(
verbose_name="ballotpedia measure status", max_length=255, null=True, blank=True)
ballotpedia_measure_summary = models.TextField(
verbose_name="ballotpedia measure summary", null=True, blank=True, default="")
ballotpedia_measure_text = models.TextField(
verbose_name="ballotpedia measure text", null=True, blank=True, default="")
ballotpedia_measure_url = models.URLField(
verbose_name='ballotpedia url of measure', max_length=255, blank=True, null=True)
ballotpedia_page_title = models.CharField(
verbose_name="Page title on Ballotpedia", max_length=255, null=True, blank=True)
ballotpedia_photo_url = models.URLField(
verbose_name='url of ballotpedia logo', max_length=255, blank=True, null=True)
ballotpedia_yes_vote_description = models.TextField(
verbose_name="what a yes vote means", null=True, blank=True, default=None)
ballotpedia_no_vote_description = models.TextField(
verbose_name="what a no vote means", null=True, blank=True, default=None)
ctcl_uuid = models.CharField(verbose_name="ctcl uuid", max_length=36, null=True, blank=True)
status = models.TextField(verbose_name="batch row action measure status", null=True, blank=True, default="")
class BatchRowActionContestOffice(models.Model):
"""
The definition of the action for importing one Office.
"""
batch_set_id = models.PositiveIntegerField(verbose_name="unique id of batch set", unique=False, null=True)
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
batch_row_id = models.PositiveIntegerField(
verbose_name="unique id of batch row", unique=False, null=False, db_index=True)
kind_of_action = models.CharField(
max_length=40, choices=KIND_OF_ACTION_CHOICES, default=IMPORT_TO_BE_DETERMINED, db_index=True)
# Fields from ContestOffice
contest_office_we_vote_id = models.CharField(
verbose_name="we vote permanent id for this contest office", max_length=255, default=None, null=True,
blank=True)
# The name of the office for this contest.
contest_office_name = models.CharField(verbose_name="name of the contest office", max_length=255, null=False,
blank=False)
# TODO: Was the original contest_office_name replaced with a mapped value from BatchRowTranslationMap?
# contest_office_name_mapped = models.BooleanField(verbose_name='office name was replaced', default=False)
    # The office's name as passed over by Google Civic. We save this so we can match to this office even
# if we edit the office's name locally.
google_civic_office_name = models.CharField(verbose_name="office name exactly as received from google civic",
max_length=255, null=True, blank=True)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.CharField(verbose_name="google civic election id",
max_length=255, null=False, blank=False, db_index=True)
google_civic_election_id_new = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=False, blank=False)
ocd_division_id = models.CharField(verbose_name="ocd division id", max_length=255, null=True, blank=True)
maplight_id = models.CharField(
verbose_name="maplight unique identifier", max_length=255, null=True, blank=True)
# 2018-02-16 It is unclear if we want to keep this field
ballotpedia_id = models.CharField(
verbose_name="ballotpedia unique identifier", max_length=255, null=True, blank=True)
ballotpedia_district_id = models.PositiveIntegerField(
verbose_name="ballotpedia district id", null=True, blank=True)
ballotpedia_election_id = models.PositiveIntegerField(verbose_name="ballotpedia election id", null=True, blank=True)
ballotpedia_is_marquee = models.BooleanField(default=None, null=True)
is_ballotpedia_general_election = models.BooleanField(default=False)
is_ballotpedia_general_runoff_election = models.BooleanField(default=False)
is_ballotpedia_primary_election = models.BooleanField(default=False)
is_ballotpedia_primary_runoff_election = models.BooleanField(default=False)
# Equivalent of elected_office in We Vote
ballotpedia_office_id = models.PositiveIntegerField(
verbose_name="ballotpedia integer id", null=True, blank=True)
# The office's name as passed over by Ballotpedia. This helps us do exact matches when id is missing
ballotpedia_office_name = models.CharField(verbose_name="office name exactly as received from ballotpedia",
max_length=255, null=True, blank=True)
ballotpedia_office_url = models.URLField(
verbose_name='url of office on ballotpedia', max_length=255, blank=True, null=True)
# Equivalent of contest_office in We Vote
ballotpedia_race_id = models.PositiveIntegerField(verbose_name="ballotpedia race integer id", null=True, blank=True)
# Federal, State, Local,
ballotpedia_race_office_level = models.CharField(verbose_name="race office level", max_length=255, null=True,
blank=True)
wikipedia_id = models.CharField(verbose_name="wikipedia unique identifier", max_length=255, null=True, blank=True)
# vote_type (ranked choice, majority)
# The number of candidates that a voter may vote for in this contest.
number_voting_for = models.CharField(verbose_name="google civic number of candidates to vote for",
max_length=255, null=True, blank=True)
# The number of candidates that will be elected to office in this contest.
number_elected = models.CharField(verbose_name="google civic number of candidates who will be elected",
max_length=255, null=True, blank=True)
# State code
state_code = models.CharField(verbose_name="state this office serves", max_length=2, null=True, blank=True)
# If this is a partisan election, the name of the party it is for.
primary_party = models.CharField(verbose_name="google civic primary party", max_length=255, null=True, blank=True)
# The name of the district.
district_name = models.CharField(verbose_name="district name", max_length=255, null=True, blank=True)
# The geographic scope of this district. If unspecified the district's geography is not known.
# One of: national, statewide, congressional, stateUpper, stateLower, countywide, judicial, schoolBoard,
# cityWide, township, countyCouncil, cityCouncil, ward, special
district_scope = models.CharField(verbose_name="google civic district scope",
max_length=255, null=True, blank=True)
# An identifier for this district, relative to its scope. For example, the 34th State Senate district
# would have id "34" and a scope of stateUpper.
district_id = models.CharField(verbose_name="google civic district id", max_length=255, null=True, blank=True)
# The levels of government of the office for this contest. There may be more than one in cases where a
# jurisdiction effectively acts at two different levels of government; for example, the mayor of the
# District of Columbia acts at "locality" level, but also effectively at both
# "administrative-area-2" and "administrative-area-1".
contest_level0 = models.CharField(verbose_name="google civic level, option 0",
max_length=255, null=True, blank=True)
contest_level1 = models.CharField(verbose_name="google civic level, option 1",
max_length=255, null=True, blank=True)
contest_level2 = models.CharField(verbose_name="google civic level, option 2",
max_length=255, null=True, blank=True)
# ballot_placement: We store ballot_placement in the BallotItem table instead because it is different for each voter
# A description of any additional eligibility requirements for voting in this contest.
    electorate_specifications = models.CharField(verbose_name="google civic electorate specifications",
max_length=255, null=True, blank=True)
# "Yes" or "No" depending on whether this a contest being held outside the normal election cycle.
special = models.CharField(verbose_name="google civic primary party", max_length=255, null=True, blank=True)
ctcl_uuid = models.CharField(verbose_name="ctcl uuid", max_length=36, null=True, blank=True)
elected_office_name = models.CharField(verbose_name="name of the elected office", max_length=255, null=True,
blank=True, default=None)
candidate_selection_id1 = models.CharField(verbose_name="temporary id of candidate selection 1", max_length=255,
null=True, blank=True, default=None)
candidate_selection_id2 = models.CharField(verbose_name="temporary id of candidate selection 2", max_length=255,
null=True, blank=True, default=None)
    candidate_selection_id3 = models.CharField(verbose_name="temporary id of candidate selection 3", max_length=255,
null=True, blank=True, default=None)
candidate_selection_id4 = models.CharField(verbose_name="temporary id of candidate selection 4", max_length=255,
null=True, blank=True, default=None)
candidate_selection_id5 = models.CharField(verbose_name="temporary id of candidate selection 5", max_length=255,
null=True, blank=True, default=None)
candidate_selection_id6 = models.CharField(verbose_name="temporary id of candidate selection 6", max_length=255,
null=True, blank=True, default=None)
candidate_selection_id7 = models.CharField(verbose_name="temporary id of candidate selection 7", max_length=255,
null=True, blank=True, default=None)
candidate_selection_id8 = models.CharField(verbose_name="temporary id of candidate selection 8", max_length=255,
null=True, blank=True, default=None)
candidate_selection_id9 = models.CharField(verbose_name="temporary id of candidate selection 9", max_length=255,
null=True, blank=True, default=None)
candidate_selection_id10 = models.CharField(verbose_name="temporary id of candidate selection 10", max_length=255,
null=True, blank=True, default=None)
vote_usa_office_id = models.CharField(
verbose_name="Vote USA permanent id for this candidate", max_length=64, default=None, null=True, blank=True)
status = models.TextField(verbose_name="batch row action contest office status", null=True, blank=True, default="")
class BatchRowActionElectedOffice(models.Model):
"""
The definition of the action for importing one Office.
"""
batch_set_id = models.PositiveIntegerField(verbose_name="unique id of batch set", unique=False, null=True)
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
batch_row_id = models.PositiveIntegerField(
verbose_name="unique id of batch row", unique=False, null=False, db_index=True)
kind_of_action = models.CharField(
max_length=40, choices=KIND_OF_ACTION_CHOICES, default=IMPORT_TO_BE_DETERMINED, db_index=True)
# Fields from ElectedOffice
elected_office_we_vote_id = models.CharField(
verbose_name="we vote permanent id for this elected office", max_length=255, default=None, null=True,
blank=True)
# The name of the office for this contest.
elected_office_name = models.CharField(verbose_name="name of the elected office", max_length=255,
null=False, blank=False)
elected_office_name_es = models.CharField(verbose_name="name of the elected office in Spanish", max_length=255,
null=True, blank=True, default=None)
    # The office's name as passed over by Google Civic. We save this so we can match to this office even
# if we edit the office's name locally.
google_civic_office_name = models.CharField(verbose_name="office name exactly as received from google civic",
max_length=255, null=True, blank=True)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.CharField(verbose_name="google civic election id",
max_length=255, null=False, blank=False, db_index=True)
google_civic_election_id_new = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=False, blank=False)
ocd_division_id = models.CharField(verbose_name="ocd division id", max_length=255, null=True, blank=True)
maplight_id = models.CharField(
verbose_name="maplight unique identifier", max_length=255, null=True, blank=True)
ballotpedia_id = models.CharField(
verbose_name="ballotpedia unique identifier", max_length=255, null=True, blank=True)
wikipedia_id = models.CharField(verbose_name="wikipedia unique identifier", max_length=255, null=True, blank=True)
# vote_type (ranked choice, majority)
# The number of candidates that a voter may vote for in this contest.
# TODO for now comment out number_voting_for for elected_office table
# number_voting_for = models.CharField(verbose_name="google civic number of candidates to vote for",
# max_length=255, null=True, blank=True)
# The number of candidates that will be elected to office in this contest.
number_elected = models.CharField(verbose_name="google civic number of candidates who will be elected",
max_length=255, null=True, blank=True)
# State code
state_code = models.CharField(verbose_name="state this office serves", max_length=2, null=True, blank=True)
# If this is a partisan election, the name of the party it is for.
primary_party = models.CharField(verbose_name="google civic primary party", max_length=255, null=True, blank=True)
# The name of the district.
district_name = models.CharField(verbose_name="district name", max_length=255, null=True, blank=True)
# The geographic scope of this district. If unspecified the district's geography is not known.
# One of: national, statewide, congressional, stateUpper, stateLower, countywide, judicial, schoolBoard,
# cityWide, township, countyCouncil, cityCouncil, ward, special
district_scope = models.CharField(verbose_name="google civic district scope",
max_length=255, null=True, blank=True)
# An identifier for this district, relative to its scope. For example, the 34th State Senate district
# would have id "34" and a scope of stateUpper.
district_id = models.CharField(verbose_name="google civic district id", max_length=255, null=True, blank=True)
# The levels of government of the office for this contest. There may be more than one in cases where a
# jurisdiction effectively acts at two different levels of government; for example, the mayor of the
# District of Columbia acts at "locality" level, but also effectively at both
# "administrative-area-2" and "administrative-area-1".
contest_level0 = models.CharField(verbose_name="google civic level, option 0",
max_length=255, null=True, blank=True)
contest_level1 = models.CharField(verbose_name="google civic level, option 1",
max_length=255, null=True, blank=True)
contest_level2 = models.CharField(verbose_name="google civic level, option 2",
max_length=255, null=True, blank=True)
# ballot_placement: We store ballot_placement in the BallotItem table instead because it is different for each voter
# A description of any additional eligibility requirements for voting in this contest.
    electorate_specifications = models.CharField(verbose_name="google civic electorate specifications",
max_length=255, null=True, blank=True)
# "Yes" or "No" depending on whether this a contest being held outside the normal election cycle.
special = models.CharField(verbose_name="google civic primary party", max_length=255, null=True, blank=True)
ctcl_uuid = models.CharField(verbose_name="ctcl uuid", max_length=36, null=True, blank=True)
elected_office_description = models.CharField(verbose_name="office description", max_length=255,
null=True, blank=True)
elected_office_description_es = models.CharField(verbose_name="office description spanish", max_length=255,
null=True, blank=True)
elected_office_is_partisan = models.BooleanField(verbose_name='office is_partisan', default=False)
elected_office_ctcl_id = models.CharField(verbose_name="we vote permanent id for this elected office",
max_length=255, default=None, null=True, blank=True)
status = models.TextField(verbose_name="batch row action elected office status", null=True, blank=True, default="")
class BatchRowActionPolitician(models.Model):
"""
The definition of the action for importing one Politician.
"""
batch_set_id = models.PositiveIntegerField(verbose_name="unique id of batch set", unique=False, null=True)
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
batch_row_id = models.PositiveIntegerField(
verbose_name="unique id of batch row", unique=False, null=False, db_index=True)
kind_of_action = models.CharField(
max_length=40, choices=KIND_OF_ACTION_CHOICES, default=IMPORT_TO_BE_DETERMINED, db_index=True)
# Fields from Politician
politician_we_vote_id = models.CharField(verbose_name="we vote permanent id of this politician", max_length=255,
default=None, null=True, blank=True, unique=False)
# See this url for properties: https://docs.python.org/2/library/functions.html#property
first_name = models.CharField(verbose_name="first name", max_length=255, default=None, null=True, blank=True)
middle_name = models.CharField(verbose_name="middle name", max_length=255, default=None, null=True, blank=True)
last_name = models.CharField(verbose_name="last name", max_length=255, default=None, null=True, blank=True)
politician_name = models.CharField(verbose_name="official full name", max_length=255, default=None, null=True,
blank=True)
# This is the politician's name from GoogleCivicCandidateCampaign
google_civic_candidate_name = models.CharField(verbose_name="full name from google civic", max_length=255,
default=None, null=True, blank=True)
# This is the politician's name assembled from TheUnitedStatesIo first_name + last_name for quick search
full_name_assembled = models.CharField(verbose_name="full name assembled from first_name + last_name",
max_length=255, default=None, null=True, blank=True)
gender = models.CharField("gender", max_length=1, choices=GENDER_CHOICES, default=UNKNOWN)
birth_date = models.DateField("birth date", default=None, null=True, blank=True)
# race = enum?
# official_image_id = ??
bioguide_id = models.CharField(verbose_name="bioguide unique identifier", max_length=200, null=True, unique=False)
thomas_id = models.CharField(verbose_name="thomas unique identifier", max_length=200, null=True, unique=False)
lis_id = models.CharField(verbose_name="lis unique identifier", max_length=200, null=True, blank=True, unique=False)
govtrack_id = models.CharField(verbose_name="govtrack unique identifier", max_length=200, null=True, unique=False)
opensecrets_id = models.CharField(verbose_name="opensecrets unique identifier", max_length=200, null=True,
unique=False)
vote_smart_id = models.CharField(verbose_name="votesmart unique identifier", max_length=200, null=True,
unique=False)
fec_id = models.CharField(verbose_name="fec unique identifier", max_length=200, null=True, unique=False, blank=True)
cspan_id = models.CharField(verbose_name="cspan unique identifier", max_length=200, null=True, blank=True,
unique=False)
wikipedia_id = models.CharField(verbose_name="wikipedia url", max_length=500, default=None, null=True, blank=True)
ballotpedia_id = models.CharField(verbose_name="ballotpedia unique id", max_length=500, default=None, null=True,
blank=True)
house_history_id = models.CharField(verbose_name="house history unique identifier", max_length=200, null=True,
blank=True)
maplight_id = models.CharField(verbose_name="maplight unique identifier", max_length=200, null=True, unique=False,
blank=True)
washington_post_id = models.CharField(verbose_name="washington post unique identifier", max_length=200, null=True,
unique=False)
icpsr_id = models.CharField(verbose_name="icpsr unique identifier", max_length=200, null=True, unique=False)
# The full name of the party the official belongs to.
political_party = models.CharField(verbose_name="politician political party", max_length=255, null=True)
state_code = models.CharField(verbose_name="politician home state", max_length=2, null=True)
politician_url = models.URLField(
verbose_name='latest website url of politician', max_length=255, blank=True, null=True)
politician_twitter_handle = models.CharField(verbose_name='politician twitter screen_name', max_length=255,
null=True, unique=False)
we_vote_hosted_profile_image_url_large = models.URLField(
verbose_name='we vote hosted large image url', max_length=255, blank=True, null=True)
we_vote_hosted_profile_image_url_medium = models.URLField(
verbose_name='we vote hosted medium image url', max_length=255, blank=True, null=True)
we_vote_hosted_profile_image_url_tiny = models.URLField(
verbose_name='we vote hosted tiny image url', max_length=255, blank=True, null=True)
ctcl_uuid = models.CharField(verbose_name="ctcl uuid", max_length=36, null=True, blank=True)
politician_facebook_id = models.CharField(verbose_name='politician facebook user name', max_length=255, null=True,
unique=False)
politician_phone_number = models.CharField(verbose_name='politician phone number', max_length=255, null=True,
unique=False)
politician_googleplus_id = models.CharField(verbose_name='politician googleplus profile name', max_length=255,
null=True, unique=False)
politician_youtube_id = models.CharField(verbose_name='politician youtube profile name', max_length=255, null=True,
unique=False)
politician_email_address = models.CharField(verbose_name='politician email address', max_length=80, null=True,
unique=False)
status = models.TextField(verbose_name="batch row action politician status", null=True, blank=True, default="")
class BatchRowActionCandidate(models.Model):
"""
The definition of the action for importing one Candidate.
"""
batch_set_id = models.PositiveIntegerField(verbose_name="unique id of batch set", unique=False, null=True)
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
batch_row_id = models.PositiveIntegerField(
verbose_name="unique id of batch row", unique=False, null=False, db_index=True)
kind_of_action = models.CharField(
max_length=40, choices=KIND_OF_ACTION_CHOICES, default=IMPORT_TO_BE_DETERMINED, db_index=True)
# Fields from Candidate
candidate_we_vote_id = models.CharField(
verbose_name="we vote permanent id of this candidate", max_length=255, default=None, null=True,
blank=True)
maplight_id = models.CharField(
verbose_name="maplight candidate id", max_length=255, default=None, null=True, blank=True)
vote_smart_id = models.CharField(
verbose_name="vote smart candidate id", max_length=15, default=None, null=True, blank=True, unique=False)
vote_usa_office_id = models.CharField(max_length=64, default=None, null=True, blank=True)
vote_usa_politician_id = models.CharField(max_length=64, default=None, null=True, blank=True)
vote_usa_profile_image_url_https = models.TextField(null=True, blank=True, default=None)
# The internal We Vote id for the ContestOffice that this candidate is competing for. During setup we need to allow
# this to be null.
contest_office_id = models.CharField(
verbose_name="contest_office_id id", max_length=255, null=True, blank=True)
# We want to link the candidate to the contest with permanent ids so we can export and import
contest_office_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the office this candidate is running for", max_length=255, default=None,
null=True, blank=True, unique=False)
contest_office_name = models.CharField(verbose_name="name of the office", max_length=255, null=True, blank=True)
# politician (internal) link to local We Vote Politician entry. During setup we need to allow this to be null.
politician_id = models.BigIntegerField(verbose_name="politician unique identifier", null=True, blank=True)
# The persistent We Vote unique ID of the Politician, so we can export and import into other databases.
politician_we_vote_id = models.CharField(
verbose_name="we vote politician id", max_length=255, null=True, blank=True)
# The candidate's name.
candidate_name = models.CharField(verbose_name="candidate name", max_length=255, null=False, blank=False)
# The candidate's name as passed over by Google Civic. We save this so we can match to this candidate even
# if we edit the candidate's name locally.
google_civic_candidate_name = models.CharField(verbose_name="candidate name exactly as received from google civic",
max_length=255, null=False, blank=False)
candidate_gender = models.CharField(verbose_name="candidate gender", max_length=255, null=True, blank=True)
# Birthday in YYYY-MM-DD format.
birth_day_text = models.CharField(verbose_name="birth day", max_length=10, null=True, blank=True)
# The full name of the party the candidate is a member of.
party = models.CharField(verbose_name="party", max_length=255, null=True, blank=True)
# A URL for a photo of the candidate.
photo_url = models.CharField(verbose_name="photoUrl", max_length=255, null=True, blank=True)
photo_url_from_ctcl = models.TextField(null=True, blank=True, default=None)
photo_url_from_maplight = models.URLField(
verbose_name='candidate portrait url of candidate from maplight', max_length=255, blank=True, null=True)
photo_url_from_vote_smart = models.URLField(
verbose_name='candidate portrait url of candidate from vote smart', max_length=255, blank=True, null=True)
photo_url_from_vote_usa = models.TextField(null=True, blank=True, default=None)
# The order the candidate appears on the ballot relative to other candidates for this contest.
order_on_ballot = models.CharField(verbose_name="order on ballot", max_length=255, null=True, blank=True)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.CharField(
verbose_name="google civic election id", max_length=255, null=True, blank=True, db_index=True)
google_civic_election_id_new = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=True, blank=True)
ocd_division_id = models.CharField(verbose_name="ocd division id", max_length=255, null=True, blank=True)
# State code
state_code = models.CharField(verbose_name="state this candidate serves", max_length=2, null=True, blank=True)
# The URL for the candidate's campaign web site.
candidate_url = models.URLField(
verbose_name='website url of candidate', max_length=255, blank=True, null=True)
candidate_contact_form_url = models.URLField(
verbose_name='website url of candidate contact form', max_length=255, blank=True, null=True)
facebook_url = models.URLField(
verbose_name='facebook url of candidate', max_length=255, blank=True, null=True)
twitter_url = models.URLField(verbose_name='twitter url of candidate', blank=True, null=True)
twitter_user_id = models.BigIntegerField(verbose_name="twitter id", null=True, blank=True)
candidate_twitter_handle = models.CharField(
verbose_name='candidate twitter screen_name', max_length=255, null=True, unique=False)
twitter_name = models.CharField(
verbose_name="org name from twitter", max_length=255, null=True, blank=True)
twitter_location = models.CharField(
verbose_name="org location from twitter", max_length=255, null=True, blank=True)
twitter_followers_count = models.IntegerField(verbose_name="number of twitter followers",
null=False, blank=True, default=0)
twitter_profile_image_url_https = models.URLField(verbose_name='url of logo from twitter', blank=True, null=True)
twitter_profile_background_image_url_https = models.URLField(verbose_name='tile-able background from twitter',
blank=True, null=True)
twitter_profile_banner_url_https = models.URLField(verbose_name='profile banner image from twitter',
blank=True, null=True)
twitter_description = models.CharField(verbose_name="Text description of this organization from twitter.",
max_length=255, null=True, blank=True)
google_plus_url = models.URLField(verbose_name='google plus url of candidate', blank=True, null=True)
youtube_url = models.URLField(verbose_name='youtube url of candidate', blank=True, null=True)
# The email address for the candidate's campaign.
candidate_email = models.CharField(verbose_name="candidate email", max_length=255, null=True, blank=True)
# The voice phone number for the candidate's campaign office.
candidate_phone = models.CharField(verbose_name="candidate phone", max_length=255, null=True, blank=True)
wikipedia_page_id = models.BigIntegerField(verbose_name="pageid", null=True, blank=True)
wikipedia_page_title = models.CharField(
verbose_name="Page title on Wikipedia", max_length=255, null=True, blank=True)
wikipedia_photo_url = models.URLField(verbose_name='url of wikipedia logo', max_length=255, blank=True, null=True)
ballotpedia_candidate_id = models.PositiveIntegerField(
verbose_name="ballotpedia integer id", null=True, blank=True)
# The candidate's name as passed over by Ballotpedia
ballotpedia_candidate_name = models.CharField(verbose_name="candidate name exactly as received from ballotpedia",
max_length=255, null=True, blank=True)
ballotpedia_candidate_summary = models.TextField(verbose_name="candidate summary from ballotpedia",
null=True, blank=True, default=None)
ballotpedia_candidate_url = models.URLField(
verbose_name='url of candidate on ballotpedia', max_length=255, blank=True, null=True)
ballotpedia_election_id = models.PositiveIntegerField(verbose_name="ballotpedia election id", null=True, blank=True)
# The id of the image for retrieval from Ballotpedia API
ballotpedia_image_id = models.PositiveIntegerField(verbose_name="ballotpedia image id", null=True, blank=True)
# Equivalent of elected_office in We Vote
ballotpedia_office_id = models.PositiveIntegerField(
verbose_name="ballotpedia elected office integer id", null=True, blank=True)
# This is just the characters in the Ballotpedia URL
ballotpedia_page_title = models.CharField(
verbose_name="Page title on Ballotpedia", max_length=255, null=True, blank=True)
# Equivalent of politician in We Vote
ballotpedia_person_id = models.PositiveIntegerField(
verbose_name="ballotpedia person integer id", null=True, blank=True)
ballotpedia_photo_url = models.URLField(
verbose_name='url of ballotpedia logo', max_length=255, blank=True, null=True)
# Equivalent of contest_office in We Vote
ballotpedia_race_id = models.PositiveIntegerField(verbose_name="ballotpedia race integer id", null=True, blank=True)
# Official Statement from Candidate in Ballot Guide
ballot_guide_official_statement = models.TextField(verbose_name="official candidate statement from ballot guide",
null=True, blank=True, default="")
batch_row_action_office_ctcl_uuid = models.CharField(verbose_name="ctcl uuid", max_length=36, null=True, blank=True)
crowdpac_candidate_id = models.PositiveIntegerField(verbose_name="crowdpac integer id", null=True, blank=True)
ctcl_uuid = models.CharField(verbose_name="ctcl uuid", max_length=36, null=True, blank=True)
candidate_is_top_ticket = models.BooleanField(verbose_name="candidate is top ticket", default=False)
candidate_is_incumbent = models.BooleanField(verbose_name="candidate is currently in the office", default=False)
candidate_participation_status = models.CharField(verbose_name="candidate participation status",
max_length=255, null=True, blank=True)
# From VIP standard format
candidate_ctcl_person_id = models.CharField(
verbose_name="candidate person id", max_length=255, null=True, blank=True)
status = models.TextField(verbose_name="batch row action candidate status", null=True, blank=True, default="")
class BatchRowActionOrganization(models.Model):
"""
The definition of the action for importing one Organization.
"""
batch_set_id = models.PositiveIntegerField(verbose_name="unique id of batch set", unique=False, null=True)
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
batch_row_id = models.PositiveIntegerField(
verbose_name="unique id of batch row", unique=False, null=False, db_index=True)
kind_of_action = models.CharField(
max_length=40, choices=KIND_OF_ACTION_CHOICES, default=IMPORT_TO_BE_DETERMINED, db_index=True)
# Fields from Organization
organization_we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, null=True, blank=True)
organization_name = models.CharField(
verbose_name="organization name", max_length=255, null=False, blank=False)
organization_website = models.URLField(
verbose_name='url of the endorsing organization', max_length=255, blank=True, null=True)
organization_email = models.EmailField(
verbose_name='organization contact email address', max_length=255, unique=False, null=True, blank=True)
organization_contact_form_url = models.URLField(
verbose_name='url of the organization contact form', max_length=255, blank=True, null=True)
organization_contact_name = models.CharField(max_length=255, null=True, unique=False)
organization_facebook = models.URLField(verbose_name='url of facebook page', blank=True, null=True)
organization_image = models.CharField(verbose_name='organization image', max_length=255, null=True, unique=False)
state_served_code = models.CharField(verbose_name="state this organization serves", max_length=2,
null=True, blank=True)
# The vote_smart special interest group sigId for this organization
vote_smart_id = models.BigIntegerField(
verbose_name="vote smart special interest group id", null=True, blank=True)
organization_description = models.TextField(
verbose_name="Text description of this organization.", null=True, blank=True)
organization_address = models.CharField(
verbose_name='organization street address', max_length=255, unique=False, null=True, blank=True)
organization_city = models.CharField(max_length=255, null=True, blank=True)
organization_state = models.CharField(max_length=2, null=True, blank=True)
organization_zip = models.CharField(max_length=255, null=True, blank=True)
organization_phone1 = models.CharField(max_length=255, null=True, blank=True)
organization_phone2 = models.CharField(max_length=255, null=True, blank=True)
organization_fax = models.CharField(max_length=255, null=True, blank=True)
# Facebook session information
facebook_id = models.BigIntegerField(verbose_name="facebook big integer id", null=True, blank=True)
facebook_email = models.EmailField(verbose_name='facebook email address', max_length=255, unique=False,
null=True, blank=True)
fb_username = models.CharField(max_length=50, validators=[alphanumeric], null=True)
facebook_profile_image_url_https = models.URLField(verbose_name='url of image from facebook', blank=True, null=True)
# Twitter information
twitter_user_id = models.BigIntegerField(verbose_name="twitter id", null=True, blank=True)
organization_twitter_handle = models.CharField(
verbose_name='organization twitter screen_name', max_length=255, null=True, unique=False)
twitter_name = models.CharField(
verbose_name="org name from twitter", max_length=255, null=True, blank=True)
twitter_location = models.CharField(
verbose_name="org location from twitter", max_length=255, null=True, blank=True)
twitter_followers_count = models.IntegerField(verbose_name="number of twitter followers",
null=False, blank=True, default=0)
twitter_profile_image_url_https = models.URLField(verbose_name='url of user logo from twitter',
blank=True, null=True)
twitter_profile_background_image_url_https = models.URLField(verbose_name='tile-able background from twitter',
blank=True, null=True)
twitter_profile_banner_url_https = models.URLField(verbose_name='profile banner image from twitter',
blank=True, null=True)
twitter_description = models.CharField(verbose_name="Text description of this organization from twitter.",
max_length=255, null=True, blank=True)
# Instagram
organization_instagram_handle = models.CharField(
verbose_name='organization instagram screen_name', max_length=255, null=True, unique=False)
wikipedia_page_id = models.BigIntegerField(verbose_name="pageid", null=True, blank=True)
wikipedia_page_title = models.CharField(
verbose_name="Page title on Wikipedia", max_length=255, null=True, blank=True)
wikipedia_thumbnail_url = models.URLField(
verbose_name='url of wikipedia logo thumbnail', max_length=255, blank=True, null=True)
wikipedia_thumbnail_width = models.IntegerField(verbose_name="width of photo", null=True, blank=True)
wikipedia_thumbnail_height = models.IntegerField(verbose_name="height of photo", null=True, blank=True)
wikipedia_photo_url = models.URLField(
verbose_name='url of wikipedia logo', max_length=255, blank=True, null=True)
ballotpedia_page_title = models.CharField(
verbose_name="Page title on Ballotpedia", max_length=255, null=True, blank=True)
ballotpedia_photo_url = models.URLField(
verbose_name='url of ballotpedia logo', max_length=255, blank=True, null=True)
organization_type = models.CharField(
verbose_name="type of org", max_length=8, choices=ORGANIZATION_TYPE_CHOICES, default=UNKNOWN)
status = models.TextField(verbose_name="batch row action organization status", null=True, blank=True, default="")
class BatchRowActionPollingLocation(models.Model):
"""
    The definition of the action for importing one PollingLocation.
"""
batch_set_id = models.PositiveIntegerField(verbose_name="unique id of batch set", unique=False, null=True)
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
batch_row_id = models.PositiveIntegerField(
verbose_name="unique id of batch row", null=True, default=None, db_index=True)
kind_of_action = models.CharField(
max_length=40, choices=KIND_OF_ACTION_CHOICES, default=IMPORT_TO_BE_DETERMINED, db_index=True)
polling_location_we_vote_id = models.CharField(max_length=255, default=None, null=True)
location_name = models.CharField(max_length=255, null=True, blank=True)
polling_hours_text = models.CharField(max_length=255, null=True, blank=True)
directions_text = models.TextField(null=True, blank=True)
latitude = models.FloatField(default=None, null=True)
longitude = models.FloatField(default=None, null=True)
line1 = models.CharField(max_length=255, blank=True, null=True)
line2 = models.CharField(max_length=255, blank=True, null=True)
city = models.CharField(max_length=255, blank=True, null=True)
source_code = models.CharField(max_length=255, blank=True, null=True)
state = models.CharField(max_length=255, blank=True, null=True)
county_name = models.CharField(default=None, max_length=255, null=True)
polling_location_deleted = models.BooleanField(default=False)
precinct_name = models.CharField(default=None, max_length=255, null=True)
use_for_bulk_retrieve = models.BooleanField(default=False)
zip_long = models.CharField(max_length=255, blank=True, null=True)
class BatchRowActionPosition(models.Model):
"""
The definition of the action for importing one Position.
"""
batch_set_id = models.PositiveIntegerField(verbose_name="unique id of batch set", unique=False, null=True)
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
batch_row_id = models.PositiveIntegerField(
verbose_name="unique id of batch row", unique=False, null=False, db_index=True)
kind_of_action = models.CharField(
max_length=40, choices=KIND_OF_ACTION_CHOICES, default=IMPORT_TO_BE_DETERMINED, db_index=True)
# Fields from Position
position_we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, default=None, null=True, blank=True)
# The id for the generated position that this PositionEntered entry influences
ballot_item_display_name = models.CharField(verbose_name="text name for ballot item",
max_length=255, null=True, blank=True)
# We cache the url to an image for the candidate, measure or office for rapid display
ballot_item_image_url_https = models.URLField(
verbose_name='url of https image for candidate, measure or office', max_length=255, blank=True, null=True)
ballot_item_twitter_handle = models.CharField(verbose_name='twitter screen_name for candidate, measure, or office',
max_length=255, null=True, unique=False)
# What is the organization name, voter name, or public figure name? We cache this here for rapid display
speaker_display_name = models.CharField(
verbose_name="name of the org or person with position", max_length=255, null=True, blank=True, unique=False)
# We cache the url to an image for the org, voter, or public_figure for rapid display
speaker_image_url_https = models.URLField(
verbose_name='url of https image for org or person with position', max_length=255, blank=True, null=True)
speaker_twitter_handle = models.CharField(verbose_name='twitter screen_name for org or person with position',
max_length=255, null=True, unique=False)
date_entered = models.DateTimeField(verbose_name='date entered', null=True, auto_now=True)
    # The date this position last changed
date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True)
# The organization this position is for
organization_id = models.BigIntegerField(null=True, blank=True)
organization_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the organization", max_length=255, null=True,
blank=True, unique=False)
# The voter expressing the opinion
# Note that for organizations who have friends, the voter_we_vote_id is what we use to link to the friends
# (in the PositionForFriends table).
# Public positions from an organization are shared via organization_we_vote_id (in PositionEntered table), while
# friend's-only positions are shared via voter_we_vote_id.
voter_id = models.BigIntegerField(null=True, blank=True)
voter_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the voter expressing the opinion", max_length=255, null=True,
blank=True, unique=False)
# The unique id of the public figure expressing the opinion. May be null if position is from org or voter
# instead of public figure.
public_figure_we_vote_id = models.CharField(
verbose_name="public figure we vote id", max_length=255, null=True, blank=True, unique=False)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.CharField(verbose_name="google civic election id",
max_length=255, null=True, blank=False, default=0, db_index=True)
google_civic_election_id_new = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=True, blank=True)
# State code
state_code = models.CharField(verbose_name="us state of the ballot item position is for",
max_length=2, null=True, blank=True)
# ### Values from Vote Smart ###
vote_smart_rating_id = models.BigIntegerField(null=True, blank=True, unique=False)
# Usually in one of these two formats 2015, 2014-2015
vote_smart_time_span = models.CharField(
verbose_name="the period in which the organization stated this position", max_length=255, null=True,
blank=True, unique=False)
vote_smart_rating = models.CharField(
verbose_name="vote smart value between 0-100", max_length=255, null=True,
blank=True, unique=False)
vote_smart_rating_name = models.CharField(max_length=255, null=True, blank=True, unique=False)
# The unique We Vote id of the tweet that is the source of the position
tweet_source_id = models.BigIntegerField(null=True, blank=True)
# This is the office that the position refers to.
    # Only one of contest_office, contest_measure, or candidate is filled, never all three
contest_office_id = models.BigIntegerField(verbose_name='id of contest_office', null=True, blank=True)
contest_office_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the contest_office", max_length=255, null=True, blank=True, unique=False)
contest_office_name = models.CharField(verbose_name="name of the office", max_length=255, null=True, blank=True)
race_office_level = models.CharField(verbose_name="race office level", max_length=255, null=True, blank=True)
# This is the candidate/politician that the position refers to.
    # Only one of candidate, contest_office, or contest_measure is filled, never all three
candidate_campaign_id = models.BigIntegerField(verbose_name='id of candidate', null=True, blank=True)
candidate_campaign_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the candidate", max_length=255, null=True,
blank=True, unique=False)
# The candidate's name as passed over by Google Civic. We save this so we can match to this candidate if an import
# doesn't include a we_vote_id we recognize.
google_civic_candidate_name = models.CharField(verbose_name="candidate name exactly as received from google civic",
max_length=255, null=True, blank=True)
# The measure's title as passed over by Google Civic. We save this so we can match to this measure if an import
# doesn't include a we_vote_id we recognize.
google_civic_measure_title = models.CharField(verbose_name="measure title exactly as received from google civic",
max_length=255, null=True, blank=True)
# Useful for queries based on Politicians -- not the main table we use for ballot display though
politician_id = models.BigIntegerField(verbose_name='', null=True, blank=True)
politician_we_vote_id = models.CharField(
verbose_name="we vote permanent id for politician", max_length=255, null=True,
blank=True, unique=False)
political_party = models.CharField(verbose_name="political party", max_length=255, null=True)
# This is the measure/initiative/proposition that the position refers to.
    # Only one of contest_measure, contest_office, or candidate is filled, never all three
contest_measure_id = models.BigIntegerField(verbose_name='id of contest_measure', null=True, blank=True)
contest_measure_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the contest_measure", max_length=255, null=True,
blank=True, unique=False)
# Strategic denormalization - this is redundant but will make generating the voter guide easier.
# geo = models.ForeignKey(Geo, null=True, related_name='pos_geo')
# issue = models.ForeignKey(Issue, null=True, blank=True, related_name='')
stance = models.CharField(max_length=15, choices=POSITION_CHOICES, default=NO_STANCE) # supporting/opposing
statement_text = models.TextField(null=True, blank=True, )
statement_html = models.TextField(null=True, blank=True, )
# A link to any location with more information about this position
more_info_url = models.URLField(
blank=True, null=True, max_length=255, verbose_name='url with more info about this position')
# Did this position come from a web scraper?
from_scraper = models.BooleanField(default=False)
# Was this position certified by an official with the organization?
organization_certified = models.BooleanField(default=False)
# Was this position certified by an official We Vote volunteer?
volunteer_certified = models.BooleanField(default=False)
status = models.TextField(verbose_name="batch row action position status", null=True, blank=True, default="")
class BatchRowActionBallotItem(models.Model):
"""
The definition of the action for importing one ballot item.
"""
batch_set_id = models.PositiveIntegerField(
verbose_name="unique id of batch set", unique=False, null=True, db_index=True)
batch_header_id = models.PositiveIntegerField(
verbose_name="unique id of header row", unique=False, null=False, db_index=True)
batch_row_id = models.PositiveIntegerField(
verbose_name="unique id of batch row", null=True, default=None, db_index=True)
kind_of_action = models.CharField(
max_length=40, choices=KIND_OF_ACTION_CHOICES, default=IMPORT_TO_BE_DETERMINED, db_index=True)
ballot_item_id = models.PositiveIntegerField(
verbose_name="ballot item unique id", default=None, null=True, db_index=True)
# Fields from BallotItem
# The unique id of the voter for which this ballot was retrieved
voter_id = models.IntegerField(verbose_name="the voter unique id", default=0, null=False, blank=False)
# The map point for which this ballot was retrieved
polling_location_we_vote_id = models.CharField(
verbose_name="we vote permanent id of the map point", max_length=255, default=None, null=True,
blank=True, unique=False)
# The unique ID of this election. (Provided by Google Civic)
google_civic_election_id = models.CharField(verbose_name="google civic election id",
max_length=20, null=False, db_index=True)
google_civic_election_id_new = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=False)
state_code = models.CharField(verbose_name="state the ballot item is related to", max_length=2, null=True)
google_ballot_placement = models.BigIntegerField(
verbose_name="the order this item should appear on the ballot", null=True, blank=True, unique=False)
local_ballot_order = models.IntegerField(
verbose_name="locally calculated order this item should appear on the ballot", null=True, blank=True)
# The id for this contest office specific to this server.
contest_office_id = models.PositiveIntegerField(verbose_name="local id for this contest office", default=0,
null=True, blank=True)
    # The internal We Vote id for the ContestOffice that this campaign is taking a stance on
contest_office_we_vote_id = models.CharField(
verbose_name="we vote permanent id for this office", max_length=255, default=None, null=True,
blank=True, unique=False)
# The local database id for this measure, specific to this server.
contest_measure_id = models.PositiveIntegerField(
verbose_name="contest_measure unique id", default=0, null=True, blank=True)
    # The internal We Vote id for the ContestMeasure that this campaign is taking a stance on
contest_measure_we_vote_id = models.CharField(
verbose_name="we vote permanent id for this measure", max_length=255, default=None, null=True,
blank=True, unique=False)
# This is a sortable name, either the candidate name or the measure name
ballot_item_display_name = models.CharField(verbose_name="a label we can sort by", max_length=255, null=True,
blank=True)
measure_subtitle = models.TextField(verbose_name="google civic referendum subtitle",
null=True, blank=True, default="")
measure_text = models.TextField(verbose_name="measure text", null=True, blank=True, default="")
measure_url = models.URLField(
verbose_name='url of measure', max_length=255, blank=True, null=True)
yes_vote_description = models.TextField(verbose_name="what a yes vote means", null=True, blank=True, default=None)
no_vote_description = models.TextField(verbose_name="what a no vote means", null=True, blank=True, default=None)
status = models.TextField(verbose_name="batch row action ballot item status", null=True, blank=True, default="")
def create_batch_from_json_wrapper(
file_name, structured_json_list, mapping_dict, kind_of_batch,
google_civic_election_id=0, organization_we_vote_id="", polling_location_we_vote_id="",
batch_set_id=0, state_code=""):
batch_manager = BatchManager()
return batch_manager.create_batch_from_json(
file_name, structured_json_list, mapping_dict, kind_of_batch,
google_civic_election_id, organization_we_vote_id, polling_location_we_vote_id, batch_set_id, state_code)
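# Usage sketch (illustrative only; the file name, kind_of_batch value, and other
# argument values below are hypothetical -- real callers build them elsewhere in this project):
#
#   results = create_batch_from_json_wrapper(
#       'sample_import.json', structured_json_list, mapping_dict, 'CANDIDATE',
#       google_civic_election_id=1000, state_code='CA')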
| wevote/WeVoteServer | import_export_batches/models.py | Python | mit | 356,213 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Attention types.
ATT_LUONG = "luong"
ATT_LUONG_SCALED = "luong_scaled"
ATT_BAHDANAU = "bahdanau"
ATT_BAHDANAU_NORM = "bahdanau_norm"
ATT_TYPES = (ATT_LUONG, ATT_LUONG_SCALED, ATT_BAHDANAU, ATT_BAHDANAU_NORM)
# Encoder types.
ENC_UNI = "uni"
ENC_BI = "bi"
ENC_GNMT = "gnmt"
ENC_TYPES = (ENC_UNI, ENC_BI, ENC_GNMT)
# Decoder types.
DEC_BASIC = "basic"
DEC_ATTENTIVE = "attentive"
DEC_TYPES = (DEC_BASIC, DEC_ATTENTIVE)
# Language model types.
LM_L2R = "left2right"
LM_TYPES = (LM_L2R,)
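# Example (a sketch, not part of the original module): these *_TYPES tuples are
# typically used to validate hyperparameter strings before a model is built.
# The helper name below is hypothetical.
#
#   def validate_attention_type(attention_type):
#     if attention_type not in ATT_TYPES:
#       raise ValueError("Unknown attention type: %s" % attention_type)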
| google-research/language | language/labs/consistent_zero_shot_nmt/utils/common_utils.py | Python | apache-2.0 | 1,241 |
#
# Copyright (c) 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import dbus
import inspect
import logging
import subscription_manager.injection as inj
log = logging.getLogger('rhsm-app.' + __name__)
class DbusIface(object):
service_name = 'com.redhat.SubscriptionManager'
def __init__(self):
try:
# Only follow names if there is a default main loop
self.has_main_loop = self._get_main_loop() is not None
self.bus = dbus.SystemBus()
validity_obj = self._get_validity_object(self.service_name,
'/EntitlementStatus',
follow_name_owner_changes=self.has_main_loop)
self.validity_iface = dbus.Interface(validity_obj,
dbus_interface='com.redhat.SubscriptionManager.EntitlementStatus')
# Activate methods now that we're connected
# Avoids some messy exception handling if dbus isn't installed
self.update = self._update
except dbus.DBusException, e:
# we can't connect to dbus. it's not running, likely from a minimal
# install. we can't do anything here, so just ignore it.
log.debug("Unable to connect to dbus")
log.exception(e)
def update(self):
pass
def _update(self):
try:
self.validity_iface.update_status(
inj.require(inj.CERT_SORTER).get_status_for_icon(),
ignore_reply=self.has_main_loop)
except dbus.DBusException, e:
# Should be unreachable in the gui
log.debug("Failed to update rhsmd")
log.exception(e)
# RHEL5 doesn't support 'follow_name_owner_changes'
def _get_validity_object(self, *args, **kwargs):
iface_args = inspect.getargspec(self.bus.get_object)[0]
if 'follow_name_owner_changes' not in iface_args and \
'follow_name_owner_changes' in kwargs:
log.debug("installed python-dbus doesn't support 'follow_name_owner_changes'")
del kwargs['follow_name_owner_changes']
return self.bus.get_object(*args, **kwargs)
# RHEL5 doesn't support 'get_default_main_loop'
def _get_main_loop(self):
if not hasattr(dbus, "get_default_main_loop"):
log.debug("installed python-dbus doesn't support 'get_default_main_loop'")
return None
return dbus.get_default_main_loop()
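# Usage sketch (hypothetical caller, not part of this module): GUI code typically
# creates one interface and calls update() whenever entitlement status may have
# changed; update() stays a no-op when the dbus connection could not be established.
#
#   iface = DbusIface()
#   iface.update()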
| vritant/subscription-manager | src/subscription_manager/dbus_interface.py | Python | gpl-2.0 | 2,989 |
import time
import json
import geojson
import copy
from geojson import Feature, Point, Polygon, MultiPolygon, GeometryCollection, FeatureCollection
from geojson import MultiPoint, MultiLineString, LineString
from collections import OrderedDict
from django.conf import settings
from django.db import models
from django.db.models import Q
from opencontext_py.libs.rootpath import RootPath
from opencontext_py.libs.languages import Languages
from opencontext_py.libs.isoyears import ISOyears
from opencontext_py.libs.general import LastUpdatedOrderedDict, DCterms
from opencontext_py.libs.globalmaptiles import GlobalMercator
from opencontext_py.apps.entities.uri.models import URImanagement
from opencontext_py.apps.ocitems.ocitem.itemkeys import ItemKeys
from opencontext_py.apps.ocitems.ocitem.caching import ItemGenerationCache
from opencontext_py.apps.ocitems.ocitem.partsjsonld import PartsJsonLD
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.assertions.containment import Containment
from opencontext_py.apps.ocitems.projects.models import Project
from opencontext_py.apps.ocitems.projects.metadata import ProjectMeta
class ItemSpatialTemporal():
""" Methods for adding spatial context, containment, and spatial temporal
data to an Open Context Item JSON-LD object
"""
def __init__(self):
self.project_uuid = None
self.manifest = None
dc_terms_obj = DCterms()
self.DC_META_PREDS = dc_terms_obj.get_dc_terms_list()
self.item_gen_cache = ItemGenerationCache()
rp = RootPath()
self.base_url = rp.get_baseurl()
self.contexts = False
self.linked_contexts = False
        self.geo_meta = False
self.temporal_meta = False
self.event_meta = False
self.contents = False
self.assertion_hashes = False
self.cannonical_uris = True
self.class_uri_list = [] # uris of item classes used in this item
self.parent_context_list = [] # list of parent context labels, used for making a dc-terms:Title
def get_spatial_temporal_context(self):
""" gets the item spatial context """
act_contain = Containment()
if self.manifest.item_type == 'subjects':
# get item geospatial and chronological metadata if subject item
# will do it differently if not a subject item
parents = act_contain.get_parents_by_child_uuid(self.manifest.uuid)
self.contexts = parents
# prepare a list of contexts (including the current item) to check for
# geospatial and event / chronology metadata
subject_list = act_contain.contexts_list
subject_list.insert(0, self.manifest.uuid)
self.geo_meta = act_contain.get_geochron_from_subject_list(subject_list, 'geo')
self.temporal_meta = act_contain.get_geochron_from_subject_list(subject_list, 'temporal')
self.event_meta = act_contain.get_geochron_from_subject_list(subject_list, 'event')
# now get any children items, contained in this "subjects" item
act_contain = Containment()
self.contents = act_contain.get_children_by_parent_uuid(self.manifest.uuid)
else:
parents = act_contain.get_related_context(self.manifest.uuid)
self.contexts = False
self.linked_contexts = parents
if self.manifest.item_type == 'projects':
# get project metadata objects directly
pm = ProjectMeta()
project = self.item_gen_cache.get_project_model_object(self.manifest.uuid)
sub_projects = self.item_gen_cache.get_project_subprojects(self.manifest.uuid)
if project is not None:
if isinstance(project.meta_json, dict):
if Project.META_KEY_GEO_SPECIFICITY in project.meta_json:
# the project has some default geographic specificity noted
# use this value when making geo_meta
pm.project_specificity = project.meta_json[Project.META_KEY_GEO_SPECIFICITY]
self.geo_meta = pm.get_project_geo_from_db(self.manifest.uuid)
if self.geo_meta is False:
# make geospatial metadata for the project, and sub-projects if they exist
pm.make_geo_meta(self.manifest.uuid, sub_projects)
self.geo_meta = pm.geo_objs
act_contain = Containment()
if self.geo_meta is False:
self.geo_meta = act_contain.get_related_geochron(self.manifest.uuid,
self.manifest.item_type,
'geo')
if self.temporal_meta is False:
self.temporal_meta = act_contain.get_related_geochron(self.manifest.uuid,
self.manifest.item_type,
'temporal')
if self.temporal_meta is False:
# now look in the project for temporal metadata
self.temporal_meta = act_contain.get_temporal_from_project(self.manifest.project_uuid)
if self.event_meta is False:
self.event_meta = act_contain.get_related_geochron(self.manifest.uuid,
self.manifest.item_type,
'event')
def add_json_ld_geojson_contexts(self, json_ld):
""" adds context information if present """
act_context = None
if isinstance(self.contexts, dict):
if len(self.contexts) > 0:
# add spatial context, direct parents of a given subject item
context_predicate = ItemKeys.PREDICATES_OCGEN_HASCONTEXTPATH
act_context = self.make_spatial_context_json_ld(self.contexts)
elif isinstance(self.linked_contexts, dict):
if len(self.linked_contexts) > 0:
# add related spatial contexts (related to a linked subject)
context_predicate = ItemKeys.PREDICATES_OCGEN_HASLINKEDCONTEXTPATH
act_context = self.make_spatial_context_json_ld(self.linked_contexts)
# first make the GeoJSON part of the JSON-LD
json_ld = self.add_geojson(json_ld)
# now add the spatial context part (open context specific)
if act_context is not None:
json_ld[context_predicate] = act_context
return json_ld
def add_contents_json_ld(self, json_ld):
""" adds subject items contained in the current item """
if isinstance(self.contents, dict):
if len(self.contents) > 0:
# make a list of all the UUIDs for children items
act_children_uuids = []
for tree_node, children in self.contents.items():
for child_uuid in children:
if child_uuid not in act_children_uuids:
act_children_uuids.append(child_uuid)
# adds child contents, with different treenodes
parts_json_ld = PartsJsonLD()
# get manifest objects for all the children items, for use in making JSON_LD
parts_json_ld.get_manifest_objects_from_uuids(act_children_uuids)
parts_json_ld.class_uri_list += self.class_uri_list
for tree_node, children in self.contents.items():
act_children = LastUpdatedOrderedDict()
act_children['id'] = tree_node
act_children['type'] = 'oc-gen:contents'
for child_uuid in children:
act_children = parts_json_ld.addto_predicate_list(act_children,
ItemKeys.PREDICATES_OCGEN_CONTAINS,
child_uuid,
'subjects')
self.class_uri_list += parts_json_ld.class_uri_list
json_ld[ItemKeys.PREDICATES_OCGEN_HASCONTENTS] = act_children
return json_ld
def add_geojson(self, json_ld):
"""
adds geospatial and event data that links time and space information
"""
uuid = self.manifest.uuid
item_type = self.manifest.item_type
geo_meta = self.geo_meta
event_meta = self.event_meta
features_dict = False # dict of all features to be added
feature_events = False # mappings between features and time periods
if geo_meta is not False:
# print('here!' + str(geo_meta))
features_dict = LastUpdatedOrderedDict()
feature_events = LastUpdatedOrderedDict()
for geo in geo_meta:
geo_id = geo.feature_id
geo_node = '#geo-' + str(geo_id) # the node id for database rec of the feature
geo_node_geom = '#geo-geom-' + str(geo_id)
geo_node_props = '#geo-props-' + str(geo_id)
geo_node_derived = '#geo-derived-' + str(geo_id) # node id for a derived feature
geo_node_derived_geom = '#geo-derived-geom-' + str(geo_id)
geo_node_derived_props = '#geo-derived-props-' + str(geo_id)
feature_events[geo_node] = []
geo_props = LastUpdatedOrderedDict()
geo_props['href'] = URImanagement.make_oc_uri(uuid, item_type, self.cannonical_uris)
geo_props['type'] = geo.meta_type
if len(geo.note) > 0:
geo_props['note'] = geo.note
if uuid != geo.uuid:
geo_props['reference-type'] = 'inferred'
geo_props['reference-uri'] = URImanagement.make_oc_uri(geo.uuid,
'subjects',
self.cannonical_uris)
rel_meta = self.item_gen_cache.get_entity(geo.uuid)
if rel_meta is not False:
geo_props['reference-label'] = rel_meta.label
geo_props['reference-slug'] = rel_meta.slug
else:
geo_props['reference-label'] = self.manifest.label
geo_props['reference-type'] = 'specified'
if self.assertion_hashes:
geo_props['hash_id'] = geo.hash_id
geo_props['feature_id'] = geo.feature_id
if geo.specificity < 0 and self.manifest.item_type != 'projects':
# case where we've got reduced precision geospatial data
# geotile = quadtree.encode(geo.latitude, geo.longitude, abs(geo.specificity))
geo_props['location-precision'] = abs(geo.specificity)
geo_props['location-precision-note'] = 'Location data approximated as a security precaution.'
gmt = GlobalMercator()
geotile = gmt.lat_lon_to_quadtree(geo.latitude, geo.longitude, abs(geo.specificity))
tile_bounds = gmt.quadtree_to_lat_lon(geotile)
item_polygon = Polygon([[(tile_bounds[1], tile_bounds[0]),
(tile_bounds[1], tile_bounds[2]),
(tile_bounds[3], tile_bounds[2]),
(tile_bounds[3], tile_bounds[0]),
(tile_bounds[1], tile_bounds[0])
]])
item_f_poly = Feature(geometry=item_polygon)
item_f_poly.id = geo_node_derived
item_f_poly.geometry.id = geo_node_derived_geom
item_f_poly.properties.update(geo_props)
item_f_poly.properties['location-note'] = 'This region defines the '\
'approximate location for this item.'
item_f_poly.properties['id'] = geo_node_derived_props
features_dict[geo_node_derived] = item_f_poly
item_point = Point((float(geo.longitude), float(geo.latitude)))
item_f_point = Feature(geometry=item_point)
item_f_point.id = geo_node
item_f_point.geometry.id = geo_node_geom
item_f_point.properties.update(geo_props)
item_f_point.properties['location-note'] = 'This point defines the center of the '\
'region approximating the location for this item.'
item_f_point.properties['id'] = geo_node_props
features_dict[geo_node] = item_f_point
elif len(geo.coordinates) > 1:
# here we have geo_json expressed features and geometries to use
if geo.specificity < 0:
geo_props['location-precision-note'] = 'Location data approximated as a security precaution.'
elif geo.specificity > 0:
geo_props['location-precision-note'] = 'Location data has uncertainty.'
else:
geo_props['location-precision-note'] = 'Location data available with no '\
'intentional reduction in precision.'
item_point = Point((float(geo.longitude), float(geo.latitude)))
item_f_point = Feature(geometry=item_point)
item_f_point.properties.update(geo_props)
if uuid == geo.uuid:
                        # the item itself has the polygon as its feature
item_db = Point((float(geo.longitude), float(geo.latitude)))
if geo.ftype == 'Polygon':
coord_obj = json.loads(geo.coordinates)
item_db = Polygon(coord_obj)
elif(geo.ftype == 'MultiPolygon'):
coord_obj = json.loads(geo.coordinates)
item_db = MultiPolygon(coord_obj)
elif(geo.ftype == 'MultiLineString'):
coord_obj = json.loads(geo.coordinates)
item_db = MultiLineString(coord_obj)
item_f_db = Feature(geometry=item_db)
item_f_db.id = geo_node
item_f_db.geometry.id = geo_node_geom
item_f_db.properties.update(geo_props)
item_f_db.properties['id'] = geo_node_props
features_dict[geo_node] = item_f_db
item_f_point.id = geo_node_derived
item_f_point.geometry.id = geo_node_derived_geom
item_f_point.properties['location-region-note'] = 'This point represents the center of the '\
'region defining the location of this item.'
item_f_point.properties['id'] = geo_node_derived_props
features_dict[geo_node_derived] = item_f_point
else:
                        # the item is contained within another item with a polygon or multipolygon feature
item_f_point.id = geo_node
item_f_point.geometry.id = geo_node_geom
item_f_point.properties['id'] = geo_node_props
item_f_point.properties['contained-in-region'] = True
item_f_point.properties['location-region-note'] = 'This point represents the center of the '\
'region containing this item.'
features_dict[geo_node] = item_f_point
else:
# case where the item only has a point for geo-spatial reference
geo_props['location-note'] = 'Location data available with no intentional reduction in precision.'
item_point = Point((float(geo.longitude), float(geo.latitude)))
item_f_point = Feature(geometry=item_point)
item_f_point.id = geo_node
item_f_point.geometry.id = geo_node_geom
item_f_point.properties.update(geo_props)
item_f_point.properties['id'] = geo_node_props
features_dict[geo_node] = item_f_point
if event_meta is not False:
            # events provide chronological information, tied to geo features
            # sometimes there is more than one time period for each geo feature
# in such cases, we duplicate geo features and add the different time event
# information to the new features
for event in event_meta:
rel_feature_num = 1 # default to the first geospatial feature for where the event happened
rel_feature_node = False
if event.feature_id > 0:
rel_feature_num = event.feature_id
if rel_feature_num >= 1:
rel_feature_node = '#geo-' + str(rel_feature_num)
act_event_obj = LastUpdatedOrderedDict()
act_event_obj = self.add_when_json(act_event_obj, uuid, item_type, event)
if rel_feature_node is not False and feature_events is not False:
feature_events[rel_feature_node].append(act_event_obj)
            if features_dict is not False:
if feature_events is not False:
for node_key, event_list in feature_events.items():
# update the feature with the first event "when" information
if len(event_list) > 0:
features_dict[node_key].update(event_list[0])
event_i = 1
for event in event_list:
if event_i <= 1:
# add the time info to the feature
old_feature = features_dict[node_key]
old_geo_id = old_feature.geometry['id']
old_prop_id = old_feature.properties['id']
features_dict[node_key].update(event)
else:
act_feature = copy.deepcopy(old_feature)
# now add new node ids for the new features created to for the event
new_node = node_key + '-event-' + str(event_i)
act_feature.id = new_node
act_feature.geometry['id'] = old_geo_id + '-event-' + str(event_i)
act_feature.properties['id'] = old_prop_id + '-event-' + str(event_i)
act_feature.update(event) # add the time info to the new feature
features_dict[new_node] = act_feature
del(act_feature)
event_i += 1
feature_keys = list(features_dict.keys())
                if len(feature_keys) < 2:
del features_dict[feature_keys[0]]['id'] # remove the conflicting id
# only 1 feature, so item is not a feature collection
json_ld.update(features_dict[feature_keys[0]])
else:
feature_list = [] # multiple features, so item has a feature collection
for node_key, feature in features_dict.items():
feature_list.append(feature)
item_fc = FeatureCollection(feature_list)
json_ld.update(item_fc)
return json_ld
def add_when_json(self, act_dict, uuid, item_type, event):
"""
adds when (time interval or instant) data
"""
when = LastUpdatedOrderedDict()
when['id'] = '#event-when-' + str(event.event_id)
when['type'] = event.when_type
when['type'] = event.meta_type
if(event.earliest != event.start):
# when['earliest'] = int(event.earliest)
pass
when['start'] = ISOyears().make_iso_from_float(event.start)
when['stop'] = ISOyears().make_iso_from_float(event.stop)
if event.latest != event.stop:
# when['latest'] = int(event.latest)
pass
if event.uuid != uuid:
# we're inheriting / inferring event metadata from a parent context
when['reference-type'] = 'inferred'
when['reference-uri'] = URImanagement.make_oc_uri(event.uuid, 'subjects', self.cannonical_uris)
rel_meta = self.item_gen_cache.get_entity(event.uuid)
if rel_meta is not False:
when['reference-label'] = rel_meta.label
else:
# metadata is specified for this specific item
when['reference-type'] = 'specified'
when['reference-label'] = self.manifest.label
if self.assertion_hashes:
when['hash_id'] = event.hash_id
act_dict['when'] = when
return act_dict
def make_spatial_context_json_ld(self, raw_contexts):
""" adds context information, if present """
        # adds parent contexts, with different treenodes
first_node = True
act_context = LastUpdatedOrderedDict()
for tree_node, r_parents in raw_contexts.items():
act_context = LastUpdatedOrderedDict()
# change the parent node to context not contents
tree_node = tree_node.replace('contents', 'context')
act_context['id'] = tree_node
act_context['type'] = 'oc-gen:contexts'
# now reverse the list of parent contexts, so top most parent context is first,
# followed by children contexts
parents = r_parents[::-1]
parts_json_ld = PartsJsonLD()
parts_json_ld.class_uri_list += self.class_uri_list
if len(parents) > 3:
# lots of parents, so probably not worth trying to use the cache.
                # makes more sense to look these all up in the manifest in 1 query
# get manifest objects for all the parent items, for use in making JSON_LD
parts_json_ld.get_manifest_objects_from_uuids(parents)
for parent_uuid in parents:
act_context = parts_json_ld.addto_predicate_list(act_context,
ItemKeys.PREDICATES_OCGEN_HASPATHITEMS,
parent_uuid,
'subjects')
self.class_uri_list += parts_json_ld.class_uri_list
if first_node:
# set aside a list of parent labels to use for making a dc-term:title
first_node = False
if ItemKeys.PREDICATES_OCGEN_HASPATHITEMS in act_context:
for parent_obj in act_context[ItemKeys.PREDICATES_OCGEN_HASPATHITEMS]:
self.parent_context_list.append(parent_obj['label'])
        return act_context
| ekansa/open-context-py | opencontext_py/apps/ocitems/ocitem/spatialtemporal.py | Python | gpl-3.0 | 24,192 |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
from . import reset_triggers
class overload_bit(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/global/lsp-bit/overload-bit. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines Overload Bit configuration.
"""
__slots__ = (
"_path_helper", "_extmethods", "__config", "__state", "__reset_triggers"
)
_yang_name = "overload-bit"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__reset_triggers = YANGDynClass(
base=reset_triggers.reset_triggers,
is_container="container",
yang_name="reset-triggers",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"global",
"lsp-bit",
"overload-bit",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/config (container)
YANG Description: This container defines ISIS Overload Bit configuration.
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: This container defines ISIS Overload Bit configuration.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/state (container)
YANG Description: This container defines state for ISIS Overload Bit.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: This container defines state for ISIS Overload Bit.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_reset_triggers(self):
"""
Getter method for reset_triggers, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/reset_triggers (container)
YANG Description: This container defines state for ISIS Overload Bit reset triggers
"""
return self.__reset_triggers
def _set_reset_triggers(self, v, load=False):
"""
Setter method for reset_triggers, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/reset_triggers (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_reset_triggers is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_reset_triggers() directly.
YANG Description: This container defines state for ISIS Overload Bit reset triggers
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=reset_triggers.reset_triggers,
is_container="container",
yang_name="reset-triggers",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """reset_triggers must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=reset_triggers.reset_triggers, is_container='container', yang_name="reset-triggers", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__reset_triggers = t
if hasattr(self, "_set"):
self._set()
def _unset_reset_triggers(self):
self.__reset_triggers = YANGDynClass(
base=reset_triggers.reset_triggers,
is_container="container",
yang_name="reset-triggers",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
reset_triggers = __builtin__.property(_get_reset_triggers, _set_reset_triggers)
_pyangbind_elements = OrderedDict(
[("config", config), ("state", state), ("reset_triggers", reset_triggers)]
)
from . import config
from . import state
from . import reset_triggers
class overload_bit(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/global/lsp-bit/overload-bit. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines Overload Bit configuration.
"""
__slots__ = (
"_path_helper", "_extmethods", "__config", "__state", "__reset_triggers"
)
_yang_name = "overload-bit"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__reset_triggers = YANGDynClass(
base=reset_triggers.reset_triggers,
is_container="container",
yang_name="reset-triggers",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"global",
"lsp-bit",
"overload-bit",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/config (container)
YANG Description: This container defines ISIS Overload Bit configuration.
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: This container defines ISIS Overload Bit configuration.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/state (container)
YANG Description: This container defines state for ISIS Overload Bit.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: This container defines state for ISIS Overload Bit.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_reset_triggers(self):
"""
Getter method for reset_triggers, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/reset_triggers (container)
        YANG Description: This container defines state for ISIS Overload Bit reset triggers.
"""
return self.__reset_triggers
def _set_reset_triggers(self, v, load=False):
"""
Setter method for reset_triggers, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/reset_triggers (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_reset_triggers is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_reset_triggers() directly.
        YANG Description: This container defines state for ISIS Overload Bit reset triggers.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=reset_triggers.reset_triggers,
is_container="container",
yang_name="reset-triggers",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """reset_triggers must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=reset_triggers.reset_triggers, is_container='container', yang_name="reset-triggers", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__reset_triggers = t
if hasattr(self, "_set"):
self._set()
def _unset_reset_triggers(self):
self.__reset_triggers = YANGDynClass(
base=reset_triggers.reset_triggers,
is_container="container",
yang_name="reset-triggers",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
reset_triggers = __builtin__.property(_get_reset_triggers, _set_reset_triggers)
_pyangbind_elements = OrderedDict(
[("config", config), ("state", state), ("reset_triggers", reset_triggers)]
)
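
# ---------------------------------------------------------------------------
# Illustrative usage sketch (assumption; not part of the pyangbind-generated
# code). Leaf names on the `config` container, such as `set_bit`, come from
# the OpenConfig ISIS model and are shown here only for illustration:
#
#     ob = overload_bit()
#     ob.config.set_bit = True          # request that the overload bit be set
#     print("/".join(ob._path()))       # YANG data path of this node
#
# Operational (config: false) data would be populated by a backend calling the
# private setters directly, e.g. ob._set_state(state_obj).
# ---------------------------------------------------------------------------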