Dataset schema (one record per source file):

  column     type            range / cardinality
  repo_name  stringlengths   5 to 100 characters
  path       stringlengths   4 to 375 characters
  copies     stringclasses   991 distinct values
  size       stringlengths   4 to 7 characters
  content    stringlengths   666 characters to 1M
  license    stringclasses   15 distinct values
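A minimal sketch of how a dump with this schema could be loaded and queried; the file name "data.parquet" and the pandas route are assumptions, not part of the dataset card:

import pandas as pd

# Hypothetical shard name; the actual files are not named in this dump.
df = pd.read_parquet("data.parquet")

# All six columns are strings, so `size` must be cast before numeric filtering.
small_permissive = df[(df["size"].astype(int) < 5000) &
                      (df["license"].isin(["mit", "bsd-3-clause", "apache-2.0"]))]
print(small_permissive[["repo_name", "path", "license"]])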
repo_name: mattias-ohlsson/anaconda
path: tests/pyanaconda_test/vnc_test.py
copies: 2
size: 4102
content:

#!/usr/bin/python

import mock
import os


class VncTest(mock.TestCase):

    def setUp(self):
        self.setupModules(["_isys", "block", "logging", "ConfigParser"])
        self.fs = mock.DiskIO()

        import pyanaconda
        pyanaconda.anaconda_log = mock.Mock()

        self.OK = 22

        import pyanaconda.vnc
        pyanaconda.vnc.log = mock.Mock()
        pyanaconda.vnc.os = mock.Mock()
        pyanaconda.vnc.subprocess = mock.Mock()
        pyanaconda.vnc.subprocess.Popen().communicate.return_value = (1, 2)
        pyanaconda.vnc.subprocess.Popen().returncode = self.OK
        pyanaconda.vnc.open = self.fs.open

        self.ROOT = '/'
        self.DISPLAY = '2'
        self.DESKTOP = 'Desktop'
        self.PASS = ''
        self.LOG_FILE = '/tmp/vnc.log'
        self.PW_FILE = '/tmp/vncpassword'
        self.VNCCONNECTHOST = 'host'

    def tearDown(self):
        self.tearDownModules()

    def set_vnc_password_1_test(self):
        import pyanaconda.vnc
        server = pyanaconda.vnc.VncServer()
        pyanaconda.vnc.iutil = mock.Mock()
        pyanaconda.vnc.os.pipe.return_value = (1, 2)

        server.setVNCPassword()
        self.assertEqual(
            pyanaconda.vnc.iutil.execWithRedirect.call_args_list,
            [(('vncpasswd', ['-f']), {'stdin': 1, 'stdout': '/tmp/vncpassword'})])

    def initialize_test(self):
        import pyanaconda.vnc

        IP = '192.168.0.21'
        HOSTNAME = 'desktop'

        dev = mock.Mock()
        dev.get.return_value = 'eth0'
        pyanaconda.vnc.network = mock.Mock()
        pyanaconda.vnc.network.Network().netdevices = [dev]
        pyanaconda.vnc.network.getActiveNetDevs.return_value = [0]
        pyanaconda.vnc.network.getDefaultHostname.return_value = HOSTNAME

        pyanaconda.vnc.isys = mock.Mock()
        pyanaconda.vnc.isys.getIPAddresses = mock.Mock(return_value=[IP])

        server = pyanaconda.vnc.VncServer(display=self.DISPLAY)
        server.initialize()
        expected = "%s:%s (%s)" % (HOSTNAME, self.DISPLAY, IP)
        self.assertEqual(server.connxinfo, expected)

    def openlogfile_test(self):
        import pyanaconda.vnc
        FILE = 'file'
        pyanaconda.vnc.os.O_RDWR = os.O_RDWR
        pyanaconda.vnc.os.O_CREAT = os.O_CREAT
        pyanaconda.vnc.os.open.return_value = FILE

        server = pyanaconda.vnc.VncServer(log_file=self.LOG_FILE)
        ret = server.openlogfile()
        self.assertEqual(ret, FILE)
        self.assertEqual(pyanaconda.vnc.os.open.call_args,
                         ((self.LOG_FILE, os.O_RDWR | os.O_CREAT), {}))

    def connect_to_view_test(self):
        import pyanaconda.vnc
        pyanaconda.vnc.subprocess.Popen().communicate.return_value = (self.OK, '')

        server = pyanaconda.vnc.VncServer(vncconnecthost=self.VNCCONNECTHOST)
        ret = server.connectToView()
        self.assertTrue(ret)

        params = pyanaconda.vnc.subprocess.Popen.call_args[0][0]
        self.assertTrue(self.VNCCONNECTHOST in params)
        self.assertTrue(params[params.index(self.VNCCONNECTHOST) - 1] == "-connect")

    def start_server_test(self):
        import pyanaconda.vnc
        pyanaconda.vnc.VncServer.initialize = mock.Mock()
        pyanaconda.vnc.VncServer.setVNCPassword = mock.Mock()
        pyanaconda.vnc.VncServer.VNCListen = mock.Mock()
        pyanaconda.vnc.subprocess.Popen().poll.return_value = None
        pyanaconda.vnc.os.environ = {}
        pyanaconda.vnc.time.sleep = mock.Mock()

        server = pyanaconda.vnc.VncServer(root=self.ROOT, display=self.DISPLAY,
                                          desktop=self.DESKTOP, password=self.PASS,
                                          vncconnecthost="")
        server.openlogfile = mock.Mock()
        server.startServer()

        params = pyanaconda.vnc.subprocess.Popen.call_args[0][0]
        self.assertTrue('desktop=%s' % self.DESKTOP in params)
        self.assertTrue(':%s' % self.DISPLAY in params)
        self.assertTrue(pyanaconda.vnc.VncServer.VNCListen.called)
        self.assertTrue("DISPLAY" in pyanaconda.vnc.os.environ)
        self.assertEqual(pyanaconda.vnc.os.environ['DISPLAY'], ":%s" % self.DISPLAY)

license: gpl-2.0
repo_name: SNeuhausen/training_management
path: models/education_plan/education_plan.py
copies: 1
size: 7704
content:

# -*- coding: utf-8 -*-
##############################################################################
#
#    Stephan Neuhausen.
#    Copyright (C) 2014-TODAY Stephan Neuhausen iad.de.
#
##############################################################################
from datetime import date

from openerp import models, fields, api
from openerp.addons.training_management.models.model_names import ModelNames
from openerp.addons.training_management.models.table_names import TableNames
from openerp.addons.training_management.models.selections import ParticipationStateSelection
from openerp.addons.training_management.models.selections import ParticipationCompletionStates
from openerp.addons.training_management.models.selections import ParticipationJobStates
from openerp.addons.training_management.utils.date_utils import DateUtils


class EducationPlan(models.Model):
    _name = ModelNames.EDUCATION_PLAN

    name = fields.Char(size=128, string='Titel', copy=False, required=True)
    measure_id = fields.Many2one(
        comodel_name=ModelNames.MEASURE,
        string='Maßnahme',
        required=True,
        copy=True,
    )
    partner_id = fields.Many2one(
        comodel_name=ModelNames.PARTNER,
        required=True,
        domain=[('is_participant', '=', True)],
        string='Teilnehmer',
        copy=True,
    )
    status = fields.Selection(
        selection=ParticipationStateSelection.get_items(),
        default=ParticipationStateSelection.INTERESTED,
        string="Status",
        copy=False,
    )
    is_active = fields.Boolean(string='Ist aktive Planung', default=False, copy=False)
    customer_number = fields.Char(size=128, string='Kunden-Nr.', copy=True)
    sponsor_id = fields.Many2one(
        comodel_name=ModelNames.PARTNER,
        domain=[('is_sponsor', '=', True), ('is_company', '=', True)],
        string='Kostenträger',
        copy=True,
    )
    sponsor_contact_id = fields.Many2one(
        comodel_name=ModelNames.PARTNER,
        domain=[('is_sponsor', '=', True), ('is_company', '=', False)],
        string='Ansprechpartner',
        copy=True,
    )
    completion_status = fields.Selection(
        selection=ParticipationCompletionStates.get_items(),
        string='Beendigungsgrund',
        copy=False,
    )
    completion_comment = fields.Text(string='Anmerkung', copy=False)
    job_status = fields.Selection(
        selection=ParticipationJobStates.get_items(),
        string='In Arbeit?',
        copy=False,
    )
    job_checked_date = fields.Date(string='Kontakt vom', copy=False)
    show_dates = fields.Boolean(string='keine Modultermine zeigen?')
    show_lectures = fields.Boolean(string='nur Fachinhalte zeigen?')
    teaching_type_id = fields.Many2one(
        comodel_name=ModelNames.TEACHING_TYPE,
        default=lambda self: self._default__teaching_type_id(),
        string='Typ',
        copy=False,
    )
    tag_ids = fields.Many2many(
        comodel_name=ModelNames.TAG,
        relation=TableNames.EDUCATION_PLAN__TAG,
        column1="education_plan_id",
        column2="tag_id",
        string='Tags',
        copy=True,
    )
    planning_date = fields.Date(
        string='Planungsdatum',
        required=True,
        default=lambda self: fields.Date.today(),
        copy=False,
    )
    start_date = fields.Date(string='Startdatum', copy=True)
    end_date = fields.Date(string='Enddatum', copy=True)
    school_day_count = fields.Integer(
        string="Anzahl Unterrichtstage",
        compute="_compute__school_day_count",
        readonly=True,
        store=True,
        copy=False,
    )
    last_presence_date = fields.Date(string='Letzter Anwesenheitstag', copy=False)
    plan_comment = fields.Text(string='Anmerkung', copy=False)
    course_plan_ids = fields.One2many(
        comodel_name=ModelNames.COURSE_PLAN,
        inverse_name="education_plan_id",
        string='Kursplanungen',
        copy=True,
    )
    concrete_degree_ids = fields.One2many(
        comodel_name=ModelNames.CONCRETE_DEGREE,
        inverse_name="education_plan_id",
        string=u"Abschlüsse",
        copy=False,
    )
    enable_teaching_filter = fields.Boolean(default=True, string=u"Auswahl der Lehrinhalte filtern")

    def _default__teaching_type_id(self):
        return self.env[ModelNames.TEACHING_TYPE].get_course_type()

    @api.depends(
        'course_plan_ids.start_date', 'course_plan_ids.end_date',
        'measure_id.special_day_ids.type', 'measure_id.special_day_ids.date'
    )
    def _compute__school_day_count(self):
        for plan in self:
            school_days = plan.compute_school_days()
            plan.school_day_count = len(school_days)

    def compute_school_days(self):
        self.ensure_one()
        school_days = set()
        for course_plan in self.course_plan_ids:
            school_days |= course_plan.compute_school_days()
        return school_days

    @api.multi
    def action_button__toggle_active_state(self):
        for plan in self:
            plan.is_active = not plan.is_active

    @api.multi
    def action_button__compute_dates_from_course_plans(self):
        convert = DateUtils.convert_to_date
        new_start_date = convert(self.start_date, date.max)
        new_end_date = convert(self.end_date, date.min)
        for course_plan in self.course_plan_ids:
            new_start_date = min(new_start_date, convert(course_plan.start_date, date.max))
            new_end_date = max(new_end_date, convert(course_plan.end_date, date.min))
        if new_start_date == date.max:
            new_start_date = False
        if new_end_date == date.min:
            new_end_date = False
        if not self.last_presence_date:
            self.last_presence_date = new_end_date
        self.start_date = new_start_date
        self.end_date = new_end_date

    @api.onchange('last_presence_date', 'start_date', 'end_date')
    def _onchange__change_status_to_completed(self):
        convert = DateUtils.convert_to_date
        last_presence_date = convert(self.last_presence_date)
        start_date = convert(self.start_date)
        end_date = convert(self.end_date)
        if not all([last_presence_date, start_date, end_date]):
            return
        if start_date <= last_presence_date <= end_date:
            self.status = ParticipationStateSelection.ALUMNI

    @api.model
    def create(self, vals):
        plan = super(EducationPlan, self).create(vals)
        if plan.is_active:
            plan.deactivate_other_plans()
        return plan

    @api.multi
    def write(self, vals):
        previous_active_states = dict((plan, plan.is_active) for plan in self)
        result = super(EducationPlan, self).write(vals)
        for plan in self:
            is_active_now = plan.is_active
            was_active_before = previous_active_states[plan]
            if not was_active_before and is_active_now:
                plan.deactivate_other_plans()
        return result

    def deactivate_other_plans(self):
        self.ensure_one()
        other_plans = self.get_other_plans()
        for other_plan in other_plans:
            if other_plan.is_active:
                other_plan.is_active = False

    def get_other_plans(self):
        self.ensure_one()
        participant = self.partner_id
        if participant:
            other_plans = participant.education_plan_ids - self
            return other_plans
        else:
            return self.browse()

    @api.multi
    def copy(self, default=None):
        self.ensure_one()
        default = default or {}
        default["name"] = u"{name} (Kopie)".format(name=self.name)
        return super(EducationPlan, self).copy(default=default)

license: gpl-3.0
repo_name: jezdez/kuma
path: vendor/packages/translate/convert/po2wordfast.py
copies: 25
size: 4112
content:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.

"""Convert Gettext PO localization files to a Wordfast translation memory file.

See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/po2wordfast.html
for examples and usage instructions.
"""

import os

from translate.convert import convert
from translate.misc import wStringIO
from translate.storage import po, wordfast


class po2wordfast:

    def convertfiles(self, inputfile, wffile, sourcelanguage='en', targetlanguage=None):
        """converts a .po file (possibly many) to a Wordfast TM file"""
        inputstore = po.pofile(inputfile)
        for inunit in inputstore.units:
            if inunit.isheader() or inunit.isblank() or not inunit.istranslated():
                continue
            source = inunit.source
            target = inunit.target
            newunit = wffile.addsourceunit(source)
            newunit.target = target
            newunit.targetlang = targetlanguage


def convertpo(inputfile, outputfile, templatefile, sourcelanguage='en', targetlanguage=None):
    """reads in stdin using fromfileclass, converts using convertorclass, writes to stdout"""
    convertor = po2wordfast()
    outputfile.wffile.header.targetlang = targetlanguage
    convertor.convertfiles(inputfile, outputfile.wffile, sourcelanguage, targetlanguage)
    return 1


class wfmultifile:

    def __init__(self, filename, mode=None):
        """initialises wfmultifile from a seekable inputfile or writable outputfile"""
        self.filename = filename
        if mode is None:
            if os.path.exists(filename):
                mode = 'r'
            else:
                mode = 'w'
        self.mode = mode
        self.multifilename = os.path.splitext(filename)[0]
        self.wffile = wordfast.WordfastTMFile()

    def openoutputfile(self, subfile):
        """returns a pseudo-file object for the given subfile"""

        def onclose(contents):
            pass

        outputfile = wStringIO.CatchStringOutput(onclose)
        outputfile.filename = subfile
        outputfile.wffile = self.wffile
        return outputfile


class WfOptionParser(convert.ArchiveConvertOptionParser):

    def recursiveprocess(self, options):
        if not options.targetlanguage:
            raise ValueError("You must specify the target language")
        super(WfOptionParser, self).recursiveprocess(options)
        self.output = open(options.output, 'w')
        #options.outputarchive.wffile.setsourcelanguage(options.sourcelanguage)
        self.output.write(str(options.outputarchive.wffile))


def main(argv=None):
    formats = {"po": ("txt", convertpo), ("po", "txt"): ("txt", convertpo)}
    archiveformats = {(None, "output"): wfmultifile, (None, "template"): wfmultifile}
    parser = WfOptionParser(formats, usepots=False, usetemplates=False,
                            description=__doc__, archiveformats=archiveformats)
    parser.add_option("-l", "--language", dest="targetlanguage", default=None,
                      help="set target language code (e.g. af-ZA) [required]",
                      metavar="LANG")
    parser.add_option("", "--source-language", dest="sourcelanguage", default='en',
                      help="set source language code (default: en)", metavar="LANG")
    parser.passthrough.append("sourcelanguage")
    parser.passthrough.append("targetlanguage")
    parser.run(argv)


if __name__ == '__main__':
    main()

license: mpl-2.0
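Because main() takes an argv list, the converter above can be driven from Python as well as the shell. A sketch, assuming the translate toolkit's standard -i/-o input and output options; the file names are placeholders:

from translate.convert.po2wordfast import main

# Roughly equivalent to the shell invocation:
#   po2wordfast -l af-ZA -i messages.po -o memory.txt
# "messages.po" and "memory.txt" are placeholder file names.
main(["-l", "af-ZA", "-i", "messages.po", "-o", "memory.txt"])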
repo_name: dongjoon-hyun/spark
path: python/pyspark/ml/__init__.py
copies: 25
size: 1530
content:

#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""
DataFrame-based machine learning APIs to let users quickly assemble and
configure practical machine learning pipelines.
"""
from pyspark.ml.base import Estimator, Model, Predictor, PredictionModel, \
    Transformer, UnaryTransformer
from pyspark.ml.pipeline import Pipeline, PipelineModel
from pyspark.ml import classification, clustering, evaluation, feature, fpm, \
    image, recommendation, regression, stat, tuning, util, linalg, param

__all__ = [
    "Transformer", "UnaryTransformer", "Estimator", "Model",
    "Predictor", "PredictionModel", "Pipeline", "PipelineModel",
    "classification", "clustering", "evaluation", "feature", "fpm", "image",
    "recommendation", "regression", "stat", "tuning", "util", "linalg", "param",
]

license: apache-2.0
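The docstring above summarizes what this package is for: Transformers and Estimators composed into a Pipeline. A minimal sketch of that assembly; the toy DataFrame is illustrative, any DataFrame with "text" and "label" columns works:

from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# Toy training data with the two columns the stages below expect.
training = spark.createDataFrame(
    [("spark is great", 1.0), ("hadoop mapreduce", 0.0)],
    ["text", "label"])

# Two Transformers and an Estimator chained into a single Estimator.
tokenizer = Tokenizer(inputCol="text", outputCol="words")
hashing_tf = HashingTF(inputCol="words", outputCol="features")
lr = LogisticRegression(maxIter=10)
pipeline = Pipeline(stages=[tokenizer, hashing_tf, lr])

# fit() returns a PipelineModel, which is itself a Transformer.
model = pipeline.fit(training)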
repo_name: waidyanatha/sambro-eden
path: private/templates/EUROSHA/config.py
copies: 2
size: 19304
content:

# -*- coding: utf-8 -*-

try:
    # Python 2.7
    from collections import OrderedDict
except:
    # Python 2.6
    from gluon.contrib.simplejson.ordered_dict import OrderedDict

from gluon import current
from gluon.storage import Storage
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentCheckbox

settings = current.deployment_settings
T = current.T

""" Template settings for EUROSHA: European Open Source Humanitarian Aid """

# Pre-Populate
settings.base.prepopulate = ["EUROSHA"]

settings.base.system_name = T("EUROSHA Humanitarian Data Registry")
settings.base.system_name_short = T("EUROSHA")

# Theme (folder to use for views/layout.html)
settings.base.theme = "EUROSHA"

# Auth settings
# Do new users need to verify their email address?
settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
settings.auth.registration_requires_approval = True
# Uncomment this to request the Organisation when a user registers
settings.auth.registration_requests_organisation = True

settings.auth.role_modules = OrderedDict([
    ("transport", "Airports and Seaports"),
    ("hms", "Hospitals"),
    ("org", "Organizations, Offices, and Facilities"),
    ("inv", "Warehouses"),
    ("staff", "Staff"),
    ("vol", "Volunteers"),
    ("project", "Projects"),
    #("asset", "Assets"),
    #("vehicle", "Vehicles"),
])

# L10n settings
settings.L10n.languages = OrderedDict([
    ("en", "English"),
    ("fr", "French"),
])
# Default timezone for users
settings.L10n.utc_offset = "UTC +0100"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","

# Finance settings
settings.fin.currencies = {
    "EUR" : T("Euros"),
    "GBP" : T("Great British Pounds"),
    "USD" : T("United States Dollars"),
}

# Security Policy
settings.security.policy = 8 # Delegations
settings.security.map = True

# Realm Entity (old)
#def eurosha_realm_entity(table, row):
#    user = current.auth.user
#    if user is not None:
#        return current.s3db.pr_get_pe_id("org_organisation",
#                                         user.organisation_id)
#    else:
#        return None
#settings.auth.realm_entity = eurosha_realm_entity

def eurosha_realm_entity(table, row):
    """
        Assign a Realm Entity to records
    """

    tablename = table._tablename

    # Do not apply realms for Master Data
    # @ToDo: Restore Realms and add a role/functionality support for Master Data
    if tablename in [#"hrm_certificate",
                     "hrm_department",
                     "hrm_job_title",
                     "hrm_course",
                     "hrm_programme",
                     ]:
        return None

    db = current.db
    s3db = current.s3db

    # Entity reference fields
    EID = "pe_id"
    #OID = "organisation_id"
    SID = "site_id"
    #GID = "group_id"
    PID = "person_id"

    # Owner Entity Foreign Key
    realm_entity_fks = dict(pr_contact = EID,
                            pr_physical_description = EID,
                            pr_address = EID,
                            pr_image = EID,
                            pr_identity = PID,
                            pr_education = PID,
                            pr_note = PID,
                            hrm_human_resource = SID,
                            inv_recv = SID,
                            inv_recv_item = "req_id",
                            inv_send = SID,
                            inv_track_item = "track_org_id",
                            inv_adj_item = "adj_id",
                            req_req_item = "req_id"
                            )

    # Default Foreign Keys (ordered by priority)
    default_fks = ["catalog_id",
                   "project_id",
                   "project_location_id"
                   ]

    # Link Tables
    realm_entity_link_table = dict(
        project_task = Storage(tablename = "project_task_project",
                               link_key = "task_id"
                               )
        )
    if tablename in realm_entity_link_table:
        # Replace row with the record from the link table
        link_table = realm_entity_link_table[tablename]
        table = s3db[link_table.tablename]
        rows = db(table[link_table.link_key] == row.id).select(table.id,
                                                               limitby=(0, 1))
        if rows:
            # Update not Create
            row = rows.first()

    # Check if there is a FK to inherit the realm_entity
    realm_entity = 0
    fk = realm_entity_fks.get(tablename, None)
    for default_fk in [fk] + default_fks:
        if default_fk in table.fields:
            fk = default_fk
            # Inherit realm_entity from parent record
            if fk == EID:
                ftable = s3db.pr_person
                query = ftable[EID] == row[EID]
            else:
                ftablename = table[fk].type[10:] # reference tablename
                ftable = s3db[ftablename]
                query = (table.id == row.id) & \
                        (table[fk] == ftable.id)
            record = db(query).select(ftable.realm_entity,
                                      limitby=(0, 1)).first()
            if record:
                realm_entity = record.realm_entity
                break
            #else:
                # Continue to loop through the rest of the default_fks

    # Fall back to default get_realm_entity function
    # EUROSHA should never use User organisation (since volunteers editing on behalf of other Orgs)
    #use_user_organisation = False
    ## Suppliers & Partners are owned by the user's organisation
    #if realm_entity == 0 and tablename == "org_organisation":
    #    ott = s3db.org_organisation_type
    #    row = table[row.id]
    #    row = db(table.organisation_type_id == ott.id).select(ott.name,
    #                                                          limitby=(0, 1)
    #                                                          ).first()
    #
    #    if row and row.name != "Red Cross / Red Crescent":
    #        use_user_organisation = True
    ## Groups are owned by the user's organisation
    #elif tablename in ["pr_group"]:
    #    use_user_organisation = True

    #user = current.auth.user
    #if use_user_organisation and user:
    #    # @ToDo - this might cause issues if the user's org is different from the realm that gave them permissions to create the Org
    #    realm_entity = s3db.pr_get_pe_id("org_organisation",
    #                                     user.organisation_id)

    return realm_entity

settings.auth.realm_entity = eurosha_realm_entity

# Set this if there will be multiple areas in which work is being done,
# and a menu to select among them is wanted.
settings.gis.menu = "Country"

# PoIs to export in KML/OSM feeds from Admin locations
settings.gis.poi_resources = ["cr_shelter", "hms_hospital", "org_office",
                              "transport_airport", "transport_seaport"
                              ]

# Enable this for a UN-style deployment
settings.ui.cluster = True

settings.frontpage.rss = [
    {"title": "Blog",
     "url": "http://eurosha-volunteers-blog.org/feed/"
     }
]

# Organisation Management
# Uncomment to add summary fields for Organisations/Offices for
# National/International staff
settings.org.summary = True

# HRM
# Uncomment to allow HRs to have multiple Job Titles
settings.hrm.multiple_job_titles = True
# Uncomment to disable Staff experience
settings.hrm.staff_experience = False
# Uncomment to disable Volunteer experience
settings.hrm.vol_experience = False
# Uncomment to disable the use of HR Certificates
settings.hrm.use_certificates = False
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Description
settings.hrm.use_description = False
# Uncomment to disable the use of HR ID
settings.hrm.use_id = False
# Uncomment to disable the use of HR Skills
settings.hrm.use_skills = False
# Uncomment to disable the use of HR Trainings
settings.hrm.use_trainings = False

# Projects
# Uncomment this to use settings suitable for a global/regional organisation (e.g. DRR)
settings.project.mode_3w = True
# Uncomment this to use Codes for projects
settings.project.codes = True
# Uncomment this to call project locations 'Communities'
#settings.project.community = True
# Uncomment this to use multiple Budgets per project
settings.project.multiple_budgets = True
# Uncomment this to use multiple Organisations per project
settings.project.multiple_organisations = True
# Uncomment this to customise
#settings.project.organisation_roles = {
#    1: T("Host National Society"),
#    2: T("Partner National Society"),
#    3: T("Donor"),
#    #4: T("Customer"), # T("Beneficiary")?
#    5: T("Partner")
#}

# -----------------------------------------------------------------------------
def customize_org_organisation(**attr):

    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        if r.interactive or r.representation.lower() == "aadata":
            s3db = current.s3db
            list_fields = ["id",
                           "name",
                           "acronym",
                           "organisation_type_id",
                           (T("Clusters"), "sector.name"),
                           "country",
                           "website"
                           ]
            s3db.configure("org_organisation", list_fields=list_fields)

        if r.interactive:
            crud_form = S3SQLCustomForm(
                "name",
                "acronym",
                "organisation_type_id",
                "region",
                "country",
                S3SQLInlineComponentCheckbox(
                    "sector",
                    label = T("Clusters"),
                    field = "sector_id",
                    cols = 3,
                ),
                "phone",
                "website",
                "year",
                "logo",
                "comments",
            )
            s3db.configure("org_organisation", crud_form=crud_form)

        return result
    s3.prep = custom_prep

    return attr

settings.ui.customize_org_organisation = customize_org_organisation

# -----------------------------------------------------------------------------
settings.ui.crud_form_project_project = S3SQLCustomForm(
    "organisation_id",
    "name",
    "code",
    "description",
    "status_id",
    "start_date",
    "end_date",
    #S3SQLInlineComponentCheckbox(
    #    "hazard",
    #    label = T("Hazards"),
    #    field = "hazard_id",
    #    cols = 4,
    #),
    S3SQLInlineComponentCheckbox(
        "sector",
        label = T("Sectors"),
        field = "sector_id",
        cols = 4,
    ),
    #S3SQLInlineComponent(
    #    "location",
    #    label = T("Locations"),
    #    fields = ["location_id"],
    #),
    S3SQLInlineComponentCheckbox(
        "theme",
        label = T("Themes"),
        field = "theme_id",
        cols = 4,
        # Filter Theme by Sector
        #filter = {"linktable": "project_theme_sector",
        #          "lkey": "theme_id",
        #          "rkey": "sector_id",
        #          },
        #script = '''
#S3OptionsFilter({
# 'triggerName':'defaultsector-sector_id',
# 'targetName':'defaulttheme-theme_id',
# 'targetWidget':'defaulttheme-theme_id_widget',
# 'lookupResource':'theme',
# 'lookupURL':S3.Ap.concat('/project/theme_sector_widget?sector_ids='),
# 'getWidgetHTML':true,
# 'showEmptyField':false
#})'''
    ),
    #"drr.hfa",
    "objectives",
    "human_resource_id",
    # Partner Orgs
    #S3SQLInlineComponent(
    #    "organisation",
    #    name = "partner",
    #    label = T("Partner Organizations"),
    #    fields = ["organisation_id",
    #              "comments",
    #              ],
    #    filterby = dict(field = "role",
    #                    options = "2"
    #                    )
    #),
    # Donors
    #S3SQLInlineComponent(
    #    "organisation",
    #    name = "donor",
    #    label = T("Donor(s)"),
    #    fields = ["organisation_id",
    #              "amount",
    #              "currency"],
    #    filterby = dict(field = "role",
    #                    options = "3"
    #                    )
    #),
    #"budget",
    #"currency",
    "comments",
)

settings.ui.crud_form_project_location = S3SQLCustomForm(
    "project_id",
    "location_id",
    # @ToDo: Grouped Checkboxes
    S3SQLInlineComponentCheckbox(
        "activity_type",
        label = T("Activity Types"),
        field = "activity_type_id",
        cols = 3,
        # Filter Activity Type by Sector
        #filter = {"linktable": "project_activity_type_sector",
        #          "lkey": "activity_type_id",
        #          "rkey": "sector_id",
        #          "lookuptable": "project_project",
        #          "lookupkey": "project_id",
        #          },
    ),
    "comments",
)

# Comment/uncomment modules here to disable/enable them
settings.modules = OrderedDict([
    # Core modules which shouldn't be disabled
    ("default", Storage(
        name_nice = T("Home"),
        restricted = False, # Use ACLs to control access to this module
        access = None,      # All Users (inc Anonymous) can see this module in the default menu & access the controller
        module_type = None  # This item is not shown in the menu
    )),
    ("admin", Storage(
        name_nice = T("Administration"),
        #description = "Site Administration",
        restricted = True,
        access = "|1|",     # Only Administrators can see this module in the default menu & access the controller
        module_type = None  # This item is handled separately for the menu
    )),
    ("appadmin", Storage(
        name_nice = T("Administration"),
        #description = "Site Administration",
        restricted = True,
        module_type = None  # No Menu
    )),
    ("errors", Storage(
        name_nice = T("Ticket Viewer"),
        #description = "Needed for Breadcrumbs",
        restricted = False,
        module_type = None  # No Menu
    )),
    ("sync", Storage(
        name_nice = T("Synchronization"),
        #description = "Synchronization",
        restricted = True,
        access = "|1|",     # Only Administrators can see this module in the default menu & access the controller
        module_type = None  # This item is handled separately for the menu
    )),
    ("translate", Storage(
        name_nice = T("Translation Functionality"),
        #description = "Selective translation of strings based on module.",
        module_type = None,
    )),
    ("gis", Storage(
        name_nice = T("Map"),
        #description = "Situation Awareness & Geospatial Analysis",
        restricted = True,
        module_type = 1,
    )),
    ("pr", Storage(
        name_nice = T("Person Registry"),
        #description = "Central point to record details on People",
        restricted = True,
        access = "|1|",     # Only Administrators can see this module in the default menu (access to controller is possible to all still)
        module_type = None
    )),
    ("org", Storage(
        name_nice = T("Organizations"),
        #description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
        restricted = True,
        module_type = 2
    )),
    # All modules below here should be possible to disable safely
    ("hrm", Storage(
        name_nice = T("Staff"),
        #description = "Human Resources Management",
        restricted = True,
        module_type = None,
    )),
    ("cms", Storage(
        name_nice = T("Content Management"),
        #description = "Content Management System",
        restricted = True,
        module_type = 10,
    )),
    ("doc", Storage(
        name_nice = T("Documents"),
        #description = "A library of digital resources, such as photos, documents and reports",
        restricted = True,
        module_type = 10,
    )),
    ("msg", Storage(
        name_nice = T("Messaging"),
        #description = "Sends & Receives Alerts via Email & SMS",
        restricted = True,
        # The user-visible functionality of this module isn't normally required. Rather, its main purpose is to be accessed from other modules.
        module_type = None,
    )),
    ("supply", Storage(
        name_nice = T("Supply Chain Management"),
        #description = "Used within Inventory Management, Request Management and Asset Management",
        restricted = True,
        module_type = None, # Not displayed
    )),
    ("inv", Storage(
        name_nice = T("Warehouses"),
        #description = "Receiving and Sending Items",
        restricted = True,
        module_type = 4
    )),
    #("asset", Storage(
    #    name_nice = T("Assets"),
    #    #description = "Recording and Assigning Assets",
    #    restricted = True,
    #    module_type = 5,
    #)),
    # Vehicle depends on Assets
    #("vehicle", Storage(
    #    name_nice = T("Vehicles"),
    #    #description = "Manage Vehicles",
    #    restricted = True,
    #    module_type = 6,
    #)),
    ("project", Storage(
        name_nice = T("Projects"),
        #description = "Tracking of Projects, Activities and Tasks",
        restricted = True,
        module_type = 7
    )),
    ("cr", Storage(
        name_nice = T("Shelters"),
        #description = "Tracks the location, capacity and breakdown of victims in Shelters",
        restricted = True,
        module_type = 10
    )),
    ("hms", Storage(
        name_nice = T("Hospitals"),
        #description = "Helps to monitor status of hospitals",
        restricted = True,
        module_type = 3
    )),
    ("transport", Storage(
        name_nice = T("Transport"),
        restricted = True,
        module_type = 10,
    )),
    ("stats", Storage(
        name_nice = "Stats",
        #description = "Needed for Project Beneficiaries",
        restricted = True,
        module_type = None
    )),
])

license: mit
repo_name: TaintTrap/platform_external_chromium
path: net/tools/testserver/xmppserver.py
copies: 67
size: 16907
content:

#!/usr/bin/python2.4
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A bare-bones and non-compliant XMPP server.

Just enough of the protocol is implemented to get it to work with
Chrome's sync notification system.
"""

import asynchat
import asyncore
import base64
import re
import socket
from xml.dom import minidom

# pychecker complains about the use of fileno(), which is implemented
# by asyncore by forwarding to an internal object via __getattr__.
__pychecker__ = 'no-classattr'


class Error(Exception):
  """Error class for this module."""
  pass


class UnexpectedXml(Error):
  """Raised when an unexpected XML element has been encountered."""

  def __init__(self, xml_element):
    xml_text = xml_element.toxml()
    Error.__init__(self, 'Unexpected XML element', xml_text)


def ParseXml(xml_string):
  """Parses the given string as XML and returns a minidom element object."""
  dom = minidom.parseString(xml_string)

  # minidom handles xmlns specially, but there's a bug where it sets
  # the attribute value to None, which causes toxml() or toprettyxml()
  # to break.
  def FixMinidomXmlnsBug(xml_element):
    if xml_element.getAttribute('xmlns') is None:
      xml_element.setAttribute('xmlns', '')

  def ApplyToAllDescendantElements(xml_element, fn):
    fn(xml_element)
    for node in xml_element.childNodes:
      if node.nodeType == node.ELEMENT_NODE:
        ApplyToAllDescendantElements(node, fn)

  root = dom.documentElement
  ApplyToAllDescendantElements(root, FixMinidomXmlnsBug)
  return root


def CloneXml(xml):
  """Returns a deep copy of the given XML element.

  Args:
    xml: The XML element, which should be something returned from
         ParseXml() (i.e., a root element).
  """
  return xml.ownerDocument.cloneNode(True).documentElement


class StanzaParser(object):
  """A hacky incremental XML parser.

  StanzaParser consumes data incrementally via FeedString() and feeds its
  delegate complete parsed stanzas (i.e., XML documents) via FeedStanza().
  Any stanzas passed to FeedStanza() are unlinked after the callback is
  done.

  Use like so:

  class MyClass(object):
    ...
    def __init__(self, ...):
      ...
      self._parser = StanzaParser(self)
      ...

    def SomeFunction(self, ...):
      ...
      self._parser.FeedString(some_data)
      ...

    def FeedStanza(self, stanza):
      ...
      print stanza.toprettyxml()
      ...
  """

  # NOTE(akalin): The following regexps are naive, but necessary since
  # none of the existing Python 2.4/2.5 XML libraries support
  # incremental parsing.  This works well enough for our purposes.
  #
  # The regexps below assume that any present XML element starts at
  # the beginning of the string, but there may be trailing whitespace.

  # Matches an opening stream tag (e.g., '<stream:stream foo="bar">')
  # (assumes that the stream XML namespace is defined in the tag).
  _stream_re = re.compile(r'^(<stream:stream [^>]*>)\s*')

  # Matches an empty element tag (e.g., '<foo bar="baz"/>').
  _empty_element_re = re.compile(r'^(<[^>]*/>)\s*')

  # Matches a non-empty element (e.g., '<foo bar="baz">quux</foo>').
  # Does *not* handle nested elements.
  _non_empty_element_re = re.compile(r'^(<([^ >]*)[^>]*>.*?</\2>)\s*')

  # The closing tag for a stream tag.  We have to insert this
  # ourselves since all XML stanzas are children of the stream tag,
  # which is never closed until the connection is closed.
  _stream_suffix = '</stream:stream>'

  def __init__(self, delegate):
    self._buffer = ''
    self._delegate = delegate

  def FeedString(self, data):
    """Consumes the given string data, possibly feeding one or more
    stanzas to the delegate.
    """
    self._buffer += data
    while (self._ProcessBuffer(self._stream_re, self._stream_suffix) or
           self._ProcessBuffer(self._empty_element_re) or
           self._ProcessBuffer(self._non_empty_element_re)):
      pass

  def _ProcessBuffer(self, regexp, xml_suffix=''):
    """If the buffer matches the given regexp, removes the match from
    the buffer, appends the given suffix, parses it, and feeds it to
    the delegate.

    Returns:
      Whether or not the buffer matched the given regexp.
    """
    results = regexp.match(self._buffer)
    if not results:
      return False
    xml_text = self._buffer[:results.end()] + xml_suffix
    self._buffer = self._buffer[results.end():]
    stanza = ParseXml(xml_text)
    self._delegate.FeedStanza(stanza)
    # Needed because stanza may have cycles.
    stanza.unlink()
    return True


class Jid(object):
  """Simple struct for an XMPP jid (essentially an e-mail address
  with an optional resource string).
  """

  def __init__(self, username, domain, resource=''):
    self.username = username
    self.domain = domain
    self.resource = resource

  def __str__(self):
    jid_str = "%s@%s" % (self.username, self.domain)
    if self.resource:
      jid_str += '/' + self.resource
    return jid_str

  def GetBareJid(self):
    return Jid(self.username, self.domain)


class IdGenerator(object):
  """Simple class to generate unique IDs for XMPP messages."""

  def __init__(self, prefix):
    self._prefix = prefix
    self._id = 0

  def GetNextId(self):
    next_id = "%s.%s" % (self._prefix, self._id)
    self._id += 1
    return next_id


class HandshakeTask(object):
  """Class to handle the initial handshake with a connected XMPP client."""

  # The handshake states in order.
  (_INITIAL_STREAM_NEEDED,
   _AUTH_NEEDED,
   _AUTH_STREAM_NEEDED,
   _BIND_NEEDED,
   _SESSION_NEEDED,
   _FINISHED) = range(6)

  # Used when in the _INITIAL_STREAM_NEEDED and _AUTH_STREAM_NEEDED
  # states.  Not an XML object as it's only the opening tag.
  #
  # The from and id attributes are filled in later.
  _STREAM_DATA = (
    '<stream:stream from="%s" id="%s" '
    'version="1.0" xmlns:stream="http://etherx.jabber.org/streams" '
    'xmlns="jabber:client">')

  # Used when in the _INITIAL_STREAM_NEEDED state.
  _AUTH_STANZA = ParseXml(
    '<stream:features xmlns:stream="http://etherx.jabber.org/streams">'
    '  <mechanisms xmlns="urn:ietf:params:xml:ns:xmpp-sasl">'
    '    <mechanism>PLAIN</mechanism>'
    '    <mechanism>X-GOOGLE-TOKEN</mechanism>'
    '  </mechanisms>'
    '</stream:features>')

  # Used when in the _AUTH_NEEDED state.
  _AUTH_SUCCESS_STANZA = ParseXml(
    '<success xmlns="urn:ietf:params:xml:ns:xmpp-sasl"/>')

  # Used when in the _AUTH_STREAM_NEEDED state.
  _BIND_STANZA = ParseXml(
    '<stream:features xmlns:stream="http://etherx.jabber.org/streams">'
    '  <bind xmlns="urn:ietf:params:xml:ns:xmpp-bind"/>'
    '  <session xmlns="urn:ietf:params:xml:ns:xmpp-session"/>'
    '</stream:features>')

  # Used when in the _BIND_NEEDED state.
  #
  # The id and jid attributes are filled in later.
  _BIND_RESULT_STANZA = ParseXml(
    '<iq id="" type="result">'
    '  <bind xmlns="urn:ietf:params:xml:ns:xmpp-bind">'
    '    <jid/>'
    '  </bind>'
    '</iq>')

  # Used when in the _SESSION_NEEDED state.
  #
  # The id attribute is filled in later.
  _IQ_RESPONSE_STANZA = ParseXml('<iq id="" type="result"/>')

  def __init__(self, connection, resource_prefix):
    self._connection = connection
    self._id_generator = IdGenerator(resource_prefix)
    self._username = ''
    self._domain = ''
    self._jid = None
    self._resource_prefix = resource_prefix
    self._state = self._INITIAL_STREAM_NEEDED

  def FeedStanza(self, stanza):
    """Inspects the given stanza and changes the handshake state if needed.

    Called when a stanza is received from the client.  Inspects the
    stanza to make sure it has the expected attributes given the current
    state, advances the state if needed, and sends a reply to the client
    if needed.
    """
    def ExpectStanza(stanza, name):
      if stanza.tagName != name:
        raise UnexpectedXml(stanza)

    def ExpectIq(stanza, type, name):
      ExpectStanza(stanza, 'iq')
      if (stanza.getAttribute('type') != type or
          stanza.firstChild.tagName != name):
        raise UnexpectedXml(stanza)

    def GetStanzaId(stanza):
      return stanza.getAttribute('id')

    def HandleStream(stanza):
      ExpectStanza(stanza, 'stream:stream')
      domain = stanza.getAttribute('to')
      if domain:
        self._domain = domain
      SendStreamData()

    def SendStreamData():
      next_id = self._id_generator.GetNextId()
      stream_data = self._STREAM_DATA % (self._domain, next_id)
      self._connection.SendData(stream_data)

    def GetUserDomain(stanza):
      encoded_username_password = stanza.firstChild.data
      username_password = base64.b64decode(encoded_username_password)
      (_, username_domain, _) = username_password.split('\0')
      # The domain may be omitted.
      #
      # If we were using python 2.5, we'd be able to do:
      #
      #   username, _, domain = username_domain.partition('@')
      #   if not domain:
      #     domain = self._domain
      at_pos = username_domain.find('@')
      if at_pos != -1:
        username = username_domain[:at_pos]
        domain = username_domain[at_pos+1:]
      else:
        username = username_domain
        domain = self._domain
      return (username, domain)

    if self._state == self._INITIAL_STREAM_NEEDED:
      HandleStream(stanza)
      self._connection.SendStanza(self._AUTH_STANZA, False)
      self._state = self._AUTH_NEEDED

    elif self._state == self._AUTH_NEEDED:
      ExpectStanza(stanza, 'auth')
      (self._username, self._domain) = GetUserDomain(stanza)
      self._connection.SendStanza(self._AUTH_SUCCESS_STANZA, False)
      self._state = self._AUTH_STREAM_NEEDED

    elif self._state == self._AUTH_STREAM_NEEDED:
      HandleStream(stanza)
      self._connection.SendStanza(self._BIND_STANZA, False)
      self._state = self._BIND_NEEDED

    elif self._state == self._BIND_NEEDED:
      ExpectIq(stanza, 'set', 'bind')
      stanza_id = GetStanzaId(stanza)
      resource_element = stanza.getElementsByTagName('resource')[0]
      resource = resource_element.firstChild.data
      full_resource = '%s.%s' % (self._resource_prefix, resource)
      response = CloneXml(self._BIND_RESULT_STANZA)
      response.setAttribute('id', stanza_id)
      self._jid = Jid(self._username, self._domain, full_resource)
      jid_text = response.parentNode.createTextNode(str(self._jid))
      response.getElementsByTagName('jid')[0].appendChild(jid_text)
      self._connection.SendStanza(response)
      self._state = self._SESSION_NEEDED

    elif self._state == self._SESSION_NEEDED:
      ExpectIq(stanza, 'set', 'session')
      stanza_id = GetStanzaId(stanza)
      xml = CloneXml(self._IQ_RESPONSE_STANZA)
      xml.setAttribute('id', stanza_id)
      self._connection.SendStanza(xml)
      self._state = self._FINISHED
      self._connection.HandshakeDone(self._jid)


def AddrString(addr):
  return '%s:%d' % addr


class XmppConnection(asynchat.async_chat):
  """A single XMPP client connection.

  This class handles the connection to a single XMPP client (via a
  socket).  It does the XMPP handshake and also implements the (old)
  Google notification protocol.
  """

  # Used for acknowledgements to the client.
  #
  # The from and id attributes are filled in later.
  _IQ_RESPONSE_STANZA = ParseXml('<iq from="" id="" type="result"/>')

  def __init__(self, sock, socket_map, delegate, addr):
    """Starts up the xmpp connection.

    Args:
      sock: The socket to the client.
      socket_map: A map from sockets to their owning objects.
      delegate: The delegate, which is notified when the XMPP
        handshake is successful, when the connection is closed, and
        when a notification has to be broadcast.
      addr: The host/port of the client.
    """
    # We do this because in versions of python < 2.6,
    # async_chat.__init__ doesn't take a map argument nor pass it to
    # dispatcher.__init__.  We rely on the fact that
    # async_chat.__init__ calls dispatcher.__init__ as the last thing
    # it does, and that calling dispatcher.__init__ with socket=None
    # and map=None is essentially a no-op.
    asynchat.async_chat.__init__(self)
    asyncore.dispatcher.__init__(self, sock, socket_map)

    self.set_terminator(None)
    self._delegate = delegate
    self._parser = StanzaParser(self)
    self._jid = None

    self._addr = addr
    addr_str = AddrString(self._addr)
    self._handshake_task = HandshakeTask(self, addr_str)
    print 'Starting connection to %s' % self

  def __str__(self):
    if self._jid:
      return str(self._jid)
    else:
      return AddrString(self._addr)

  # async_chat implementation.

  def collect_incoming_data(self, data):
    self._parser.FeedString(data)

  # This is only here to make pychecker happy.
  def found_terminator(self):
    asynchat.async_chat.found_terminator(self)

  def close(self):
    print "Closing connection to %s" % self
    self._delegate.OnXmppConnectionClosed(self)
    asynchat.async_chat.close(self)

  # Called by self._parser.FeedString().
  def FeedStanza(self, stanza):
    if self._handshake_task:
      self._handshake_task.FeedStanza(stanza)
    elif stanza.tagName == 'iq' and stanza.getAttribute('type') == 'result':
      # Ignore all client acks.
      pass
    elif (stanza.firstChild and
          stanza.firstChild.namespaceURI == 'google:push'):
      self._HandlePushCommand(stanza)
    else:
      raise UnexpectedXml(stanza)

  # Called by self._handshake_task.
  def HandshakeDone(self, jid):
    self._jid = jid
    self._handshake_task = None
    self._delegate.OnXmppHandshakeDone(self)
    print "Handshake done for %s" % self

  def _HandlePushCommand(self, stanza):
    if stanza.tagName == 'iq' and stanza.firstChild.tagName == 'subscribe':
      # Subscription request.
      self._SendIqResponseStanza(stanza)
    elif stanza.tagName == 'message' and stanza.firstChild.tagName == 'push':
      # Send notification request.
      self._delegate.ForwardNotification(self, stanza)
    else:
      raise UnexpectedXml(stanza)

  def _SendIqResponseStanza(self, iq):
    stanza = CloneXml(self._IQ_RESPONSE_STANZA)
    stanza.setAttribute('from', str(self._jid.GetBareJid()))
    stanza.setAttribute('id', iq.getAttribute('id'))
    self.SendStanza(stanza)

  def SendStanza(self, stanza, unlink=True):
    """Sends a stanza to the client.

    Args:
      stanza: The stanza to send.
      unlink: Whether to unlink stanza after sending it.  (Pass in
        False if stanza is a constant.)
    """
    self.SendData(stanza.toxml())
    if unlink:
      stanza.unlink()

  def SendData(self, data):
    """Sends raw data to the client."""
    # We explicitly encode to ascii as that is what the client expects
    # (some minidom library functions return unicode strings).
    self.push(data.encode('ascii'))

  def ForwardNotification(self, notification_stanza):
    """Forwards a notification to the client."""
    notification_stanza.setAttribute('from', str(self._jid.GetBareJid()))
    notification_stanza.setAttribute('to', str(self._jid))
    self.SendStanza(notification_stanza, False)


class XmppServer(asyncore.dispatcher):
  """The main XMPP server class.

  The XMPP server starts accepting connections on the given address
  and spawns off XmppConnection objects for each one.

  Use like so:

    socket_map = {}
    xmpp_server = xmppserver.XmppServer(socket_map, ('127.0.0.1', 5222))
    asyncore.loop(30.0, False, socket_map)
  """

  def __init__(self, socket_map, addr):
    asyncore.dispatcher.__init__(self, None, socket_map)
    self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
    self.set_reuse_addr()
    self.bind(addr)
    self.listen(5)
    self._socket_map = socket_map
    self._connections = set()
    self._handshake_done_connections = set()

  def handle_accept(self):
    (sock, addr) = self.accept()
    xmpp_connection = XmppConnection(sock, self._socket_map, self, addr)
    self._connections.add(xmpp_connection)

  def close(self):
    # A copy is necessary since calling close on each connection
    # removes it from self._connections.
    for connection in self._connections.copy():
      connection.close()
    asyncore.dispatcher.close(self)

  # XmppConnection delegate methods.
  def OnXmppHandshakeDone(self, xmpp_connection):
    self._handshake_done_connections.add(xmpp_connection)

  def OnXmppConnectionClosed(self, xmpp_connection):
    self._connections.discard(xmpp_connection)
    self._handshake_done_connections.discard(xmpp_connection)

  def ForwardNotification(self, unused_xmpp_connection, notification_stanza):
    for connection in self._handshake_done_connections:
      print 'Sending notification to %s' % connection
      connection.ForwardNotification(notification_stanza)

license: bsd-3-clause
repo_name: iandees/all-the-places
path: locations/spiders/lifetime.py
copies: 1
size: 1351
content:

# -*- coding: utf-8 -*-
import scrapy
import json

from locations.items import GeojsonPointItem


class LifetimeFitnessSpider(scrapy.Spider):
    name = "lifetimefitness"
    allowed_domains = ['lifetime.life']
    start_urls = (
        'https://www.lifetime.life/view-all-locations.html',
    )

    def parse(self, response):
        response.selector.remove_namespaces()
        city_urls = response.xpath('//a[@class="btn btn-link btn-xs m-b-sm p-x-0 b-x-xs-0 pull-xs-right"]/@href').extract()
        for path in city_urls:
            yield scrapy.Request(
                "https://www.lifetime.life" + path.strip(),
                callback=self.parse_store,
            )

    def parse_store(self, response):
        json_data = response.xpath('//script[@type="application/ld+json"]/text()').extract_first()
        data = json.loads(json_data)

        properties = {
            'name': data['name'],
            'ref': data['name'],
            'addr_full': data['address']['streetAddress'],
            'city': data['address']['addressLocality'],
            'state': data['address']['addressRegion'],
            'postcode': data['address']['postalCode'],
            'phone': data['telephone'],
            'website': data['url'],
            'lat': data['geo']['latitude'],
            'lon': data['geo']['longitude'],
        }

        yield GeojsonPointItem(**properties)

license: mit
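The spider above can be run with `scrapy crawl lifetimefitness` from the project root, or programmatically. A sketch using Scrapy's CrawlerProcess; the FEEDS export setting assumes a reasonably recent Scrapy version, and the output file name is a placeholder:

from scrapy.crawler import CrawlerProcess

from locations.spiders.lifetime import LifetimeFitnessSpider

# Write each yielded GeojsonPointItem as a JSON line; "lifetime.jl" is
# a placeholder output path.
process = CrawlerProcess(settings={
    "FEEDS": {"lifetime.jl": {"format": "jsonlines"}},
})
process.crawl(LifetimeFitnessSpider)
process.start()  # Blocks until the crawl finishes.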
repo_name: GuillaumeGomez/servo
path: tests/wpt/web-platform-tests/webdriver/support/asserts.py
copies: 23
size: 2715
content:

# WebDriver specification ID: dfn-error-response-data

errors = {
    "element click intercepted": 400,
    "element not selectable": 400,
    "element not interactable": 400,
    "insecure certificate": 400,
    "invalid argument": 400,
    "invalid cookie domain": 400,
    "invalid coordinates": 400,
    "invalid element state": 400,
    "invalid selector": 400,
    "invalid session id": 404,
    "javascript error": 500,
    "move target out of bounds": 500,
    "no such alert": 400,
    "no such cookie": 404,
    "no such element": 404,
    "no such frame": 400,
    "no such window": 400,
    "script timeout": 408,
    "session not created": 500,
    "stale element reference": 400,
    "timeout": 408,
    "unable to set cookie": 500,
    "unable to capture screen": 500,
    "unexpected alert open": 500,
    "unknown command": 404,
    "unknown error": 500,
    "unknown method": 405,
    "unsupported operation": 500,
}


# WebDriver specification ID: dfn-send-an-error
#
# > When required to send an error, with error code, a remote end must run the
# > following steps:
# >
# > 1. Let http status and name be the error response data for error code.
# > 2. Let message be an implementation-defined string containing a
# >    human-readable description of the reason for the error.
# > 3. Let stacktrace be an implementation-defined string containing a stack
# >    trace report of the active stack frames at the time when the error
# >    occurred.
# > 4. Let data be a new JSON Object initialised with the following properties:
# >
# >    error
# >        name
# >    message
# >        message
# >    stacktrace
# >        stacktrace
# >
# > 5. Send a response with status and data as arguments.
def assert_error(response, error_code):
    """Verify that the provided wdclient.Response instance described a valid
    error response as defined by `dfn-send-an-error` and the provided error
    code.

    :param response: wdclient.Response instance
    :param error_code: string value of the expected "error code"
    """
    assert response.status == errors[error_code]
    assert "value" in response.body
    assert response.body["value"]["error"] == error_code
    assert isinstance(response.body["value"]["message"], basestring)
    assert isinstance(response.body["value"]["stacktrace"], basestring)


def assert_success(response, value):
    """Verify that the provided wdclient.Response instance described a valid
    success response and that its body carries the expected value.

    :param response: wdclient.Response instance
    :param value: expected value of the response body
    """
    assert response.status == 200
    assert response.body["value"] == value

license: mpl-2.0
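The two helpers above only touch response.status and response.body, so a duck-typed stand-in is enough to exercise them. A sketch; FakeResponse is illustrative and not part of wdclient, and the import path is inferred from the record's file location:

from support.asserts import assert_error, assert_success


class FakeResponse(object):
    # Stand-in exposing the only two attributes the assertions read;
    # the real wdclient.Response shape is assumed, not shown above.
    def __init__(self, status, body):
        self.status = status
        self.body = body

# A success response must have status 200 and a matching "value".
assert_success(FakeResponse(200, {"value": []}), [])

# An error response must carry the mapped HTTP status plus
# error / message / stacktrace fields under "value".
assert_error(FakeResponse(404, {"value": {"error": "no such element",
                                          "message": "",
                                          "stacktrace": ""}}),
             "no such element")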
repo_name: eduNEXT/edx-platform
path: openedx/core/djangoapps/schedules/migrations/0001_initial.py
copies: 3
size: 1385
content:

import django.utils.timezone
import model_utils.fields
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('student', '0001_squashed_0031_auto_20200317_1122'),
    ]

    operations = [
        migrations.CreateModel(
            name='Schedule',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('active', models.BooleanField(default=True, help_text='Indicates if this schedule is actively used')),
                ('start', models.DateTimeField(help_text='Date this schedule went into effect')),
                ('upgrade_deadline', models.DateTimeField(help_text='Deadline by which the learner must upgrade to a verified seat', null=True, blank=True)),
                ('enrollment', models.OneToOneField(to='student.CourseEnrollment', on_delete=models.CASCADE)),
            ],
            options={
                'verbose_name': 'Schedule',
                'verbose_name_plural': 'Schedules',
            },
        ),
    ]

license: agpl-3.0
repo_name: TheWardoctor/Wardoctors-repo
path: script.module.exodus/lib/resources/lib/sources/en/watchfree.py
copies: 5
size: 8377
content:

# -*- coding: utf-8 -*-

'''
    Exodus Add-on

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''

import re, urllib, urlparse, base64

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import proxy


class source:
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['watchfree.to', 'watchfree.unblockall.org']
        self.base_link = 'http://watchfree.unblockall.org'
        self.moviesearch_link = '/?keyword=%s&search_section=1'
        self.tvsearch_link = '/?keyword=%s&search_section=2'

    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            query = self.moviesearch_link % urllib.quote_plus(cleantitle.query(title))
            query = urlparse.urljoin(self.base_link, query)

            result = str(proxy.request(query, 'free movies'))
            if 'page=2' in result or 'page%3D2' in result:
                result += str(proxy.request(query + '&page=2', 'free movies'))

            result = client.parseDOM(result, 'div', attrs={'class': 'item'})

            title = 'watch' + cleantitle.get(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]

            r = [(proxy.parse(i[0]), i[1]) for i in result]

            match = [i[0] for i in r if title == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]

            match2 = [i[0] for i in r]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:5]:
                try:
                    if len(match) > 0:
                        url = match[0]
                        break
                    r = proxy.request(urlparse.urljoin(self.base_link, i), 'free movies')
                    r = re.findall('(tt\d+)', r)
                    if imdb in r:
                        url = i
                        break
                except:
                    pass

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            query = self.tvsearch_link % urllib.quote_plus(cleantitle.query(tvshowtitle))
            query = urlparse.urljoin(self.base_link, query)

            result = str(proxy.request(query, 'free movies'))
            if 'page=2' in result or 'page%3D2' in result:
                result += str(proxy.request(query + '&page=2', 'free movies'))

            result = client.parseDOM(result, 'div', attrs={'class': 'item'})

            tvshowtitle = 'watch' + cleantitle.get(tvshowtitle)
            years = ['(%s)' % str(year), '(%s)' % str(int(year) + 1), '(%s)' % str(int(year) - 1)]

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if any(x in i[1] for x in years)]

            r = [(proxy.parse(i[0]), i[1]) for i in result]

            match = [i[0] for i in r if tvshowtitle == cleantitle.get(i[1]) and '(%s)' % str(year) in i[1]]

            match2 = [i[0] for i in r]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if match2 == []: return

            for i in match2[:5]:
                try:
                    if len(match) > 0:
                        url = match[0]
                        break
                    r = proxy.request(urlparse.urljoin(self.base_link, i), 'free movies')
                    r = re.findall('(tt\d+)', r)
                    if imdb in r:
                        url = i
                        break
                except:
                    pass

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)

            result = proxy.request(url, 'tv_episode_item')
            result = client.parseDOM(result, 'div', attrs={'class': 'tv_episode_item'})

            title = cleantitle.get(title)
            premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
            premiered = '%s %01d %s' % (premiered[1].replace('01', 'January').replace('02', 'February').replace('03', 'March').replace('04', 'April').replace('05', 'May').replace('06', 'June').replace('07', 'July').replace('08', 'August').replace('09', 'September').replace('10', 'October').replace('11', 'November').replace('12', 'December'), int(premiered[2]), premiered[0])

            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs={'class': 'tv_episode_name'}), client.parseDOM(i, 'span', attrs={'class': 'tv_num_versions'})) for i in result]
            result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
            result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
            result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]

            url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if len(url) == 0: url = [i for i in result if premiered == i[2]]
            if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]

            url = url[0][0]
            url = proxy.parse(url)
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = proxy.request(url, 'link_ite')

            links = client.parseDOM(result, 'table', attrs={'class': 'link_ite.+?'})

            for i in links:
                try:
                    url = client.parseDOM(i, 'a', ret='href')
                    url = [x for x in url if 'gtfo' in x][-1]
                    url = proxy.parse(url)
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)['gtfo'][0]
                    url = base64.b64decode(url)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = host.encode('utf-8')

                    quality = client.parseDOM(i, 'div', attrs={'class': 'quality'})
                    if any(x in ['[CAM]', '[TS]'] for x in quality):
                        quality = 'CAM'
                    else:
                        quality = 'SD'
                    quality = quality.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en',
                                    'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources

    def resolve(self, url):
        return url

license: apache-2.0
dudepare/django
tests/m2m_regress/tests.py
273
4695
from __future__ import unicode_literals

from django.core.exceptions import FieldError
from django.test import TestCase
from django.utils import six

from .models import (
    Entry, Line, Post, RegressionModelSplit, SelfRefer, SelfReferChild,
    SelfReferChildSibling, Tag, TagCollection, Worksheet,
)


class M2MRegressionTests(TestCase):
    def test_multiple_m2m(self):
        # Multiple m2m references to model must be distinguished when
        # accessing the relations through an instance attribute.
        s1 = SelfRefer.objects.create(name='s1')
        s2 = SelfRefer.objects.create(name='s2')
        s3 = SelfRefer.objects.create(name='s3')

        s1.references.add(s2)
        s1.related.add(s3)

        e1 = Entry.objects.create(name='e1')
        t1 = Tag.objects.create(name='t1')
        t2 = Tag.objects.create(name='t2')

        e1.topics.add(t1)
        e1.related.add(t2)

        self.assertQuerysetEqual(s1.references.all(), ["<SelfRefer: s2>"])
        self.assertQuerysetEqual(s1.related.all(), ["<SelfRefer: s3>"])

        self.assertQuerysetEqual(e1.topics.all(), ["<Tag: t1>"])
        self.assertQuerysetEqual(e1.related.all(), ["<Tag: t2>"])

    def test_internal_related_name_not_in_error_msg(self):
        # The secret internal related names for self-referential many-to-many
        # fields shouldn't appear in the list when an error is made.
        six.assertRaisesRegex(
            self, FieldError,
            "Choices are: id, name, references, related, selfreferchild, selfreferchildsibling$",
            lambda: SelfRefer.objects.filter(porcupine='fred')
        )

    def test_m2m_inheritance_symmetry(self):
        # Test to ensure that the relationship between two inherited models
        # with a self-referential m2m field maintains symmetry
        sr_child = SelfReferChild(name="Hanna")
        sr_child.save()

        sr_sibling = SelfReferChildSibling(name="Beth")
        sr_sibling.save()
        sr_child.related.add(sr_sibling)

        self.assertQuerysetEqual(sr_child.related.all(), ["<SelfRefer: Beth>"])
        self.assertQuerysetEqual(sr_sibling.related.all(), ["<SelfRefer: Hanna>"])

    def test_m2m_pk_field_type(self):
        # Regression for #11311 - The primary key for models in a m2m relation
        # doesn't have to be an AutoField
        w = Worksheet(id='abc')
        w.save()
        w.delete()

    def test_add_m2m_with_base_class(self):
        # Regression for #11956 -- You can add an object to a m2m with the
        # base class without causing integrity errors
        t1 = Tag.objects.create(name='t1')
        t2 = Tag.objects.create(name='t2')

        c1 = TagCollection.objects.create(name='c1')
        c1.tags = [t1, t2]

        c1 = TagCollection.objects.get(name='c1')
        self.assertQuerysetEqual(c1.tags.all(), ["<Tag: t1>", "<Tag: t2>"], ordered=False)
        self.assertQuerysetEqual(t1.tag_collections.all(), ["<TagCollection: c1>"])

    def test_manager_class_caching(self):
        e1 = Entry.objects.create()
        e2 = Entry.objects.create()
        t1 = Tag.objects.create()
        t2 = Tag.objects.create()

        # Get same manager twice in a row:
        self.assertIs(t1.entry_set.__class__, t1.entry_set.__class__)
        self.assertIs(e1.topics.__class__, e1.topics.__class__)

        # Get same manager for different instances
        self.assertIs(e1.topics.__class__, e2.topics.__class__)
        self.assertIs(t1.entry_set.__class__, t2.entry_set.__class__)

    def test_m2m_abstract_split(self):
        # Regression for #19236 - an abstract class with a 'split' method
        # causes a TypeError in add_lazy_relation
        m1 = RegressionModelSplit(name='1')
        m1.save()

    def test_assigning_invalid_data_to_m2m_doesnt_clear_existing_relations(self):
        t1 = Tag.objects.create(name='t1')
        t2 = Tag.objects.create(name='t2')
        c1 = TagCollection.objects.create(name='c1')
        c1.tags = [t1, t2]

        with self.assertRaises(TypeError):
            c1.tags = 7

        c1.refresh_from_db()
        self.assertQuerysetEqual(c1.tags.order_by('name'), ["<Tag: t1>", "<Tag: t2>"])

    def test_multiple_forwards_only_m2m(self):
        # Regression for #24505 - Multiple ManyToManyFields to same "to"
        # model with related_name set to '+'.
        foo = Line.objects.create(name='foo')
        bar = Line.objects.create(name='bar')
        post = Post.objects.create()
        post.primary_lines.add(foo)
        post.secondary_lines.add(bar)
        self.assertQuerysetEqual(post.primary_lines.all(), ['<Line: foo>'])
        self.assertQuerysetEqual(post.secondary_lines.all(), ['<Line: bar>'])
bsd-3-clause
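For context on the last regression test above, here is a minimal sketch of the model pair it exercises. The real definitions live in tests/m2m_regress/models.py; apart from the names taken from the test, the field options shown here are an assumption:

# Hypothetical models for test_multiple_forwards_only_m2m (#24505):
# two forward-only ManyToManyFields pointing at the same target model.
from django.db import models

class Line(models.Model):
    name = models.CharField(max_length=100)

class Post(models.Model):
    # related_name='+' disables the reverse accessor on Line, which is
    # the configuration the regression test guards (assumed definition).
    primary_lines = models.ManyToManyField(Line, related_name='+')
    secondary_lines = models.ManyToManyField(Line, related_name='+')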
jordanemedlock/psychtruths
temboo/Library/Google/Plus/Domains/Media/Insert.py
5
6021
# -*- coding: utf-8 -*-

###############################################################################
#
# Insert
# Adds a new media item to an album.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################

from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution

import json

class Insert(Choreography):

    def __init__(self, temboo_session):
        """
        Create a new instance of the Insert Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(Insert, self).__init__(temboo_session, '/Library/Google/Plus/Domains/Media/Insert')

    def new_input_set(self):
        return InsertInputSet()

    def _make_result_set(self, result, path):
        return InsertResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return InsertChoreographyExecution(session, exec_id, path)

class InsertInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the Insert
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth2 process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
        """
        super(InsertInputSet, self)._set_input('AccessToken', value)

    def set_ClientID(self, value):
        """
        Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
        """
        super(InsertInputSet, self)._set_input('ClientID', value)

    def set_ClientSecret(self, value):
        """
        Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
        """
        super(InsertInputSet, self)._set_input('ClientSecret', value)

    def set_Collection(self, value):
        """
        Set the value of the Collection input for this Choreo. ((optional, string) Currently the acceptable values are "cloud". (Upload the media to share on Google+).)
        """
        super(InsertInputSet, self)._set_input('Collection', value)

    def set_ContentType(self, value):
        """
        Set the value of the ContentType input for this Choreo. ((conditional, string) The Content-Type of the file that is being uploaded (i.e. image/jpg). Required when specifying the FileContent input.)
        """
        super(InsertInputSet, self)._set_input('ContentType', value)

    def set_DisplayName(self, value):
        """
        Set the value of the DisplayName input for this Choreo. ((optional, string) The display name for the media. If this parameter is not provided, Google assigns a GUID to the media resource.)
        """
        super(InsertInputSet, self)._set_input('DisplayName', value)

    def set_Fields(self, value):
        """
        Set the value of the Fields input for this Choreo. ((optional, string) Selector specifying a subset of fields to include in the response.)
        """
        super(InsertInputSet, self)._set_input('Fields', value)

    def set_FileContent(self, value):
        """
        Set the value of the FileContent input for this Choreo. ((conditional, string) The Base64 encoded contents of the file to upload.)
        """
        super(InsertInputSet, self)._set_input('FileContent', value)

    def set_RefreshToken(self, value):
        """
        Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
        """
        super(InsertInputSet, self)._set_input('RefreshToken', value)

    def set_UserID(self, value):
        """
        Set the value of the UserID input for this Choreo. ((optional, string) The ID of the user to create the activity on behalf of. The value "me" is set as the default to indicate the authenticated user.)
        """
        super(InsertInputSet, self)._set_input('UserID', value)

class InsertResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the Insert Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
        """
        return self._output.get('Response', None)

    def get_NewAccessToken(self):
        """
        Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
        """
        return self._output.get('NewAccessToken', None)

class InsertChoreographyExecution(ChoreographyExecution):

    def _make_result_set(self, response, path):
        return InsertResultSet(response, path)
apache-2.0
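A hedged usage sketch for the Choreo above, following the usual Temboo SDK driver pattern; the account name, key names, and input values are placeholders, not real credentials:

# Minimal driver for the Insert Choreo (placeholder credentials).
from temboo.core.session import TembooSession
from temboo.Library.Google.Plus.Domains.Media.Insert import Insert

session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
choreo = Insert(session)
inputs = choreo.new_input_set()
inputs.set_AccessToken('ya29.EXAMPLE')  # or ClientID/ClientSecret/RefreshToken
inputs.set_FileContent('aGVsbG8=')      # Base64-encoded media, per the docstring
inputs.set_ContentType('image/jpg')     # required when FileContent is given
results = choreo.execute_with_results(inputs)
print(results.get_Response())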
kris-singh/pgmpy
pgmpy/base/DirectedGraph.py
3
9379
#!/usr/bin/env python3

import itertools

import networkx as nx

from pgmpy.base import UndirectedGraph


class DirectedGraph(nx.DiGraph):
    """
    Base class for all Directed Graphical Models.

    Each node in the graph can represent either a random variable, `Factor`,
    or a cluster of random variables. Edges in the graph represent the
    dependencies between these.

    Parameters
    ----------
    data: input graph
        Data to initialize graph. If data=None (default) an empty graph is
        created. The data can be an edge list or any Networkx graph object.

    Examples
    --------
    Create an empty DirectedGraph with no nodes and no edges

    >>> from pgmpy.base import DirectedGraph
    >>> G = DirectedGraph()

    G can be grown in several ways:

    **Nodes:**

    Add one node at a time:

    >>> G.add_node(node='a')

    Add the nodes from any container (a list, set or tuple or the nodes
    from another graph).

    >>> G.add_nodes_from(nodes=['a', 'b'])

    **Edges:**

    G can also be grown by adding edges.

    Add one edge,

    >>> G.add_edge(u='a', v='b')

    a list of edges,

    >>> G.add_edges_from(ebunch=[('a', 'b'), ('b', 'c')])

    If some edges connect nodes not yet in the model, the nodes
    are added automatically. There are no errors when adding
    nodes or edges that already exist.

    **Shortcuts:**

    Many common graph features allow python syntax for speed reporting.

    >>> 'a' in G     # check if node in graph
    True
    >>> len(G)  # number of nodes in graph
    3
    """

    def __init__(self, ebunch=None):
        super(DirectedGraph, self).__init__(ebunch)

    def add_node(self, node, weight=None):
        """
        Adds a single node to the Graph.

        Parameters
        ----------
        node: str, int, or any hashable python object.
            The node to add to the graph.

        weight: int, float
            The weight of the node.

        Examples
        --------
        >>> from pgmpy.base import DirectedGraph
        >>> G = DirectedGraph()
        >>> G.add_node(node='A')
        >>> G.nodes()
        ['A']

        Adding a node with some weight.
        >>> G.add_node(node='B', weight=0.3)

        The weight of these nodes can be accessed as:
        >>> G.node['B']
        {'weight': 0.3}
        >>> G.node['A']
        {'weight': None}
        """
        super(DirectedGraph, self).add_node(node, weight=weight)

    def add_nodes_from(self, nodes, weights=None):
        """
        Add multiple nodes to the Graph.

        **The behaviour of adding weights is different than in networkx.

        Parameters
        ----------
        nodes: iterable container
            A container of nodes (list, dict, set, or any hashable python
            object).

        weights: list, tuple (default=None)
            A container of weights (int, float). The weight value at index i
            is associated with the variable at index i.

        Examples
        --------
        >>> from pgmpy.base import DirectedGraph
        >>> G = DirectedGraph()
        >>> G.add_nodes_from(nodes=['A', 'B', 'C'])
        >>> G.nodes()
        ['A', 'B', 'C']

        Adding nodes with weights:
        >>> G.add_nodes_from(nodes=['D', 'E'], weights=[0.3, 0.6])
        >>> G.node['D']
        {'weight': 0.3}
        >>> G.node['E']
        {'weight': 0.6}
        >>> G.node['A']
        {'weight': None}
        """
        nodes = list(nodes)

        if weights:
            if len(nodes) != len(weights):
                raise ValueError("The number of elements in nodes and weights"
                                 " should be equal.")
            for index in range(len(nodes)):
                self.add_node(node=nodes[index], weight=weights[index])
        else:
            for node in nodes:
                self.add_node(node=node)

    def add_edge(self, u, v, weight=None):
        """
        Add an edge between u and v.

        The nodes u and v will be automatically added if they are
        not already in the graph.

        Parameters
        ----------
        u, v : nodes
            Nodes can be any hashable Python object.

        weight: int, float (default=None)
            The weight of the edge

        Examples
        --------
        >>> from pgmpy.base import DirectedGraph
        >>> G = DirectedGraph()
        >>> G.add_nodes_from(nodes=['Alice', 'Bob', 'Charles'])
        >>> G.add_edge(u='Alice', v='Bob')
        >>> G.nodes()
        ['Alice', 'Bob', 'Charles']
        >>> G.edges()
        [('Alice', 'Bob')]

        When the node is not already present in the graph:
        >>> G.add_edge(u='Alice', v='Ankur')
        >>> G.nodes()
        ['Alice', 'Ankur', 'Bob', 'Charles']
        >>> G.edges()
        [('Alice', 'Bob'), ('Alice', 'Ankur')]

        Adding edges with weight:
        >>> G.add_edge('Ankur', 'Maria', weight=0.1)
        >>> G.edge['Ankur']['Maria']
        {'weight': 0.1}
        """
        super(DirectedGraph, self).add_edge(u, v, weight=weight)

    def add_edges_from(self, ebunch, weights=None):
        """
        Add all the edges in ebunch.

        If nodes referred in the ebunch are not already present, they
        will be automatically added. Node names can be any hashable python
        object.

        **The behaviour of adding weights is different than networkx.

        Parameters
        ----------
        ebunch : container of edges
            Each edge given in the container will be added to the graph.
            The edges must be given as 2-tuples (u, v).

        weights: list, tuple (default=None)
            A container of weights (int, float). The weight value at index i
            is associated with the edge at index i.

        Examples
        --------
        >>> from pgmpy.base import DirectedGraph
        >>> G = DirectedGraph()
        >>> G.add_nodes_from(nodes=['Alice', 'Bob', 'Charles'])
        >>> G.add_edges_from(ebunch=[('Alice', 'Bob'), ('Bob', 'Charles')])
        >>> G.nodes()
        ['Alice', 'Bob', 'Charles']
        >>> G.edges()
        [('Alice', 'Bob'), ('Bob', 'Charles')]

        When the node is not already in the model:
        >>> G.add_edges_from(ebunch=[('Alice', 'Ankur')])
        >>> G.nodes()
        ['Alice', 'Bob', 'Charles', 'Ankur']
        >>> G.edges()
        [('Alice', 'Bob'), ('Bob', 'Charles'), ('Alice', 'Ankur')]

        Adding edges with weights:
        >>> G.add_edges_from([('Ankur', 'Maria'), ('Maria', 'Mason')],
        ...                  weights=[0.3, 0.5])
        >>> G.edge['Ankur']['Maria']
        {'weight': 0.3}
        >>> G.edge['Maria']['Mason']
        {'weight': 0.5}
        """
        ebunch = list(ebunch)

        if weights:
            if len(ebunch) != len(weights):
                raise ValueError("The number of elements in ebunch and weights"
                                 " should be equal")
            for index in range(len(ebunch)):
                self.add_edge(ebunch[index][0], ebunch[index][1],
                              weight=weights[index])
        else:
            for edge in ebunch:
                self.add_edge(edge[0], edge[1])

    def get_parents(self, node):
        """
        Returns a list of parents of node.

        Throws an error if the node is not present in the graph.

        Parameters
        ----------
        node: string, int or any hashable python object.
            The node whose parents would be returned.

        Examples
        --------
        >>> from pgmpy.base import DirectedGraph
        >>> G = DirectedGraph(ebunch=[('diff', 'grade'), ('intel', 'grade')])
        >>> G.get_parents(node='grade')
        ['diff', 'intel']
        """
        return self.predecessors(node)

    def moralize(self):
        """
        Removes all the immoralities in the DirectedGraph and creates a moral
        graph (UndirectedGraph).

        A v-structure X->Z<-Y is an immorality if there is no directed edge
        between X and Y.

        Examples
        --------
        >>> from pgmpy.base import DirectedGraph
        >>> G = DirectedGraph(ebunch=[('diff', 'grade'), ('intel', 'grade')])
        >>> moral_graph = G.moralize()
        >>> moral_graph.edges()
        [('intel', 'grade'), ('intel', 'diff'), ('grade', 'diff')]
        """
        moral_graph = UndirectedGraph(self.to_undirected().edges())

        for node in self.nodes():
            moral_graph.add_edges_from(
                itertools.combinations(self.get_parents(node), 2))

        return moral_graph

    def get_leaves(self):
        """
        Returns a list of leaves of the graph.

        Examples
        --------
        >>> from pgmpy.base import DirectedGraph
        >>> graph = DirectedGraph([('A', 'B'), ('B', 'C'), ('B', 'D')])
        >>> graph.get_leaves()
        ['C', 'D']
        """
        return [node for node, out_degree in self.out_degree_iter()
                if out_degree == 0]

    def get_roots(self):
        """
        Returns a list of roots of the graph.

        Examples
        --------
        >>> from pgmpy.base import DirectedGraph
        >>> graph = DirectedGraph([('A', 'B'), ('B', 'C'), ('B', 'D'), ('E', 'B')])
        >>> graph.get_roots()
        ['A', 'E']
        """
        return [node for node, in_degree in self.in_degree().items()
                if in_degree == 0]
mit
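A quick runnable illustration of moralize() from the class above, mirroring its own docstring example (edge ordering in the output may vary):

from pgmpy.base import DirectedGraph

G = DirectedGraph(ebunch=[('diff', 'grade'), ('intel', 'grade')])
moral = G.moralize()
# The v-structure diff -> grade <- intel gains a moral edge diff -- intel:
print(sorted(sorted(edge) for edge in moral.edges()))
# [['diff', 'grade'], ['diff', 'intel'], ['grade', 'intel']]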
MinFu/youtube-dl
youtube_dl/extractor/hypem.py
128
2101
from __future__ import unicode_literals

import json
import time

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse,
    compat_urllib_request,
)
from ..utils import (
    ExtractorError,
)


class HypemIE(InfoExtractor):
    _VALID_URL = r'http://(?:www\.)?hypem\.com/track/(?P<id>[^/]+)/'
    _TEST = {
        'url': 'http://hypem.com/track/1v6ga/BODYWORK+-+TAME',
        'md5': 'b9cc91b5af8995e9f0c1cee04c575828',
        'info_dict': {
            'id': '1v6ga',
            'ext': 'mp3',
            'title': 'Tame',
            'uploader': 'BODYWORK',
        }
    }

    def _real_extract(self, url):
        track_id = self._match_id(url)

        data = {'ax': 1, 'ts': time.time()}
        data_encoded = compat_urllib_parse.urlencode(data)
        complete_url = url + "?" + data_encoded
        request = compat_urllib_request.Request(complete_url)
        response, urlh = self._download_webpage_handle(
            request, track_id, 'Downloading webpage with the url')
        cookie = urlh.headers.get('Set-Cookie', '')

        html_tracks = self._html_search_regex(
            r'(?ms)<script type="application/json" id="displayList-data">\s*(.*?)\s*</script>',
            response, 'tracks')
        try:
            track_list = json.loads(html_tracks)
            track = track_list['tracks'][0]
        except ValueError:
            raise ExtractorError('Hypemachine contained invalid JSON.')

        key = track['key']
        track_id = track['id']
        artist = track['artist']
        title = track['song']

        serve_url = "http://hypem.com/serve/source/%s/%s" % (track_id, key)
        request = compat_urllib_request.Request(
            serve_url, '', {'Content-Type': 'application/json'})
        request.add_header('cookie', cookie)
        song_data = self._download_json(request, track_id, 'Downloading metadata')
        final_url = song_data["url"]

        return {
            'id': track_id,
            'url': final_url,
            'ext': 'mp3',
            'title': title,
            'uploader': artist,
        }
unlicense
mcrowson/django
tests/check_framework/test_model_field_deprecation.py
322
2584
from django.core import checks
from django.db import models
from django.test import SimpleTestCase

from .tests import IsolateModelsMixin


class TestDeprecatedField(IsolateModelsMixin, SimpleTestCase):
    def test_default_details(self):
        class MyField(models.Field):
            system_check_deprecated_details = {}

        class Model(models.Model):
            name = MyField()

        model = Model()
        self.assertEqual(model.check(), [
            checks.Warning(
                msg='MyField has been deprecated.',
                hint=None,
                obj=Model._meta.get_field('name'),
                id='fields.WXXX',
            )
        ])

    def test_user_specified_details(self):
        class MyField(models.Field):
            system_check_deprecated_details = {
                'msg': 'This field is deprecated and will be removed soon.',
                'hint': 'Use something else.',
                'id': 'fields.W999',
            }

        class Model(models.Model):
            name = MyField()

        model = Model()
        self.assertEqual(model.check(), [
            checks.Warning(
                msg='This field is deprecated and will be removed soon.',
                hint='Use something else.',
                obj=Model._meta.get_field('name'),
                id='fields.W999',
            )
        ])


class TestRemovedField(IsolateModelsMixin, SimpleTestCase):
    def test_default_details(self):
        class MyField(models.Field):
            system_check_removed_details = {}

        class Model(models.Model):
            name = MyField()

        model = Model()
        self.assertEqual(model.check(), [
            checks.Error(
                msg='MyField has been removed except for support in historical migrations.',
                hint=None,
                obj=Model._meta.get_field('name'),
                id='fields.EXXX',
            )
        ])

    def test_user_specified_details(self):
        class MyField(models.Field):
            system_check_removed_details = {
                'msg': 'Support for this field is gone.',
                'hint': 'Use something else.',
                'id': 'fields.E999',
            }

        class Model(models.Model):
            name = MyField()

        model = Model()
        self.assertEqual(model.check(), [
            checks.Error(
                msg='Support for this field is gone.',
                hint='Use something else.',
                obj=Model._meta.get_field('name'),
                id='fields.E999',
            )
        ])
bsd-3-clause
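The hook these tests exercise is a public one: a custom field advertises its own deprecation by declaring system_check_deprecated_details, and Django's system checks turn that dict into the checks.Warning asserted above. A minimal sketch (the field name, messages, and check id are illustrative):

from django.db import models

class OldStyleField(models.TextField):
    # Picked up by Model.check(); keys mirror test_user_specified_details.
    system_check_deprecated_details = {
        'msg': 'OldStyleField is deprecated.',
        'hint': 'Use models.TextField instead.',
        'id': 'fields.W900',
    }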
hyz1011088/StarCluster
utils/s3mount.py
21
1151
#!/usr/bin/env python
import os
import sys

from starcluster.config import StarClusterConfig

print 'Simple wrapper script for s3fs (http://s3fs.googlecode.com/)'

cfg = StarClusterConfig().load()
ec2 = cfg.get_easy_ec2()
buckets = ec2.s3.get_buckets()

counter = 0
for bucket in buckets:
    print "[%d] %s" % (counter, bucket.name)
    counter += 1

try:
    inp = int(raw_input('>>> Enter the bucket to mnt: '))
    selection = buckets[inp].name
    print 'you selected: %s' % selection
    mountpt = raw_input('>>> please enter the mnt point: ')
    print 'mounting %s at: %s' % (selection, mountpt)
except KeyboardInterrupt, e:
    print
    print 'Exiting...'
    sys.exit(1)

try:
    os.system('s3fs %s -o accessKeyId=%s -o secretAccessKey=%s %s' % (
        selection, cfg.aws.get('aws_access_key_id'),
        cfg.aws.get('aws_secret_access_key'), mountpt))
except KeyboardInterrupt, e:
    print
    print 'Attempting to umount %s' % mountpt
    os.system('sudo umount %s' % mountpt)
    print 'Exiting...'
    sys.exit(1)
gpl-3.0
MechanisM/musicdb
contrib/django/core/serializers/base.py
2
5366
""" Module for abstract serializer/unserializer base classes. """ from StringIO import StringIO from django.db import models from django.utils.encoding import smart_unicode class SerializerDoesNotExist(KeyError): """The requested serializer was not found.""" pass class SerializationError(Exception): """Something bad happened during serialization.""" pass class DeserializationError(Exception): """Something bad happened during deserialization.""" pass class Serializer(object): """ Abstract serializer base class. """ # Indicates if the implemented serializer is only available for # internal Django use. internal_use_only = False def serialize(self, queryset, **options): """ Serialize a queryset. """ self.options = options self.stream = options.pop("stream", StringIO()) self.selected_fields = options.pop("fields", None) self.use_natural_keys = options.pop("use_natural_keys", False) self.start_serialization() for obj in queryset: self.start_object(obj) for field in obj._meta.local_fields: if field.serialize: if field.rel is None: if self.selected_fields is None or field.attname in self.selected_fields: self.handle_field(obj, field) else: if self.selected_fields is None or field.attname[:-3] in self.selected_fields: self.handle_fk_field(obj, field) for field in obj._meta.many_to_many: if field.serialize: if self.selected_fields is None or field.attname in self.selected_fields: self.handle_m2m_field(obj, field) self.end_object(obj) self.end_serialization() return self.getvalue() def start_serialization(self): """ Called when serializing of the queryset starts. """ raise NotImplementedError def end_serialization(self): """ Called when serializing of the queryset ends. """ pass def start_object(self, obj): """ Called when serializing of an object starts. """ raise NotImplementedError def end_object(self, obj): """ Called when serializing of an object ends. """ pass def handle_field(self, obj, field): """ Called to handle each individual (non-relational) field on an object. """ raise NotImplementedError def handle_fk_field(self, obj, field): """ Called to handle a ForeignKey field. """ raise NotImplementedError def handle_m2m_field(self, obj, field): """ Called to handle a ManyToManyField. """ raise NotImplementedError def getvalue(self): """ Return the fully serialized queryset (or None if the output stream is not seekable). """ if callable(getattr(self.stream, 'getvalue', None)): return self.stream.getvalue() class Deserializer(object): """ Abstract base deserializer class. """ def __init__(self, stream_or_string, **options): """ Init this serializer given a stream or a string """ self.options = options if isinstance(stream_or_string, basestring): self.stream = StringIO(stream_or_string) else: self.stream = stream_or_string # hack to make sure that the models have all been loaded before # deserialization starts (otherwise subclass calls to get_model() # and friends might fail...) models.get_apps() def __iter__(self): return self def next(self): """Iteration iterface -- return the next item in the stream""" raise NotImplementedError class DeserializedObject(object): """ A deserialized model. Basically a container for holding the pre-saved deserialized data along with the many-to-many data saved with the object. Call ``save()`` to save the object (with the many-to-many data) to the database; call ``save(save_m2m=False)`` to save just the object fields (and not touch the many-to-many stuff.) 
""" def __init__(self, obj, m2m_data=None): self.object = obj self.m2m_data = m2m_data def __repr__(self): return "<DeserializedObject: %s.%s(pk=%s)>" % ( self.object._meta.app_label, self.object._meta.object_name, self.object.pk) def save(self, save_m2m=True, using=None): # Call save on the Model baseclass directly. This bypasses any # model-defined save. The save is also forced to be raw. # This ensures that the data that is deserialized is literally # what came from the file, not post-processed by pre_save/save # methods. models.Model.save_base(self.object, using=using, raw=True) if self.m2m_data and save_m2m: for accessor_name, object_list in self.m2m_data.items(): setattr(self.object, accessor_name, object_list) # prevent a second (possibly accidental) call to save() from saving # the m2m data twice. self.m2m_data = None
agpl-3.0
RevelSystems/django
django/conf/locale/hu/formats.py
504
1117
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals

# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y. F j.'
TIME_FORMAT = 'G.i'
DATETIME_FORMAT = 'Y. F j. G.i'
YEAR_MONTH_FORMAT = 'Y. F'
MONTH_DAY_FORMAT = 'F j.'
SHORT_DATE_FORMAT = 'Y.m.d.'
SHORT_DATETIME_FORMAT = 'Y.m.d. G.i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%Y.%m.%d.',  # '2006.10.25.'
]
TIME_INPUT_FORMATS = [
    '%H.%M.%S',  # '14.30.59'
    '%H.%M',  # '14.30'
]
DATETIME_INPUT_FORMATS = [
    '%Y.%m.%d. %H.%M.%S',  # '2006.10.25. 14.30.59'
    '%Y.%m.%d. %H.%M.%S.%f',  # '2006.10.25. 14.30.59.000200'
    '%Y.%m.%d. %H.%M',  # '2006.10.25. 14.30'
    '%Y.%m.%d.',  # '2006.10.25.'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '  # Non-breaking space
NUMBER_GROUPING = 3
bsd-3-clause
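Since the *_INPUT_FORMATS above are ordinary strptime patterns, they can be checked directly with the standard library; a small demonstration using the sample literals from the comments:

from datetime import datetime

print(datetime.strptime('2006.10.25.', '%Y.%m.%d.').date())  # 2006-10-25
print(datetime.strptime('14.30', '%H.%M').time())            # 14:30:00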
emrecamasuvi/appengineTmp
lib/flask/helpers.py
776
33793
# -*- coding: utf-8 -*-
"""
    flask.helpers
    ~~~~~~~~~~~~~

    Implements various helpers.

    :copyright: (c) 2011 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""

import os
import sys
import pkgutil
import posixpath
import mimetypes
from time import time
from zlib import adler32
from threading import RLock
from werkzeug.routing import BuildError
from functools import update_wrapper

try:
    from werkzeug.urls import url_quote
except ImportError:
    from urlparse import quote as url_quote

from werkzeug.datastructures import Headers
from werkzeug.exceptions import NotFound

# this was moved in 0.7
try:
    from werkzeug.wsgi import wrap_file
except ImportError:
    from werkzeug.utils import wrap_file

from jinja2 import FileSystemLoader

from .signals import message_flashed
from .globals import session, _request_ctx_stack, _app_ctx_stack, \
     current_app, request
from ._compat import string_types, text_type


# sentinel
_missing = object()


# what separators does this operating system provide that are not a slash?
# this is used by the send_from_directory function to ensure that nobody is
# able to access files from outside the filesystem.
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
                    if sep not in (None, '/'))


def _endpoint_from_view_func(view_func):
    """Internal helper that returns the default endpoint for a given
    function.  This always is the function name.
    """
    assert view_func is not None, 'expected view func if endpoint ' \
                                  'is not provided.'
    return view_func.__name__


def stream_with_context(generator_or_function):
    """Request contexts disappear when the response is started on the server.
    This is done for efficiency reasons and to make it less likely to encounter
    memory leaks with badly written WSGI middlewares.  The downside is that if
    you are using streamed responses, the generator cannot access request bound
    information any more.

    This function however can help you keep the context around for longer::

        from flask import stream_with_context, request, Response

        @app.route('/stream')
        def streamed_response():
            @stream_with_context
            def generate():
                yield 'Hello '
                yield request.args['name']
                yield '!'
            return Response(generate())

    Alternatively it can also be used around a specific generator::

        from flask import stream_with_context, request, Response

        @app.route('/stream')
        def streamed_response():
            def generate():
                yield 'Hello '
                yield request.args['name']
                yield '!'
            return Response(stream_with_context(generate()))

    .. versionadded:: 0.9
    """
    try:
        gen = iter(generator_or_function)
    except TypeError:
        def decorator(*args, **kwargs):
            gen = generator_or_function()
            return stream_with_context(gen)
        return update_wrapper(decorator, generator_or_function)

    def generator():
        ctx = _request_ctx_stack.top
        if ctx is None:
            raise RuntimeError('Attempted to stream with context but '
                'there was no context in the first place to keep around.')
        with ctx:
            # Dummy sentinel.  Has to be inside the context block or we're
            # not actually keeping the context around.
            yield None

            # The try/finally is here so that if someone passes a WSGI level
            # iterator in we're still running the cleanup logic.  Generators
            # don't need that because they are closed on their destruction
            # automatically.
            try:
                for item in gen:
                    yield item
            finally:
                if hasattr(gen, 'close'):
                    gen.close()

    # The trick is to start the generator.  Then the code execution runs until
    # the first dummy None is yielded at which point the context was already
    # pushed.  This item is discarded.  Then when the iteration continues the
    # real generator is executed.
    wrapped_g = generator()
    next(wrapped_g)
    return wrapped_g


def make_response(*args):
    """Sometimes it is necessary to set additional headers in a view.  Because
    views do not have to return response objects but can return a value that
    is converted into a response object by Flask itself, it becomes tricky to
    add headers to it.  This function can be called instead of using a return
    and you will get a response object which you can use to attach headers.

    If view looked like this and you want to add a new header::

        def index():
            return render_template('index.html', foo=42)

    You can now do something like this::

        def index():
            response = make_response(render_template('index.html', foo=42))
            response.headers['X-Parachutes'] = 'parachutes are cool'
            return response

    This function accepts the very same arguments you can return from a
    view function.  This for example creates a response with a 404 error
    code::

        response = make_response(render_template('not_found.html'), 404)

    The other use case of this function is to force the return value of a
    view function into a response which is helpful with view
    decorators::

        response = make_response(view_function())
        response.headers['X-Parachutes'] = 'parachutes are cool'

    Internally this function does the following things:

    -   if no arguments are passed, it creates a new response argument
    -   if one argument is passed, :meth:`flask.Flask.make_response`
        is invoked with it.
    -   if more than one argument is passed, the arguments are passed
        to the :meth:`flask.Flask.make_response` function as tuple.

    .. versionadded:: 0.6
    """
    if not args:
        return current_app.response_class()
    if len(args) == 1:
        args = args[0]
    return current_app.make_response(args)


def url_for(endpoint, **values):
    """Generates a URL to the given endpoint with the method provided.

    Variable arguments that are unknown to the target endpoint are appended
    to the generated URL as query arguments.  If the value of a query argument
    is `None`, the whole pair is skipped.  In case blueprints are active
    you can shortcut references to the same blueprint by prefixing the
    local endpoint with a dot (``.``).

    This will reference the index function local to the current blueprint::

        url_for('.index')

    For more information, head over to the :ref:`Quickstart <url-building>`.

    To integrate applications, :class:`Flask` has a hook to intercept URL build
    errors through :attr:`Flask.build_error_handler`.  The `url_for` function
    results in a :exc:`~werkzeug.routing.BuildError` when the current app does
    not have a URL for the given endpoint and values.  When it does, the
    :data:`~flask.current_app` calls its :attr:`~Flask.build_error_handler` if
    it is not `None`, which can return a string to use as the result of
    `url_for` (instead of `url_for`'s default to raise the
    :exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception.
    An example::

        def external_url_handler(error, endpoint, **values):
            "Looks up an external URL when `url_for` cannot build a URL."
            # This is an example of hooking the build_error_handler.
            # Here, lookup_url is some utility function you've built
            # which looks up the endpoint in some external URL registry.
            url = lookup_url(endpoint, **values)
            if url is None:
                # External lookup did not have a URL.
                # Re-raise the BuildError, in context of original traceback.
                exc_type, exc_value, tb = sys.exc_info()
                if exc_value is error:
                    raise exc_type, exc_value, tb
                else:
                    raise error
            # url_for will use this result, instead of raising BuildError.
            return url

        app.build_error_handler = external_url_handler

    Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and
    `endpoint` and `**values` are the arguments passed into `url_for`.  Note
    that this is for building URLs outside the current application, and not for
    handling 404 NotFound errors.

    .. versionadded:: 0.10
       The `_scheme` parameter was added.

    .. versionadded:: 0.9
       The `_anchor` and `_method` parameters were added.

    .. versionadded:: 0.9
       Calls :meth:`Flask.handle_build_error` on
       :exc:`~werkzeug.routing.BuildError`.

    :param endpoint: the endpoint of the URL (name of the function)
    :param values: the variable arguments of the URL rule
    :param _external: if set to `True`, an absolute URL is generated. Server
      address can be changed via `SERVER_NAME` configuration variable which
      defaults to `localhost`.
    :param _scheme: a string specifying the desired URL scheme. The `_external`
      parameter must be set to `True` or a `ValueError` is raised.
    :param _anchor: if provided this is added as anchor to the URL.
    :param _method: if provided this explicitly specifies an HTTP method.
    """
    appctx = _app_ctx_stack.top
    reqctx = _request_ctx_stack.top
    if appctx is None:
        raise RuntimeError('Attempted to generate a URL without the '
                           'application context being pushed. This has to be '
                           'executed when application context is available.')

    # If request specific information is available we have some extra
    # features that support "relative" urls.
    if reqctx is not None:
        url_adapter = reqctx.url_adapter
        blueprint_name = request.blueprint
        if not reqctx.request._is_old_module:
            if endpoint[:1] == '.':
                if blueprint_name is not None:
                    endpoint = blueprint_name + endpoint
                else:
                    endpoint = endpoint[1:]
        else:
            # TODO: get rid of this deprecated functionality in 1.0
            if '.' not in endpoint:
                if blueprint_name is not None:
                    endpoint = blueprint_name + '.' + endpoint
            elif endpoint.startswith('.'):
                endpoint = endpoint[1:]
        external = values.pop('_external', False)

    # Otherwise go with the url adapter from the appctx and make
    # the urls external by default.
    else:
        url_adapter = appctx.url_adapter
        if url_adapter is None:
            raise RuntimeError('Application was not able to create a URL '
                               'adapter for request independent URL generation. '
                               'You might be able to fix this by setting '
                               'the SERVER_NAME config variable.')
        external = values.pop('_external', True)

    anchor = values.pop('_anchor', None)
    method = values.pop('_method', None)
    scheme = values.pop('_scheme', None)
    appctx.app.inject_url_defaults(endpoint, values)

    if scheme is not None:
        if not external:
            raise ValueError('When specifying _scheme, _external must be True')
        url_adapter.url_scheme = scheme

    try:
        rv = url_adapter.build(endpoint, values, method=method,
                               force_external=external)
    except BuildError as error:
        # We need to inject the values again so that the app callback can
        # deal with that sort of stuff.
        values['_external'] = external
        values['_anchor'] = anchor
        values['_method'] = method
        return appctx.app.handle_url_build_error(error, endpoint, values)

    if anchor is not None:
        rv += '#' + url_quote(anchor)
    return rv


def get_template_attribute(template_name, attribute):
    """Loads a macro (or variable) a template exports.  This can be used to
    invoke a macro from within Python code.  If you for example have a
    template named `_cider.html` with the following contents:

    .. sourcecode:: html+jinja

       {% macro hello(name) %}Hello {{ name }}!{% endmacro %}

    You can access this from Python code like this::

        hello = get_template_attribute('_cider.html', 'hello')
        return hello('World')

    .. versionadded:: 0.2

    :param template_name: the name of the template
    :param attribute: the name of the variable of macro to access
    """
    return getattr(current_app.jinja_env.get_template(template_name).module,
                   attribute)


def flash(message, category='message'):
    """Flashes a message to the next request.  In order to remove the
    flashed message from the session and to display it to the user,
    the template has to call :func:`get_flashed_messages`.

    .. versionchanged:: 0.3
       `category` parameter added.

    :param message: the message to be flashed.
    :param category: the category for the message.  The following values
                     are recommended: ``'message'`` for any kind of message,
                     ``'error'`` for errors, ``'info'`` for information
                     messages and ``'warning'`` for warnings.  However any
                     kind of string can be used as category.
    """
    # Original implementation:
    #
    #     session.setdefault('_flashes', []).append((category, message))
    #
    # This assumed that changes made to mutable structures in the session
    # are always in sync with the session object, which is not true for
    # session implementations that use external storage for keeping their
    # keys/values.
    flashes = session.get('_flashes', [])
    flashes.append((category, message))
    session['_flashes'] = flashes
    message_flashed.send(current_app._get_current_object(),
                         message=message, category=category)


def get_flashed_messages(with_categories=False, category_filter=[]):
    """Pulls all flashed messages from the session and returns them.
    Further calls in the same request to the function will return
    the same messages.  By default just the messages are returned,
    but when `with_categories` is set to `True`, the return value will
    be a list of tuples in the form ``(category, message)`` instead.

    Filter the flashed messages to one or more categories by providing those
    categories in `category_filter`.  This allows rendering categories in
    separate html blocks.  The `with_categories` and `category_filter`
    arguments are distinct:

    * `with_categories` controls whether categories are returned with message
      text (`True` gives a tuple, where `False` gives just the message text).
    * `category_filter` filters the messages down to only those matching the
      provided categories.

    See :ref:`message-flashing-pattern` for examples.

    .. versionchanged:: 0.3
       `with_categories` parameter added.

    .. versionchanged:: 0.9
        `category_filter` parameter added.

    :param with_categories: set to `True` to also receive categories.
    :param category_filter: whitelist of categories to limit return values
    """
    flashes = _request_ctx_stack.top.flashes
    if flashes is None:
        _request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \
            if '_flashes' in session else []
    if category_filter:
        flashes = list(filter(lambda f: f[0] in category_filter, flashes))
    if not with_categories:
        return [x[1] for x in flashes]
    return flashes


def send_file(filename_or_fp, mimetype=None, as_attachment=False,
              attachment_filename=None, add_etags=True,
              cache_timeout=None, conditional=False):
    """Sends the contents of a file to the client.  This will use the
    most efficient method available and configured.  By default it will
    try to use the WSGI server's file_wrapper support.  Alternatively
    you can set the application's :attr:`~Flask.use_x_sendfile` attribute
    to ``True`` to directly emit an `X-Sendfile` header.  This however
    requires support of the underlying webserver for `X-Sendfile`.

    By default it will try to guess the mimetype for you, but you can
    also explicitly provide one.  For extra security you probably want
    to send certain files as attachment (HTML for instance).  The mimetype
    guessing requires a `filename` or an `attachment_filename` to be
    provided.

    Please never pass filenames to this function from user sources without
    checking them first.  Something like this is usually sufficient to
    avoid security problems::

        if '..' in filename or filename.startswith('/'):
            abort(404)

    .. versionadded:: 0.2

    .. versionadded:: 0.5
       The `add_etags`, `cache_timeout` and `conditional` parameters were
       added.  The default behavior is now to attach etags.

    .. versionchanged:: 0.7
       mimetype guessing and etag support for file objects was
       deprecated because it was unreliable.  Pass a filename if you are
       able to, otherwise attach an etag yourself.  This functionality
       will be removed in Flask 1.0

    .. versionchanged:: 0.9
       cache_timeout pulls its default from application config, when None.

    :param filename_or_fp: the filename of the file to send.  This is
                           relative to the :attr:`~Flask.root_path` if a
                           relative path is specified.
                           Alternatively a file object might be provided
                           in which case `X-Sendfile` might not work and
                           fall back to the traditional method.  Make sure
                           that the file pointer is positioned at the start
                           of data to send before calling :func:`send_file`.
    :param mimetype: the mimetype of the file if provided, otherwise
                     auto detection happens.
    :param as_attachment: set to `True` if you want to send this file with
                          a ``Content-Disposition: attachment`` header.
    :param attachment_filename: the filename for the attachment if it
                                differs from the file's filename.
    :param add_etags: set to `False` to disable attaching of etags.
    :param conditional: set to `True` to enable conditional responses.

    :param cache_timeout: the timeout in seconds for the headers. When `None`
                          (default), this value is set by
                          :meth:`~Flask.get_send_file_max_age` of
                          :data:`~flask.current_app`.
    """
    mtime = None
    if isinstance(filename_or_fp, string_types):
        filename = filename_or_fp
        file = None
    else:
        from warnings import warn
        file = filename_or_fp
        filename = getattr(file, 'name', None)

        # XXX: this behavior is now deprecated because it was unreliable.
        # removed in Flask 1.0
        if not attachment_filename and not mimetype \
           and isinstance(filename, string_types):
            warn(DeprecationWarning('The filename support for file objects '
                'passed to send_file is now deprecated.  Pass an '
                'attachment_filename if you want mimetypes to be guessed.'),
                stacklevel=2)
        if add_etags:
            warn(DeprecationWarning('In future flask releases etags will no '
                'longer be generated for file objects passed to the send_file '
                'function because this behavior was unreliable.  Pass '
                'filenames instead if possible, otherwise attach an etag '
                'yourself based on another value'), stacklevel=2)

    if filename is not None:
        if not os.path.isabs(filename):
            filename = os.path.join(current_app.root_path, filename)
    if mimetype is None and (filename or attachment_filename):
        mimetype = mimetypes.guess_type(filename or attachment_filename)[0]
    if mimetype is None:
        mimetype = 'application/octet-stream'

    headers = Headers()
    if as_attachment:
        if attachment_filename is None:
            if filename is None:
                raise TypeError('filename unavailable, required for '
                                'sending as attachment')
            attachment_filename = os.path.basename(filename)
        headers.add('Content-Disposition', 'attachment',
                    filename=attachment_filename)

    if current_app.use_x_sendfile and filename:
        if file is not None:
            file.close()
        headers['X-Sendfile'] = filename
        headers['Content-Length'] = os.path.getsize(filename)
        data = None
    else:
        if file is None:
            file = open(filename, 'rb')
            mtime = os.path.getmtime(filename)
            headers['Content-Length'] = os.path.getsize(filename)
        data = wrap_file(request.environ, file)

    rv = current_app.response_class(data, mimetype=mimetype, headers=headers,
                                    direct_passthrough=True)

    # if we know the file modification date, we can store it as the
    # time of the last modification.
    if mtime is not None:
        rv.last_modified = int(mtime)

    rv.cache_control.public = True
    if cache_timeout is None:
        cache_timeout = current_app.get_send_file_max_age(filename)
    if cache_timeout is not None:
        rv.cache_control.max_age = cache_timeout
        rv.expires = int(time() + cache_timeout)

    if add_etags and filename is not None:
        rv.set_etag('flask-%s-%s-%s' % (
            os.path.getmtime(filename),
            os.path.getsize(filename),
            adler32(
                filename.encode('utf-8') if isinstance(filename, text_type)
                else filename
            ) & 0xffffffff
        ))
        if conditional:
            rv = rv.make_conditional(request)
            # make sure we don't send x-sendfile for servers that
            # ignore the 304 status code for x-sendfile.
            if rv.status_code == 304:
                rv.headers.pop('x-sendfile', None)
    return rv


def safe_join(directory, filename):
    """Safely join `directory` and `filename`.

    Example usage::

        @app.route('/wiki/<path:filename>')
        def wiki_page(filename):
            filename = safe_join(app.config['WIKI_FOLDER'], filename)
            with open(filename, 'rb') as fd:
                content = fd.read() # Read and process the file content...

    :param directory: the base directory.
    :param filename: the untrusted filename relative to that directory.
    :raises: :class:`~werkzeug.exceptions.NotFound` if the resulting path
             would fall out of `directory`.
    """
    filename = posixpath.normpath(filename)
    for sep in _os_alt_seps:
        if sep in filename:
            raise NotFound()
    if os.path.isabs(filename) or \
       filename == '..' or \
       filename.startswith('../'):
        raise NotFound()
    return os.path.join(directory, filename)


def send_from_directory(directory, filename, **options):
    """Send a file from a given directory with :func:`send_file`.  This
    is a secure way to quickly expose static files from an upload folder
    or something similar.

    Example usage::

        @app.route('/uploads/<path:filename>')
        def download_file(filename):
            return send_from_directory(app.config['UPLOAD_FOLDER'],
                                       filename, as_attachment=True)

    .. admonition:: Sending files and Performance

       It is strongly recommended to activate either `X-Sendfile` support in
       your webserver or (if no authentication happens) to tell the webserver
       to serve files for the given path on its own without calling into the
       web application for improved performance.

    .. versionadded:: 0.5

    :param directory: the directory where all the files are stored.
    :param filename: the filename relative to that directory to
                     download.
    :param options: optional keyword arguments that are directly
                    forwarded to :func:`send_file`.
    """
    filename = safe_join(directory, filename)
    if not os.path.isfile(filename):
        raise NotFound()
    options.setdefault('conditional', True)
    return send_file(filename, **options)


def get_root_path(import_name):
    """Returns the path to a package or cwd if that cannot be found.  This
    returns the path of a package or the folder that contains a module.

    Not to be confused with the package path returned by :func:`find_package`.
    """
    # Module already imported and has a file attribute.  Use that first.
    mod = sys.modules.get(import_name)
    if mod is not None and hasattr(mod, '__file__'):
        return os.path.dirname(os.path.abspath(mod.__file__))

    # Next attempt: check the loader.
    loader = pkgutil.get_loader(import_name)

    # Loader does not exist or we're referring to an unloaded main module
    # or a main module without path (interactive sessions), go with the
    # current working directory.
    if loader is None or import_name == '__main__':
        return os.getcwd()

    # For .egg, zipimporter does not have get_filename until Python 2.7.
    # Some other loaders might exhibit the same behavior.
    if hasattr(loader, 'get_filename'):
        filepath = loader.get_filename(import_name)
    else:
        # Fall back to imports.
        __import__(import_name)
        filepath = sys.modules[import_name].__file__

    # filepath is import_name.py for a module, or __init__.py for a package.
    return os.path.dirname(os.path.abspath(filepath))


def find_package(import_name):
    """Finds a package and returns the prefix (or None if the package is
    not installed) as well as the folder that contains the package or
    module as a tuple.  The package path returned is the module that would
    have to be added to the pythonpath in order to make it possible to
    import the module.  The prefix is the path below which a UNIX like
    folder structure exists (lib, share etc.).
    """
    root_mod_name = import_name.split('.')[0]
    loader = pkgutil.get_loader(root_mod_name)
    if loader is None or import_name == '__main__':
        # import name is not found, or interactive/main module
        package_path = os.getcwd()
    else:
        # For .egg, zipimporter does not have get_filename until Python 2.7.
        if hasattr(loader, 'get_filename'):
            filename = loader.get_filename(root_mod_name)
        elif hasattr(loader, 'archive'):
            # zipimporter's loader.archive points to the .egg or .zip
            # archive filename is dropped in call to dirname below.
            filename = loader.archive
        else:
            # At least one loader is missing both get_filename and archive:
            # Google App Engine's HardenedModulesHook
            #
            # Fall back to imports.
            __import__(import_name)
            filename = sys.modules[import_name].__file__
        package_path = os.path.abspath(os.path.dirname(filename))
        # package_path ends with __init__.py for a package
        if loader.is_package(root_mod_name):
            package_path = os.path.dirname(package_path)

    site_parent, site_folder = os.path.split(package_path)
    py_prefix = os.path.abspath(sys.prefix)
    if package_path.startswith(py_prefix):
        return py_prefix, package_path
    elif site_folder.lower() == 'site-packages':
        parent, folder = os.path.split(site_parent)
        # Windows like installations
        if folder.lower() == 'lib':
            base_dir = parent
        # UNIX like installations
        elif os.path.basename(parent).lower() == 'lib':
            base_dir = os.path.dirname(parent)
        else:
            base_dir = site_parent
        return base_dir, package_path
    return None, package_path


class locked_cached_property(object):
    """A decorator that converts a function into a lazy property.  The
    function wrapped is called the first time to retrieve the result
    and then that calculated result is used the next time you access
    the value.  Works like the one in Werkzeug but has a lock for
    thread safety.
    """

    def __init__(self, func, name=None, doc=None):
        self.__name__ = name or func.__name__
        self.__module__ = func.__module__
        self.__doc__ = doc or func.__doc__
        self.func = func
        self.lock = RLock()

    def __get__(self, obj, type=None):
        if obj is None:
            return self
        with self.lock:
            value = obj.__dict__.get(self.__name__, _missing)
            if value is _missing:
                value = self.func(obj)
                obj.__dict__[self.__name__] = value
            return value


class _PackageBoundObject(object):

    def __init__(self, import_name, template_folder=None):
        #: The name of the package or module.  Do not change this once
        #: it was set by the constructor.
        self.import_name = import_name

        #: location of the templates.  `None` if templates should not be
        #: exposed.
        self.template_folder = template_folder

        #: Where is the app root located?
        self.root_path = get_root_path(self.import_name)

        self._static_folder = None
        self._static_url_path = None

    def _get_static_folder(self):
        if self._static_folder is not None:
            return os.path.join(self.root_path, self._static_folder)
    def _set_static_folder(self, value):
        self._static_folder = value
    static_folder = property(_get_static_folder, _set_static_folder)
    del _get_static_folder, _set_static_folder

    def _get_static_url_path(self):
        if self._static_url_path is None:
            if self.static_folder is None:
                return None
            return '/' + os.path.basename(self.static_folder)
        return self._static_url_path
    def _set_static_url_path(self, value):
        self._static_url_path = value
    static_url_path = property(_get_static_url_path, _set_static_url_path)
    del _get_static_url_path, _set_static_url_path

    @property
    def has_static_folder(self):
        """This is `True` if the package bound object's container has a
        folder named ``'static'``.

        .. versionadded:: 0.5
        """
        return self.static_folder is not None

    @locked_cached_property
    def jinja_loader(self):
        """The Jinja loader for this package bound object.

        .. versionadded:: 0.5
        """
        if self.template_folder is not None:
            return FileSystemLoader(os.path.join(self.root_path,
                                                 self.template_folder))

    def get_send_file_max_age(self, filename):
        """Provides default cache_timeout for the :func:`send_file` functions.

        By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from
        the configuration of :data:`~flask.current_app`.

        Static file functions such as :func:`send_from_directory` use this
        function, and :func:`send_file` calls this function on
        :data:`~flask.current_app` when the given cache_timeout is `None`. If a
        cache_timeout is given in :func:`send_file`, that timeout is used;
        otherwise, this method is called.

        This allows subclasses to change the behavior when sending files based
        on the filename.  For example, to set the cache timeout for .js files
        to 60 seconds::

            class MyFlask(flask.Flask):
                def get_send_file_max_age(self, name):
                    if name.lower().endswith('.js'):
                        return 60
                    return flask.Flask.get_send_file_max_age(self, name)

        .. versionadded:: 0.9
        """
        return current_app.config['SEND_FILE_MAX_AGE_DEFAULT']

    def send_static_file(self, filename):
        """Function used internally to send static files from the static
        folder to the browser.

        .. versionadded:: 0.5
        """
        if not self.has_static_folder:
            raise RuntimeError('No static folder for this object')
        # Ensure get_send_file_max_age is called in all cases.
        # Here, we ensure get_send_file_max_age is called for Blueprints.
        cache_timeout = self.get_send_file_max_age(filename)
        return send_from_directory(self.static_folder, filename,
                                   cache_timeout=cache_timeout)

    def open_resource(self, resource, mode='rb'):
        """Opens a resource from the application's resource folder.  To see
        how this works, consider the following folder structure::

            /myapplication.py
            /schema.sql
            /static
                /style.css
            /templates
                /layout.html
                /index.html

        If you want to open the `schema.sql` file you would do the
        following::

            with app.open_resource('schema.sql') as f:
                contents = f.read()
                do_something_with(contents)

        :param resource: the name of the resource.  To access resources within
                         subfolders use forward slashes as separator.
        :param mode: resource file opening mode, default is 'rb'.
        """
        if mode not in ('r', 'rb'):
            raise ValueError('Resources can only be opened for reading')
        return open(os.path.join(self.root_path, resource), mode)
apache-2.0
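A short behavior sketch for safe_join() as documented above; the base directory and filenames are made up for illustration:

from flask.helpers import safe_join
from werkzeug.exceptions import NotFound

print(safe_join('/var/www/wiki', 'pages/index.txt'))  # /var/www/wiki/pages/index.txt
try:
    safe_join('/var/www/wiki', '../etc/passwd')  # traversal attempt
except NotFound:
    print('traversal rejected')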
kjschiroo/WikiChatter
test/test_indentblock.py
1
5369
import unittest

import wikichatter.indentblock as indentblock
import wikichatter.mwparsermod as mwpm

EMPTY = "\n"
LEVEL0 = "Level 0\n"
LEVEL1 = ":Level 1\n"
LEVEL2 = "::Level 2\n"
LEVEL3 = ":::Level 3\n"
LEVEL4 = "::::Level 4\n"
LIST1 = "*Level 1\n"
LIST2 = "**Level 2\n"
LIST3 = "***Level 3\n"
LIST4 = "****Level 4\n"
OUTDENT = "{{outdent}}"
OUTDENT_LEVEL = "{{outdent|5}}"


class IndentBlockTest(unittest.TestCase):

    def test_generates_list_from_basic_input(self):
        text = (
            LEVEL0 +
            LEVEL1 +
            LEVEL2 +
            LEVEL3
        )
        code = mwpm.parse(text)
        blocks = indentblock.generate_indentblock_list(code)
        self.assertEqual(len(blocks), 4)
        self.assertEqual(blocks[0].indent, 0)
        self.assertEqual(blocks[1].indent, 1)
        self.assertEqual(blocks[2].indent, 2)
        self.assertEqual(blocks[3].indent, 3)

    def test_generates_list_from_reverse_input(self):
        text = (
            LEVEL3 +
            LEVEL2 +
            LEVEL1 +
            LEVEL0
        )
        code = mwpm.parse(text)
        blocks = indentblock.generate_indentblock_list(code)
        self.assertEqual(len(blocks), 4)
        self.assertEqual(blocks[0].indent, 3)
        self.assertEqual(blocks[1].indent, 2)
        self.assertEqual(blocks[2].indent, 1)
        self.assertEqual(blocks[3].indent, 0)

    def test_generates_list_from_zigzag_input(self):
        text = (
            LEVEL0 +
            LEVEL1 +
            LEVEL2 +
            LEVEL3 +
            LEVEL2 +
            LEVEL1 +
            LEVEL0
        )
        code = mwpm.parse(text)
        blocks = indentblock.generate_indentblock_list(code)
        self.assertEqual(len(blocks), 7)
        self.assertEqual(blocks[0].indent, 0)
        self.assertEqual(blocks[1].indent, 1)
        self.assertEqual(blocks[2].indent, 2)
        self.assertEqual(blocks[3].indent, 3)
        self.assertEqual(blocks[4].indent, 2)
        self.assertEqual(blocks[5].indent, 1)
        self.assertEqual(blocks[6].indent, 0)

    def test_handles_outdent(self):
        text = (
            LEVEL0 +
            LEVEL1 +
            LEVEL2 +
            OUTDENT + LEVEL0
        )
        code = mwpm.parse(text)
        blocks = indentblock.generate_indentblock_list(code)
        self.assertEqual(len(blocks), 4)
        self.assertEqual(blocks[3].indent, 3)

    def test_handles_double_outdent(self):
        text = (
            LEVEL0 +
            LEVEL1 +
            LEVEL2 +
            OUTDENT + LEVEL0 +
            LEVEL1 +
            LEVEL2 +
            OUTDENT + LEVEL0
        )
        code = mwpm.parse(text)
        blocks = indentblock.generate_indentblock_list(code)
        self.assertEqual(len(blocks), 7)
        self.assertEqual(blocks[6].indent, 6)

    def test_handles_triple_outdent(self):
        text = (
            LEVEL0 +
            LEVEL1 +
            OUTDENT + LEVEL0 +
            LEVEL1 +
            OUTDENT + LEVEL0 +
            LEVEL1 +
            OUTDENT + LEVEL0
        )
        code = mwpm.parse(text)
        blocks = indentblock.generate_indentblock_list(code)
        self.assertEqual(len(blocks), 7)
        self.assertEqual(blocks[6].indent, 6)

    def test_generates_list_from_basic_list_input(self):
        text = (
            LEVEL0 +
            LIST1 +
            LIST2 +
            LIST3
        )
        code = mwpm.parse(text)
        blocks = indentblock.generate_indentblock_list(code)
        self.assertEqual(len(blocks), 4)
        self.assertEqual(blocks[0].indent, 0)
        self.assertEqual(blocks[1].indent, 1)
        self.assertEqual(blocks[2].indent, 2)
        self.assertEqual(blocks[3].indent, 3)

    def test_breaks_same_level_apart(self):
        text = (
            LEVEL0 +
            LIST1 +
            LIST1 +
            LIST2 +
            LIST3
        )
        code = mwpm.parse(text)
        blocks = indentblock.generate_indentblock_list(code)
        self.assertEqual(len(blocks), 5)
        self.assertEqual(blocks[0].indent, 0)
        self.assertEqual(blocks[1].indent, 1)
        self.assertEqual(blocks[2].indent, 1)
        self.assertEqual(blocks[3].indent, 2)
        self.assertEqual(blocks[4].indent, 3)

    def test_grants_empty_line_previous_indent(self):
        text = (
            LEVEL0 +
            LIST1 +
            EMPTY +
            LIST1 +
            LIST2
        )
        code = mwpm.parse(text)
        blocks = indentblock.generate_indentblock_list(code)
        self.assertEqual(len(blocks), 5)
        self.assertEqual(blocks[0].indent, 0)
        self.assertEqual(blocks[1].indent, 1)
        self.assertEqual(blocks[2].indent, 1)
        self.assertEqual(blocks[3].indent, 1)
        self.assertEqual(blocks[4].indent, 2)

    def test_gives_empty_start_zero_indent(self):
        text = (
            EMPTY +
            LEVEL0 +
            LIST1 +
            LIST1 +
            LIST2
        )
        code = mwpm.parse(text)
        blocks = indentblock.generate_indentblock_list(code)
        self.assertEqual(len(blocks), 5)
        self.assertEqual(blocks[0].indent, 0)
        self.assertEqual(blocks[1].indent, 0)
        self.assertEqual(blocks[2].indent, 1)
        self.assertEqual(blocks[3].indent, 1)
        self.assertEqual(blocks[4].indent, 2)
mit
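A minimal sketch (not part of the dataset) of the indentation convention the tests above exercise: each leading ':' or '*' adds one level. The helper name is hypothetical; wikichatter's real implementation may differ.

# Hypothetical helper; counts leading ':' / '*' markers to get the indent level.
def indent_level(line):
    stripped = line.lstrip(':*')
    return len(line) - len(stripped)

assert indent_level("::Level 2") == 2
assert indent_level("*Level 1") == 1
assert indent_level("Level 0") == 0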
sauloal/pycluster
pypy-1.9_64/lib-python/2.7/distutils/tests/test_check.py
14
3546
"""Tests for distutils.command.check.""" import unittest from test.test_support import run_unittest from distutils.command.check import check, HAS_DOCUTILS from distutils.tests import support from distutils.errors import DistutilsSetupError class CheckTestCase(support.LoggingSilencer, support.TempdirManager, unittest.TestCase): def _run(self, metadata=None, **options): if metadata is None: metadata = {} pkg_info, dist = self.create_dist(**metadata) cmd = check(dist) cmd.initialize_options() for name, value in options.items(): setattr(cmd, name, value) cmd.ensure_finalized() cmd.run() return cmd def test_check_metadata(self): # let's run the command with no metadata at all # by default, check is checking the metadata # should have some warnings cmd = self._run() self.assertEqual(cmd._warnings, 2) # now let's add the required fields # and run it again, to make sure we don't get # any warning anymore metadata = {'url': 'xxx', 'author': 'xxx', 'author_email': 'xxx', 'name': 'xxx', 'version': 'xxx'} cmd = self._run(metadata) self.assertEqual(cmd._warnings, 0) # now with the strict mode, we should # get an error if there are missing metadata self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1}) # and of course, no error when all metadata are present cmd = self._run(metadata, strict=1) self.assertEqual(cmd._warnings, 0) def test_check_document(self): if not HAS_DOCUTILS: # won't test without docutils return pkg_info, dist = self.create_dist() cmd = check(dist) # let's see if it detects broken rest broken_rest = 'title\n===\n\ntest' msgs = cmd._check_rst_data(broken_rest) self.assertEqual(len(msgs), 1) # and non-broken rest rest = 'title\n=====\n\ntest' msgs = cmd._check_rst_data(rest) self.assertEqual(len(msgs), 0) def test_check_restructuredtext(self): if not HAS_DOCUTILS: # won't test without docutils return # let's see if it detects broken rest in long_description broken_rest = 'title\n===\n\ntest' pkg_info, dist = self.create_dist(long_description=broken_rest) cmd = check(dist) cmd.check_restructuredtext() self.assertEqual(cmd._warnings, 1) # let's see if we have an error with strict=1 metadata = {'url': 'xxx', 'author': 'xxx', 'author_email': 'xxx', 'name': 'xxx', 'version': 'xxx', 'long_description': broken_rest} self.assertRaises(DistutilsSetupError, self._run, metadata, **{'strict': 1, 'restructuredtext': 1}) # and non-broken rest metadata['long_description'] = 'title\n=====\n\ntest' cmd = self._run(metadata, strict=1, restructuredtext=1) self.assertEqual(cmd._warnings, 0) def test_check_all(self): metadata = {'url': 'xxx', 'author': 'xxx'} self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1, 'restructuredtext': 1}) def test_suite(): return unittest.makeSuite(CheckTestCase) if __name__ == "__main__": run_unittest(test_suite())
mit
aferr/TimingCompartments
src/mem/slicc/symbols/State.py
60
1754
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from slicc.symbols.Symbol import Symbol

class State(Symbol):
    def __repr__(self):
        return "[State: %s]" % self.ident

__all__ = [ "State" ]
bsd-3-clause
htzy/bigfour
lms/djangoapps/courseware/management/commands/export_course.py
62
3095
""" A Django command that exports a course to a tar.gz file. If <filename> is '-', it pipes the file to stdout """ import os import re import shutil import tarfile from tempfile import mktemp, mkdtemp from textwrap import dedent from path import path from django.core.management.base import BaseCommand, CommandError from xmodule.modulestore.django import modulestore from xmodule.modulestore.xml_exporter import export_course_to_xml from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey class Command(BaseCommand): """ Export a course to XML. The output is compressed as a tar.gz file """ args = "<course_id> <output_filename>" help = dedent(__doc__).strip() def handle(self, *args, **options): course_key, filename, pipe_results = self._parse_arguments(args) export_course_to_tarfile(course_key, filename) results = self._get_results(filename) if pipe_results else None return results def _parse_arguments(self, args): """Parse command line arguments""" try: course_key = CourseKey.from_string(args[0]) filename = args[1] except InvalidKeyError: raise CommandError("Unparsable course_id") except IndexError: raise CommandError("Insufficient arguments") # If filename is '-' save to a temp file pipe_results = False if filename == '-': filename = mktemp() pipe_results = True return course_key, filename, pipe_results def _get_results(self, filename): """Load results from file""" with open(filename) as f: results = f.read() os.remove(filename) return results def export_course_to_tarfile(course_key, filename): """Exports a course into a tar.gz file""" tmp_dir = mkdtemp() try: course_dir = export_course_to_directory(course_key, tmp_dir) compress_directory(course_dir, filename) finally: shutil.rmtree(tmp_dir, ignore_errors=True) def export_course_to_directory(course_key, root_dir): """Export course into a directory""" store = modulestore() course = store.get_course(course_key) if course is None: raise CommandError("Invalid course_id") # The safest characters are A-Z, a-z, 0-9, <underscore>, <period> and <hyphen>. # We represent the first four with \w. # TODO: Once we support courses with unicode characters, we will need to revisit this. replacement_char = u'-' course_dir = replacement_char.join([course.id.org, course.id.course, course.id.run]) course_dir = re.sub(r'[^\w\.\-]', replacement_char, course_dir) export_course_to_xml(store, None, course.id, root_dir, course_dir) export_dir = path(root_dir) / course_dir return export_dir def compress_directory(directory, filename): """Compress a directory into a tar.gz file""" mode = 'w:gz' name = path(directory).name with tarfile.open(filename, mode) as tar_file: tar_file.add(directory, arcname=name)
agpl-3.0
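A quick worked example (not from the dataset; the course id parts are made up) of the directory-name sanitizing step in export_course_to_directory: anything outside [A-Za-z0-9_.-] becomes '-'.

import re

course_dir = '-'.join(['edX', 'DemoX', '2014/T1'])  # hypothetical org/course/run
course_dir = re.sub(r'[^\w\.\-]', '-', course_dir)
assert course_dir == 'edX-DemoX-2014-T1'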
stephanekirsch/e-colle
accueil/views.py
1
8473
#-*- coding: utf-8 -*-
from django.http import HttpResponseForbidden, Http404, HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from accueil.models import Classe, Matiere, Colleur, Message, Destinataire, Eleve, Config, Prof
from accueil.forms import UserForm, UserProfprincipalForm, SelectMessageForm, EcrireForm, ReponseForm
from django.contrib import messages as messagees
from ecolle.settings import IP_FILTRE_ADMIN, IP_FILTRE_ADRESSES
import re
import qrcode as qr
from django.db.models import Q
from io import BytesIO


def home(request):
    """Return the home view or, if the user is already logged in, redirect to the appropriate section"""
    user = request.user
    if user.is_authenticated:
        if user.username == "admin":
            return redirect('action_admin')
        elif user.username == "Secrétariat":
            return redirect('action_secret')
        elif user.colleur:
            return redirect('action_colleur')
        elif user.eleve:
            return redirect('action_eleve')
    classes = Classe.objects.all()
    matieres = list(Matiere.objects.all())
    for i in range(len(matieres) - 1, 0, -1):
        if matieres[i].nom.lower() == matieres[i - 1].nom.lower():
            matieres.pop(i)
    show_admin = True
    if IP_FILTRE_ADMIN:
        show_admin = False
        user_ip = request.META['REMOTE_ADDR']
        for ip in IP_FILTRE_ADRESSES:
            authenticated_by_ip = re.compile(ip).match(user_ip)
            if authenticated_by_ip:
                show_admin = True
                break
    return render(request, 'accueil/home.html', {'classes': classes, 'matieres': matieres, 'show_admin': show_admin})


def deconnexion(request):
    """Log out the current user and redirect to the home page"""
    logout(request)
    return redirect('accueil')


@login_required(login_url='accueil')
def profil(request):
    """Return the profile view, where the email and/or password can be changed"""
    user = request.user
    if not user.is_authenticated:
        return HttpResponseForbidden("Vous devez être connecté pour accéder à cette page")
    profprincipal = bool(user.colleur and Classe.objects.filter(profprincipal=user.colleur))
    if profprincipal:
        classes = Classe.objects.filter(profprincipal=user.colleur)
        initial = {'email': user.email}
        for classe in classes:
            initial["{}_groupe".format(classe.pk)] = Colleur.objects.filter(colleurprof__classe=classe, colleurprof__modifgroupe=True)
            initial["{}_colloscope".format(classe.pk)] = Colleur.objects.filter(colleurprof__classe=classe, colleurprof__modifcolloscope=True)
        form = UserProfprincipalForm(user.colleur, classes, request.POST or None, instance=user, initial=initial)
        if form.is_valid():
            form.save()
            return redirect('accueil')
    else:
        form = UserForm(request.POST or None, instance=user)
        if form.is_valid():
            form.save()
            return redirect('accueil')
    return render(request, "accueil/profil.html", {'form': form})


@login_required(login_url='accueil')
def messages(request):
    """Return the messages view"""
    form = SelectMessageForm(request.user, request.POST or None)
    if form.is_valid():
        form.save()
        return redirect('messages')
    peut_composer = True
    if request.user.eleve:
        peut_composer = Config.objects.get_config().message_eleves
    return render(request, "accueil/messages.html", {'form': form, 'peut_composer': peut_composer, 'nonvide': form.fields['message'].queryset.exists()})


@login_required(login_url='accueil')
def message(request, id_message):
    """Return the view of the message whose id is id_message"""
    message = Message.objects.filter(pk=id_message).filter(Q(auteur=request.user, hasAuteur=True) | Q(messagerecu__user=request.user))
    if not message.exists():
        raise Http404("Message non trouvé")
    message = message.first()
    repondre = True
    envoye = False
    if message.auteur == request.user:  # this is a message we sent
        envoye = True
        if request.user.eleve:  # replying is allowed, unless we are a student and students are only allowed to reply
            repondre = Config.objects.get_config().message_eleves
    else:  # this is a received message
        destinataire = Destinataire.objects.get(message=message, user=request.user)
        if not destinataire.lu:  # update the recipient record
            message.luPar += str(request.user) + "; "
            message.save()
            destinataire.lu = True
            destinataire.save()
        if request.user.eleve and destinataire.reponses and not Config.objects.get_config().message_eleves:
            repondre = False
        if message.auteur.username in ['admin', 'Secrétariat']:
            repondre = False
    return render(request, "accueil/message.html", {'message': message, 'repondre': repondre, 'envoye': envoye})


@login_required(login_url='accueil')
def ecrire(request):
    """Return the view for writing a message"""
    if request.user.eleve and not Config.objects.get_config().message_eleves:
        return HttpResponseForbidden("Vous n'avez pas le droit d'écrire une message")
    message = Message(auteur=request.user)
    form = EcrireForm(request.user, request.POST or None, request.FILES or None, instance=message)
    if form.is_valid():
        form.save()
        messagees.error(request, "Message envoyé")
        return redirect('messages')
    return render(request, "accueil/ecrire.html", {'form': form})


@login_required(login_url='accueil')
def repondre(request, message_id):
    """Return the view for replying to the message whose id is message_id"""
    message = get_object_or_404(Message, pk=message_id)
    if message.auteur == request.user:  # one can only "reply to all" to a message one sent oneself
        raise Http404
    destinataire = get_object_or_404(Destinataire, message=message, user=request.user)
    if request.user.eleve and destinataire.reponses and not Config.objects.get_config().message_eleves or message.auteur.username in ['admin', 'Secrétariat']:
        return HttpResponseForbidden("Vous n'avez pas le droit de répondre")
    reponse = Message(auteur=request.user, listedestinataires=str(message.auteur), titre="Re: " + message.titre,
                      corps=(">" + message.corps.strip().replace("\n", "\n>") + "\n"))
    form = ReponseForm(message, False, request.user, request.POST or None, request.FILES or None,
                       initial={'destinataire': reponse.listedestinataires}, instance=reponse)
    if form.is_valid():
        form.save()
        messagees.error(request, "Message envoyé")
        destinataire.reponses += 1
        destinataire.save()
        return redirect('messages')
    return render(request, "accueil/repondre.html", {'form': form, 'message': message})


@login_required(login_url='accueil')
def repondreatous(request, message_id):
    """Return the view for replying to all recipients of the message whose id is message_id"""
    message = Message.objects.filter(pk=message_id).filter(Q(auteur=request.user, hasAuteur=True) | Q(messagerecu__user=request.user))
    if not message.exists():
        raise Http404("Message non trouvé")
    message = message.first()
    destinataires = list(message.messagerecu.all())
    if message.auteur == request.user:  # replying to a message we sent
        if request.user.eleve and not Config.objects.get_config().message_eleves:
            return HttpResponseForbidden("Vous n'avez pas le droit de répondre")
    else:
        desti = get_object_or_404(Destinataire, message=message, user=request.user)
        if request.user.eleve and desti.reponses and not Config.objects.get_config().message_eleves:
            return HttpResponseForbidden("Vous n'avez pas le droit de répondre")
        destinataires.append(Destinataire(user=message.auteur, message=None))
    listedestinataires = "; ".join([str(desti.user) for desti in destinataires])
    reponse = Message(auteur=request.user, listedestinataires=listedestinataires, titre="Re: " + message.titre,
                      corps=(">" + message.corps.strip().replace("\n", "\n>") + "\n"))
    form = ReponseForm(message, destinataires, request.user, request.POST or None, request.FILES or None,
                       initial={"destinataire": listedestinataires}, instance=reponse)
    if form.is_valid():
        form.save()
        messagees.error(request, "Message envoyé")
        if message.auteur != request.user:
            desti.reponses += 1
            desti.save()
        return redirect('messages')
    return render(request, "accueil/repondre.html", {'form': form, 'message': message})


@login_required(login_url='accueil')
def qrcode(request):
    return render(request, "accueil/qrcode.html")


@login_required(login_url='accueil')
def qrcodepng(request):
    url = request.build_absolute_uri('/')
    img = qr.make(url)
    buffer = BytesIO()
    img.save(buffer, format="PNG")
    response = HttpResponse(content_type='image/png')
    response.write(buffer.getvalue())
    buffer.close()
    return response
agpl-3.0
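A standalone sketch of the qrcodepng view's core, assuming the qrcode package (with Pillow) is installed; the URL is a placeholder.

import qrcode
from io import BytesIO

buffer = BytesIO()
qrcode.make("https://example.org/").save(buffer, format="PNG")
png_bytes = buffer.getvalue()  # ready to write into an image/png HTTP response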
erdincay/pyload
module/plugins/hoster/EuroshareEu.py
6
2197
# -*- coding: utf-8 -*-

import re

from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo


class EuroshareEu(SimpleHoster):
    __name__ = "EuroshareEu"
    __type__ = "hoster"
    __version__ = "0.30"
    __status__ = "testing"

    __pattern__ = r'http://(?:www\.)?euroshare\.(eu|sk|cz|hu|pl)/file/.+'
    __config__ = [("use_premium", "bool", "Use premium account if available", True)]

    __description__ = """Euroshare.eu hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "[email protected]")]

    INFO_PATTERN = r'<span style="float: left;"><strong>(?P<N>.+?)</strong> \((?P<S>.+?)\)</span>'
    OFFLINE_PATTERN = ur'<h2>S.bor sa nena.iel</h2>|Požadovaná stránka neexistuje!'

    LINK_FREE_PATTERN = r'<a href="(/file/\d+/[^/]*/download/)"><div class="downloadButton"'

    DL_LIMIT_PATTERN = r'<h2>Prebieha s.ahovanie</h2>|<p>Naraz je z jednej IP adresy mo.n. s.ahova. iba jeden s.bor'
    ERROR_PATTERN = r'href="/customer-zone/login/"'

    URL_REPLACEMENTS = [(r"(http://[^/]*\.)(sk|cz|hu|pl)/", r"\1eu/")]

    def handle_premium(self, pyfile):
        if self.ERROR_PATTERN in self.html:
            self.account.relogin(self.user)
            self.retry(reason=_("User not logged in"))

        self.link = pyfile.url.rstrip('/') + "/download/"

        check = self.check_download({'login': re.compile(self.ERROR_PATTERN),
                                     'json' : re.compile(r'\{"status":"error".*?"message":"(.*?)"')})

        if check == "login" or (check == "json" and self.last_check.group(1) == "Access token expired"):
            self.account.relogin(self.user)
            self.retry(reason=_("Access token expired"))

        elif check == "json":
            self.fail(self.last_check.group(1))

    def handle_free(self, pyfile):
        if re.search(self.DL_LIMIT_PATTERN, self.html):
            self.wait(5 * 60, 12, _("Download limit reached"))

        m = re.search(self.LINK_FREE_PATTERN, self.html)
        if m is None:
            self.error(_("LINK_FREE_PATTERN not found"))

        self.link = "http://euroshare.eu%s" % m.group(1)


getInfo = create_getInfo(EuroshareEu)
gpl-3.0
sysalexis/kbengine
kbe/res/scripts/common/Lib/test/test_importlib/source/test_source_encoding.py
81
5396
from .. import util
from . import util as source_util

machinery = util.import_importlib('importlib.machinery')

import codecs
import importlib.util
import re
import sys
import types
# Because sys.path gets essentially blanked, need to have unicodedata already
# imported for the parser to use.
import unicodedata
import unittest
import warnings


CODING_RE = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)


class EncodingTest:

    """PEP 3120 makes UTF-8 the default encoding for source code
    [default encoding].

    PEP 263 specifies how that can change on a per-file basis. Either the
    first or second line can contain the encoding line [encoding first line]
    encoding second line]. If the file has the BOM marker it is considered
    UTF-8 implicitly [BOM]. If any encoding is specified it must be UTF-8,
    else it is an error [BOM and utf-8][BOM conflict].

    """

    variable = '\u00fc'
    character = '\u00c9'
    source_line = "{0} = '{1}'\n".format(variable, character)
    module_name = '_temp'

    def run_test(self, source):
        with source_util.create_modules(self.module_name) as mapping:
            with open(mapping[self.module_name], 'wb') as file:
                file.write(source)
            loader = self.machinery.SourceFileLoader(self.module_name,
                                                     mapping[self.module_name])
            return self.load(loader)

    def create_source(self, encoding):
        encoding_line = "# coding={0}".format(encoding)
        assert CODING_RE.match(encoding_line)
        source_lines = [encoding_line.encode('utf-8')]
        source_lines.append(self.source_line.encode(encoding))
        return b'\n'.join(source_lines)

    def test_non_obvious_encoding(self):
        # Make sure that an encoding that has never been a standard one for
        # Python works.
        encoding_line = "# coding=koi8-r"
        assert CODING_RE.match(encoding_line)
        source = "{0}\na=42\n".format(encoding_line).encode("koi8-r")
        self.run_test(source)

    # [default encoding]
    def test_default_encoding(self):
        self.run_test(self.source_line.encode('utf-8'))

    # [encoding first line]
    def test_encoding_on_first_line(self):
        encoding = 'Latin-1'
        source = self.create_source(encoding)
        self.run_test(source)

    # [encoding second line]
    def test_encoding_on_second_line(self):
        source = b"#/usr/bin/python\n" + self.create_source('Latin-1')
        self.run_test(source)

    # [BOM]
    def test_bom(self):
        self.run_test(codecs.BOM_UTF8 + self.source_line.encode('utf-8'))

    # [BOM and utf-8]
    def test_bom_and_utf_8(self):
        source = codecs.BOM_UTF8 + self.create_source('utf-8')
        self.run_test(source)

    # [BOM conflict]
    def test_bom_conflict(self):
        source = codecs.BOM_UTF8 + self.create_source('latin-1')
        with self.assertRaises(SyntaxError):
            self.run_test(source)


class EncodingTestPEP451(EncodingTest):

    def load(self, loader):
        module = types.ModuleType(self.module_name)
        module.__spec__ = importlib.util.spec_from_loader(self.module_name, loader)
        loader.exec_module(module)
        return module


Frozen_EncodingTestPEP451, Source_EncodingTestPEP451 = util.test_both(
        EncodingTestPEP451, machinery=machinery)


class EncodingTestPEP302(EncodingTest):

    def load(self, loader):
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            return loader.load_module(self.module_name)


Frozen_EncodingTestPEP302, Source_EncodingTestPEP302 = util.test_both(
        EncodingTestPEP302, machinery=machinery)


class LineEndingTest:

    r"""Source written with the three types of line endings (\n, \r\n, \r)
    need to be readable [cr][crlf][lf]."""

    def run_test(self, line_ending):
        module_name = '_temp'
        source_lines = [b"a = 42", b"b = -13", b'']
        source = line_ending.join(source_lines)
        with source_util.create_modules(module_name) as mapping:
            with open(mapping[module_name], 'wb') as file:
                file.write(source)
            loader = self.machinery.SourceFileLoader(module_name,
                                                     mapping[module_name])
            return self.load(loader, module_name)

    # [cr]
    def test_cr(self):
        self.run_test(b'\r')

    # [crlf]
    def test_crlf(self):
        self.run_test(b'\r\n')

    # [lf]
    def test_lf(self):
        self.run_test(b'\n')


class LineEndingTestPEP451(LineEndingTest):

    def load(self, loader, module_name):
        module = types.ModuleType(module_name)
        module.__spec__ = importlib.util.spec_from_loader(module_name, loader)
        loader.exec_module(module)
        return module


Frozen_LineEndingTestPEP451, Source_LineEndingTestPEP451 = util.test_both(
        LineEndingTestPEP451, machinery=machinery)


class LineEndingTestPEP302(LineEndingTest):

    def load(self, loader, module_name):
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            return loader.load_module(module_name)


Frozen_LineEndingTestPEP302, Source_LineEndingTestPEP302 = util.test_both(
        LineEndingTestPEP302, machinery=machinery)


if __name__ == '__main__':
    unittest.main()
lgpl-3.0
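For illustration (not from the dataset): the PEP 263 coding-declaration regex used by the tests above, applied to two hypothetical first lines.

import re

CODING_RE = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
assert CODING_RE.match('# coding=koi8-r').group(1) == 'koi8-r'
assert CODING_RE.match('#!/usr/bin/env python') is None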
Datera/cinder
cinder/tests/unit/volume/drivers/dell_emc/sc/test_fc.py
3
48496
# Copyright (c) 2014 Dell Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from cinder import context from cinder import exception from cinder import test from cinder.tests.unit import fake_constants as fake from cinder.volume.drivers.dell_emc.sc import storagecenter_api from cinder.volume.drivers.dell_emc.sc import storagecenter_fc # We patch these here as they are used by every test to keep # from trying to contact a Dell Storage Center. @mock.patch.object(storagecenter_api.HttpClient, '__init__', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'open_connection') @mock.patch.object(storagecenter_api.SCApi, 'close_connection') class DellSCSanFCDriverTestCase(test.TestCase): VOLUME = {u'instanceId': u'64702.4829', u'scSerialNumber': 64702, u'replicationSource': False, u'liveVolume': False, u'vpdId': 4831, u'objectType': u'ScVolume', u'index': 4829, u'volumeFolderPath': u'dopnstktst/', u'hostCacheEnabled': False, u'usedByLegacyFluidFsNasVolume': False, u'inRecycleBin': False, u'volumeFolderIndex': 17, u'instanceName': u'5729f1db-4c45-416c-bc15-c8ea13a4465d', u'statusMessage': u'', u'status': u'Down', u'storageType': {u'instanceId': u'64702.1', u'instanceName': u'Assigned - Redundant - 2 MB', u'objectType': u'ScStorageType'}, u'cmmDestination': False, u'replicationDestination': False, u'volumeFolder': {u'instanceId': u'64702.17', u'instanceName': u'opnstktst', u'objectType': u'ScVolumeFolder'}, u'deviceId': u'6000d31000fcbe0000000000000012df', u'active': False, u'portableVolumeDestination': False, u'deleteAllowed': True, u'name': u'5729f1db-4c45-416c-bc15-c8ea13a4465d', u'scName': u'Storage Center 64702', u'secureDataUsed': False, u'serialNumber': u'0000fcbe-000012df', u'replayAllowed': False, u'flashOptimized': False, u'configuredSize': u'1.073741824E9 Bytes', u'mapped': False, u'cmmSource': False} SCSERVER = {u'scName': u'Storage Center 64702', u'volumeCount': 0, u'removeHbasAllowed': True, u'legacyFluidFs': False, u'serverFolderIndex': 4, u'alertOnConnectivity': True, u'objectType': u'ScPhysicalServer', u'instanceName': u'Server_21000024ff30441d', u'instanceId': u'64702.47', u'serverFolderPath': u'opnstktst/', u'portType': [u'FibreChannel'], u'type': u'Physical', u'statusMessage': u'Only 5 of 6 expected paths are up', u'status': u'Degraded', u'scSerialNumber': 64702, u'serverFolder': {u'instanceId': u'64702.4', u'instanceName': u'opnstktst', u'objectType': u'ScServerFolder'}, u'parentIndex': 0, u'connectivity': u'Partial', u'hostCacheIndex': 0, u'deleteAllowed': True, u'pathCount': 5, u'name': u'Server_21000024ff30441d', u'hbaPresent': True, u'hbaCount': 2, u'notes': u'Created by Dell EMC Cinder Driver', u'mapped': False, u'operatingSystem': {u'instanceId': u'64702.38', u'instanceName': u'Red Hat Linux 6.x', u'objectType': u'ScServerOperatingSystem'} } MAPPING = {u'instanceId': u'64702.2183', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'lunUsed': 
[1], u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.4829', u'instanceName': u'5729f1db-4c45-416c-bc15-c8ea13a4465d', u'objectType': u'ScVolume'}, u'connectivity': u'Up', u'readOnly': False, u'objectType': u'ScMappingProfile', u'hostCache': False, u'mappedVia': u'Server', u'mapCount': 2, u'instanceName': u'4829-47', u'lunRequested': u'N/A' } def setUp(self): super(DellSCSanFCDriverTestCase, self).setUp() # configuration is a mock. A mock is pretty much a blank # slate. I believe mock's done in setup are not happy time # mocks. So we just do a few things like driver config here. self.configuration = mock.Mock() self.configuration.san_is_local = False self.configuration.san_ip = "192.168.0.1" self.configuration.san_login = "admin" self.configuration.san_password = "pwd" self.configuration.dell_sc_ssn = 64702 self.configuration.dell_sc_server_folder = 'opnstktst' self.configuration.dell_sc_volume_folder = 'opnstktst' self.configuration.dell_sc_api_port = 3033 self._context = context.get_admin_context() self.driver = storagecenter_fc.SCFCDriver( configuration=self.configuration) self.driver.do_setup(None) self.driver._stats = {'QoS_support': False, 'volume_backend_name': 'dell-1', 'free_capacity_gb': 12123, 'driver_version': '1.0.1', 'total_capacity_gb': 12388, 'reserved_percentage': 0, 'vendor_name': 'Dell', 'storage_protocol': 'FC'} # Start with none. Add in the specific tests later. # Mock tests bozo this. self.driver.backends = None self.driver.replication_enabled = False self.volid = '5729f1db-4c45-416c-bc15-c8ea13a4465d' self.volume_name = "volume" + self.volid self.connector = {'ip': '192.168.0.77', 'host': 'cinderfc-vm', 'wwnns': ['20000024ff30441c', '20000024ff30441d'], 'initiator': 'iqn.1993-08.org.debian:01:e1b1312f9e1', 'wwpns': ['21000024ff30441c', '21000024ff30441d']} @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'create_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'get_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPING) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) def test_initialize_connection(self, mock_find_wwns, mock_map_volume, mock_get_volume, mock_find_volume, mock_create_server, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector res = self.driver.initialize_connection(volume, connector) expected = {'data': {'discard': True, 'initiator_target_map': {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']}, 'target_discovered': True, 'target_lun': 1, 'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35']}, 'driver_volume_type': 'fibre_channel'} self.assertEqual(expected, res, 'Unexpected return data') # verify find_volume has been called and that is has been called twice mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, False) mock_get_volume.assert_called_once_with(self.VOLUME[u'instanceId']) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', 
return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'get_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPING) @mock.patch.object(storagecenter_fc.SCFCDriver, '_is_live_vol') @mock.patch.object(storagecenter_api.SCApi, 'find_wwns') @mock.patch.object(storagecenter_fc.SCFCDriver, 'initialize_secondary') @mock.patch.object(storagecenter_api.SCApi, 'get_live_volume') def test_initialize_connection_live_vol(self, mock_get_live_volume, mock_initialize_secondary, mock_find_wwns, mock_is_live_volume, mock_map_volume, mock_get_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102, 'secondaryRole': 'Secondary'} mock_is_live_volume.return_value = True mock_find_wwns.return_value = ( 1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']}) mock_initialize_secondary.return_value = ( 1, [u'5000D31000FCBE3E', u'5000D31000FCBE36'], {u'21000024FF30441E': [u'5000D31000FCBE36'], u'21000024FF30441F': [u'5000D31000FCBE3E']}) mock_get_live_volume.return_value = sclivevol res = self.driver.initialize_connection(volume, connector) expected = {'data': {'discard': True, 'initiator_target_map': {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D'], u'21000024FF30441E': [u'5000D31000FCBE36'], u'21000024FF30441F': [u'5000D31000FCBE3E']}, 'target_discovered': True, 'target_lun': 1, 'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35', u'5000D31000FCBE3E', u'5000D31000FCBE36']}, 'driver_volume_type': 'fibre_channel'} self.assertEqual(expected, res, 'Unexpected return data') # verify find_volume has been called and that is has been called twice mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, True) mock_get_volume.assert_called_once_with(self.VOLUME[u'instanceId']) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'get_volume') @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPING) @mock.patch.object(storagecenter_fc.SCFCDriver, '_is_live_vol') @mock.patch.object(storagecenter_api.SCApi, 'find_wwns') @mock.patch.object(storagecenter_fc.SCFCDriver, 'initialize_secondary') @mock.patch.object(storagecenter_api.SCApi, 'get_live_volume') def test_initialize_connection_live_vol_afo(self, mock_get_live_volume, mock_initialize_secondary, mock_find_wwns, mock_is_live_volume, mock_map_volume, mock_get_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'provider_id': '101.101'} scvol = {'instanceId': '102.101'} mock_find_volume.return_value = scvol mock_get_volume.return_value = scvol connector = self.connector sclivevol = {'instanceId': '101.10001', 'primaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'primaryScSerialNumber': 102, 'secondaryVolume': {'instanceId': '101.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 101, 'secondaryRole': 'Activated'} mock_is_live_volume.return_value = True mock_find_wwns.return_value = ( 1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': 
[u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']}) mock_get_live_volume.return_value = sclivevol res = self.driver.initialize_connection(volume, connector) expected = {'data': {'discard': True, 'initiator_target_map': {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']}, 'target_discovered': True, 'target_lun': 1, 'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35']}, 'driver_volume_type': 'fibre_channel'} self.assertEqual(expected, res, 'Unexpected return data') # verify find_volume has been called and that is has been called twice self.assertFalse(mock_initialize_secondary.called) mock_find_volume.assert_called_once_with( fake.VOLUME_ID, '101.101', True) mock_get_volume.assert_called_once_with('102.101') @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'get_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPING) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(None, [], {})) def test_initialize_connection_no_wwns(self, mock_find_wwns, mock_map_volume, mock_get_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'create_server', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPING) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(None, [], {})) def test_initialize_connection_no_server(self, mock_find_wwns, mock_map_volume, mock_find_volume, mock_create_server, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPING) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(None, [], {})) def test_initialize_connection_vol_not_found(self, mock_find_wwns, mock_map_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(None, [], {})) def test_initialize_connection_map_vol_fail(self, mock_find_wwns, mock_map_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): # Test case where map_volume returns None (no mappings) volume = {'id': 
fake.VOLUME_ID} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) def test_initialize_secondary(self, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) mock_api.map_secondary_volume = mock.MagicMock( return_value=self.VOLUME) find_wwns_ret = (1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']}) mock_api.find_wwns = mock.MagicMock(return_value=find_wwns_ret) mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) ret = self.driver.initialize_secondary(mock_api, sclivevol, ['wwn1', 'wwn2']) self.assertEqual(find_wwns_ret, ret) def test_initialize_secondary_create_server(self, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=None) mock_api.create_server = mock.MagicMock(return_value=self.SCSERVER) mock_api.map_secondary_volume = mock.MagicMock( return_value=self.VOLUME) find_wwns_ret = (1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']}) mock_api.find_wwns = mock.MagicMock(return_value=find_wwns_ret) mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) ret = self.driver.initialize_secondary(mock_api, sclivevol, ['wwn1', 'wwn2']) self.assertEqual(find_wwns_ret, ret) def test_initialize_secondary_no_server(self, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=None) mock_api.create_server = mock.MagicMock(return_value=None) ret = self.driver.initialize_secondary(mock_api, sclivevol, ['wwn1', 'wwn2']) expected = (None, [], {}) self.assertEqual(expected, ret) def test_initialize_secondary_map_fail(self, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) mock_api.map_secondary_volume = mock.MagicMock(return_value=None) ret = self.driver.initialize_secondary(mock_api, sclivevol, ['wwn1', 'wwn2']) expected = (None, [], {}) self.assertEqual(expected, ret) def test_initialize_secondary_vol_not_found(self, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) mock_api.map_secondary_volume = mock.MagicMock( return_value=self.VOLUME) mock_api.get_volume = mock.MagicMock(return_value=None) ret = self.driver.initialize_secondary(mock_api, sclivevol, ['wwn1', 'wwn2']) expected = (None, [], {}) self.assertEqual(expected, ret) 
@mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'unmap_all') @mock.patch.object(storagecenter_fc.SCFCDriver, '_is_live_vol') def test_force_detach(self, mock_is_live_vol, mock_unmap_all, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): mock_is_live_vol.return_value = False scvol = {'instandId': '12345.1'} mock_find_volume.return_value = scvol mock_unmap_all.return_value = True volume = {'id': fake.VOLUME_ID} res = self.driver.force_detach(volume) mock_unmap_all.assert_called_once_with(scvol) expected = {'driver_volume_type': 'fibre_channel', 'data': {}} self.assertEqual(expected, res) mock_unmap_all.assert_called_once_with(scvol) @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'unmap_all') @mock.patch.object(storagecenter_fc.SCFCDriver, '_is_live_vol') def test_force_detach_fail(self, mock_is_live_vol, mock_unmap_all, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): mock_is_live_vol.return_value = False scvol = {'instandId': '12345.1'} mock_find_volume.return_value = scvol mock_unmap_all.return_value = False volume = {'id': fake.VOLUME_ID} self.assertRaises(exception.VolumeBackendAPIException, self.driver.force_detach, volume) mock_unmap_all.assert_called_once_with(scvol) @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'unmap_all') @mock.patch.object(storagecenter_fc.SCFCDriver, '_is_live_vol') @mock.patch.object(storagecenter_fc.SCFCDriver, 'terminate_secondary') @mock.patch.object(storagecenter_api.SCApi, 'get_live_volume') def test_force_detach_lv(self, mock_get_live_volume, mock_terminate_secondary, mock_is_live_vol, mock_unmap_all, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): mock_is_live_vol.return_value = True scvol = {'instandId': '12345.1'} mock_find_volume.return_value = scvol sclivevol = {'instandId': '12345.1.0'} mock_get_live_volume.return_value = sclivevol mock_terminate_secondary.return_value = True volume = {'id': fake.VOLUME_ID} mock_unmap_all.return_value = True res = self.driver.force_detach(volume) mock_unmap_all.assert_called_once_with(scvol) expected = {'driver_volume_type': 'fibre_channel', 'data': {}} self.assertEqual(expected, res) self.assertEqual(1, mock_terminate_secondary.call_count) mock_unmap_all.assert_called_once_with(scvol) @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_fc.SCFCDriver, '_is_live_vol') def test_force_detach_vol_not_found(self, mock_is_live_vol, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): mock_is_live_vol.return_value = False mock_find_volume.return_value = None volume = {'id': fake.VOLUME_ID} res = self.driver.force_detach(volume) expected = {'driver_volume_type': 'fibre_channel', 'data': {}} self.assertEqual(expected, res) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(storagecenter_api.SCApi, 'get_volume_count', return_value=1) def test_terminate_connection(self, mock_get_volume_count, mock_find_wwns, 
mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector res = self.driver.terminate_connection(volume, connector) mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) expected = {'driver_volume_type': 'fibre_channel', 'data': {}} self.assertEqual(expected, res, 'Unexpected return data') @mock.patch.object(storagecenter_fc.SCFCDriver, 'force_detach') def test_terminate_connection_none_connector(self, mock_force_detach, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} self.driver.terminate_connection(volume, None) mock_force_detach.assert_called_once_with(volume) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(storagecenter_api.SCApi, 'get_volume_count', return_value=1) @mock.patch.object(storagecenter_fc.SCFCDriver, '_is_live_vol') @mock.patch.object(storagecenter_fc.SCFCDriver, 'terminate_secondary') def test_terminate_connection_live_vol(self, mock_terminate_secondary, mock_is_live_vol, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector mock_terminate_secondary.return_value = (None, [], {}) mock_is_live_vol.return_value = True res = self.driver.terminate_connection(volume, connector) mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) expected = {'driver_volume_type': 'fibre_channel', 'data': {}} self.assertEqual(expected, res, 'Unexpected return data') @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(storagecenter_api.SCApi, 'get_volume_count', return_value=1) def test_terminate_connection_no_server(self, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(storagecenter_api.SCApi, 'get_volume_count', return_value=1) def test_terminate_connection_no_volume(self, mock_get_volume_count, mock_find_wwns, 
mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(None, [], {})) @mock.patch.object(storagecenter_api.SCApi, 'get_volume_count', return_value=1) def test_terminate_connection_no_wwns(self, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector res = self.driver.terminate_connection(volume, connector) expected = {'driver_volume_type': 'fibre_channel', 'data': {}} self.assertEqual(expected, res, 'Unexpected return data') @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=False) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(storagecenter_api.SCApi, 'get_volume_count', return_value=1) def test_terminate_connection_failure(self, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(storagecenter_api.SCApi, 'get_volume_count', return_value=0) def test_terminate_connection_vol_count_zero(self, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): # Test case where get_volume_count is zero volume = {'id': fake.VOLUME_ID} connector = self.connector res = self.driver.terminate_connection(volume, connector) mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) expected = {'data': {'initiator_target_map': {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']}, 'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35']}, 'driver_volume_type': 'fibre_channel'} self.assertEqual(expected, res, 'Unexpected return data') def test_terminate_secondary(self, mock_close_connection, mock_open_connection, mock_init): mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) mock_api.find_wwns = 
mock.MagicMock(return_value=(None, [], {})) mock_api.unmap_volume = mock.MagicMock(return_value=True) sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} ret = self.driver.terminate_secondary(mock_api, sclivevol, ['wwn1', 'wwn2']) expected = (None, [], {}) self.assertEqual(expected, ret) @mock.patch.object(storagecenter_api.SCApi, 'get_storage_usage', return_value={'availableSpace': 100, 'freeSpace': 50}) def test_update_volume_stats_with_refresh(self, mock_get_storage_usage, mock_close_connection, mock_open_connection, mock_init): stats = self.driver.get_volume_stats(True) self.assertEqual('FC', stats['storage_protocol']) mock_get_storage_usage.assert_called_once_with() @mock.patch.object(storagecenter_api.SCApi, 'get_storage_usage', return_value={'availableSpace': 100, 'freeSpace': 50}) def test_get_volume_stats_no_refresh(self, mock_get_storage_usage, mock_close_connection, mock_open_connection, mock_init): stats = self.driver.get_volume_stats(False) self.assertEqual('FC', stats['storage_protocol']) mock_get_storage_usage.assert_not_called()
apache-2.0
nayomal/bloom
bloom/generators/__init__.py
5
2000
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials provided
#    with the distribution.
#  * Neither the name of Willow Garage, Inc. nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from __future__ import print_function

from .common import BloomGenerator
from .common import GeneratorError
from .common import list_generators
from .common import load_generator
from .common import resolve_dependencies
from .common import update_rosdep

__all__ = [
    'BloomGenerator',
    'GeneratorError',
    'list_generators',
    'load_generator',
    'resolve_dependencies',
    'update_rosdep'
]
bsd-3-clause
MikkelSchubert/paleomix
paleomix/nodes/fastqc.py
1
1586
#!/usr/bin/env python3
"""
FastQC - A quality control analysis tool for high throughput sequencing data

https://github.com/s-andrews/FastQC
"""
import os
import re

from paleomix.common.command import AtomicCmd, InputFile, OutputFile
from paleomix.common.versions import Requirement
from paleomix.node import CommandNode

# File extensions stripped by FastQC for output filenames
_FASTQC_EXCLUDED_EXTENSIONS = re.compile(
    r"(\.gz|\.bz2|\.txt|\.fastq|\.fq|\.csfastq|\.sam|\.bam)+$"
)


class FastQCNode(CommandNode):
    def __init__(self, in_file, out_folder, options={}, dependencies=()):
        out_prefix = _FASTQC_EXCLUDED_EXTENSIONS.sub("", os.path.basename(in_file))

        command = AtomicCmd(
            ["fastqc", InputFile(in_file)],
            extra_files=[
                OutputFile(os.path.join(out_folder, out_prefix + "_fastqc.html")),
                OutputFile(os.path.join(out_folder, out_prefix + "_fastqc.zip")),
            ],
            requirements=[
                Requirement(
                    name="FastQC",
                    call=["fastqc", "--version"],
                    search=r"FastQC v(\d+).(\d+).(\d+)",
                ),
            ],
        )

        command.merge_options(
            user_options=options,
            fixed_options={
                "--outdir": "%(TEMP_DIR)s",
                "--dir": "%(TEMP_DIR)s",
            },
        )

        CommandNode.__init__(
            self,
            command=command,
            description="fastQC of {}".format(in_file),
            dependencies=dependencies,
        )
mit
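A small worked example (the filename is made up) of the extension-stripping regex above, which repeatedly removes known suffixes so the output prefix matches what FastQC itself produces:

import os
import re

_EXT = re.compile(r"(\.gz|\.bz2|\.txt|\.fastq|\.fq|\.csfastq|\.sam|\.bam)+$")
assert _EXT.sub("", os.path.basename("/data/sample1.fastq.gz")) == "sample1"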
lbartoletti/QGIS
python/plugins/processing/algs/grass7/ext/r_statistics.py
36
2298
# -*- coding: utf-8 -*-

"""
***************************************************************************
    r_statistics.py
    ---------------
    Date                 : September 2017
    Copyright            : (C) 2017 by Médéric Ribreux
    Email                : medspx at medspx dot fr
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Médéric Ribreux'
__date__ = 'September 2017'
__copyright__ = '(C) 2017, Médéric Ribreux'

from qgis.core import QgsProcessingParameterString
from processing.algs.grass7.Grass7Utils import Grass7Utils


def processCommand(alg, parameters, context, feedback):
    # We add a new "output" parameter
    out = 'output{}'.format(alg.uniqueSuffix)
    p = QgsProcessingParameterString('~output', None, out, False, False)
    alg.addParameter(p)

    # We need to remove all outputs
    alg.processCommand(parameters, context, feedback, True)

    # Then we add a new command for treating results
    calcExpression = 'correctedoutput{}=@{}'.format(
        alg.uniqueSuffix, out)
    command = 'r.mapcalc expression="{}"'.format(calcExpression)
    alg.commands.append(command)


def processOutputs(alg, parameters, context, feedback):
    createOpt = alg.parameterAsString(parameters, alg.GRASS_RASTER_FORMAT_OPT, context)
    metaOpt = alg.parameterAsString(parameters, alg.GRASS_RASTER_FORMAT_META, context)

    # Export the results from correctedoutput
    grassName = 'correctedoutput{}'.format(alg.uniqueSuffix)
    fileName = alg.parameterAsOutputLayer(
        parameters, 'routput', context)
    outFormat = Grass7Utils.getRasterFormatFromFilename(fileName)
    alg.exportRasterLayer(grassName, fileName, True, outFormat, createOpt, metaOpt)
gpl-2.0
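The helper above works by renaming the GRASS output and post-processing it with r.mapcalc. A small sketch of the strings it assembles, using a hypothetical uniqueSuffix value:

# Sketch of the command built by processCommand; "1a2b" is a
# hypothetical unique suffix.
unique_suffix = "1a2b"
out = 'output{}'.format(unique_suffix)
calc_expression = 'correctedoutput{}=@{}'.format(unique_suffix, out)
command = 'r.mapcalc expression="{}"'.format(calc_expression)
print(command)
# r.mapcalc expression="correctedoutput1a2b=@output1a2b"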
kevint2u/audio-collector
server/node_modules/binaryjs/node_modules/binarypack/node_modules/buffercursor/node_modules/verror/node_modules/extsprintf/deps/javascriptlint/javascriptlint/util.py
28
4517
# vim: ts=4 sw=4 expandtab
import cgi
import os.path
import re
import unittest

_identifier = re.compile('^[A-Za-z_$][A-Za-z0-9_$]*$')

_contenttypes = (
    'text/javascript',
    'text/ecmascript',
    'application/javascript',
    'application/ecmascript',
    'application/x-javascript',
)

class JSVersion:
    def __init__(self, jsversion, is_e4x):
        self.version = jsversion
        self.e4x = is_e4x

    def __eq__(self, other):
        return self.version == other.version and \
               self.e4x == other.e4x

    @classmethod
    def default(klass):
        return klass('default', False)

    @classmethod
    def fromattr(klass, attr, default_version=None):
        if attr.get('type'):
            return klass.fromtype(attr['type'])
        if attr.get('language'):
            return klass.fromlanguage(attr['language'])
        return default_version

    @classmethod
    def fromtype(klass, type_):
        typestr, typeparms = cgi.parse_header(type_)
        if typestr.lower() in _contenttypes:
            jsversion = typeparms.get('version', 'default')
            is_e4x = typeparms.get('e4x') == '1'
            return klass(jsversion, is_e4x)
        return None

    @classmethod
    def fromlanguage(klass, language):
        if language.lower() in ('javascript', 'livescript', 'mocha'):
            return klass.default()
        # Simplistic parsing of javascript/x.y
        if language.lower().startswith('javascript'):
            language = language[len('javascript'):]
            if language.replace('.', '').isdigit():
                return klass(language, False)
        return None

def isidentifier(text):
    return _identifier.match(text)

def _encode_error_keyword(s):
    s = s.replace('\\', '\\\\')
    s = s.replace('"', '\\"')
    s = s.replace("'", "\\'")
    s = s.replace("\t", "\\t")
    s = s.replace("\r", "\\r")
    s = s.replace("\n", "\\n")
    return s

def format_error(output_format, path, line, col, errname, errdesc):
    errprefix = 'warning' #TODO
    replacements = {
        '__FILE__': path,
        '__FILENAME__': os.path.basename(path),
        '__LINE__': str(line+1),
        '__COL__': str(col),
        '__ERROR__': '%s: %s' % (errprefix, errdesc),
        '__ERROR_NAME__': errname,
        '__ERROR_PREFIX__': errprefix,
        '__ERROR_MSG__': errdesc,
        '__ERROR_MSGENC__': errdesc,
    }

    formatted_error = output_format

    # If the output format starts with encode:, all of the keywords should be
    # encoded.
    if formatted_error.startswith('encode:'):
        formatted_error = formatted_error[len('encode:'):]
        encoded_keywords = replacements.keys()
    else:
        encoded_keywords = ['__ERROR_MSGENC__']

    for keyword in encoded_keywords:
        replacements[keyword] = _encode_error_keyword(replacements[keyword])

    regexp = '|'.join(replacements.keys())
    return re.sub(regexp, lambda match: replacements[match.group(0)],
                  formatted_error)

class TestUtil(unittest.TestCase):
    def testIdentifier(self):
        assert not isidentifier('')
        assert not isidentifier('0a')
        assert not isidentifier('a b')
        assert isidentifier('a')
        assert isidentifier('$0')

    def testEncodeKeyword(self):
        self.assertEquals(_encode_error_keyword(r'normal text'), 'normal text')
        self.assertEquals(_encode_error_keyword(r'a\b'), r'a\\b')
        self.assertEquals(_encode_error_keyword(r"identifier's"), r"identifier\'s")
        self.assertEquals(_encode_error_keyword(r'"i"'), r'\"i\"')
        self.assertEquals(_encode_error_keyword('a\tb'), r'a\tb')
        self.assertEquals(_encode_error_keyword('a\rb'), r'a\rb')
        self.assertEquals(_encode_error_keyword('a\nb'), r'a\nb')

    def testFormattedError(self):
        self.assertEquals(format_error('__FILE__', '__LINE__', 1, 2, 'name', 'desc'),
                          '__LINE__')
        self.assertEquals(format_error('__FILE__', r'c:\my\file', 1, 2, 'name', 'desc'),
                          r'c:\my\file')
        self.assertEquals(format_error('encode:__FILE__', r'c:\my\file', 1, 2, 'name', 'desc'),
                          r'c:\\my\\file')
        self.assertEquals(format_error('__ERROR_MSGENC__', r'c:\my\file', 1, 2, 'name', r'a\b'),
                          r'a\\b')
        self.assertEquals(format_error('encode:__ERROR_MSGENC__', r'c:\my\file', 1, 2, 'name', r'a\b'),
                          r'a\\b')

if __name__ == '__main__':
    unittest.main()
mit
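format_error() is a plain keyword-substitution engine. A small usage sketch, assuming the javascriptlint package is importable; the template string and path are illustrative:

from javascriptlint.util import format_error

# __LINE__ is emitted 1-based, so line index 9 prints as 10.
msg = format_error('__FILE__:__LINE__: __ERROR__',
                   '/tmp/example.js', 9, 0, 'unused_var', 'x is unused')
print(msg)  # /tmp/example.js:10: warning: x is unused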
yfdyh000/kuma
kuma/core/managers.py
2
6153
"""Extras for django-taggit Includes: - Handle tag namespaces (eg. tech:javascript, profile:interest:homebrewing) TODO: - Permissions for tag namespaces (eg. system:* is superuser-only) - Machine tag assists """ from datetime import date, timedelta from django.db import models from django.db.models.fields import BLANK_CHOICE_DASH from django.contrib.auth.models import AnonymousUser from taggit.managers import TaggableManager, _TaggableManager from taggit.models import Tag from taggit.utils import edit_string_for_tags, require_instance_manager class NamespacedTaggableManager(TaggableManager): """TaggableManager with tag namespace support""" # HACK: Yes, I really do want to allow tags in admin change lists flatchoices = None # HACK: This is expensive, too, but should help with list_filter in admin def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH): return [(t.id, t.name) for t in Tag.objects.all()] def __init__(self, *args, **kwargs): kwargs['manager'] = _NamespacedTaggableManager super(NamespacedTaggableManager, self).__init__(*args, **kwargs) class _NamespacedTaggableManager(_TaggableManager): def __unicode__(self): """Return the list of tags as an editable string. Expensive: Does a DB query for the tags""" # HACK: Yes, I really do want to allow tags in admin change lists return edit_string_for_tags(self.all()) def all_ns(self, namespace=None): """Fetch tags by namespace, or collate all into namespaces""" tags = self.all() if namespace == '': # Empty namespace is special - just look for absence of ':' return tags.exclude(name__contains=':') if namespace is not None: # Namespace requested, so generate filtered set results = [] for tag in tags: if tag.name.startswith(namespace): results.append(tag) return results # No namespace requested, so collate into namespaces ns_tags = {} for tag in tags: (ns, name) = self._parse_ns(tag) if ns not in ns_tags: ns_tags[ns] = [tag] else: ns_tags[ns].append(tag) return ns_tags @require_instance_manager def add_ns(self, namespace, *tags): """Add tags within a namespace""" ns_tags = self._ensure_ns(namespace, tags) super(_NamespacedTaggableManager, self).add(*ns_tags) @require_instance_manager def remove_ns(self, namespace=None, *tags): """Remove tags within a namespace""" ns_tags = self._ensure_ns(namespace, tags) super(_NamespacedTaggableManager, self).remove(*ns_tags) @require_instance_manager def clear_ns(self, namespace=None): """Clear tags within a namespace""" lookup_kwargs = self._lookup_kwargs() lookup_kwargs['tag__name__startswith'] = namespace self.through.objects.filter(**lookup_kwargs).delete() @require_instance_manager def set_ns(self, namespace=None, *tags): """Set tags within a namespace""" self.clear_ns(namespace) self.add_ns(namespace, *tags) def _parse_ns(self, tag): """Extract namespace from tag name. 
Namespace is tag name text up to and including the last occurrence of ':' """ if (':' in tag.name): (ns, name) = tag.name.rsplit(':', 1) return ('%s:' % ns, name) else: return ('', tag.name) def _ensure_ns(self, namespace, tags): """Ensure each tag name in the list starts with the given namespace""" ns_tags = [] for t in tags: if not t.startswith(namespace): t = '%s%s' % (namespace, t) ns_tags.append(t) return ns_tags def parse_tag_namespaces(tag_list): """Parse a list of tags out into a dict of lists by namespace""" namespaces = {} for tag in tag_list: ns = (':' in tag) and ('%s:' % tag.rsplit(':', 1)[0]) or '' if ns not in namespaces: namespaces[ns] = [] namespaces[ns].append(tag) return namespaces def allows_tag_namespace_for(model_obj, ns, user): """Decide whether a tag namespace is editable by a user""" if user.is_staff or user.is_superuser: # Staff / superuser can manage any tag namespace return True if not ns.startswith('system:'): return True return False def resolve_allowed_tags(model_obj, tags_curr, tags_new, request_user=AnonymousUser): """Given a new set of tags and a user, build a list of allowed new tags with changes accepted only for namespaces where editing is allowed for the user. For disallowed namespaces, this object's current tag set will be imposed. No changes are made; the new tag list is just returned. """ # Produce namespaced sets of current and incoming new tags. ns_tags_curr = parse_tag_namespaces(tags_curr) ns_tags_new = parse_tag_namespaces(tags_new) # Produce a union of all namespaces, current and new tag set all_ns = set(ns_tags_curr.keys() + ns_tags_new.keys()) # Assemble accepted changed tag set according to permissions tags_out = [] for ns in all_ns: if model_obj.allows_tag_namespace_for(ns, request_user): # If the user is allowed this namespace, apply changes by # accepting new tags or lack thereof. if ns in ns_tags_new: tags_out.extend(ns_tags_new[ns]) elif ns in ns_tags_curr: # If the user is not allowed this namespace, carry over # existing tags or lack thereof tags_out.extend(ns_tags_curr[ns]) return tags_out class IPBanManager(models.Manager): def active(self, ip): return self.filter(ip=ip, deleted__isnull=True) def delete_old(self, days=30): cutoff_date = date.today() - timedelta(days=days) old_ip_bans = self.filter(created__lte=cutoff_date) old_ip_bans.delete()
mpl-2.0
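parse_tag_namespaces() is the core of the namespacing scheme and works on plain strings; a quick sketch, assuming django-taggit is available so the module imports cleanly:

from kuma.core.managers import parse_tag_namespaces

tags = ['tech:javascript', 'profile:interest:homebrewing', 'editorial']
print(parse_tag_namespaces(tags))
# (dict ordering may vary)
# {'tech:': ['tech:javascript'],
#  'profile:interest:': ['profile:interest:homebrewing'],
#  '': ['editorial']}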
erilyth/sugar
src/jarabe/testrunner.py
13
1674
# Copyright (C) 2013, Daniel Narvaez # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA import os import sys import subprocess from gi.repository import GLib from sugar3.logger import get_logs_dir def _test_child_watch_cb(pid, condition, log_file): if os.WIFEXITED(condition): log_file.close() sys.exit(os.WEXITSTATUS(condition)) def check_environment(): run_test = os.environ.get("SUGAR_RUN_TEST", None) if run_test is not None: log_path = os.environ.get("SUGAR_TEST_LOG", None) if log_path is None: log_path = os.path.join(get_logs_dir(), "test.log") log_file = open(log_path, "w") else: log_file = open(log_path, "a") test_process = subprocess.Popen(run_test, stdout=log_file, stderr=subprocess.STDOUT, shell=True) GLib.child_watch_add(test_process.pid, _test_child_watch_cb, log_file)
gpl-2.0
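check_environment() is driven entirely by environment variables; a sketch of how a harness might trigger it, assuming sugar's jarabe package is importable and a GLib main loop is running so the child watch can fire. The test command is hypothetical.

import os
from gi.repository import GLib
from jarabe import testrunner

os.environ["SUGAR_RUN_TEST"] = "python -m unittest discover tests"  # hypothetical
os.environ["SUGAR_TEST_LOG"] = "/tmp/sugar-test.log"  # appended to when set

testrunner.check_environment()
GLib.MainLoop().run()  # exits via sys.exit() when the child process finishes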
BTCDDev/bitcoin
contrib/devtools/optimize-pngs.py
51
3392
#!/usr/bin/env python
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script every time you change one of the png files. Using pngcrush, it will optimize the png files, remove various color profiles, remove ancillary chunks (alla) and text chunks (text).
#pngcrush -brute -ow -rem gAMA -rem cHRM -rem iCCP -rem sRGB -rem alla -rem text
'''
import os
import sys
import subprocess
import hashlib
from PIL import Image

def file_hash(filename):
    '''Return hash of raw file contents'''
    with open(filename, 'rb') as f:
        return hashlib.sha256(f.read()).hexdigest()

def content_hash(filename):
    '''Return hash of RGBA contents of image'''
    i = Image.open(filename)
    i = i.convert('RGBA')
    data = i.tobytes()
    return hashlib.sha256(data).hexdigest()

pngcrush = 'pngcrush'
git = 'git'
folders = ["src/qt/res/movies", "src/qt/res/icons", "share/pixmaps"]
basePath = subprocess.check_output([git, 'rev-parse', '--show-toplevel']).rstrip('\n')
totalSaveBytes = 0
noHashChange = True

outputArray = []
for folder in folders:
    absFolder=os.path.join(basePath, folder)
    for file in os.listdir(absFolder):
        extension = os.path.splitext(file)[1]
        if extension.lower() == '.png':
            print("optimizing "+file+"..."),
            file_path = os.path.join(absFolder, file)
            fileMetaMap = {'file' : file, 'osize': os.path.getsize(file_path), 'sha256Old' : file_hash(file_path)};
            fileMetaMap['contentHashPre'] = content_hash(file_path)

            pngCrushOutput = ""
            try:
                pngCrushOutput = subprocess.check_output(
                        [pngcrush, "-brute", "-ow", "-rem", "gAMA", "-rem", "cHRM", "-rem", "iCCP", "-rem", "sRGB", "-rem", "alla", "-rem", "text", file_path],
                        stderr=subprocess.STDOUT).rstrip('\n')
            except:
                print "pngcrush is not installed, aborting..."
                sys.exit(0)

            #verify
            if "Not a PNG file" in subprocess.check_output([pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT):
                print "PNG file "+file+" is corrupted after crushing, check out pngcrush version"
                sys.exit(1)

            fileMetaMap['sha256New'] = file_hash(file_path)
            fileMetaMap['contentHashPost'] = content_hash(file_path)

            if fileMetaMap['contentHashPre'] != fileMetaMap['contentHashPost']:
                print "Image contents of PNG file "+file+" before and after crushing don't match"
                sys.exit(1)

            fileMetaMap['psize'] = os.path.getsize(file_path)
            outputArray.append(fileMetaMap)
            print("done\n"),

print "summary:\n+++++++++++++++++"
for fileDict in outputArray:
    oldHash = fileDict['sha256Old']
    newHash = fileDict['sha256New']
    totalSaveBytes += fileDict['osize'] - fileDict['psize']
    noHashChange = noHashChange and (oldHash == newHash)
    print fileDict['file']+"\n size diff from: "+str(fileDict['osize'])+" to: "+str(fileDict['psize'])+"\n old sha256: "+oldHash+"\n new sha256: "+newHash+"\n"

print "completed. Checksum stable: "+str(noHashChange)+". Total reduction: "+str(totalSaveBytes)+" bytes"
mit
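The script's safety net is the pre/post content hash, which pins the decoded pixels rather than the file bytes. A standalone sketch of that check, assuming Pillow is installed; 'icon.png' is a hypothetical file:

import hashlib
from PIL import Image

def content_hash(filename):
    # Hash of the decoded RGBA pixels: recompression that keeps the image
    # identical also keeps this hash identical, even if file bytes change.
    data = Image.open(filename).convert('RGBA').tobytes()
    return hashlib.sha256(data).hexdigest()

before = content_hash('icon.png')
# ... run: pngcrush -brute -ow ... icon.png ...
after = content_hash('icon.png')
assert before == after, "pngcrush changed the pixel data"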
psychopy/versions
psychopy/experiment/components/envelopegrating/__init__.py
1
15422
#!/usr/bin/env python # -*- coding: utf-8 -*- # Part of the PsychoPy library # Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd. # Distributed under the terms of the GNU General Public License (GPL). from __future__ import absolute_import, print_function from builtins import super # provides Py3-style super() using python-future from os import path from psychopy.experiment.components import BaseVisualComponent, Param, getInitVals, _translate # the absolute path to the folder containing this path thisFolder = path.abspath(path.dirname(__file__)) iconFile = path.join(thisFolder, 'envelopegrating.png') tooltip = _translate('Envelope Grating: present cyclic textures including 2nd order envelope stimuli, prebuilt or from a ' 'file') # only use _localized values for label values, nothing functional: _localized = {'carrier': _translate('Carrier texture'), 'ori': _translate('Carrier Orientation'), 'mask': _translate('Mask'), 'sf': _translate('Carrier spatial frequency'), 'phase': _translate('Carrier phase (in cycles)'), 'contrast': _translate('Carrier contrast'), 'texture resolution': _translate('Texture resolution'), 'interpolate': _translate('Interpolate'), 'envelope': _translate('Envelope texture'), 'envsf':_translate('Envelope spatial frequency'), 'envori':_translate('Envelope orientation'), 'envphase':_translate('Envelope phase'), 'moddepth':_translate('Envelope modulation depth'), 'power':_translate('Power to which envelope is raised'), 'beat':_translate('Is modulation a beat'), 'blendmode':_translate('OpenGL blend mode') } class EnvGratingComponent(BaseVisualComponent): """A class for presenting grating stimuli""" def __init__(self, exp, parentName, name='env_grating', carrier='sin', mask='None', sf=1.0, interpolate='linear', units='from exp settings', color='$[1,1,1]', colorSpace='rgb', pos=(0, 0), size=(0.5, 0.5), ori=0, phase=0.0, texRes='128', envelope='sin',envsf=1.0,envori=0.0,envphase=0.0, beat=False, power=1.0, contrast=0.5, moddepth=1.0, blendmode='avg', startType='time (s)', startVal=0.0, stopType='duration (s)', stopVal=1.0, startEstim='', durationEstim=''): super().__init__( exp, parentName, name=name, units=units, color=color, colorSpace=colorSpace, pos=pos, size=size, ori=ori, startType=startType, startVal=startVal, stopType=stopType, stopVal=stopVal, startEstim=startEstim, durationEstim=durationEstim) self.type = 'EnvGrating' self.url = "http://www.psychopy.org/builder/components/EnvelopeGrating.html" self.order = ['carrier', 'mask'] # params self.params['ori'] = Param( ori, valType='code', allowedTypes=[], updates='constant', allowedUpdates=['constant', 'set every repeat', 'set every frame'], hint=_translate("Orientation of this stimulus (in deg)"), label=_localized['ori'],categ="Carrier") msg = _translate("The (2D) texture of the background - can be sin, sqr," " sinXsin... or a filename (including path)") self.params['carrier'] = Param( carrier, valType='str', allowedTypes=[], updates='constant', allowedUpdates=['constant', 'set every repeat', 'set every frame'], hint=msg, label=_localized['carrier'], categ="Carrier") msg = _translate("An image to define the alpha mask (ie shape)- " "gauss, circle... 
or a filename (including path)") self.params['mask'] = Param( mask, valType='str', allowedTypes=[], updates='constant', allowedUpdates=['constant', 'set every repeat', 'set every frame'], hint=msg, label=_localized['mask'], categ="Carrier") msg = _translate("Contrast of background carrier") self.params['contrast'] = Param( contrast, valType='code', allowedTypes=[], updates='constant', allowedUpdates=['constant', 'set every repeat', 'set every frame'], hint=msg, label=_localized['contrast'], categ="Carrier") msg = _translate("Spatial frequency of background carrier repeats across the " "grating in 1 or 2 dimensions, e.g. 4 or [2,3]") self.params['sf'] = Param( sf, valType='code', allowedTypes=[], updates='constant', allowedUpdates=['constant', 'set every repeat', 'set every frame'], hint=msg, label=_localized['sf'], categ="Carrier") msg = _translate("Spatial positioning of the background carrier " "(wraps in range 0-1.0)") self.params['phase'] = Param( phase, valType='code', allowedTypes=[], updates='constant', allowedUpdates=['constant', 'set every repeat', 'set every frame'], hint=msg, label=_localized['phase'], categ="Carrier") msg = _translate( "Resolution of the texture for standard ones such as sin, sqr " "etc. For most cases a value of 256 pixels will suffice") self.params['texture resolution'] = Param( texRes, valType='code', allowedVals=['32', '64', '128', '256', '512'], updates='constant', allowedUpdates=[], hint=msg, label=_localized['texture resolution'], categ="Carrier") msg = _translate("How should the image be interpolated if/when " "rescaled") self.params['interpolate'] = Param( interpolate, valType='str', allowedVals=['linear', 'nearest'], updates='constant', allowedUpdates=[], hint=msg, label=_localized['interpolate'], categ="Carrier") msg = _translate("The (2D) texture of the envelope - can be sin, sqr," " sinXsin... or a filename (including path)") self.params['envelope'] = Param( envelope, valType='str', allowedTypes=[], updates='constant', allowedUpdates=['constant', 'set every repeat', 'set every frame'], hint=msg, label=_localized['envelope'], categ="Envelope") msg = _translate("Spatial frequency of the modulation envelope repeats across the " "grating in 1 or 2 dimensions, e.g. 4 or [2,3]") self.params['envsf'] = Param( envsf, valType='code', allowedTypes=[], updates='constant', allowedUpdates=['constant', 'set every repeat', 'set every frame'], hint=msg, label=_localized['envsf'], categ="Envelope") msg = _translate("Spatial positioning of the modulation envelope" "(wraps in range 0-1.0)") self.params['envphase'] = Param( envphase, valType='code', allowedTypes=[], updates='constant', allowedUpdates=['constant', 'set every repeat', 'set every frame'], hint=msg, label=_localized['envphase'], categ="Envelope") msg = _translate("Orientation of the modulation envelope" "(wraps in range 0-360)") self.params['envori'] = Param( envori, valType='code', allowedTypes=[], updates='constant', allowedUpdates=['constant', 'set every repeat', 'set every frame'], hint=msg, label=_localized['envori'], categ="Envelope") msg = _translate("Modulation depth of modulation envelope") self.params['moddepth'] = Param( moddepth, valType='code', allowedTypes=[], updates='constant', allowedUpdates=['constant', 'set every repeat', 'set every frame'], hint=msg, label=_localized['moddepth'], categ="Envelope") msg = _translate("Power of modulation envelope. " "The modulator will be raised to this power " "according to the equation S=cC*(1+mM)^power, " "where C is the carrier and M is the modulator. 
" "and c and m are there respective contrast and modulation depth. " "Only works with AM envelopes (hence +1) in " "equation. Power is ignored if a beat is requested. " "This is used to obtain the square root of the modulator (power = 0.5) " "which is useful if combining two envelope gratings " "with different carriers and a 180 degree phase shift " "as the resulting combined signal will not " "have any reduction in local contrast at any point in the image. " "This is similar - but not identical to - the method used by " "Landy and Oruc, Vis Res 2002. " "Note overall contrast (apparent carrier contrast) will be altered.") self.params['power'] = Param( moddepth, valType='code', allowedTypes=[], updates='constant', allowedUpdates=['constant', 'set every repeat', 'set every frame'], hint=msg, label=_localized['power'], categ="Envelope") msg = _translate("Do you want a 'beat'? [beat = carrier*envelope, " "no beat = carrier*(1+envelope), True/False, Y/N]") self.params['beat'] = Param( beat, valType='str', allowedTypes=[], updates='constant', allowedUpdates=['constant', 'set every repeat', 'set every frame'], hint=msg, label=_localized['beat'], categ="Envelope") msg = _translate("OpenGL Blendmode. Avg is most common mode" " in PsychoPy, add is useful if combining a beat with" " the carrier image or numpy array at point of display") self.params['blendmode'] = Param( blendmode, valType='str', allowedVals=['avg', 'add'], updates='constant', allowedUpdates=['constant', 'set every repeat', 'set every frame'], hint=msg, label=_localized['blendmode'], categ="Basic") def writeInitCode(self, buff): # do we need units code? if self.params['units'].val == 'from exp settings': unitsStr = "" else: unitsStr = "units=%(units)s, " % self.params #buff.writeIndented("from psychopy.visual.secondorder import EnvelopeGrating\n") # replaces variable params with defaults and sets sample updating flag inits = getInitVals(self.params) code = ("%s = visual.EnvelopeGrating(\n" % inits['name'] + " win=win, name='%s',%s\n" % (inits['name'], unitsStr) + " carrier=%(carrier)s, mask=%(mask)s,\n" % inits + " ori=%(ori)s, pos=%(pos)s, size=%(size)s,\n" % inits + " sf=%(sf)s, phase=%(phase)s,\n" % inits + " color=%(color)s, colorSpace=%(colorSpace)s,\n " % inits + " opacity=%(opacity)s, contrast=%(contrast)s,\n" % inits + " texRes=%(texture resolution)s, envelope=%(envelope)s,\n" % inits + " envori=%(envori)s, envsf=%(envsf)s,\n" % inits + " envphase=%(envphase)s, power=%(power)s,\n" % inits + " moddepth=%(moddepth)s, blendmode=%(blendmode)s" %inits ) if self.params['beat'].val in ['Y','y','Yes', 'yes','True','true']: code += ", beat=True" elif self.params['beat'].val in ['N','n','No', 'no','False','false']: code += ", beat=False" else: code += ", beat=%(beat)s" %inits if self.params['interpolate'].val == 'linear': code += ", interpolate=True" else: code += ", interpolate=False" depth = -self.getPosInRoutine() code += ", depth=%.1f)\n" % depth code += "if sys.version[0]=='3' and np.min(win.gamma) == None:\n" code += " logging.warning('Envelope grating in use with no gamma set. Unless you have hardware gamma correction the image will be distorted.')\n" code += "elif np.min(win.gamma) < 1.01:\n" code += " logging.warning('Envelope grating in use with window gamma <= 1.0 or no gamma set at all. 
Unless you have hardware gamma correction the image will be distorted.')\n" buff.writeIndentedLines(code) def writeRoutineStartCode(self,buff): super().writeRoutineStartCode(buff) #if self.params['blendmode'].val!='default': #buff.writeIndented("__allEnvSaveBlendMode=win.blendMode #required to clean up after %(name)s\n" %self.params) def writeFrameCode(self, buff): """Write the code that will be called every frame """ buff.writeIndented("\n") buff.writeIndented("# *%s* updates\n" % self.params['name']) # writes an if statement to determine whether to draw etc self.writeStartTestCode(buff) buff.writeIndented("%(name)s.setAutoDraw(True)\n" % self.params) #if self.params['blendmode'].val!='default': #buff.writeIndented("%(name)s_SaveBlendMode=win.blendMode\n" %self.params) #buff.writeIndented("win.blendMode=%(blendmode)s\n" %self.params) # to get out of the if statement buff.setIndentLevel(-1, relative=True) # test for stop (only if there was some setting for duration or stop) if self.params['stopVal'].val not in ('', None, -1, 'None'): # writes an if statement to determine whether to draw etc self.writeStopTestCode(buff) buff.writeIndented("%(name)s.setAutoDraw(False)\n" % self.params) #if self.params['blendmode'].val!='default': #buff.writeIndented("win.blendMode=%(name)s_SaveBlendMode\n" % self.params) # to get out of the if statement buff.setIndentLevel(-2, relative=True) # set parameters that need updating every frame # do any params need updating? (this method inherited from _base) if self.checkNeedToUpdate('set every frame'): code = "if %(name)s.status == STARTED: # only update if drawing\n" buff.writeIndented(code % self.params) buff.setIndentLevel(+1, relative=True) # to enter the if block self.writeParamUpdates(buff, 'set every frame') buff.setIndentLevel(-1, relative=True) # to exit the if block def writeRoutineEndCode(self, buff): super().writeRoutineEndCode(buff) # adds start/stop times to data #if self.params['blendmode'].val!='default': #buff.writeIndented("win.blendMode=__allEnvSaveBlendMode #clean up for %(name)s\n" %self.params)
gpl-3.0
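The 'beat' parameter is free text, so writeInitCode() maps common yes/no spellings onto Python literals before emitting code. A standalone sketch of just that mapping, copied from the value lists above; beat_argument is a hypothetical helper name:

def beat_argument(val):
    # Same spellings accepted by EnvGratingComponent.writeInitCode.
    if val in ['Y', 'y', 'Yes', 'yes', 'True', 'true']:
        return ", beat=True"
    elif val in ['N', 'n', 'No', 'no', 'False', 'false']:
        return ", beat=False"
    # Fall through: emit the raw value so variables/code still work.
    return ", beat=%s" % val

print(beat_argument("yes"))    # ", beat=True"
print(beat_argument("$flag"))  # ", beat=$flag"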
alown/chromium
crserverlib/server_dispatch_header.py
4
1079
# Copyright (c) 2001, Stanford University # All rights reserved. # # See the file LICENSE.txt for information on redistributing this software. import sys sys.path.append( "../glapi_parser" ) import apiutil apiutil.CopyrightC() print """ /* DO NOT EDIT - THIS FILE AUTOMATICALLY GENERATED BY server_dispatch_header.py SCRIPT */ #ifndef SERVER_DISPATCH_HEADER #define SERVER_DISPATCH_HEADER #ifdef WINDOWS #define SERVER_DISPATCH_APIENTRY __stdcall #else #define SERVER_DISPATCH_APIENTRY #endif #include "chromium.h" #include "state/cr_statetypes.h" """ keys = apiutil.GetDispatchedFunctions("../glapi_parser/APIspec.txt") for func_name in keys: if ("get" in apiutil.Properties(func_name) or apiutil.FindSpecial( "server", func_name ) or apiutil.FindSpecial( "../state_tracker/state", func_name )): params = apiutil.Parameters(func_name) return_type = apiutil.ReturnType(func_name) print '%s SERVER_DISPATCH_APIENTRY crServerDispatch%s( %s );' % (return_type, func_name, apiutil.MakeDeclarationString( params )) print '#endif /* SERVER_DISPATCH_HEADER */'
bsd-3-clause
UnionEvoRobo/evofab
src/ann_runner.py
1
2802
from printer import Printer
from camera import Camera
from grid import Grid
from vector import Vector
from gridworld import GridWorld
from ann import Network

class AnnRunner(object):
    """Wraps up the gross reality of running a ``print'' using the printer
    simulation (controlled by a neural network)"""

    camera_size = 3

    def __init__(self, ideal_grid_path, cell_size, units_per_cell=10):
        """Sets up all the pieces needed to perform a print with the
        simulated 3d printer (controlled by the neural network). Takes in a
        path to a ``goal'' or ``ideal'' grid, and constructs the GridWorld
        based on the dimensions of that goal grid. Understands both a
        ``camera'', which observes the actual world (around the print head)
        and an ``ideal camera'' which observes the same location but based
        on the ``goal grid''
        """
        ideal_grid = Grid(path=ideal_grid_path, scale=cell_size)
        self.ideal_grid = ideal_grid
        self.gridworld = GridWorld(ideal_grid.width, ideal_grid.height, cell_size)
        self.gridworld.set_ideal_grid(ideal_grid)
        self.printer = Printer(10, 10, 9, self.gridworld, units_per_cell) #TODO: shouldn't be giving location values here when it's determined somewhere else. that smells a lot
        self.camera = Camera(self.gridworld.grid, self.printer, self.camera_size)
        self.ideal_camera = Camera(self.gridworld.ideal_grid, self.printer, self.camera_size)

    def run(self, n, iterations=10000):
        """Runs a simulated print run with the printer simulation
        (controlled by an ANN). Starts the printer in the location provided
        by the ideal grid spec
        """
        # set the printer location to the starting position as defined by the ideal_grid spec
        self.printer.set_position_on_grid(*self.gridworld.get_starting_position())
        for i in xrange(iterations):
            self.printer.setPenDown()
            actual = self.camera.all_cell_values()
            ideal = self.ideal_camera.all_cell_values()
            pattern = [i - a for i,a in zip(actual, ideal)]
            result = n.propagate(pattern)
            result = [int(round(x)) for x in result]
            result = ''.join(map(str, result))
            self.printer.set_printer_direction(self.get_velocity(result[:2]), self.get_velocity(result[2:]))
            self.printer.simulate()
            self.update()
        return (self.ideal_grid, self.gridworld.grid)

    def update(self):
        return

    def get_velocity(self, instruction):
        """Translates between the output of the neural network and direction
        instructions for the printer. leftright and updown are translated
        separately"""
        if instruction == "10":
            return -1
        elif instruction == "01":
            return 1
        else:
            return 0
gpl-2.0
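get_velocity() decodes the rounded four-bit network output two bits at a time into axis velocities. A standalone sketch of the decoding, with the logic copied from the class above:

# "10" -> -1, "01" -> +1, anything else -> 0, per AnnRunner.get_velocity.
def get_velocity(instruction):
    if instruction == "10":
        return -1
    elif instruction == "01":
        return 1
    return 0

result = "1001"  # hypothetical rounded network output
dx, dy = get_velocity(result[:2]), get_velocity(result[2:])
print("%d %d" % (dx, dy))  # -1 1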
globaltoken/globaltoken
test/functional/mempool_limit.py
1
3174
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool limiting together/eviction with the wallet."""

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *

class MempoolLimitTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [["-maxmempool=5", "-spendzeroconfchange=0"]]

    def run_test(self):
        txouts = gen_return_txouts()
        relayfee = self.nodes[0].getnetworkinfo()['relayfee']

        self.log.info('Check that mempoolminfee is minrelaytxfee')
        assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
        assert_equal(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))

        txids = []
        utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91)

        self.log.info('Create a mempool tx that will be evicted')
        us0 = utxos.pop()
        inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
        outputs = {self.nodes[0].getnewaddress() : 0.0001}
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        self.nodes[0].settxfee(relayfee) # specifically fund this tx with low fee
        txF = self.nodes[0].fundrawtransaction(tx)
        self.nodes[0].settxfee(0) # return to automatic fee selection
        txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex'])
        txid = self.nodes[0].sendrawtransaction(txFS['hex'])

        relayfee = self.nodes[0].getnetworkinfo()['relayfee']
        base_fee = relayfee*100
        for i in range (3):
            txids.append([])
            txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)

        self.log.info('The tx should be evicted by now')
        assert(txid not in self.nodes[0].getrawmempool())
        txdata = self.nodes[0].gettransaction(txid)
        assert(txdata['confirmations'] == 0) #confirmation should still be 0

        self.log.info('Check that mempoolminfee is larger than minrelaytxfee')
        assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
        assert_greater_than(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))

        self.log.info('Create a mempool tx that will not pass mempoolminfee')
        us0 = utxos.pop()
        inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
        outputs = {self.nodes[0].getnewaddress() : 0.0001}
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        # specifically fund this tx with a fee < mempoolminfee, >= than minrelaytxfee
        txF = self.nodes[0].fundrawtransaction(tx, {'feeRate': relayfee})
        txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex'])
        assert_raises_rpc_error(-26, "mempool min fee not met, 166 < 411 (code 66)", self.nodes[0].sendrawtransaction, txFS['hex'])

if __name__ == '__main__':
    MempoolLimitTest().main()
mit
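Eviction in this test is pure fee arithmetic: the victim transaction pays the bare relayfee, while three batches of 30 large filler transactions pay 100x, 200x and 300x that. The ladder in isolation, assuming the regtest default relayfee of 0.00001:

from decimal import Decimal

relayfee = Decimal('0.00001')  # regtest default minrelaytxfee
base_fee = relayfee * 100
for i in range(3):
    # 30 large transactions per batch, at strictly increasing feerates
    print("batch %d fee per tx: %s" % (i, (i + 1) * base_fee))
# batch 0 fee per tx: 0.00100
# batch 1 fee per tx: 0.00200
# batch 2 fee per tx: 0.00300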
enthought/traitsgui
examples/file_node_tree.py
1
2630
#------------------------------------------------------------------------------ # Copyright (c) 2005, Enthought, Inc. # All rights reserved. # # This software is provided without warranty under the terms of the BSD # license included in enthought/LICENSE.txt and may be redistributed only # under the conditions described in the aforementioned license. The license # is also available online at http://www.enthought.com/licenses/BSD.txt # Thanks for using Enthought open source! # # Author: Enthought, Inc. # Description: <Enthought pyface package component> #------------------------------------------------------------------------------ """ A file system tree. """ # Standard library imports. from os import listdir from os.path import basename, isdir, isfile, join # Enthought library imports. from enthought.pyface.tree.api import NodeManager, NodeType class FileNode(NodeType): """ Node type for files. """ ########################################################################### # 'NodeType' interface. ########################################################################### def is_type_for(self, node): """ Returns True if this node type recognizes a node. """ return isfile(node) def allows_children(self, node): """ Does the node allow children (ie. a folder vs a file). """ return False def get_text(self, node): """ Returns the label text for a node. """ return basename(node) class FolderNode(NodeType): """ Node type for folders. """ ######################################################################### # 'NodeType' interface. ######################################################################### def is_type_for(self, node): """ Returns True if this node type recognizes a node. """ return isdir(node) def allows_children(self, node): """ Does the node allow children (ie. a folder vs a file). """ return True def has_children(self, node): """ Returns True if a node has children, otherwise False. """ return len(listdir(node)) > 0 def get_children(self, node): """ Returns the children of a node. """ return [join(node, filename) for filename in listdir(node)] def get_text(self, node): """ Returns the label text for a node. """ return basename(node) # Add all types to the node manager. node_manager = NodeManager() node_manager.add_node_type(FileNode()) node_manager.add_node_type(FolderNode()) ##### EOF #####################################################################
bsd-3-clause
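Since the node types are registered as no-argument instances (node_manager.add_node_type(FolderNode()) above), they can also be exercised directly. A small sketch against a real directory, assuming FileNode and FolderNode from the example above are in scope:

# Walk one level of an existing directory with the FolderNode type.
folder = FolderNode()
path = '/tmp'  # any existing directory
if folder.is_type_for(path) and folder.has_children(path):
    for child in folder.get_children(path):
        print(child)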
vinchoi/fishplay
flask/lib/python2.7/site-packages/setuptools/ssl_support.py
100
8119
import os import socket import atexit import re from setuptools.extern.six.moves import urllib, http_client, map import pkg_resources from pkg_resources import ResolutionError, ExtractionError try: import ssl except ImportError: ssl = None __all__ = [ 'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths', 'opener_for' ] cert_paths = """ /etc/pki/tls/certs/ca-bundle.crt /etc/ssl/certs/ca-certificates.crt /usr/share/ssl/certs/ca-bundle.crt /usr/local/share/certs/ca-root.crt /etc/ssl/cert.pem /System/Library/OpenSSL/certs/cert.pem /usr/local/share/certs/ca-root-nss.crt """.strip().split() try: HTTPSHandler = urllib.request.HTTPSHandler HTTPSConnection = http_client.HTTPSConnection except AttributeError: HTTPSHandler = HTTPSConnection = object is_available = ssl is not None and object not in (HTTPSHandler, HTTPSConnection) try: from ssl import CertificateError, match_hostname except ImportError: try: from backports.ssl_match_hostname import CertificateError from backports.ssl_match_hostname import match_hostname except ImportError: CertificateError = None match_hostname = None if not CertificateError: class CertificateError(ValueError): pass if not match_hostname: def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 """ pats = [] if not dn: return False # Ported from python3-syntax: # leftmost, *remainder = dn.split(r'.') parts = dn.split(r'.') leftmost = parts[0] remainder = parts[1:] wildcards = leftmost.count('*') if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. A survey of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( "too many wildcards in certificate DNS name: " + repr(dn)) # speed up common case w/o wildcards if not wildcards: return dn.lower() == hostname.lower() # RFC 6125, section 6.4.3, subitem 1. # The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. if leftmost == '*': # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. pats.append('[^.]+') elif leftmost.startswith('xn--') or hostname.startswith('xn--'): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or # U-label of an internationalized domain name. pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. www* pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) return pat.match(hostname) def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. 
""" if not cert: raise ValueError("empty or no certificate") dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': if _dnsname_match(value, hostname): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get('subject', ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. if key == 'commonName': if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError("hostname %r " "doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError("hostname %r " "doesn't match %r" % (hostname, dnsnames[0])) else: raise CertificateError("no appropriate commonName or " "subjectAltName fields were found") class VerifyingHTTPSHandler(HTTPSHandler): """Simple verifying handler: no auth, subclasses, timeouts, etc.""" def __init__(self, ca_bundle): self.ca_bundle = ca_bundle HTTPSHandler.__init__(self) def https_open(self, req): return self.do_open( lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req ) class VerifyingHTTPSConn(HTTPSConnection): """Simple verifying connection: no auth, subclasses, timeouts, etc.""" def __init__(self, host, ca_bundle, **kw): HTTPSConnection.__init__(self, host, **kw) self.ca_bundle = ca_bundle def connect(self): sock = socket.create_connection( (self.host, self.port), getattr(self, 'source_address', None) ) # Handle the socket if a (proxy) tunnel is present if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None): self.sock = sock self._tunnel() # http://bugs.python.org/issue7776: Python>=3.4.1 and >=2.7.7 # change self.host to mean the proxy server host when tunneling is # being used. Adapt, since we are interested in the destination # host for the match_hostname() comparison. actual_host = self._tunnel_host else: actual_host = self.host self.sock = ssl.wrap_socket( sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle ) try: match_hostname(self.sock.getpeercert(), actual_host) except CertificateError: self.sock.shutdown(socket.SHUT_RDWR) self.sock.close() raise def opener_for(ca_bundle=None): """Get a urlopen() replacement that uses ca_bundle for verification""" return urllib.request.build_opener( VerifyingHTTPSHandler(ca_bundle or find_ca_bundle()) ).open _wincerts = None def get_win_certfile(): global _wincerts if _wincerts is not None: return _wincerts.name try: from wincertstore import CertFile except ImportError: return None class MyCertFile(CertFile): def __init__(self, stores=(), certs=()): CertFile.__init__(self) for store in stores: self.addstore(store) self.addcerts(certs) atexit.register(self.close) def close(self): try: super(MyCertFile, self).close() except OSError: pass _wincerts = MyCertFile(stores=['CA', 'ROOT']) return _wincerts.name def find_ca_bundle(): """Return an existing CA bundle path, or None""" if os.name=='nt': return get_win_certfile() else: for cert_path in cert_paths: if os.path.isfile(cert_path): return cert_path try: return pkg_resources.resource_filename('certifi', 'cacert.pem') except (ImportError, ResolutionError, ExtractionError): return None
gpl-3.0
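The fallback match_hostname() above follows the RFC 2818/6125 rules, including the one-wildcard-per-label guard. A small sketch of the behaviour, assuming setuptools.ssl_support is importable; when the stdlib version is present it is re-exported and behaves the same:

from setuptools.ssl_support import match_hostname, CertificateError

cert = {'subjectAltName': (('DNS', '*.example.com'),)}
match_hostname(cert, 'www.example.com')      # returns None: matched
try:
    match_hostname(cert, 'a.b.example.com')  # wildcard does not cross a dot
except CertificateError as e:
    print(e)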
with-git/tensorflow
tensorflow/contrib/keras/api/keras/activations/__init__.py
11
1882
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras built-in activation functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Activation functions. from tensorflow.contrib.keras.python.keras.activations import elu from tensorflow.contrib.keras.python.keras.activations import hard_sigmoid from tensorflow.contrib.keras.python.keras.activations import linear from tensorflow.contrib.keras.python.keras.activations import relu from tensorflow.contrib.keras.python.keras.activations import selu from tensorflow.contrib.keras.python.keras.activations import sigmoid from tensorflow.contrib.keras.python.keras.activations import softmax from tensorflow.contrib.keras.python.keras.activations import softplus from tensorflow.contrib.keras.python.keras.activations import softsign from tensorflow.contrib.keras.python.keras.activations import tanh # Auxiliary utils. # pylint: disable=g-bad-import-order from tensorflow.contrib.keras.python.keras.activations import deserialize from tensorflow.contrib.keras.python.keras.activations import serialize from tensorflow.contrib.keras.python.keras.activations import get del absolute_import del division del print_function
apache-2.0
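These re-exports keep the usual Keras registry helpers alongside the activation functions; a sketch assuming this TF 1.x contrib build, where get() resolves a name to the activation callable and serialize() goes the other way:

from tensorflow.contrib.keras.api.keras import activations

fn = activations.get('relu')      # name -> callable (standard Keras lookup)
print(activations.serialize(fn))  # callable -> 'relu'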
pdellaert/ansible
lib/ansible/modules/cloud/hcloud/hcloud_server_type_info.py
21
5682
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2019, Hetzner Cloud GmbH <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}

DOCUMENTATION = """
---
module: hcloud_server_type_info
short_description: Gather infos about the Hetzner Cloud server types.
version_added: "2.8"
description:
    - Gather infos about your Hetzner Cloud server types.
    - This module was called C(hcloud_server_type_facts) before Ansible 2.9, returning C(ansible_facts) and C(hcloud_server_type_facts).
      Note that the M(hcloud_server_type_info) module no longer returns C(ansible_facts) and the value was renamed to C(hcloud_server_type_info)!
author:
    - Lukas Kaemmerling (@LKaemmerling)
options:
    id:
        description:
            - The ID of the server type you want to get.
        type: int
    name:
        description:
            - The name of the server type you want to get.
        type: str
extends_documentation_fragment: hcloud
"""

EXAMPLES = """
- name: Gather hcloud server type infos
  hcloud_server_type_info:
  register: output

- name: Print the gathered infos
  debug:
    var: output.hcloud_server_type_info
"""

RETURN = """
hcloud_server_type_info:
    description: The server type infos as list
    returned: always
    type: complex
    contains:
        id:
            description: Numeric identifier of the server type
            returned: always
            type: int
            sample: 1937415
        name:
            description: Name of the server type
            returned: always
            type: str
            sample: fsn1
        description:
            description: Detail description of the server type
            returned: always
            type: str
            sample: Falkenstein DC Park 1
        cores:
            description: Number of cpu cores a server of this type will have
            returned: always
            type: int
            sample: 1
        memory:
            description: Memory a server of this type will have in GB
            returned: always
            type: int
            sample: 1
        disk:
            description: Disk size a server of this type will have in GB
            returned: always
            type: int
            sample: 25
        storage_type:
            description: Type of server boot drive
            returned: always
            type: str
            sample: local
        cpu_type:
            description: Type of cpu
            returned: always
            type: str
            sample: shared
"""

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.hcloud import Hcloud

try:
    from hcloud import APIException
except ImportError:
    pass


class AnsibleHcloudServerTypeInfo(Hcloud):
    def __init__(self, module):
        Hcloud.__init__(self, module, "hcloud_server_type_info")
        self.hcloud_server_type_info = None

    def _prepare_result(self):
        tmp = []

        for server_type in self.hcloud_server_type_info:
            if server_type is not None:
                tmp.append({
                    "id": to_native(server_type.id),
                    "name": to_native(server_type.name),
                    "description": to_native(server_type.description),
                    "cores": server_type.cores,
                    "memory": server_type.memory,
                    "disk": server_type.disk,
                    "storage_type": to_native(server_type.storage_type),
                    "cpu_type": to_native(server_type.cpu_type)
                })
        return tmp

    def get_server_types(self):
        try:
            if self.module.params.get("id") is not None:
                self.hcloud_server_type_info = [self.client.server_types.get_by_id(
                    self.module.params.get("id")
                )]
            elif self.module.params.get("name") is not None:
                self.hcloud_server_type_info = [self.client.server_types.get_by_name(
                    self.module.params.get("name")
                )]
            else:
                self.hcloud_server_type_info = self.client.server_types.get_all()

        except APIException as e:
            self.module.fail_json(msg=e.message)

    @staticmethod
    def define_module():
        return AnsibleModule(
            argument_spec=dict(
                id={"type": "int"},
                name={"type": "str"},
                **Hcloud.base_module_arguments()
            ),
            supports_check_mode=True,
        )


def main():
    module = AnsibleHcloudServerTypeInfo.define_module()

    is_old_facts = module._name == 'hcloud_server_type_facts'
    if is_old_facts:
        module.deprecate("The 'hcloud_server_type_facts' module has been renamed to 'hcloud_server_type_info', "
                         "and the renamed one no longer returns ansible_facts", version='2.13')

    hcloud = AnsibleHcloudServerTypeInfo(module)
    hcloud.get_server_types()
    result = hcloud.get_result()

    if is_old_facts:
        ansible_info = {
            'hcloud_server_type_info': result['hcloud_server_type_info']
        }
        module.exit_json(ansible_facts=ansible_info)
    else:
        ansible_info = {
            'hcloud_server_type_info': result['hcloud_server_type_info']
        }
        module.exit_json(**ansible_info)


if __name__ == "__main__":
    main()
gpl-3.0
thulasi-ram/django-feature-toggle
docs/conf.py
1
5358
# -*- coding: utf-8 -*- # # Feature Toggle documentation build configuration file, created by # sphinx-quickstart on Thu Sep 7 17:35:12 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.githubpages'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Feature Toggle' copyright = '2017, Damodharan Thulasiram' author = 'Damodharan Thulasiram' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.2' # The full version, including alpha/beta/rc tags. release = '0.2.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. 
# # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars # html_sidebars = { # '**': [ # 'about.html', # 'navigation.html', # 'relations.html', # needs 'show_related': True theme option to display # 'searchbox.html', # 'donate.html', # ] # } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'FeatureToggledoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'FeatureToggle.tex', 'Feature Toggle Documentation', 'Damodharan Thulasiram', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'featuretoggle', 'Feature Toggle Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'FeatureToggle', 'Feature Toggle Documentation', author, 'FeatureToggle', 'One line description of project.', 'Miscellaneous'), ]
gpl-3.0
marvinpinto/charlesbot-rundeck
charlesbot_rundeck/http.py
1
1032
import asyncio
import aiohttp
import logging

log = logging.getLogger(__name__)


@asyncio.coroutine
def http_post_request(url, headers):  # pragma: no cover
    response = yield from aiohttp.post(url, headers=headers)
    if not response.status == 200:
        text = yield from response.text()
        log.error("URL: %s" % url)
        log.error("Response status code was %s" % str(response.status))
        log.error(response.headers)
        log.error(text)
        response.close()
        return ""
    return (yield from response.text())


@asyncio.coroutine
def http_get_request(url, headers, params):  # pragma: no cover
    response = yield from aiohttp.get(url, headers=headers, params=params)
    if not response.status == 200:
        text = yield from response.text()
        log.error("URL: %s" % url)
        log.error("Response status code was %s" % str(response.status))
        log.error(response.headers)
        log.error(text)
        response.close()
        return ""
    return (yield from response.text())
mit
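Both helpers are coroutine wrappers that return the response body on HTTP 200 and an empty string otherwise (with details logged). A usage sketch on an asyncio event loop; the URL is hypothetical:

import asyncio
from charlesbot_rundeck.http import http_get_request

loop = asyncio.get_event_loop()
body = loop.run_until_complete(
    http_get_request("https://rundeck.example.com/api/17/projects",  # hypothetical
                     headers={"Accept": "application/json"},
                     params={})
)
print(body or "request failed (non-200), details are in the log")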
68foxboris/enigma2-openpli-vuplus
lib/python/Screens/Menu.py
6
13944
from Screen import Screen from Screens.MessageBox import MessageBox from Screens.ParentalControlSetup import ProtectedScreen from Components.Sources.List import List from Components.ActionMap import NumberActionMap, ActionMap from Components.Sources.StaticText import StaticText from Components.config import configfile from Components.PluginComponent import plugins from Components.config import config, ConfigDictionarySet, NoSave from Components.SystemInfo import SystemInfo from Components.Label import Label from Tools.BoundFunction import boundFunction from Plugins.Plugin import PluginDescriptor from Tools.Directories import resolveFilename, SCOPE_SKIN from enigma import eTimer import xml.etree.cElementTree from Screens.Setup import Setup, getSetupTitle # read the menu mdom = xml.etree.cElementTree.parse(resolveFilename(SCOPE_SKIN, 'menu.xml')) class MenuUpdater: def __init__(self): self.updatedMenuItems = {} def addMenuItem(self, id, pos, text, module, screen, weight): if not self.updatedMenuAvailable(id): self.updatedMenuItems[id] = [] self.updatedMenuItems[id].append([text, pos, module, screen, weight]) def delMenuItem(self, id, pos, text, module, screen, weight): self.updatedMenuItems[id].remove([text, pos, module, screen, weight]) def updatedMenuAvailable(self, id): return id in self.updatedMenuItems def getUpdatedMenu(self, id): return self.updatedMenuItems[id] menuupdater = MenuUpdater() class MenuSummary(Screen): pass class Menu(Screen, ProtectedScreen): ALLOW_SUSPEND = True def okbuttonClick(self): self.resetNumberKey() selection = self["menu"].getCurrent() if selection and selection[1]: selection[1]() def execText(self, text): exec text def runScreen(self, arg): # arg[0] is the module (as string) # arg[1] is Screen inside this module # plus possible arguments, as # string (as we want to reference # stuff which is just imported) # FIXME. 
somehow if arg[0] != "": exec "from " + arg[0] + " import *" self.openDialog(*eval(arg[1])) def nothing(self): #dummy pass def openDialog(self, *dialog): # in every layer needed self.session.openWithCallback(self.menuClosed, *dialog) def openSetup(self, dialog): self.session.openWithCallback(self.menuClosed, Setup, dialog) def addMenu(self, destList, node): requires = node.get("requires") if requires: if requires[0] == '!': if SystemInfo.get(requires[1:], False): return elif not SystemInfo.get(requires, False): return MenuTitle = _(node.get("text", "??").encode("UTF-8")) entryID = node.get("entryID", "undefined") weight = node.get("weight", 50) x = node.get("flushConfigOnClose") if x: a = boundFunction(self.session.openWithCallback, self.menuClosedWithConfigFlush, Menu, node) else: a = boundFunction(self.session.openWithCallback, self.menuClosed, Menu, node) #TODO add check if !empty(node.childNodes) destList.append((MenuTitle, a, entryID, weight)) def menuClosedWithConfigFlush(self, *res): configfile.save() self.menuClosed(*res) def menuClosed(self, *res): if res and res[0]: self.close(True) def addItem(self, destList, node): requires = node.get("requires") if requires: if requires[0] == '!': if SystemInfo.get(requires[1:], False): return elif not SystemInfo.get(requires, False): return configCondition = node.get("configcondition") if configCondition and not eval(configCondition + ".value"): return item_text = node.get("text", "").encode("UTF-8") entryID = node.get("entryID", "undefined") weight = node.get("weight", 50) for x in node: if x.tag == 'screen': module = x.get("module") screen = x.get("screen") if screen is None: screen = module # print module, screen if module: module = "Screens." + module else: module = "" # check for arguments. they will be appended to the # openDialog call args = x.text or "" screen += ", " + args destList.append((_(item_text or "??"), boundFunction(self.runScreen, (module, screen)), entryID, weight)) return elif x.tag == 'code': destList.append((_(item_text or "??"), boundFunction(self.execText, x.text), entryID, weight)) return elif x.tag == 'setup': id = x.get("id") if item_text == "": item_text = _(getSetupTitle(id)) else: item_text = _(item_text) destList.append((item_text, boundFunction(self.openSetup, id), entryID, weight)) return destList.append((item_text, self.nothing, entryID, weight)) def sortByName(self, listentry): return listentry[0].lower() def __init__(self, session, parent): self.parentmenu = parent Screen.__init__(self, session) self["menu"] = List([]) self["menu"].enableWrapAround = True self.createMenuList() # for the skin: first try a menu_<menuID>, then Menu self.skinName = [ ] if self.menuID: self.skinName.append("menu_" + self.menuID) self.skinName.append("Menu") ProtectedScreen.__init__(self) self["actions"] = NumberActionMap(["OkCancelActions", "MenuActions", "NumberActions"], { "ok": self.okbuttonClick, "cancel": self.closeNonRecursive, "menu": self.closeRecursive, "0": self.keyNumberGlobal, "1": self.keyNumberGlobal, "2": self.keyNumberGlobal, "3": self.keyNumberGlobal, "4": self.keyNumberGlobal, "5": self.keyNumberGlobal, "6": self.keyNumberGlobal, "7": self.keyNumberGlobal, "8": self.keyNumberGlobal, "9": self.keyNumberGlobal }) if config.usage.menu_sort_mode.value == "user": self["EditActions"] = ActionMap(["ColorActions"], { "blue": self.keyBlue, }) title = parent.get("title", "").encode("UTF-8") or None title = title and _(title) or _(parent.get("text", "").encode("UTF-8")) title = self.__class__.__name__ == "MenuSort" and 
_("Menusort (%s)") % title or title self["title"] = StaticText(title) self.setScreenPathMode(True) self.setTitle(title) self.number = 0 self.nextNumberTimer = eTimer() self.nextNumberTimer.callback.append(self.okbuttonClick) def createMenuList(self): self.list = [] self.menuID = None for x in self.parentmenu: #walk through the actual nodelist if not x.tag: continue if x.tag == 'item': item_level = int(x.get("level", 0)) if item_level <= config.usage.setup_level.index: self.addItem(self.list, x) count += 1 elif x.tag == 'menu': item_level = int(x.get("level", 0)) if item_level <= config.usage.setup_level.index: self.addMenu(self.list, x) count += 1 elif x.tag == "id": self.menuID = x.get("val") count = 0 if self.menuID: # menuupdater? if menuupdater.updatedMenuAvailable(self.menuID): for x in menuupdater.getUpdatedMenu(self.menuID): if x[1] == count: self.list.append((x[0], boundFunction(self.runScreen, (x[2], x[3] + ", ")), x[4])) count += 1 if self.menuID: # plugins for l in plugins.getPluginsForMenu(self.menuID): # check if a plugin overrides an existing menu plugin_menuid = l[2] for x in self.list: if x[2] == plugin_menuid: self.list.remove(x) break self.list.append((l[0], boundFunction(l[1], self.session, close=self.close), l[2], l[3] or 50)) if config.usage.menu_sort_mode.value == "user" and self.menuID == "mainmenu": plugin_list = [] id_list = [] for l in plugins.getPlugins([PluginDescriptor.WHERE_PLUGINMENU ,PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]): l.id = (l.name.lower()).replace(' ','_') if l.id not in id_list: id_list.append(l.id) plugin_list.append((l.name, boundFunction(l.__call__, self.session), l.id, 200)) if self.menuID is not None and config.usage.menu_sort_mode.value == "user": self.sub_menu_sort = NoSave(ConfigDictionarySet()) self.sub_menu_sort.value = config.usage.menu_sort_weight.getConfigValue(self.menuID, "submenu") or {} idx = 0 for x in self.list: entry = list(self.list.pop(idx)) m_weight = self.sub_menu_sort.getConfigValue(entry[2], "sort") or entry[3] entry.append(m_weight) self.list.insert(idx, tuple(entry)) self.sub_menu_sort.changeConfigValue(entry[2], "sort", m_weight) idx += 1 self.full_list = list(self.list) if config.usage.menu_sort_mode.value == "a_z": # Sort by Name self.list.sort(key=self.sortByName) elif config.usage.menu_sort_mode.value == "user": self.hide_show_entries() else: # Sort by Weight self.list.sort(key=lambda x: int(x[3])) if config.usage.menu_show_numbers.value: self.list = [(str(x[0] + 1) + " " +x[1][0], x[1][1], x[1][2]) for x in enumerate(self.list)] self["menu"].updateList(self.list) def keyNumberGlobal(self, number): self.number = self.number * 10 + number if self.number and self.number <= len(self["menu"].list): self["menu"].setIndex(self.number - 1) if len(self["menu"].list) < 10 or self.number >= 10: self.okbuttonClick() else: self.nextNumberTimer.start(1500, True) else: self.number = 0 def resetNumberKey(self): self.nextNumberTimer.stop() self.number = 0 def closeNonRecursive(self): self.resetNumberKey() self.close(False) def closeRecursive(self): self.resetNumberKey() self.close(True) def createSummary(self): return MenuSummary def isProtected(self): if config.ParentalControl.setuppinactive.value: if config.ParentalControl.config_sections.main_menu.value and not(hasattr(self.session, 'infobar') and self.session.infobar is None): return self.menuID == "mainmenu" elif config.ParentalControl.config_sections.configuration.value and self.menuID == "setup": return True elif 
config.ParentalControl.config_sections.timer_menu.value and self.menuID == "timermenu": return True elif config.ParentalControl.config_sections.standby_menu.value and self.menuID == "shutdown": return True def keyBlue(self): if config.usage.menu_sort_mode.value == "user": self.session.openWithCallback(self.menuSortCallBack, MenuSort, self.parentmenu) def menuSortCallBack(self, key=False): self.createMenuList() def keyCancel(self): self.closeNonRecursive() def hide_show_entries(self): self.list = [] for entry in self.full_list: if not self.sub_menu_sort.getConfigValue(entry[2], "hidden"): self.list.append(entry) if not self.list: self.list.append(('',None,'dummy','10',10)) self.list.sort(key=lambda listweight : int(listweight[4])) class MenuSort(Menu): def __init__(self, session, parent): self["key_red"] = Label(_("Exit")) self["key_green"] = Label(_("Save changes")) self["key_yellow"] = Label(_("Toggle show/hide")) self["key_blue"] = Label(_("Reset order (All)")) self.somethingChanged = False Menu.__init__(self, session, parent) self.skinName = "MenuSort" self["menu"].onSelectionChanged.append(self.selectionChanged) self["MoveActions"] = ActionMap(["WizardActions", "DirectionActions"], { "moveUp": boundFunction(self.moveChoosen, -1), "moveDown": boundFunction(self.moveChoosen, +1), }, -1 ) self["EditActions"] = ActionMap(["ColorActions"], { "red": self.closeMenuSort, "green": self.keySave, "yellow": self.keyToggleShowHide, "blue": self.resetSortOrder, }) self.onLayoutFinish.append(self.selectionChanged) def isProtected(self): return config.ParentalControl.setuppinactive.value and config.ParentalControl.config_sections.menu_sort.value def resetSortOrder(self, key = None): config.usage.menu_sort_weight.value = { "mainmenu" : {"submenu" : {} }} config.usage.menu_sort_weight.save() self.createMenuList() def hide_show_entries(self): self.list = list(self.full_list) if not self.list: self.list.append(('',None,'dummy','10',10)) self.list.sort(key=lambda listweight : int(listweight[4])) def selectionChanged(self): selection = self["menu"].getCurrent()[2] if self.sub_menu_sort.getConfigValue(selection, "hidden"): self["key_yellow"].setText(_("show")) else: self["key_yellow"].setText(_("hide")) def keySave(self): if self.somethingChanged: i = 10 idx = 0 for x in self.list: self.sub_menu_sort.changeConfigValue(x[2], "sort", i) if len(x) >= 5: entry = list(x) entry[4] = i entry = tuple(entry) self.list.pop(idx) self.list.insert(idx, entry) i += 10 idx += 1 config.usage.menu_sort_weight.changeConfigValue(self.menuID, "submenu", self.sub_menu_sort.value) config.usage.menu_sort_weight.save() self.close() def closeNonRecursive(self): self.closeMenuSort() def closeRecursive(self): self.closeMenuSort() def closeMenuSort(self): if self.somethingChanged: self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?")) else: self.close() def cancelConfirm(self, result): if result: config.usage.menu_sort_weight.cancel() self.close() def okbuttonClick(self): self.keyToggleShowHide() def keyToggleShowHide(self): self.somethingChanged = True selection = self["menu"].getCurrent()[2] if self.sub_menu_sort.getConfigValue(selection, "hidden"): self.sub_menu_sort.removeConfigValue(selection, "hidden") self["key_yellow"].setText(_("hide")) else: self.sub_menu_sort.changeConfigValue(selection, "hidden", 1) self["key_yellow"].setText(_("show")) def moveChoosen(self, direction): self.somethingChanged = True currentIndex = self["menu"].getSelectedIndex() swapIndex = (currentIndex + 
direction) % len(self["menu"].list) self["menu"].list[currentIndex], self["menu"].list[swapIndex] = self["menu"].list[swapIndex], self["menu"].list[currentIndex] self["menu"].updateList(self["menu"].list) if direction > 0: self["menu"].down() else: self["menu"].up() class MainMenu(Menu): #add file load functions for the xml-file def __init__(self, *x): self.skinName = "Menu" Menu.__init__(self, *x)
gpl-2.0
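The keyNumberGlobal handler above implements multi-digit menu jumping by accumulating digits into a running index and deferring activation while a longer valid number is still possible. Restated as a pure function for illustration (next_selection is a hypothetical name, not part of the Screen class):

# Sketch of the digit-accumulation rule in Menu.keyNumberGlobal,
# reduced to a pure function. Hypothetical helper, for illustration only.
def next_selection(current_number, digit, list_length):
    """Return (new_number, jump_index, activate_now).

    jump_index is None when the accumulated number falls out of range,
    in which case the accumulator resets to 0.
    """
    number = current_number * 10 + digit
    if number and number <= list_length:
        # Activate at once when no longer entry can begin with this
        # prefix: short lists, or a number that already has two digits.
        # Otherwise the caller starts the 1.5 s nextNumberTimer.
        activate = list_length < 10 or number >= 10
        return number, number - 1, activate
    return 0, None, False

# In a 25-entry menu, pressing "1" then "7" selects and opens entry 17.
assert next_selection(0, 1, 25) == (1, 0, False)
assert next_selection(1, 7, 25) == (17, 16, True)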
elingg/tensorflow
tensorflow/contrib/tensor_forest/client/__init__.py
164
1043
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Random forest implementation in tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.client import eval_metrics
from tensorflow.contrib.tensor_forest.client import random_forest
# pylint: enable=unused-import
apache-2.0
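The package above is only a re-export surface for the two client modules. For context, a consumer of this contrib-era API would typically build a forest estimator along these lines; this is a sketch from memory of the TF 1.x contrib names (ForestHParams, fill, TensorForestEstimator), so verify them against the matching TensorFlow release:

# Sketch only: assumes the TensorFlow 1.x contrib.tensor_forest API.
from tensorflow.contrib.tensor_forest.client import random_forest
from tensorflow.contrib.tensor_forest.python import tensor_forest

# Hyperparameters for a small forest; fill() derives dependent values.
hparams = tensor_forest.ForestHParams(
    num_classes=2, num_features=4, num_trees=10, max_nodes=1000).fill()

# Estimator wrapping the forest graph, trained like other tf.learn
# estimators via fit()/evaluate().
estimator = random_forest.TensorForestEstimator(hparams)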
azunite/chrome_build
tests/gclient_utils_test.py
44
7351
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import StringIO import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from testing_support.super_mox import SuperMoxTestBase from testing_support import trial_dir import gclient_utils import subprocess2 class GclientUtilBase(SuperMoxTestBase): def setUp(self): super(GclientUtilBase, self).setUp() gclient_utils.sys.stdout.flush = lambda: None self.mox.StubOutWithMock(subprocess2, 'Popen') self.mox.StubOutWithMock(subprocess2, 'communicate') class CheckCallAndFilterTestCase(GclientUtilBase): class ProcessIdMock(object): def __init__(self, test_string): self.stdout = StringIO.StringIO(test_string) self.pid = 9284 # pylint: disable=R0201 def wait(self): return 0 def _inner(self, args, test_string): cwd = 'bleh' gclient_utils.sys.stdout.write( '\n________ running \'boo foo bar\' in \'bleh\'\n') for i in test_string: gclient_utils.sys.stdout.write(i) # pylint: disable=E1101 subprocess2.Popen( args, cwd=cwd, stdout=subprocess2.PIPE, stderr=subprocess2.STDOUT, bufsize=0).AndReturn(self.ProcessIdMock(test_string)) os.getcwd() self.mox.ReplayAll() compiled_pattern = gclient_utils.re.compile(r'a(.*)b') line_list = [] capture_list = [] def FilterLines(line): line_list.append(line) assert isinstance(line, str), type(line) match = compiled_pattern.search(line) if match: capture_list.append(match.group(1)) gclient_utils.CheckCallAndFilterAndHeader( args, cwd=cwd, always=True, filter_fn=FilterLines) self.assertEquals(line_list, ['ahah', 'accb', 'allo', 'addb']) self.assertEquals(capture_list, ['cc', 'dd']) def testCheckCallAndFilter(self): args = ['boo', 'foo', 'bar'] test_string = 'ahah\naccb\nallo\naddb\n' self._inner(args, test_string) self.checkstdout('\n________ running \'boo foo bar\' in \'bleh\'\n' 'ahah\naccb\nallo\naddb\n\n' '________ running \'boo foo bar\' in \'bleh\'\nahah\naccb\nallo\naddb' '\n') def testNoLF(self): # Exactly as testCheckCallAndFilterAndHeader without trailing \n args = ['boo', 'foo', 'bar'] test_string = 'ahah\naccb\nallo\naddb' self._inner(args, test_string) self.checkstdout('\n________ running \'boo foo bar\' in \'bleh\'\n' 'ahah\naccb\nallo\naddb\n' '________ running \'boo foo bar\' in \'bleh\'\nahah\naccb\nallo\naddb') class SplitUrlRevisionTestCase(GclientUtilBase): def testSSHUrl(self): url = "ssh://[email protected]/test.git" rev = "ac345e52dc" out_url, out_rev = gclient_utils.SplitUrlRevision(url) self.assertEquals(out_rev, None) self.assertEquals(out_url, url) out_url, out_rev = gclient_utils.SplitUrlRevision("%s@%s" % (url, rev)) self.assertEquals(out_rev, rev) self.assertEquals(out_url, url) url = "ssh://example.com/test.git" out_url, out_rev = gclient_utils.SplitUrlRevision(url) self.assertEquals(out_rev, None) self.assertEquals(out_url, url) out_url, out_rev = gclient_utils.SplitUrlRevision("%s@%s" % (url, rev)) self.assertEquals(out_rev, rev) self.assertEquals(out_url, url) url = "ssh://example.com/git/test.git" out_url, out_rev = gclient_utils.SplitUrlRevision(url) self.assertEquals(out_rev, None) self.assertEquals(out_url, url) out_url, out_rev = gclient_utils.SplitUrlRevision("%s@%s" % (url, rev)) self.assertEquals(out_rev, rev) self.assertEquals(out_url, url) rev = "test-stable" out_url, out_rev = gclient_utils.SplitUrlRevision("%s@%s" % (url, rev)) self.assertEquals(out_rev, rev) self.assertEquals(out_url, url) url = 
"ssh://[email protected]/~/test.git" out_url, out_rev = gclient_utils.SplitUrlRevision(url) self.assertEquals(out_rev, None) self.assertEquals(out_url, url) out_url, out_rev = gclient_utils.SplitUrlRevision("%s@%s" % (url, rev)) self.assertEquals(out_rev, rev) self.assertEquals(out_url, url) url = "ssh://[email protected]/~username/test.git" out_url, out_rev = gclient_utils.SplitUrlRevision(url) self.assertEquals(out_rev, None) self.assertEquals(out_url, url) out_url, out_rev = gclient_utils.SplitUrlRevision("%s@%s" % (url, rev)) self.assertEquals(out_rev, rev) self.assertEquals(out_url, url) url = "[email protected]:dart-lang/spark.git" out_url, out_rev = gclient_utils.SplitUrlRevision(url) self.assertEquals(out_rev, None) self.assertEquals(out_url, url) out_url, out_rev = gclient_utils.SplitUrlRevision("%s@%s" % (url, rev)) self.assertEquals(out_rev, rev) self.assertEquals(out_url, url) def testSVNUrl(self): url = "svn://example.com/test" rev = "ac345e52dc" out_url, out_rev = gclient_utils.SplitUrlRevision(url) self.assertEquals(out_rev, None) self.assertEquals(out_url, url) out_url, out_rev = gclient_utils.SplitUrlRevision("%s@%s" % (url, rev)) self.assertEquals(out_rev, rev) self.assertEquals(out_url, url) class GClientUtilsTest(trial_dir.TestCase): def testHardToDelete(self): # Use the fact that tearDown will delete the directory to make it hard to do # so. l1 = os.path.join(self.root_dir, 'l1') l2 = os.path.join(l1, 'l2') l3 = os.path.join(l2, 'l3') f3 = os.path.join(l3, 'f3') os.mkdir(l1) os.mkdir(l2) os.mkdir(l3) gclient_utils.FileWrite(f3, 'foo') os.chmod(f3, 0) os.chmod(l3, 0) os.chmod(l2, 0) os.chmod(l1, 0) def testUpgradeToHttps(self): values = [ ['', ''], [None, None], ['foo', 'https://foo'], ['http://foo', 'https://foo'], ['foo/', 'https://foo/'], ['ssh-svn://foo', 'ssh-svn://foo'], ['ssh-svn://foo/bar/', 'ssh-svn://foo/bar/'], ['codereview.chromium.org', 'https://codereview.chromium.org'], ['codereview.chromium.org/', 'https://codereview.chromium.org/'], ['http://foo:10000', 'http://foo:10000'], ['http://foo:10000/bar', 'http://foo:10000/bar'], ['foo:10000', 'http://foo:10000'], ['foo:', 'https://foo:'], ] for content, expected in values: self.assertEquals( expected, gclient_utils.UpgradeToHttps(content)) def testParseCodereviewSettingsContent(self): values = [ ['# bleh\n', {}], ['\t# foo : bar\n', {}], ['Foo:bar', {'Foo': 'bar'}], ['Foo:bar:baz\n', {'Foo': 'bar:baz'}], [' Foo : bar ', {'Foo': 'bar'}], [' Foo : bar \n', {'Foo': 'bar'}], ['a:b\n\rc:d\re:f', {'a': 'b', 'c': 'd', 'e': 'f'}], ['an_url:http://value/', {'an_url': 'http://value/'}], [ 'CODE_REVIEW_SERVER : http://r/s', {'CODE_REVIEW_SERVER': 'https://r/s'} ], ['VIEW_VC:http://r/s', {'VIEW_VC': 'https://r/s'}], ] for content, expected in values: self.assertEquals( expected, gclient_utils.ParseCodereviewSettingsContent(content)) if __name__ == '__main__': import unittest unittest.main() # vim: ts=2:sw=2:tw=80:et:
bsd-3-clause
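The SplitUrlRevision assertions above pin down the contract: an @revision suffix is split off only when it trails the path, so the user@host part of an ssh or scp-style URL survives intact. A simplified stand-in that satisfies exactly the cases asserted here (split_url_revision is illustrative, not the real gclient_utils implementation, which uses scheme-aware regexes):

# Simplified stand-in for gclient_utils.SplitUrlRevision covering the
# behaviour asserted above: split on the last '@' only when it appears
# after the last '/', so user@host prefixes are left alone.
def split_url_revision(url):
    at = url.rfind('@')
    if at > url.rfind('/'):
        return url[:at], url[at + 1:]
    return url, None

assert split_url_revision("ssh://[email protected]/test.git@ac345e52dc") == \
    ("ssh://[email protected]/test.git", "ac345e52dc")
assert split_url_revision("[email protected]:dart-lang/spark.git") == \
    ("[email protected]:dart-lang/spark.git", None)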
anryko/ansible
lib/ansible/modules/cloud/azure/azure_rm_mariadbfirewallrule.py
40
9856
#!/usr/bin/python # # Copyright (c) 2018 Zim Kalinowski, <[email protected]> # Copyright (c) 2019 Matti Ranta, (@techknowlogick) # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_mariadbfirewallrule version_added: "2.8" short_description: Manage MariaDB firewall rule instance description: - Create, update and delete instance of MariaDB firewall rule. options: resource_group: description: - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. required: True server_name: description: - The name of the server. required: True name: description: - The name of the MariaDB firewall rule. required: True start_ip_address: description: - The start IP address of the MariaDB firewall rule. Must be IPv4 format. end_ip_address: description: - The end IP address of the MariaDB firewall rule. Must be IPv4 format. state: description: - Assert the state of the MariaDB firewall rule. Use C(present) to create or update a rule and C(absent) to ensure it is not present. default: present choices: - absent - present extends_documentation_fragment: - azure author: - Zim Kalinowski (@zikalino) - Matti Ranta (@techknowlogick) ''' EXAMPLES = ''' - name: Create (or update) MariaDB firewall rule azure_rm_mariadbfirewallrule: resource_group: myResourceGroup server_name: testserver name: rule1 start_ip_address: 10.0.0.17 end_ip_address: 10.0.0.20 ''' RETURN = ''' id: description: - Resource ID. returned: always type: str sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/fire wallRules/rule1" ''' import time from ansible.module_utils.azure_rm_common import AzureRMModuleBase try: from msrestazure.azure_exceptions import CloudError from msrest.polling import LROPoller from azure.mgmt.rdbms.mariadb import MariaDBManagementClient from msrest.serialization import Model except ImportError: # This is handled in azure_rm_common pass class Actions: NoAction, Create, Update, Delete = range(4) class AzureRMMariaDbFirewallRule(AzureRMModuleBase): """Configuration class for an Azure RM MariaDB firewall rule resource""" def __init__(self): self.module_arg_spec = dict( resource_group=dict( type='str', required=True ), server_name=dict( type='str', required=True ), name=dict( type='str', required=True ), start_ip_address=dict( type='str' ), end_ip_address=dict( type='str' ), state=dict( type='str', default='present', choices=['present', 'absent'] ) ) self.resource_group = None self.server_name = None self.name = None self.start_ip_address = None self.end_ip_address = None self.results = dict(changed=False) self.state = None self.to_do = Actions.NoAction super(AzureRMMariaDbFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec, supports_check_mode=True, supports_tags=False) def exec_module(self, **kwargs): """Main module execution method""" for key in list(self.module_arg_spec.keys()): if hasattr(self, key): setattr(self, key, kwargs[key]) old_response = None response = None resource_group = self.get_resource_group(self.resource_group) old_response = self.get_firewallrule() if not old_response: self.log("MariaDB firewall rule instance doesn't exist") if self.state == 'absent': 
self.log("Old instance didn't exist") else: self.to_do = Actions.Create else: self.log("MariaDB firewall rule instance already exists") if self.state == 'absent': self.to_do = Actions.Delete elif self.state == 'present': self.log("Need to check if MariaDB firewall rule instance has to be deleted or may be updated") if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']): self.to_do = Actions.Update if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']): self.to_do = Actions.Update if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): self.log("Need to Create / Update the MariaDB firewall rule instance") if self.check_mode: self.results['changed'] = True return self.results response = self.create_update_firewallrule() if not old_response: self.results['changed'] = True else: self.results['changed'] = old_response.__ne__(response) self.log("Creation / Update done") elif self.to_do == Actions.Delete: self.log("MariaDB firewall rule instance deleted") self.results['changed'] = True if self.check_mode: return self.results self.delete_firewallrule() # make sure instance is actually deleted, for some Azure resources, instance is hanging around # for some time after deletion -- this should be really fixed in Azure while self.get_firewallrule(): time.sleep(20) else: self.log("MariaDB firewall rule instance unchanged") self.results['changed'] = False response = old_response if response: self.results["id"] = response["id"] return self.results def create_update_firewallrule(self): ''' Creates or updates MariaDB firewall rule with the specified configuration. :return: deserialized MariaDB firewall rule instance state dictionary ''' self.log("Creating / Updating the MariaDB firewall rule instance {0}".format(self.name)) try: response = self.mariadb_client.firewall_rules.create_or_update(resource_group_name=self.resource_group, server_name=self.server_name, firewall_rule_name=self.name, start_ip_address=self.start_ip_address, end_ip_address=self.end_ip_address) if isinstance(response, LROPoller): response = self.get_poller_result(response) except CloudError as exc: self.log('Error attempting to create the MariaDB firewall rule instance.') self.fail("Error creating the MariaDB firewall rule instance: {0}".format(str(exc))) return response.as_dict() def delete_firewallrule(self): ''' Deletes specified MariaDB firewall rule instance in the specified subscription and resource group. :return: True ''' self.log("Deleting the MariaDB firewall rule instance {0}".format(self.name)) try: response = self.mariadb_client.firewall_rules.delete(resource_group_name=self.resource_group, server_name=self.server_name, firewall_rule_name=self.name) except CloudError as e: self.log('Error attempting to delete the MariaDB firewall rule instance.') self.fail("Error deleting the MariaDB firewall rule instance: {0}".format(str(e))) return True def get_firewallrule(self): ''' Gets the properties of the specified MariaDB firewall rule. 
:return: deserialized MariaDB firewall rule instance state dictionary ''' self.log("Checking if the MariaDB firewall rule instance {0} is present".format(self.name)) found = False try: response = self.mariadb_client.firewall_rules.get(resource_group_name=self.resource_group, server_name=self.server_name, firewall_rule_name=self.name) found = True self.log("Response : {0}".format(response)) self.log("MariaDB firewall rule instance : {0} found".format(response.name)) except CloudError as e: self.log('Did not find the MariaDB firewall rule instance.') if found is True: return response.as_dict() return False def main(): """Main execution""" AzureRMMariaDbFirewallRule() if __name__ == '__main__': main()
gpl-3.0
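exec_module above boils down to a small idempotency decision: compare the requested state with what the Azure API reports and choose one of the four Actions. The same decision table, distilled into a standalone function (decide_action is illustrative; the module keeps this logic inline and mutates self.to_do):

# Decision table distilled from AzureRMMariaDbFirewallRule.exec_module.
class Actions:
    NoAction, Create, Update, Delete = range(4)

def decide_action(state, old_rule, start_ip, end_ip):
    if not old_rule:
        # Rule absent on the API side: create it only when desired.
        return Actions.Create if state == 'present' else Actions.NoAction
    if state == 'absent':
        return Actions.Delete
    # Present on both sides: update only when an address really differs.
    if start_ip is not None and start_ip != old_rule['start_ip_address']:
        return Actions.Update
    if end_ip is not None and end_ip != old_rule['end_ip_address']:
        return Actions.Update
    return Actions.NoAction

assert decide_action('present', None, '10.0.0.17', '10.0.0.20') == Actions.Create
assert decide_action('absent', {'start_ip_address': '10.0.0.17',
                                'end_ip_address': '10.0.0.20'},
                     None, None) == Actions.Delete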
jramos/p2pool
SOAPpy/GSIServer.py
289
5238
from __future__ import nested_scopes

"""
GSIServer - Contributed by Ivan R. Judson <[email protected]>


################################################################################
#
# SOAPpy - Cayce Ullman ([email protected])
#          Brian Matthews ([email protected])
#          Gregory Warnes ([email protected])
#          Christopher Blunck ([email protected])
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""

ident = '$Id: GSIServer.py 1468 2008-05-24 01:55:33Z warnes $'
from version import __version__

#import xml.sax
import re
import socket
import sys
import SocketServer
from types import *
import BaseHTTPServer

# SOAPpy modules
from Parser import parseSOAPRPC
from Config import SOAPConfig
from Types import faultType, voidType, simplify
from NS import NS
from SOAPBuilder import buildSOAP
from Utilities import debugHeader, debugFooter

try: from M2Crypto import SSL
except: pass

#####

from Server import *

from pyGlobus.io import GSITCPSocketServer, ThreadingGSITCPSocketServer
from pyGlobus import ioc


def GSIConfig():
    config = SOAPConfig()
    config.channel_mode = ioc.GLOBUS_IO_SECURE_CHANNEL_MODE_GSI_WRAP
    config.delegation_mode = ioc.GLOBUS_IO_SECURE_DELEGATION_MODE_FULL_PROXY
    config.tcpAttr = None
    config.authMethod = "_authorize"
    return config

Config = GSIConfig()


class GSISOAPServer(GSITCPSocketServer, SOAPServerBase):
    def __init__(self, addr=('localhost', 8000),
                 RequestHandler=SOAPRequestHandler, log=0, encoding='UTF-8',
                 config=Config, namespace=None):

        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)

        self.namespace = namespace
        self.objmap = {}
        self.funcmap = {}
        self.encoding = encoding
        self.config = config
        self.log = log

        self.allow_reuse_address = 1

        GSITCPSocketServer.__init__(self, addr, RequestHandler,
                                    self.config.channel_mode,
                                    self.config.delegation_mode,
                                    tcpAttr=self.config.tcpAttr)

    def get_request(self):
        sock, addr = GSITCPSocketServer.get_request(self)
        return sock, addr


class ThreadingGSISOAPServer(ThreadingGSITCPSocketServer, SOAPServerBase):
    def __init__(self, addr=('localhost', 8000),
                 RequestHandler=SOAPRequestHandler, log=0, encoding='UTF-8',
                 config=Config, namespace=None):

        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)

        self.namespace = namespace
        self.objmap = {}
        self.funcmap = {}
        self.encoding = encoding
        self.config = config
        self.log = log

        self.allow_reuse_address = 1

        ThreadingGSITCPSocketServer.__init__(self, addr, RequestHandler,
                                             self.config.channel_mode,
                                             self.config.delegation_mode,
                                             tcpAttr=self.config.tcpAttr)

    def get_request(self):
        sock, addr = ThreadingGSITCPSocketServer.get_request(self)
        return sock, addr
gpl-3.0
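Both server classes above inherit their registration interface from SOAPpy's SOAPServerBase, so exposing a function over the GSI-secured channel looks like the plain SOAPpy server case. A minimal usage sketch, assuming a working pyGlobus/Globus credential environment and SOAPpy's usual registerFunction plus the SocketServer-style serve_forever loop:

# Minimal usage sketch; requires pyGlobus and a valid GSI credential.
from SOAPpy import GSIServer

def echo(s):
    # Trivial function exposed over SOAP.
    return s

server = GSIServer.ThreadingGSISOAPServer(('localhost', 8000))
server.registerFunction(echo)   # provided by SOAPServerBase
server.serve_forever()          # SocketServer-style request loop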
atmark-techno/atmark-dist
user/python/Lib/knee.py
4
3522
"""An Python re-implementation of hierarchical module import. This code is intended to be read, not executed. However, it does work -- all you need to do to enable it is "import knee". (The name is a pun on the klunkier predecessor of this module, "ni".) """ import sys, imp, __builtin__, string # Replacement for __import__() def import_hook(name, globals=None, locals=None, fromlist=None): parent = determine_parent(globals) q, tail = find_head_package(parent, name) m = load_tail(q, tail) if not fromlist: return q if hasattr(m, "__path__"): ensure_fromlist(m, fromlist) return m def determine_parent(globals): if not globals or not globals.has_key("__name__"): return None pname = globals['__name__'] if globals.has_key("__path__"): parent = sys.modules[pname] assert globals is parent.__dict__ return parent if '.' in pname: i = string.rfind(pname, '.') pname = pname[:i] parent = sys.modules[pname] assert parent.__name__ == pname return parent return None def find_head_package(parent, name): if '.' in name: i = string.find(name, '.') head = name[:i] tail = name[i+1:] else: head = name tail = "" if parent: qname = "%s.%s" % (parent.__name__, head) else: qname = head q = import_module(head, qname, parent) if q: return q, tail if parent: qname = head parent = None q = import_module(head, qname, parent) if q: return q, tail raise ImportError, "No module named " + qname def load_tail(q, tail): m = q while tail: i = string.find(tail, '.') if i < 0: i = len(tail) head, tail = tail[:i], tail[i+1:] mname = "%s.%s" % (m.__name__, head) m = import_module(head, mname, m) if not m: raise ImportError, "No module named " + mname return m def ensure_fromlist(m, fromlist, recursive=0): for sub in fromlist: if sub == "*": if not recursive: try: all = m.__all__ except AttributeError: pass else: ensure_fromlist(m, all, 1) continue if sub != "*" and not hasattr(m, sub): subname = "%s.%s" % (m.__name__, sub) submod = import_module(sub, subname, m) if not submod: raise ImportError, "No module named " + subname def import_module(partname, fqname, parent): try: return sys.modules[fqname] except KeyError: pass try: fp, pathname, stuff = imp.find_module(partname, parent and parent.__path__) except ImportError: return None try: m = imp.load_module(fqname, fp, pathname, stuff) finally: if fp: fp.close() if parent: setattr(parent, partname, m) return m # Replacement for reload() def reload_hook(module): name = module.__name__ if '.' not in name: return import_module(name, name, None) i = string.rfind(name, '.') pname = name[:i] parent = sys.modules[pname] return import_module(name[i+1:], name, parent) # Save the original hooks original_import = __builtin__.__import__ original_reload = __builtin__.reload # Now install our hooks __builtin__.__import__ = import_hook __builtin__.reload = reload_hook
gpl-2.0
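Because the module installs its hooks as a side effect of being imported, enabling it is a single import and disabling it is just restoring the saved originals; for example:

# Python 2 only, like knee itself.
import knee                      # installs import_hook and reload_hook

import xml.sax                   # dotted import now served by knee

# Put the interpreter's original hooks back when done experimenting.
import __builtin__
__builtin__.__import__ = knee.original_import
__builtin__.reload = knee.original_reload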
orangeduck/PyAutoC
Python27/Lib/xml/sax/xmlreader.py
113
12632
"""An XML Reader is the SAX 2 name for an XML parser. XML Parsers should be based on this code. """ import handler from _exceptions import SAXNotSupportedException, SAXNotRecognizedException # ===== XMLREADER ===== class XMLReader: """Interface for reading an XML document using callbacks. XMLReader is the interface that an XML parser's SAX2 driver must implement. This interface allows an application to set and query features and properties in the parser, to register event handlers for document processing, and to initiate a document parse. All SAX interfaces are assumed to be synchronous: the parse methods must not return until parsing is complete, and readers must wait for an event-handler callback to return before reporting the next event.""" def __init__(self): self._cont_handler = handler.ContentHandler() self._dtd_handler = handler.DTDHandler() self._ent_handler = handler.EntityResolver() self._err_handler = handler.ErrorHandler() def parse(self, source): "Parse an XML document from a system identifier or an InputSource." raise NotImplementedError("This method must be implemented!") def getContentHandler(self): "Returns the current ContentHandler." return self._cont_handler def setContentHandler(self, handler): "Registers a new object to receive document content events." self._cont_handler = handler def getDTDHandler(self): "Returns the current DTD handler." return self._dtd_handler def setDTDHandler(self, handler): "Register an object to receive basic DTD-related events." self._dtd_handler = handler def getEntityResolver(self): "Returns the current EntityResolver." return self._ent_handler def setEntityResolver(self, resolver): "Register an object to resolve external entities." self._ent_handler = resolver def getErrorHandler(self): "Returns the current ErrorHandler." return self._err_handler def setErrorHandler(self, handler): "Register an object to receive error-message events." self._err_handler = handler def setLocale(self, locale): """Allow an application to set the locale for errors and warnings. SAX parsers are not required to provide localization for errors and warnings; if they cannot support the requested locale, however, they must throw a SAX exception. Applications may request a locale change in the middle of a parse.""" raise SAXNotSupportedException("Locale support not implemented") def getFeature(self, name): "Looks up and returns the state of a SAX2 feature." raise SAXNotRecognizedException("Feature '%s' not recognized" % name) def setFeature(self, name, state): "Sets the state of a SAX2 feature." raise SAXNotRecognizedException("Feature '%s' not recognized" % name) def getProperty(self, name): "Looks up and returns the value of a SAX2 property." raise SAXNotRecognizedException("Property '%s' not recognized" % name) def setProperty(self, name, value): "Sets the value of a SAX2 property." raise SAXNotRecognizedException("Property '%s' not recognized" % name) class IncrementalParser(XMLReader): """This interface adds three extra methods to the XMLReader interface that allow XML parsers to support incremental parsing. Support for this interface is optional, since not all underlying XML parsers support this functionality. When the parser is instantiated it is ready to begin accepting data from the feed method immediately. After parsing has been finished with a call to close the reset method must be called to make the parser ready to accept new data, either from feed or using the parse method. 
Note that these methods must _not_ be called during parsing, that is, after parse has been called and before it returns. By default, the class also implements the parse method of the XMLReader interface using the feed, close and reset methods of the IncrementalParser interface as a convenience to SAX 2.0 driver writers.""" def __init__(self, bufsize=2**16): self._bufsize = bufsize XMLReader.__init__(self) def parse(self, source): import saxutils source = saxutils.prepare_input_source(source) self.prepareParser(source) file = source.getByteStream() buffer = file.read(self._bufsize) while buffer != "": self.feed(buffer) buffer = file.read(self._bufsize) self.close() def feed(self, data): """This method gives the raw XML data in the data parameter to the parser and makes it parse the data, emitting the corresponding events. It is allowed for XML constructs to be split across several calls to feed. feed may raise SAXException.""" raise NotImplementedError("This method must be implemented!") def prepareParser(self, source): """This method is called by the parse implementation to allow the SAX 2.0 driver to prepare itself for parsing.""" raise NotImplementedError("prepareParser must be overridden!") def close(self): """This method is called when the entire XML document has been passed to the parser through the feed method, to notify the parser that there are no more data. This allows the parser to do the final checks on the document and empty the internal data buffer. The parser will not be ready to parse another document until the reset method has been called. close may raise SAXException.""" raise NotImplementedError("This method must be implemented!") def reset(self): """This method is called after close has been called to reset the parser so that it is ready to parse new documents. The results of calling parse or feed after close without calling reset are undefined.""" raise NotImplementedError("This method must be implemented!") # ===== LOCATOR ===== class Locator: """Interface for associating a SAX event with a document location. A locator object will return valid results only during calls to DocumentHandler methods; at any other time, the results are unpredictable.""" def getColumnNumber(self): "Return the column number where the current event ends." return -1 def getLineNumber(self): "Return the line number where the current event ends." return -1 def getPublicId(self): "Return the public identifier for the current event." return None def getSystemId(self): "Return the system identifier for the current event." return None # ===== INPUTSOURCE ===== class InputSource: """Encapsulation of the information needed by the XMLReader to read entities. This class may include information about the public identifier, system identifier, byte stream (possibly with character encoding information) and/or the character stream of an entity. Applications will create objects of this class for use in the XMLReader.parse method and for returning from EntityResolver.resolveEntity. An InputSource belongs to the application, the XMLReader is not allowed to modify InputSource objects passed to it from the application, although it may make copies and modify those.""" def __init__(self, system_id = None): self.__system_id = system_id self.__public_id = None self.__encoding = None self.__bytefile = None self.__charfile = None def setPublicId(self, public_id): "Sets the public identifier of this InputSource." self.__public_id = public_id def getPublicId(self): "Returns the public identifier of this InputSource." 
return self.__public_id def setSystemId(self, system_id): "Sets the system identifier of this InputSource." self.__system_id = system_id def getSystemId(self): "Returns the system identifier of this InputSource." return self.__system_id def setEncoding(self, encoding): """Sets the character encoding of this InputSource. The encoding must be a string acceptable for an XML encoding declaration (see section 4.3.3 of the XML recommendation). The encoding attribute of the InputSource is ignored if the InputSource also contains a character stream.""" self.__encoding = encoding def getEncoding(self): "Get the character encoding of this InputSource." return self.__encoding def setByteStream(self, bytefile): """Set the byte stream (a Python file-like object which does not perform byte-to-character conversion) for this input source. The SAX parser will ignore this if there is also a character stream specified, but it will use a byte stream in preference to opening a URI connection itself. If the application knows the character encoding of the byte stream, it should set it with the setEncoding method.""" self.__bytefile = bytefile def getByteStream(self): """Get the byte stream for this input source. The getEncoding method will return the character encoding for this byte stream, or None if unknown.""" return self.__bytefile def setCharacterStream(self, charfile): """Set the character stream for this input source. (The stream must be a Python 2.0 Unicode-wrapped file-like that performs conversion to Unicode strings.) If there is a character stream specified, the SAX parser will ignore any byte stream and will not attempt to open a URI connection to the system identifier.""" self.__charfile = charfile def getCharacterStream(self): "Get the character stream for this input source." return self.__charfile # ===== ATTRIBUTESIMPL ===== class AttributesImpl: def __init__(self, attrs): """Non-NS-aware implementation. attrs should be of the form {name : value}.""" self._attrs = attrs def getLength(self): return len(self._attrs) def getType(self, name): return "CDATA" def getValue(self, name): return self._attrs[name] def getValueByQName(self, name): return self._attrs[name] def getNameByQName(self, name): if not name in self._attrs: raise KeyError, name return name def getQNameByName(self, name): if not name in self._attrs: raise KeyError, name return name def getNames(self): return self._attrs.keys() def getQNames(self): return self._attrs.keys() def __len__(self): return len(self._attrs) def __getitem__(self, name): return self._attrs[name] def keys(self): return self._attrs.keys() def has_key(self, name): return name in self._attrs def __contains__(self, name): return name in self._attrs def get(self, name, alternative=None): return self._attrs.get(name, alternative) def copy(self): return self.__class__(self._attrs) def items(self): return self._attrs.items() def values(self): return self._attrs.values() # ===== ATTRIBUTESNSIMPL ===== class AttributesNSImpl(AttributesImpl): def __init__(self, attrs, qnames): """NS-aware implementation. attrs should be of the form {(ns_uri, lname): value, ...}. 
qnames of the form {(ns_uri, lname): qname, ...}.""" self._attrs = attrs self._qnames = qnames def getValueByQName(self, name): for (nsname, qname) in self._qnames.items(): if qname == name: return self._attrs[nsname] raise KeyError, name def getNameByQName(self, name): for (nsname, qname) in self._qnames.items(): if qname == name: return nsname raise KeyError, name def getQNameByName(self, name): return self._qnames[name] def getQNames(self): return self._qnames.values() def copy(self): return self.__class__(self._attrs, self._qnames) def _test(): XMLReader() IncrementalParser() Locator() if __name__ == "__main__": _test()
bsd-2-clause
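InputSource and AttributesImpl above are plain data holders, so they can be exercised without any parser behind them; for instance:

# Exercising the data-holder classes from xml.sax.xmlreader directly
# (Python 2, matching this module).
from StringIO import StringIO
from xml.sax.xmlreader import InputSource, AttributesImpl

source = InputSource("http://example.com/doc.xml")
source.setEncoding("utf-8")
source.setByteStream(StringIO("<doc/>"))
assert source.getSystemId() == "http://example.com/doc.xml"

attrs = AttributesImpl({"id": "n1", "lang": "en"})
assert attrs.getLength() == 2
assert attrs["id"] == "n1"
assert attrs.get("missing", "default") == "default"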
dosiecki/NewsBlur
apps/newsletters/views.py
3
111728
from pprint import pprint from django.http import HttpResponse, Http404 from django.conf import settings from utils import log as logging from apps.newsletters.models import EmailNewsletter from apps.rss_feeds.models import Feed, MStory def newsletter_receive(request): # params = { # 'stripped-signature':'Thanks,\nBob', # 'From':'Test mailer <[email protected]>', # 'attachment-count':'2', # 'To':'Alice <[email protected]>', # 'subject':'Test Newsletter The Skimm', # 'from':'Test mailer <[email protected]>', # 'User-Agent':'Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20130308 Thunderbird/17.0.4', # 'stripped-html':'<html><head><meta content="text/html; charset=ISO-8859-1" http-equiv="Content-Type"></head><body text="#000000" bgcolor="#FFFFFF">\n <div class="moz-cite-prefix">\n <div style="color: rgb(34, 34, 34); font-family: arial,\n sans-serif; font-size: 12.666666984558105px; font-style: normal;\n font-variant: normal; font-weight: normal; letter-spacing:\n normal; line-height: normal; orphans: auto; text-align: start;\n text-indent: 0px; text-transform: none; white-space: normal;\n widows: auto; word-spacing: 0px; -webkit-text-size-adjust: auto;\n -webkit-text-stroke-width: 0px; background-color: rgb(255, 255,\n 255);">Hi Alice,</div>\n <div style="color: rgb(34, 34, 34); font-family: arial,\n sans-serif; font-size: 12.666666984558105px; font-style: normal;\n font-variant: normal; font-weight: normal; letter-spacing:\n normal; line-height: normal; orphans: auto; text-align: start;\n text-indent: 0px; text-transform: none; white-space: normal;\n widows: auto; word-spacing: 0px; -webkit-text-size-adjust: auto;\n -webkit-text-stroke-width: 0px; background-color: rgb(255, 255,\n 255);"><br></div>\n <div style="color: rgb(34, 34, 34); font-family: arial,\n sans-serif; font-size: 12.666666984558105px; font-style: normal;\n font-variant: normal; font-weight: normal; letter-spacing:\n normal; line-height: normal; orphans: auto; text-align: start;\n text-indent: 0px; text-transform: none; white-space: normal;\n widows: auto; word-spacing: 0px; -webkit-text-size-adjust: auto;\n -webkit-text-stroke-width: 0px; background-color: rgb(255, 255,\n 255);">This is Bob.<span class="Apple-converted-space">&#160;<img alt="" src="cid:[email protected]" height="15" width="33"></span></div>\n <div style="color: rgb(34, 34, 34); font-family: arial,\n sans-serif; font-size: 12.666666984558105px; font-style: normal;\n font-variant: normal; font-weight: normal; letter-spacing:\n normal; line-height: normal; orphans: auto; text-align: start;\n text-indent: 0px; text-transform: none; white-space: normal;\n widows: auto; word-spacing: 0px; -webkit-text-size-adjust: auto;\n -webkit-text-stroke-width: 0px; background-color: rgb(255, 255,\n 255);"><br>\n I also attached a file.<br><br></div>\n <div style="color: rgb(34, 34, 34); font-family: arial,\n sans-serif; font-size: 12.666666984558105px; font-style: normal;\n font-variant: normal; font-weight: normal; letter-spacing:\n normal; line-height: normal; orphans: auto; text-align: start;\n text-indent: 0px; text-transform: none; white-space: normal;\n widows: auto; word-spacing: 0px; -webkit-text-size-adjust: auto;\n -webkit-text-stroke-width: 0px; background-color: rgb(255, 255,\n 255);">Thanks,</div>\n <div style="color: rgb(34, 34, 34); font-family: arial,\n sans-serif; font-size: 12.666666984558105px; font-style: normal;\n font-variant: normal; font-weight: normal; letter-spacing:\n normal; line-height: normal; orphans: auto; text-align: start;\n text-indent: 
0px; text-transform: none; white-space: normal;\n widows: auto; word-spacing: 0px; -webkit-text-size-adjust: auto;\n -webkit-text-stroke-width: 0px; background-color: rgb(255, 255,\n 255);">Bob</div>\n <br><br></div>\n <br></body></html>', # 'In-Reply-To':'<[email protected]>', # 'Date':'Fri, 26 Apr 2013 11:50:29 -0700', # 'Message-Id':'<[email protected]>', # 'body-plain':'Hi Alice,\n\nThis is Bob.\n\nI also attached a file.\n\nThanks,\nBob\n\nOn 04/26/2013 11:29 AM, Alice wrote:\n> Hi Bob,\n>\n> This is Alice. How are you doing?\n>\n> Thanks,\n> Alice\n\n', # 'Mime-Version':'1.0', # 'Received':'from [10.20.76.69] (Unknown [50.56.129.169]) by mxa.mailgun.org with ESMTP id 517acc75.4b341f0-worker2; Fri, 26 Apr 2013 18:50:29 -0000 (UTC)', # 'content-id-map':'{"<[email protected]>": "attachment-1"}', # 'Sender':'[email protected]', # 'timestamp':'1455054990', # 'message-headers':'[["Received", "by luna.mailgun.net with SMTP mgrt 8788212249833; Fri, 26 Apr 2013 18:50:30 +0000"], ["Received", "from [10.20.76.69] (Unknown [50.56.129.169]) by mxa.mailgun.org with ESMTP id 517acc75.4b341f0-worker2; Fri, 26 Apr 2013 18:50:29 -0000 (UTC)"], ["Message-Id", "<[email protected]>"], ["Date", "Fri, 26 Apr 2013 11:50:29 -0700"], ["From", "Test mailer <[email protected]>"], ["User-Agent", "Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20130308 Thunderbird/17.0.4"], ["Mime-Version", "1.0"], ["To", "Alice <[email protected]>"], ["Subject", "Re: Sample POST request"], ["References", "<[email protected]>"], ["In-Reply-To", "<[email protected]>"], ["X-Mailgun-Variables", "{\\"my_var_1\\": \\"Mailgun Variable #1\\", \\"my-var-2\\": \\"awesome\\"}"], ["Content-Type", "multipart/mixed; boundary=\\"------------020601070403020003080006\\""], ["Sender", "[email protected]"]]', # 'stripped-text':'Hi Alice,\n\nThis is Bob.\n\nI also attached a file.', # 'recipient':'[email protected]', # 'sender':'[email protected]', # 'X-Mailgun-Variables':'{"my_var_1": "Mailgun Variable #1", "my-var-2": "awesome"}', # 'token':'cb2ef40ca2fee03a099f7da78ca07384228f00f023026c77a4', # 'body-html':"""\r\n\r\n<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\r\n<html>\r\n <head>\r\n <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">\r\n <!-- Responsive Design -->\r\n <!-- Facebook sharing information tags -->\r\n <img src="http://click.morning7.theskimm.com/open.aspx?ffcb10-ff2d11787561-fe8b1d737d6d077c76-fe881372756c027a7c-ff9d1670-fe8c1d737d6c067d70-fefc15727c6502" width="1" height="1">\r\n <meta property="og:image" content="http://cdn.theskimm.com/assets/skimm-fb-logo.png">\r\n <meta name="viewport" content="width=device-width">\r\n\r\n <style type="text/css">\r\n body{\r\n color:#000 !important;\r\n }\r\n p.skimm-p a,a,a:link,a:hover,a:visited{\r\n color:#009f9c!important;\r\n text-decoration:none;\r\n }\r\n a:hover{\r\n text-decoration:underline;\r\n }\r\n p{\r\n font-family:Helvetica,Arial,sans-serif;\r\n font-size:15px;\r\n line-height:20px;\r\n letter-spacing:0em;\r\n color:#000;\r\n margin:0;\r\n padding:0;\r\n }\r\n #logo{\r\n text-decoration:none;\r\n display:block;\r\n padding-top:18px;\r\n margin:0 auto;\r\n }\r\n #missed{\r\n padding:34px 0;\r\n }\r\n #missed p{\r\n text-align:center;\r\n padding:0 0 3px;\r\n font-size:14px;\r\n }\r\n #sharing{\r\n padding:24px 0 32px;\r\n margin:0 auto 35px;\r\n background:#009f9b;\r\n width:100%;\r\n }\r\n #sharing h2{\r\n font-family:Helvetica,Arial,sans-serif;\r\n font-weight:bold;\r\n 
text-transform:uppercase;\r\n text-align:center;\r\n letter-spacing:0.28em;\r\n padding:0 0 24px;\r\n margin:0;\r\n font-style:normal;\r\n font-size:13px;\r\n color:#fff;\r\n }\r\n #sharing .share_icons{\r\n margin:0 auto;\r\n text-align:center;\r\n }\r\n #sharing .share_icons .share{\r\n display:inline-block;\r\n text-transform:uppercase;\r\n color:#fff;\r\n text-decoration:none;\r\n text-align:center;\r\n margin-right:15px;\r\n }\r\n #sharing .share_icons .share img{\r\n display:block;\r\n font-size:28px;\r\n margin:0 auto 10px;\r\n }\r\n #sharing .share_icons span:last-child .share:last-child{\r\n margin-left:7px;\r\n }\r\n #sharing .share_icons .share:last-child img{\r\n margin:0 auto 8px;\r\n }\r\n #sharing .share_icons .share span{\r\n display:block;\r\n font-family:Helvetica,Arial,sans-serif;\r\n font-size:11px;\r\n font-weight:bold;\r\n letter-spacing:3px;\r\n }\r\n .img_el{\r\n margin:0 auto 22px;\r\n display:block;\r\n }\r\n .retinaonlyicon{\r\n width:46px;\r\n height:49px;\r\n }\r\n .theskimm{\r\n text-transform:none !important;\r\n }\r\n #rss-content p,#rss-content h1,#rss-content h2,#rss-content h3,#rss-content img,#rss-content hr{\r\n margin-left:auto;\r\n margin-right:auto;\r\n }\r\n .skimm-birthdays,.skimm-shareus,.skimm-gift,.skimm-life{\r\n padding-bottom:15px !important;\r\n }\r\n .skimm-h3.skimm-shareus{\r\n background:url(http://cdn.theskimm.com/email/3/normal/skimmsend_icon.png) no-repeat 50% 0;\r\n text-align:center;\r\n padding-top:50px;\r\n margin-top:20px;\r\n }\r\n .skimm-p{\r\n line-height:23px;\r\n color:#000;\r\n font-family:Helvetica,Arial,sans-serif;\r\n font-size:16px;\r\n padding-bottom:15px !important;\r\n }\r\n .share-jumpto-links{\r\n margin-top:0px !important;\r\n margin-bottom:12px !important;\r\n margin-left:auto;\r\n margin-right:auto;\r\n }\r\n .skimm-h1{\r\n font-family:Helvetica,Arial,sans-serif;\r\n font-size:22px !important;\r\n font-weight:bold;\r\n color:#000;\r\n border-bottom:3px solid #00A49F;\r\n padding:12px 0 !important;\r\n margin-top:8px;\r\n margin-bottom:24px;\r\n\r\n text-align:left;\r\n letter-spacing:0.08em;\r\n }\r\n .skimm-h2{\r\n font-family:Helvetica,Arial,sans-serif;\r\n font-weight:bold;\r\n font-size:20px;\r\n letter-spacing:0.01em;\r\n color:#000;\r\n padding:0 0 0 0;\r\n margin-top:22px;\r\n margin-bottom:12px;\r\n text-align:left;\r\n }\r\n .skimm-h3{\r\n font-family:Helvetica,Arial,sans-serif;\r\n font-weight:bold !important;\r\n font-size:15px;\r\n letter-spacing:0.15em;\r\n line-height:20px;\r\n color:#000;\r\n padding:0;\r\n margin-bottom:4px;\r\n text-align:left;\r\n }\r\n .skimm-h3.skimm-h3-quote{\r\n text-align:left;\r\n }\r\n .skimm-h3.skimm-birthdays{\r\n text-align:center;\r\n margin-top:20px;\r\n }\r\n .skimm-h3.skimm-life{\r\n text-align:center;\r\n margin-top:20px;\r\n }\r\n .skimm-h3.skimm-gift{\r\n text-align:center;\r\n margin-top:20px;\r\n }\r\n .skimm-hr{\r\n border:0;\r\n margin-bottom:0 !important;\r\n }\r\n .skimm-hr.skimm-hr-thick{\r\n border-top:3px solid #009f9c !important;\r\n background:none !important;\r\n margin-bottom:25px !important;\r\n }\r\n .skimm-hr.skimm-hr-thin{\r\n border-top:1px solid #bfbfbf !important;\r\n height:0 !important;\r\n background:#fff !important;\r\n margin-bottom:20px !important;\r\n }\r\n .skimm-hr.skimm-hr-thin.skimm-hr-teal{\r\n border-top:1px solid #009f9c !important;\r\n }\r\n #outlook a{\r\n padding:0;\r\n }\r\n body{\r\n width:100% !important;\r\n }\r\n .ReadMsgBody{\r\n width:100%;\r\n }\r\n .ExternalClass{\r\n width:100%;\r\n }\r\n body{\r\n 
Skimm for November 16th

Skimm'd at Friendsgiving with coffee that tastes like your fav baked goods.
<a href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe49668c3bcc1d0e3afbff63b0279170e2c2fdf703c9bb1b8cf3fc642c83a0de5dd0603" target="_blank">Drink up here.</a>\r\n </td>\r\n </tr>\r\n <tr>\r\n <td width="100%" height="15"></td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n</table>\r\n<table width="100%" bgcolor="#ffffff" cellpadding="0" cellspacing="0" border="0" id="backgroundTable">\r\n <tbody>\r\n <tr>\r\n <td>\r\n <table width="100%" cellpadding="0" cellspacing="0" border="0" align="center" class="devicewidth">\r\n <tbody>\r\n <tr>\r\n <td width="100%">\r\n <table bgcolor="#ffffff" width="100%" cellpadding="0" cellspacing="0" border="0" align="center" class="devicewidth">\r\n <tbody>\r\n <tr>\r\n <td width="100%" height="20"></td>\r\n </tr>\r\n <tr>\r\n <td>\r\n <table width="100%" style="max-width:600px;" align="center" cellpadding="0" cellspacing="0" border="0" class="devicewidthinner">\r\n <tbody>\r\n <tr>\r\n <td style="font-family: Helvetica, arial, sans-serif; font-size: 15px; font-weight:bold; color: #000000; text-align:left;line-height: 20px; letter-spacing:0.15em;" class="title">\r\n QUOTE OF THE DAY\r\n </td>\r\n </tr>\r\n <tr>\r\n <td style="font-family:Helvetica,Arial,sans-serif;font-size:16px;line-height:20px;letter-spacing:0em;color:#000;margin:0;padding:0;">\r\n <p class="skimm-p">&#8220;What up, peach butt?&#8221; - <a style="color:#009f9c!important;text-decoration:none;" href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe496689a95679d7cee905acd1bd6bb4f9a114a6ad0a607c7a8bc4ef0cabc958cf2fb32" target="_blank">One person&#8217;s reaction</a>&#160;to Apple bringing back the OG peach emoji. Because the Internet loves big peaches and it cannot lie.</p>\r\n </td>\r\n </tr>\r\n <tr>\r\n <td>\r\n <div class="share-jumpto-links"><div style="box-sizing:border-box;position:relative;height:45px;">\r\n <div style="padding:0;overflow:hidden;color:#009f9c;font-family:\'Raleway\',sans-serif;font-weight:800;font-size: 12px;text-transform: uppercase;line-height: 30px;letter-spacing: 0.28em;text-align:left;">\r\n <span style="display:inline-block;margin-right:6px;">Skimm This</span><a style="width:20px;height:30px;vertical-align:bottom;margin: 0 5px 0;display:inline-block;" href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe4966833128e1dee0ee6de0affe606e7df71c41589a66347535625737e36f7002c2293"><img src="http://cdn.theskimm.com/email/3/retina/logo_facebook_teal.png" style="display:block;vertical-align:middle;padding-top:0px;margin:0 auto !important;" width="10" height="24" alt="Like Us" /></a><a style="width:30px;height:30px;vertical-align:bottom;margin: 0 5px 0;display:inline-block;" href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe496681a9f9e0c2b717ffcd0ff065052083ef574492f42d19003343a16417b51d1c5e2"><img src="http://cdn.theskimm.com/email/3/retina/logo_twitter_teal.png" style="display:block;vertical-align:middle;padding-top:0px;margin:4px auto 0 !important;" width="25" height="19" alt="Tweet with Us" /></a>\r\n <a style="width: 20px; height: 25px; vertical-align: bottom; margin: 5px 5px 0; display: inline-block;" href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe49668b45e2f0e181622f538351d174b653e094d53348383175ae22951ea3fbee530ce"><img src="http://cdn.theskimm.com/email/3/retina/icon_share_instagram.png" style="display: block; vertical-align: middle; padding-top: 0px; margin: 0 auto !important;" width="20" height="20" alt="Insta This" 
FAKE 'N BAKE

THE STORY

Facebook and Google are trying to stop certain websites from faking it.

WAIT…BACK UP.

During and after the presidential election, there were a lot of fake news articles floating around the Interwebs - especially on Google and Facebook.
See: stories on Facebook claiming that Pope Francis and Denzel Washington had endorsed Donald Trump. False. Also see: a news story leading Google's search results claiming that Trump had won the popular vote. Again, false.

HOW DID THIS HAPPEN?

Both Google and Facebook use algorithms to decide what people see in their search results and News Feeds. And people are more likely to see websites and stories that are already getting attention. During this election, people were apparently clicking a lot on attention-grabbing - but fake - stories. So Google and Facebook have gotten a lot of side eye for maybe helping spread false news that misled voters.

SO WHAT NOW?

Earlier this week, both tech companies said they're banning fake news sites from using their ad platforms. Hint: tools that help other websites make some cash money by filling ad spaces on their pages. So this move could hit fake sites where it hurts…their bank accounts. But these actions won't actually stop fake stories from showing up in search results and News Feeds. So, problem not solved.

theSKIMM

Turns out your grandma who always says 'don't believe what you read on the Internet' is onto something. The issue is that lately, it's been getting harder to tell fact from fiction. Think of Google and Facebook's moves as baby steps in the right direction.

THE *: Twitter's also battling some demons. Yesterday it announced a new 'mute' button for hate speech that can be used to block trolls. Slow.
Clap.

REPEAT AFTER ME...

WHAT TO SAY TO YOUR FRIEND WHO OVERUSES THE FLOWER CROWN FILTER…
Big news. Yesterday, it came out that Snapchat - aka Snap Inc. - reportedly filed to go public. The Dancing Ghost could start trading shares as soon as March next year. And be valued at around $25 billion. That would make it one of the largest IPOs since Alibaba went public in 2014 for more than $170 billion. Not bad for a company that once said 'as if' to a $3 billion takeover bid from Facebook. Now, the app known for high-pitched deer voices and major key has more than 100 million active users. Snapchat's not sending any disappearing comments confirming or denying these reports. For now.
WHAT TO SAY WHEN DECIDING BETWEEN TAKEOUT AND LEFTOVERS…

Decisions, decisions. DC's making a lot of those too. Yesterday, House Republicans re-nominated Rep. Paul Ryan (R-WI) for House Speaker. So he'll continue as the voice of the GOP in Congress when President-elect Donald Trump takes over the White House. Ryan and Trump haven't been huge fans of each other in the past, so this could be fun. Meanwhile, House Dems are in 'reassess' mode since last week's election upset. Yesterday, they postponed an election for House Minority Leader until the end of the month. So it's unclear whether current leader Rep. Nancy Pelosi (D-CA) will get to keep her job, which she's had for more than a decade. Speaking of jobs, some of Trump's transition team members don't have one anymore. Yesterday, two aides who were helping with national security and foreign policy were pushed out. One of them reportedly over his close ties to NJ Gov. Chris Christie - who was recently demoted from his own transition team job - and his bridge full of problems. VP-elect Mike Pence is heading everything up now.
And he has a lot of open positions to fill on the US gov's website. Bueller…?

WHAT TO SAY TO YOUR FRIEND WHO 'CHECKED IN' TO STANDING ROCK ON FB…

And the story continues. Earlier this week, the company building the Dakota Access Pipeline asked a federal court to step in and give them the green light to finish the project.
That's because the US gov recently put off making a decision about whether pipeline construction could continue near a Native American reservation in North Dakota. Reminder: the Dakota Access Pipeline is a more than 1,000-mile-long oil pipeline that would run through four states. Native Americans and environmentalists have spent months protesting the project in North Dakota, saying it could contaminate the water supply of a nearby tribe and demolish sacred sites. The tribe filed a lawsuit, and after a lot of legal back-and-forth, the project's been put on hold by the Obama administration. Now, the pipeline company's saying delays have cost about $100 million and it's 'time to get this show on the road.' Meanwhile, critics are saying 'nope, fight's not over.' Yesterday, hundreds of people protested in cities across the US.
style="font-family:Helvetica,Arial,sans-serif;font-size:16px;line-height:20px;letter-spacing:0em;color:#000;margin:0;padding:0;">\r\n <p class="skimm-p">That&#8217;s nothing. <a style="color:#009f9c!important;text-decoration:none;" href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe496682fcd1f9b9aecfa2a7f592838f5083fa8cc4de5ea878f7540239e79545d837620" target="_blank">Really.</a></p>\r\n </td>\r\n </tr>\r\n <tr>\r\n <td>\r\n <div class="share-jumpto-links" style="margin-top:0px!important;margin-bottom:12px!important;margin-left:auto;margin-right:auto;">\r\n <div style="box-sizing:border-box;position:relative;height:45px;">\r\n <div style="padding:0;overflow:hidden;color:#009f9c;font-family:\'Raleway\',sans-serif;font-weight:800;font-size: 12px;text-transform: uppercase;line-height: 30px;letter-spacing: 0.28em;text-align:left;">\r\n <span style="display:inline-block;margin-right:6px;">Skimm This</span><a style="width:20px;height:30px;vertical-align:bottom;margin: 0 5px 0;display:inline-block;" href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe49668f7756a67344bc84f4633e55ffce21c8650b0fba307a821a868594bcd5e1128f8"><img src="http://cdn.theskimm.com/email/3/retina/logo_facebook_teal.png" style="display:block;vertical-align:middle;padding-top:0px;margin:0 auto !important;" width="10" height="24" alt="Like Us" /></a><a style="width:30px;height:30px;vertical-align:bottom;margin: 0 5px 0;display:inline-block;" href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe4966896eb0d1c6f010fe5ac0be20cf13bedffe9d223f6e63009deb8c7c9ffe8baf42c"><img src="http://cdn.theskimm.com/email/3/retina/logo_twitter_teal.png" style="display:block;vertical-align:middle;padding-top:0px;margin:4px auto 0 !important;" width="25" height="19" alt="Tweet with Us" /></a>\r\n </div>\r\n </div>\r\n </div>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n</table>\r\n<table width="100%" bgcolor="#ffffff" cellpadding="0" cellspacing="0" border="0" id="backgroundTable">\r\n <tbody>\r\n <tr>\r\n <td>\r\n <table width="100%" style="max-width:620px" cellpadding="0" cellspacing="0" border="0" align="center" class="devicewidth">\r\n <tbody>\r\n <tr>\r\n <td width="100%">\r\n <table bgcolor="#ffffff" width="100%" style="max-width:620px" cellpadding="0" cellspacing="0" border="0" align="center" class="devicewidth">\r\n <tbody>\r\n <tr>\r\n <td width="100%" height="0"></td>\r\n </tr>\r\n <tr>\r\n <td>\r\n <table width="100%" style="max-width:600px;" align="center" cellpadding="0" cellspacing="0" border="0" class="devicewidthinner">\r\n <tbody>\r\n <tr>\r\n <td width="100%" height="15"></td>\r\n </tr>\r\n <tr>\r\n <td>\r\n <h1 class="skimm-h1" style="font-size:24px!important;font-weight:bold;color:#000;border-bottom:3px solid #00A49F;padding:12px0!important;margin-top:8px;margin-bottom:24px;text-rendering:geometricPrecision;text-align:left;letter-spacing:0.08em;">THING TO KNOW</h1>\r\n </td>\r\n </tr>\r\n <!--skimmPH:[skimm-thing-to-know-THING TO KNOW]--> <tr>\r\n <td style="font-family:Helvetica,Arial,sans-serif;font-size:16px;line-height:20px;letter-spacing:0em;color:#000;margin:0;padding:0;">\r\n <p class="skimm-p"><strong><a style="color:#009f9c!important;text-decoration:none;" href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe49668c78d2d0af98be16bad76c055a8f44143371f5f10237bd7b536cbb1903e3e7419" target="_blank">Young Joe Biden:</a></strong> A picture of Vice President Joe Biden. 
When he was younger. And really, really, really, ridiculously good looking. And when 'malarkey' might have worked in a pickup line.

SKIMM LIFE

Brought to you by Casper
style="border:none;min-height:auto;line-height:100%;margin:0;outline:none;padding:0;text-decoration:none">\r\n </td>\r\n </tr>\r\n <tr>\r\n <td width="100%" height="10"></td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr> <tr>\r\n <td style="font-family:Helvetica,Arial,sans-serif;font-size:16px;line-height:20px;letter-spacing:0em;color:#000;margin:0;padding:0;">\r\n <p class="skimm-p">Pillow talk just got a lot more comfortable. Meet your new mattress that will make it extra hard to get out of bed. $50 off included. Get it&#160;<a style="color:#009f9c!important;text-decoration:none;" href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe4966807a19aaf9c299ce80282cd4d859990644edf2f8879fac889f7ff73a7439f9cc8" target="_blank">here</a>.</p>\r\n<p>&nbsp;</p>\r\n </td>\r\n </tr>\r\n <tr>\r\n <td width="100%" height="15"></td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n</table>\r\n<table width="100%" bgcolor="#ffffff" cellpadding="0" cellspacing="0" border="0" id="backgroundTable">\r\n <tbody>\r\n <tr>\r\n <td>\r\n <table width="100%" style="max-width:620px" cellpadding="0" cellspacing="0" border="0" align="center" class="devicewidth">\r\n <tbody>\r\n <tr>\r\n <td width="100%">\r\n <table bgcolor="#ffffff" width="100%" style="max-width:620px" cellpadding="0" cellspacing="0" border="0" align="center" class="devicewidth">\r\n <tbody>\r\n <tr>\r\n <td width="100%" height="20"></td>\r\n </tr>\r\n <tr>\r\n <td>\r\n <table width="100%" style="max-width:600px;" align="center" cellpadding="0" cellspacing="0" border="0" class="devicewidthinner">\r\n <tbody>\r\n <tr>\r\n <td width="100%" height="15"></td>\r\n </tr>\r\n <tr>\r\n <td>\r\n <h3 style="text-transform:uppercase;color:#000;display:block;font-family:Helvetica,Arial,sans-serif;font-size:15px;font-weight:bold!important;line-height:20px;margin-top:20px;margin-right:auto;margin-bottom:4px;margin-left:auto;text-align:center;letter-spacing:0.15em;padding:0;padding-bottom:15px!important">SKIMM SHARE</h3>\r\n </td>\r\n </tr>\r\n <tr>\r\n <td>\r\n <table width="100%" style="text-align:left!important;margin:0 auto!important;max-width:595px" cellpadding="0" cellspacing="0" border="0" align="left">\r\n <tbody>\r\n <tr>\r\n <td width="100%" align="center" valign="middle">\r\n <img src="http://cdn.theskimm.com/email/3/retina/brought-to-you-by-Feed.png" width="300" height="28" border="0" style="border:none;min-height:auto;line-height:100%;margin:0;outline:none;padding:0;text-decoration:none">\r\n </td>\r\n </tr>\r\n <tr>\r\n <td width="100%" height="10"></td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr> <tr>\r\n <td class="share-button-copy" style="font-family:Helvetica,Arial,sans-serif;font-size:16px;line-height:20px;letter-spacing:0em;color:#000;margin:0;padding:0;">\r\n <p dir="ltr" style="color: #222222;" class="skimm-p">Thanksgiving comin&#8217; in hot. We want it to be all gravy for you - and kids around the world. Say hi to <span style="color: #0000ff;"><a style="color:#009f9c!important;text-decoration:none;" href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe49668addb3e5e53489ee3fc0128aa9e26256520e3877f670bc283030c7d5610a2b9e3" target="_blank">FEED</a></span>. 
They're dedicated to fighting world hunger, and we think you should help.

So here's what we're thinking…

* From now through Thursday, every time you share theSkimm, FEED will donate a meal to a child in need
* You can share here, or using your referral link
* Your referral link: https://www.theskimm.com/?r=65f2f865

Questions? Here are your answers.

SKIMM BIRTHDAYS
style="color:#009f9c!important;text-decoration:none;" href="http://click.morning7.theskimm.com/?qs=d763cee0c4487653a10d863e3c54ce20da1bd1b22c5fbb9859fa75c263a7640bbb41ea283e5eba90" target="_blank">* indicates Skimm&#8217;bassador.</a>&#160;Go big.</b></p>\r\n<div style="color: #222222;">\r\n<p class="skimm-p"><b><span style="color: #222222;">Trish Engleman </span></b><span style="color: #222222;">(New York, NY); <strong>Sarah Finn</strong> (New York, NY);</span><b><span style="color: #222222;">&#160;</span>*Jackie O&#8217;Shaughnessy&#160;</b>(Chapel Hill, NC);&#160;<b>*Gabriella Rossi</b>&#160;(New York, NY);&#160;<b>*Jenna Levy&#160;</b>(Baltimore, MD);&#160;<b>*Allison Wynant&#160;</b>(North Potomac, MD);&#160;<b>*Heather Modicut&#160;</b>(Loranger, LA);&#160;<b>*Mirela Raic&#160;</b>(Dubai, UAE);&#160;<b>*Heather McCowen</b>&#160;(Chicago, IL);&#160;<b>*Kimberly Novosel&#160;</b>(Nashville, TN);&#160;<b>*Armand Mignot</b>&#160;(Boston, MA);&#160;<b>*Beth Zentmeyer-Harvey&#160;</b>(Tampa, FL);&#160;<b>*Sue Fannin&#160;</b>(Greensboro, NC);&#160;<b>*Sokphal Tun&#160;</b>(Arlington, VA);&#160;<b>*Mary Augustine-Morris</b>&#160;(New York, NY);&#160;<b>*Lisa Watkins</b>&#160;(Austin, TX);&#160;<b>*Alexandra Perrault&#160;</b>(Cincinnati, OH);&#160;<b>*Courtney Crotty</b>&#160;(Glendale, CO);&#160;<b>*Keri Robinson&#160;</b>(Highland, NY);&#160;<b>*Rachel Gold&#160;</b>(Denver, CO);&#160;<b>Caitriona Brannigan&#160;</b>(Dresher, PA);&#160;<b>Jessica Reagan</b>&#160;(Cary, NC);&#160;<b>Rachael Drake&#160;</b>(Covington, KY);&#160;<b>Abbie Gabel&#160;</b>(Lincoln, NE);&#160;<b>Sarah Catherine Norris</b>&#160;(Dallas, TX);&#160;<b>Erika Sorensen</b>&#160;(West Fargo, ND);&#160;<b>Laurie Crane</b>&#160;(Augusta, GA);&#160;<strong>Alli Fleder&#160;</strong>(New York, NY);&#160;<b>Brenna Delk</b>&#160;(Houston, TX);&#160;<b>Mary A McKay</b>&#160;(Lubbock, TX);&#160;<b>Melissa Gay&#160;</b>(Chicago, IL);&#160;<strong>Susanne Erni</strong> (Atlanta, GA);&#160;<b>Shayla Castrelos</b>&#160;(Springfield, NJ);&#160;<b>Russell VanDommelen</b>&#160;(Byron Center, MI);&#160;<b>Olivia Gay&#160;</b>(Chicago, IL);&#160;<b>Katie Otto</b>&#160;(Park Ridge, IL);&#160;<b>Nancy Thorpe Calhoun&#160;</b>(Plano, TX);&#160;<b>Allie Sauls</b>&#160;(Houston, TX);&#160;<b>Kendra Harris</b>&#160;(Traverse City, MI);&#160;<b>Kelsey Bowman</b>&#160;(Washington, DC);&#160;<b>Teresa Decker&#160;</b>(Ames, IA);&#160;<b>Hannah Epstein&#160;</b>(Ann Arbor, MI);&#160;<b>Shelby Harris</b>&#160;(Traverse City, MI);&#160;<b>Bridget Mayer&#160;</b>(Chicago, IL);&#160;<b>Megan Hewett&#160;</b>(Cincinnati, OH);&#160;<b>Kaitlyn Watson</b>&#160;(Richmond, VA);&#160;<b>Sean Levitt&#160;</b>(Encino, CA);&#160;<b>Amy Babington</b>&#160;(Brooklyn, NY);&#160;<b>Dominique Buhl&#160;</b>(Portland, OR);&#160;<b>Ali Kaiser</b>&#160;(Denver, CO);&#160;<b>Andrea Brzozoski</b>&#160;(Atlanta, GA);&#160;<b>Karen Grant-Davie&#160;</b>(London, England);&#160;<b>Kelly Leary</b>;&#160;<b>Kelsey Shorette</b>;&#160;<b>Kiva Eisenstock</b>&#160;(Pacific Palisades, CA);&#160;<b>Lauren Ciaccio Sistrunk</b>&#160;(New Orleans, LA);&#160;<b>Lillian Anselmi</b>&#160;(New York, NY);&#160;<b>Samantha Sullivan</b>&#160;(Boston, MA);&#160;<b>Jessica Sheely&#160;</b>(Houston, TX);&#160;<b>Sharona Sokolow</b>&#160;(Los Angeles, CA)</p>\r\n</div>\r\n </td>\r\n </tr>\r\n <tr>\r\n <td width="100%" height="15"></td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n 
</tbody>\r\n</table>\r\n\r\n\r\n <table width="100%" bgcolor="#ffffff" cellpadding="0" cellspacing="0" border="0" id="backgroundTable">\r\n <tbody>\r\n <tr>\r\n <td>\r\n <table width="100%" style="max-width:620px" cellpadding="0" cellspacing="0" border="0" align="center" class="devicewidth">\r\n <tbody>\r\n <tr>\r\n <td width="100%" height="21"></td>\r\n </tr>\r\n <tr>\r\n <td width="100%">\r\n <table bgcolor="#ffffff" width="100%" style="max-width:620px" align="center" cellspacing="0" cellpadding="0" border="0" class="devicewidth">\r\n <tbody>\r\n <tr>\r\n <td width="100%" height="22"></td>\r\n </tr>\r\n <tr>\r\n <td style="color: #000; font-family: \'Helvetica\', \'Arial\', sans-serif;font-size: 14px;font-weight: normal;line-height: 20px; text-align:center;">\r\n Skimm\'d something we missed?\r\n <br>\r\n <br>\r\n Email <a href="mailto:[email protected]" target="_blank" style="color: #009f9c !important; text-decoration: none">[email protected]</a> <span class="bullet">&bull;</span> <a href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe496682b8c2a8ce3b89f149c3b0e3d4bb79d27aff69849d4262d99cf9ca3edc7323ac8fb26da3883ad2171" target="_blank" style="color: #009f9c !important; text-decoration: none">Read in browser &raquo;</a>\r\n </td>\r\n </tr>\r\n <tr>\r\n <td width="100%" height="22"></td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n\r\n <table width="100%" bgcolor="#ffffff" cellpadding="0" cellspacing="0" border="0" id="backgroundTable">\r\n <tbody>\r\n <tr>\r\n <td>\r\n <table width="100%" style="max-width:620px" cellpadding="0" cellspacing="0" border="0" align="center" class="devicewidth">\r\n <tbody>\r\n <tr>\r\n <td width="100%">\r\n <table bgcolor="#009f9c" width="100%" style="max-width:620px" cellpadding="0" cellspacing="0" border="0" align="center" class="devicewidth">\r\n <tbody>\r\n <tr>\r\n <td>\r\n <table width="100%" style="max-width:620px" align="left" border="0" cellpadding="0" cellspacing="0" class="devicewidthinner">\r\n <tbody>\r\n <tr>\r\n <td style="padding:10px;font-family: \'Helvetica\', \'Arial\', sans-serif; color: #fff !important; display: block; font-size: 13px; font-weight: bold; text-align: center; text-decoration: none;letter-spacing:3px; min-width: 320px; " class="share-header">\r\n SHARE &amp; FOLLOW US\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n <table bgcolor="#009f9c" width="100%" style="max-width:620px" cellpadding="0" cellspacing="0" border="0" align="center" class="devicewidth">\r\n <tbody>\r\n <tr>\r\n <td>\r\n <table width="100%" style="" align="left" border="0" cellpadding="0" cellspacing="0" class="devicewidthinner">\r\n <tbody>\r\n <tr>\r\n <td width="100%" height="10"></td>\r\n </tr>\r\n <tr>\r\n <td>\r\n <table width="100%" style="" align="center" border="0" cellpadding="0" cellspacing="0" class="devicewidthinner">\r\n <tbody>\r\n <tr>\r\n <td style="font-family: Helvetica, arial, sans-serif; font-size: 12px; color: #333333; text-align:center;line-height: 24px;min-width:245px;" class="share-facebook-and-twitter">\r\n <a class="share share-facebook" target="_blank" href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe49668b8a219a19a4a11f7fbe00b3be2b3a67d604585348adbc462e8d827955d9906e5" style="color: #009f9c !important; display: inline-block; margin:0 15px; width:90px; white-space:nowrap; text-align: center; text-decoration: none; text-transform: uppercase"><img 
src="http://cdn.theskimm.com/email/3/retina/footer_facebook.png" width="10" height="23" alt="Like Us" style="-ms-interpolation-mode: bicubic; border: none; clear: both; display: block; float: none; font-size: 28px; margin: 0 auto 10px; max-width: 100%; outline: none; text-decoration: none;" align="none"><span style="color: #fff !important; display: block; font-size: 11px; font-weight: bold; letter-spacing: 3px; margin: 15px auto">Facebook</span></a>\r\n <a class="share share-twitter" target="_blank" href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe4966898e1043a8cc241984730cf63d0784bd543423d9f1df397f2a7a4203f959e362b" style="color: #009f9c !important; display: inline-block; margin:0 15px; width:90px; white-space:nowrap; text-align: center; text-decoration: none; text-transform: uppercase"><img src="http://cdn.theskimm.com/email/3/retina/footer_twitter.png" width="25" height="19" alt="Tweet with Us" style="-ms-interpolation-mode: bicubic; border: none; clear: both; display: block; float: none; font-size: 28px; margin: 0 auto 10px; max-width: 100%; outline: none; text-decoration: none;" align="none"><span style="color: #fff !important; display: block; font-size: 11px; font-weight: bold; letter-spacing: 3px; margin: 15px auto">Twitter</span></a>\r\n <a class="share share-tumblr" target="_blank" href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe4966849a03da5efae6876e0409669af55199d72299531e60a31b4a6931035c958b874" style="color: #009f9c !important; display: inline-block; margin:0 15px; width:90px; white-space:nowrap; text-align: center; text-decoration: none; text-transform: uppercase"><img src="http://cdn.theskimm.com/email/3/retina/footer_tumblr.png" width="13" height="21" alt="Tumble with Us" style="-ms-interpolation-mode: bicubic; border: none; clear: both; display: block; float: none; font-size: 28px; margin: 0 auto 8px; max-width: 100%; outline: none; text-decoration: none;" align="none"><span style="color: #fff !important; display: block; font-size: 11px; font-weight: bold; letter-spacing: 3px; margin: 15px auto">Tumblr</span></a>\r\n <a class="share share-instagram" target="_blank" href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe496683afbe8e0ef4d9cc7aefbfed95a6baafebe6a73c968fb025ecdf2f31a6db046db" style="color: #009f9c !important; display: inline-block; margin:0 15px; width:90px; white-space:nowrap; text-align: center; text-decoration: none; text-transform: uppercase"><img src="http://cdn.theskimm.com/email/3/retina/footer_instagram.png" width="21" height="22" alt="Instagram Us" style="-ms-interpolation-mode: bicubic; border: none; clear: both; display: block; float: none; font-size: 28px; margin: 0 auto 10px; max-width: 100%; outline: none; text-decoration: none;" align="none"><span style="color: #fff !important; display: block; font-size: 11px; font-weight: bold; letter-spacing: 3px; margin: 15px auto">Instagram</span></a>\r\n <a class="share share-pinterest" target="_blank" href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe49668f83d6ecd33ef7fd9704213f209da019e868a39f71d2272cde348d4cdcc3fcb2d" style="color: #009f9c !important; display: inline-block; margin:0 15px; width:90px; white-space:nowrap; text-align: center; text-decoration: none; text-transform: uppercase"><img src="http://cdn.theskimm.com/email/3/retina/footer_pinterest.png" width="24" height="24" alt="Pin Us" style="-ms-interpolation-mode: bicubic; border: none; clear: both; display: block; float: none; font-size: 28px; margin: 0 auto 8px; max-width: 100%; outline: none; text-decoration: none;" 
align="none"><span style="color: #fff !important; display: block; font-size: 11px; font-weight: bold; letter-spacing: 3px; margin: 15px auto">Pinterest</span></a>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n\r\n <table width="100%" bgcolor="#ffffff" cellpadding="0" cellspacing="0" border="0" id="backgroundTable">\r\n <tbody>\r\n <tr>\r\n <td>\r\n <table width="100%" style="max-width:620px" cellpadding="0" cellspacing="0" border="0" align="center" class="devicewidth">\r\n <tbody>\r\n <tr>\r\n <td width="100%">\r\n <table bgcolor="#ffffff" width="100%" style="max-width:620px" cellpadding="0" cellspacing="0" border="0" align="center" class="devicewidth">\r\n <tbody>\r\n <tr>\r\n <td>\r\n <table width="100%" style="max-width:290px;" align="left" border="0" cellpadding="0" cellspacing="0" class="devicewidth">\r\n <tbody>\r\n <tr>\r\n <td width="100%" height="10"></td>\r\n </tr>\r\n <tr>\r\n <td>\r\n <table width="100%" style="max-width:270px;" align="center" border="0" cellpadding="0" cellspacing="0" class="devicewidthinner">\r\n <tbody>\r\n <tr>\r\n <td style="font-family: Helvetica, arial, sans-serif; font-size: 12px; color: #333333; text-align:left;line-height: 24px;">\r\n Copyright &#169; 2016 theSkimm, All rights reserved.\r\n </td>\r\n </tr>\r\n <tr>\r\n <td width="100%" height="22"></td>\r\n </tr>\r\n <tr>\r\n <td style="font-family: Helvetica, arial, sans-serif; font-size: 12px; color: #333333; text-align:left;line-height: 24px;">\r\n <b>Our mailing address is: </b><br />\r\n theSkimm Inc.<br />\r\n 49 W 23rd Street, 10th Floor<br />\r\n New York, NY, 10010, United States\r\n </td>\r\n </tr>\r\n <tr>\r\n <td width="100%" height="22"></td>\r\n </tr>\r\n <tr>\r\n <td style="font-family: Helvetica, arial, sans-serif; font-size: 12px; color: #333333; text-align:left;line-height: 24px;">\r\n <a href="http://click.morning7.theskimm.com/profile_center.aspx?qs=a9002aa5f6954ac9bd3702a4eac3ef750c124ba6406cfbb7f60cc269a1b4d8aebe14ef93ad7e94477f4156c5a27370f7e0422cc5321d81e2" >Update Profile</a><br/>\r\n <a href="http://click.morning7.theskimm.com/?qs=5d95ad2dcbe49668caba5850dff6b71d76b1bac535ba3008782c3ceffefdcc0b32ef6129f5abef5c" >Unsubscribe</a>\r\n </td>\r\n </tr>\r\n <tr>\r\n <td width="100%" height="60"></td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n <tr>\r\n <td width="100%" height="10"></td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n\r\n<img src="https://pixel.monitor1.returnpath.net/pixel.gif?r=3e9fa24c7442d95337a14dfcd7b45f193869d154" width="1" height="1" />\r\n<IMG SRC="https://ad.doubleclick.net/ddm/ad/N2335.1915120THESKIMM/B10592123.141253295;sz=1x1;ord=0bf40eb5-9e50-4f75-8f4f-dee229746301;dc_lat=;dc_rdid=;tag_for_child_directed_treatment=?" BORDER="0" HEIGHT="1" WIDTH="1" ALT="Advertisement">\r\n<IMG SRC="https://ad.doubleclick.net/ddm/ad/N2335.1915120THESKIMM/B10592123.141254194;sz=1x1;ord=e632fb76-3773-4c02-af47-63ecc0003c2f;dc_lat=;dc_rdid=;tag_for_child_directed_treatment=?" 
BORDER="0" HEIGHT="1" WIDTH="1" ALT="Advertisement">\r\n</body>\r\n</html>\r\n # """, # 'References':'<[email protected]>', # 'signature':'1369fa4dcc7de7fac51f5bb408bd5c9daa8730e80d394e8a128658d74e66904skimm2', # 'Content-Type':'multipart/mixed; boundary="------------020601070403020003080006"', # 'Subject':'Test Newsletter theskimm' # } params = request.REQUEST response = HttpResponse('OK') if settings.DEBUG or 'samuel' in params.get('To', ''): logging.debug(" ---> Email newsletter: %s" % params) email_newsletter = EmailNewsletter() story = email_newsletter.receive_newsletter(params) if not story: raise Http404 return response def newsletter_story(request, story_hash): story = MStory.objects.get(story_hash=story_hash) story = Feed.format_story(story) return HttpResponse(story['story_content'])
mit
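A quick sketch of exercising the webhook view above with Django's test client. The route and most payload keys are assumptions inferred from what the view reads ('To', plus the commented-out fixture fields), not confirmed by the repo:

from django.test import Client

client = Client()
response = client.post(
    '/newsletters/receive/',  # hypothetical route for the receive view
    data={
        'To': '[email protected]',  # the view only inspects 'To' directly
        'Subject': 'Test Newsletter theskimm',
        'body-html': '<html>...</html>',  # stand-in for the fixture HTML above
    },
)
# The view answers 'OK', or 404 when no story could be created.
print(response.status_code)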
Sorsly/subtle
google-cloud-sdk/lib/third_party/grpc/framework/interfaces/base/__init__.py
901
1528
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
mit
google/telluride_decoding
test/scaled_lda_test.py
1
6747
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Test for telluride_decoding.scaled_lda."""

import os

from absl.testing import absltest
import matplotlib.pyplot as plt
import numpy as np

from telluride_decoding import scaled_lda


class ScaledLdaTest(absltest.TestCase):

  def test_one_dimensional_data(self):
    num_points = 1000
    d1 = np.random.randn(num_points,) - 5
    d2 = np.random.randn(num_points,) + 5

    lda = scaled_lda.ScaledLinearDiscriminantAnalysis()
    lda.fit_two_classes(d1, d2)

    d1_transformed = lda.transform(d1)
    self.assertAlmostEqual(np.mean(d1_transformed), 0)

    d2_transformed = lda.transform(d2)
    self.assertAlmostEqual(np.mean(d2_transformed), 1)

  def test_two_dimensional_data(self):
    num_points = 1000
    num_dims = 2
    d1 = np.matmul(np.random.randn(num_points, num_dims),
                   [[2, 0], [0, 0.5]]) + [-2, 1]
    d2 = np.matmul(np.random.randn(num_points, num_dims),
                   [[2, 0], [0, 0.5]]) + [2, -1]

    # Plot the original data.
    plt.clf()
    plt.subplot(2, 1, 1)
    plt.plot(d1[:, 0], d1[:, 1], 'rx')
    plt.plot(d2[:, 0], d2[:, 1], 'bo')
    plt.title('Original Data')

    x = np.concatenate((d1, d2), axis=0)
    y = np.concatenate((np.ones(d1.shape[0])*42,
                        np.ones(d2.shape[0])*-12))

    lda = scaled_lda.LinearDiscriminantAnalysis()
    with self.assertRaisesRegex(
        ValueError, 'Must fit the model before transforming.'):
      lda.transform(d1)
    with self.assertRaisesRegex(
        ValueError, 'Must fit the model before transforming.'):
      lda.explained_variance_ratio()

    x_lda = lda.fit_transform(x, y)
    labels = lda.labels
    self.assertLen(labels, 2)

    # Plot the transformed data.
    plt.subplot(2, 1, 2)
    plt.plot(x_lda[y == labels[0], 0], x_lda[y == labels[0], 1], 'rx')
    plt.plot(x_lda[y == labels[1], 0], x_lda[y == labels[1], 1], 'bo')
    plt.title('Transformed Data')

    # Make sure the transformed centers are symmetric on the first (x) axis.
    mean_vectors = [np.reshape(v, (1, -1)) for v in lda.mean_vectors]
    centers = lda.transform(np.concatenate(mean_vectors, axis=0))
    print('Transformed centers are:', centers)
    self.assertAlmostEqual(centers[0, 0], -centers[1, 0], delta=0.1)
    np.testing.assert_allclose(centers[:, 1], [0., 0.], atol=0.1)

    plt.savefig(os.path.join(os.environ.get('TMPDIR') or '/tmp',
                             'scaled_lda.png'))

    with self.assertRaisesRegex(
        TypeError, 'Inconsistent training and transform sizes'):
      lda.transform(d1[:, 0:1])

    # Now test model from saved parameters.
    nlda = scaled_lda.LinearDiscriminantAnalysis()
    nlda.model_parameters = lda.model_parameters  # Get/set parameters test
    centers = nlda.transform(np.concatenate(mean_vectors, axis=0))
    self.assertAlmostEqual(centers[0, 0], -centers[1, 0], delta=0.1)
    np.testing.assert_allclose(centers[:, 1], [0., 0.], atol=0.1)

  def test_fitted_data(self):
    """Makes sure we can generate a fitted model with .from_fitted_data."""
    num_points = 1000
    num_dims = 2
    d1 = np.matmul(np.random.randn(num_points, num_dims),
                   [[2, 0], [0, 0.5]]) + [-2, 1]
    d2 = np.matmul(np.random.randn(num_points, num_dims),
                   [[2, 0], [0, 0.5]]) + [2, -1]

    x = np.concatenate((d1, d2), axis=0)
    y = np.concatenate((np.ones(d1.shape[0])*42,
                        np.ones(d2.shape[0])*-12))

    lda = scaled_lda.LinearDiscriminantAnalysis.from_fitted_data(x, y)
    explained = lda.explained_variance_ratio()
    np.testing.assert_allclose(explained, [1., 0.], atol=1e-8)

  def test_three_class_data(self):
    num_points = 1000
    num_dims = 2
    d1 = np.matmul(np.random.randn(num_points, num_dims),
                   [[2, 0], [0, 0.5]]) + [-2, 1]
    d2 = np.matmul(np.random.randn(num_points, num_dims),
                   [[2, 0], [0, 0.5]]) + [2, -1]
    d3 = np.matmul(np.random.randn(num_points, num_dims),
                   [[2, 0], [0, 0.5]])

    x = np.concatenate((d1, d2, d3), axis=0)
    y = np.concatenate((np.ones(d1.shape[0])*42,
                        np.ones(d2.shape[0])*-12,
                        np.ones(d3.shape[0])))

    lda = scaled_lda.LinearDiscriminantAnalysis()
    x_lda = lda.fit_transform(x, y)
    self.assertEqual(x_lda.shape[0], 3*num_points)
    self.assertEqual(x_lda.shape[1], 2)  # Only two dimensional data.

    labels = lda.labels
    self.assertLen(labels, 3)

  def test_four_dimensional_data(self):
    num_points = 1000
    num_dims = 4
    center = np.array([-2, 1, 3, 2])  # Arbitrary
    m1 = np.random.randn(num_points, num_dims) + center
    m2 = np.random.randn(num_points, num_dims) + -center

    x = np.concatenate((m1, m2), axis=0)
    y = np.concatenate((np.ones(m1.shape[0])*0,
                        np.ones(m2.shape[0])*1.0))

    slda = scaled_lda.ScaledLinearDiscriminantAnalysis()
    slda.fit_two_classes(m1, m2)

    m_lda = slda.transform(x)
    self.assertEqual(m_lda.shape, (2*num_points, 2))
    self.assertEqual(slda.coef_array.shape[0], num_dims)
    self.assertLen(slda.labels, slda.coef_array.shape[1])

    mean_vectors = [np.reshape(v, (1, -1)) for v in slda.mean_vectors]
    centers = slda.transform(np.concatenate(mean_vectors, axis=0))[:, 0]
    np.testing.assert_allclose(centers, [0., 1.0], atol=1e-8)

    explained = slda.explained_variance_ratio()
    np.testing.assert_allclose(explained, [1., 0., 0., 0.], atol=1e-8)

    # Now test save and restoring parameters.
    param_dict = slda.model_parameters
    nlda = scaled_lda.ScaledLinearDiscriminantAnalysis()
    nlda.model_parameters = param_dict
    mean_vectors = [np.reshape(v, (1, -1)) for v in nlda.mean_vectors]
    centers = nlda.transform(np.concatenate(mean_vectors, axis=0))[:, 0]
    np.testing.assert_allclose(centers, [0., 1.0], atol=1e-8)

    # Make sure we fail with more than two classes.
    with self.assertRaisesRegex(
        ValueError, 'Scaled LDA can only be done on two-class data.'):
      y[0:2] = 42
      slda.fit_transform(x, y)


if __name__ == '__main__':
  absltest.main()
apache-2.0
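Read from the tests above, the two-class workflow is compact. A minimal sketch, assuming only the API the tests exercise (per the assertions, fit_two_classes scales the projection so the class means land at 0 and 1 on the first axis):

import numpy as np
from telluride_decoding import scaled_lda

a = np.random.randn(500, 4) + np.array([-2, 1, 3, 2])   # class 0
b = np.random.randn(500, 4) - np.array([-2, 1, 3, 2])   # class 1

slda = scaled_lda.ScaledLinearDiscriminantAnalysis()
slda.fit_two_classes(a, b)
projected = slda.transform(np.concatenate((a, b), axis=0))
print(projected.shape)  # (1000, 2), matching the four-dimensional test's assertion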
mrbandrews/bitcoin
qa/rpc-tests/receivedby.py
8
7385
#!/usr/bin/env python
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

# Exercise the listreceivedbyaddress API

from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *


def get_sub_array_from_array(object_array, to_match):
    '''
    Finds and returns a sub array from an array of arrays.
    to_match should be a unique identifier of a sub array
    '''
    num_matched = 0
    for item in object_array:
        all_match = True
        for key, value in to_match.items():
            if item[key] != value:
                all_match = False
        if not all_match:
            continue
        return item
    return []


def check_array_result(object_array, to_match, expected, should_not_find=False):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    If the should_not_find flag is true, to_match should not be found in object_array
    """
    if should_not_find == True:
        expected = {}
    num_matched = 0
    for item in object_array:
        all_match = True
        for key, value in to_match.items():
            if item[key] != value:
                all_match = False
        if not all_match:
            continue
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
        num_matched = num_matched + 1
    if num_matched == 0 and should_not_find != True:
        raise AssertionError("No objects matched %s" % (str(to_match)))
    if num_matched > 0 and should_not_find == True:
        raise AssertionError("Objects were matched %s" % (str(to_match)))


class ReceivedByTest(BitcoinTestFramework):

    def run_test(self):
        '''
        listreceivedbyaddress Test
        '''
        # Send from node 0 to 1
        addr = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()

        # Check not listed in listreceivedbyaddress because has 0 confirmations
        check_array_result(self.nodes[1].listreceivedbyaddress(),
                           {"address": addr},
                           {},
                           True)

        # Bury Tx under 10 blocks so it will be returned by listreceivedbyaddress
        self.nodes[1].setgenerate(True, 10)
        self.sync_all()
        check_array_result(self.nodes[1].listreceivedbyaddress(),
                           {"address": addr},
                           {"address": addr, "account": "", "amount": Decimal("0.1"),
                            "confirmations": 10, "txids": [txid, ]})

        # With min confirmations < 10
        check_array_result(self.nodes[1].listreceivedbyaddress(5),
                           {"address": addr},
                           {"address": addr, "account": "", "amount": Decimal("0.1"),
                            "confirmations": 10, "txids": [txid, ]})

        # With min confirmations > 10, should not find Tx
        check_array_result(self.nodes[1].listreceivedbyaddress(11),
                           {"address": addr},
                           {},
                           True)

        # Empty Tx
        addr = self.nodes[1].getnewaddress()
        check_array_result(self.nodes[1].listreceivedbyaddress(0, True),
                           {"address": addr},
                           {"address": addr, "account": "", "amount": 0,
                            "confirmations": 0, "txids": []})

        '''
        getreceivedbyaddress Test
        '''
        # Send from node 0 to 1
        addr = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()

        # Check balance is 0 because of 0 confirmations
        balance = self.nodes[1].getreceivedbyaddress(addr)
        if balance != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f" % (balance))

        # Check balance is 0.1
        balance = self.nodes[1].getreceivedbyaddress(addr, 0)
        if balance != Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f" % (balance))

        # Bury Tx under 10 blocks so it will be returned by the default getreceivedbyaddress
        self.nodes[1].setgenerate(True, 10)
        self.sync_all()
        balance = self.nodes[1].getreceivedbyaddress(addr)
        if balance != Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f" % (balance))

        '''
        listreceivedbyaccount + getreceivedbyaccount Test
        '''
        # set pre-state
        addrArr = self.nodes[1].getnewaddress()
        account = self.nodes[1].getaccount(addrArr)
        received_by_account_json = get_sub_array_from_array(
            self.nodes[1].listreceivedbyaccount(), {"account": account})
        if len(received_by_account_json) == 0:
            raise AssertionError("No accounts found in node")
        balance_by_account = rec_by_accountArr = self.nodes[1].getreceivedbyaccount(account)

        txid = self.nodes[0].sendtoaddress(addr, 0.1)

        # listreceivedbyaccount should return received_by_account_json because of 0 confirmations
        check_array_result(self.nodes[1].listreceivedbyaccount(),
                           {"account": account},
                           received_by_account_json)

        # getreceivedbyaccount should return same balance because of 0 confirmations
        balance = self.nodes[1].getreceivedbyaccount(account)
        if balance != balance_by_account:
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f" % (balance))

        self.nodes[1].setgenerate(True, 10)
        self.sync_all()
        # listreceivedbyaccount should return updated account balance
        check_array_result(self.nodes[1].listreceivedbyaccount(),
                           {"account": account},
                           {"account": received_by_account_json["account"],
                            "amount": (received_by_account_json["amount"] + Decimal("0.1"))})

        # getreceivedbyaccount should return updated balance
        balance = self.nodes[1].getreceivedbyaccount(account)
        if balance != balance_by_account + Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f" % (balance))

        # Create a new account named "mynewaccount" that has a 0 balance
        self.nodes[1].getaccountaddress("mynewaccount")
        received_by_account_json = get_sub_array_from_array(
            self.nodes[1].listreceivedbyaccount(0, True), {"account": "mynewaccount"})
        if len(received_by_account_json) == 0:
            raise AssertionError("No accounts found in node")

        # Test includeempty of listreceivedbyaccount
        if received_by_account_json["amount"] != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f" %
                                 (received_by_account_json["amount"]))

        # Test getreceivedbyaccount for 0 amount accounts
        balance = self.nodes[1].getreceivedbyaccount("mynewaccount")
        if balance != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f" % (balance))


if __name__ == '__main__':
    ReceivedByTest().main()
mit
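The check_array_result helper drives all of these assertions; a standalone sketch with made-up listing data (only the helper's semantics come from the file above):

listing = [
    {'address': 'mxAAA...', 'amount': 0.1, 'confirmations': 10},
    {'address': 'mxBBB...', 'amount': 0.0, 'confirmations': 0},
]
# Passes: one entry matches the filter and carries the expected value.
check_array_result(listing, {'address': 'mxAAA...'}, {'confirmations': 10})
# Raises AssertionError: a match exists although should_not_find is True.
check_array_result(listing, {'address': 'mxAAA...'}, {}, should_not_find=True)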
ATIX-AG/ansible
lib/ansible/modules/network/interface/net_interface.py
58
3658
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}


DOCUMENTATION = """
---
module: net_interface
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage Interface on network devices
description:
  - This module provides declarative management of Interfaces
    on network devices.
options:
  name:
    description:
      - Name of the Interface.
    required: true
  description:
    description:
      - Description of Interface.
  enabled:
    description:
      - Configure interface link status.
  speed:
    description:
      - Interface link speed.
  mtu:
    description:
      - Maximum size of transmit packet.
  duplex:
    description:
      - Interface link status
    default: auto
    choices: ['full', 'half', 'auto']
  tx_rate:
    description:
      - Transmit rate in bits per second (bps).
      - This is state check parameter only.
      - Supports conditionals, see L(Conditionals in Networking Modules,../network/user_guide/network_working_with_command_output.html)
  rx_rate:
    description:
      - Receiver rate in bits per second (bps).
      - This is state check parameter only.
      - Supports conditionals, see L(Conditionals in Networking Modules,../network/user_guide/network_working_with_command_output.html)
  delay:
    description:
      - Time in seconds to wait before checking for the operational state on remote
        device. This wait is applicable for operational state argument which are
        I(state) with values C(up)/C(down), I(tx_rate) and I(rx_rate).
    default: 10
  aggregate:
    description: List of Interfaces definitions.
  purge:
    description:
      - Purge Interfaces not defined in the aggregate parameter.
        This applies only for logical interface.
    default: no
  state:
    description:
      - State of the Interface configuration, C(up) indicates present and
        operationally up and C(down) indicates present and operationally C(down)
    default: present
    choices: ['present', 'absent', 'up', 'down']
"""

EXAMPLES = """
- name: configure interface
  net_interface:
    name: ge-0/0/1
    description: test-interface

- name: remove interface
  net_interface:
    name: ge-0/0/1
    state: absent

- name: make interface up
  net_interface:
    name: ge-0/0/1
    description: test-interface
    enabled: True

- name: make interface down
  net_interface:
    name: ge-0/0/1
    description: test-interface
    enabled: False

- name: Create interface using aggregate
  net_interface:
    aggregate:
      - { name: ge-0/0/1, description: test-interface-1 }
      - { name: ge-0/0/2, description: test-interface-2 }
    speed: 1g
    duplex: full
    mtu: 512

- name: Delete interface using aggregate
  net_interface:
    aggregate:
      - { name: ge-0/0/1 }
      - { name: ge-0/0/2 }
    state: absent

- name: Check intent arguments
  net_interface:
    name: fxp0
    state: up
    tx_rate: ge(0)
    rx_rate: le(0)

- name: Config + intent
  net_interface:
    name: fxp0
    enabled: False
    state: down
"""

RETURN = """
commands:
  description: The list of configuration mode commands to send to the device.
  returned: always, except for the platforms that use Netconf transport to manage the device.
  type: list
  sample:
    - interface 20
    - name test-interface
"""
gpl-3.0
palmerjh/iEBE
PlayGround/job-2/iSS/for_paraview/lib152/DataSetAttr.py
9
1758
#!/usr/bin/env python """ Copyright 2001 Pearu Peterson all rights reserved, Pearu Peterson <[email protected]> Permission to use, modify, and distribute this software is given under the terms of the LGPL. See http://www.fsf.org NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. $Revision: 1.1 $ $Date: 2001-05-20 12:51:29 $ Pearu Peterson """ __version__ = "$Id: DataSetAttr.py,v 1.1 2001-05-20 12:51:29 pearu Exp $" import common import string class DataSetAttr(common.Common): """Abstract class for VTK data.""" counters = {} default_value = 0 def _get_default_name(self): n = self.__class__.__name__ try: self.counters[n] = self.counters[n] + 1 except KeyError: self.counters[n] = 0 return self.__class__.__name__+str(self.counters[n]) def _get_name(self,name): if name is None: name = self._get_default_name() self.warning('Using name=%s'%(`name`)) return name if common.is_string(name): name = string.replace(string.strip(name),' ','_') #name = name.strip().replace(' ','_') if name: return name raise ValueError,'name=%s must be non-empty string'%(`name`) def _get_lookup_table(self,name): if name is None: name = 'default' self.warning('Using lookup_table=%s'%(`name`)) return name if common.is_string(name): name = string.replace(string.strip(name),' ','_') #name = name.strip().replace(' ','_') if name: return name raise ValueError,'lookup_table=%s must be nonempty string'%(`name`) if __name__ == "__main__": pass
gpl-3.0
PierreBizouard/pizco-utils
pizcoutils/helpers/BuildExeStartup.py
1
2974
#------------------------------------------------------------------------------
# BuildExeStartup.py
#   Initialization script for cx_Freeze which manipulates the path so that the
# directory in which the executable is found is searched for extensions but
# no other directory is searched. It also sets the attribute sys.frozen so that
# the Win32 extensions behave as expected.
#------------------------------------------------------------------------------

import os
import sys

subpath = "bin"
# if trap for frozen script wrapping
base_path = os.path.join(os.path.dirname(sys.executable), subpath)
sys.path.insert(0, base_path + '\\library.zip')
sys.path.insert(0, base_path)

os.environ['MATPLOTLIBDATA'] = os.path.join(
    os.path.dirname(sys.executable), subpath + '\\mpl-data')

import zipimport

sys.frozen = True
sys.path = sys.path[:2]

#print "IPython can require the zip_imp utils // patching qt_loaders allows this"
#from zip_imp import patch
#patch()
#However it does work so we end monkey patching qt loading
from helpers.LogUtils import *

if (os.path.isdir(DIR_NAME + "\\IPython") or os.path.isdir(base_path + "\\IPython")):
    debug("monkey patching ipython")
    os.environ["IPYTHONDIR"] = base_path
    from IPython.external import qt_loaders
    from IPython.external.qt_loaders import *

    def new_load_qt(api_option):
        loaders = {QT_API_PYSIDE: import_pyside,
                   QT_API_PYQT: import_pyqt4,
                   QT_API_PYQTv1: partial(import_pyqt4, version=1),
                   QT_API_PYQT_DEFAULT: partial(import_pyqt4, version=None)
                   }
        api = loaded_api()
        result = loaders[api]()
        api = result[-1]  # changed if api = QT_API_PYQT_DEFAULT
        commit_api(loaded_api())
        return result

    qt_loaders.load_qt = new_load_qt

os.environ["TCL_LIBRARY"] = os.path.join(DIR_NAME, "tcl")
os.environ["TK_LIBRARY"] = os.path.join(DIR_NAME, "tk")

# Enforce sip vars version on loading
if (os.path.isfile(DIR_NAME + "\\QtGui4.dll") or os.path.isfile(base_path + "\\QtGui4.dll")):
    debug("setting sip to v2")
    # perform qt4 rthook like pyinstaller
    import sip
    sip.setapi(u'QDate', 2)
    sip.setapi(u'QDateTime', 2)
    sip.setapi(u'QString', 2)
    sip.setapi(u'QTextStream', 2)
    sip.setapi(u'QTime', 2)
    sip.setapi(u'QUrl', 2)
    sip.setapi(u'QVariant', 2)

m = __import__("__main__")
importer = zipimport.zipimporter(INITSCRIPT_ZIP_FILE_NAME)
if INITSCRIPT_ZIP_FILE_NAME != SHARED_ZIP_FILE_NAME:
    moduleName = m.__name__
else:
    name, ext = os.path.splitext(os.path.basename(os.path.normcase(FILE_NAME)))
    moduleName = "%s__main__" % name

code = importer.get_code(moduleName)
exec(code, m.__dict__)

versionInfo = sys.version_info[:3]
if versionInfo >= (2, 5, 0) and versionInfo <= (2, 6, 4):
    module = sys.modules.get("threading")
    if module is not None:
        module._shutdown()
bsd-3-clause
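The sip block above is the standard way to force the v2 string API before PyQt4 loads; a standalone illustration (legacy PyQt4/sip, so runnable only in that environment):

import sip
# Must happen before the first PyQt4 import; calling setapi afterwards
# raises an error because the API is already fixed.
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
from PyQt4 import QtCore  # QString now maps to native unicode strings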
pajlada/pajbot
pajbot/apiwrappers/base.py
1
3753
import logging
from urllib.parse import quote, urlparse, urlunparse

import datetime

from requests import Session

from pajbot import constants
from pajbot.apiwrappers.response_cache import APIResponseCache

log = logging.getLogger(__name__)


class BaseAPI:
    def __init__(self, base_url, redis=None):
        self.base_url = base_url

        self.session = Session()
        self.timeout = 20

        # e.g. pajbot1/1.35
        self.session.headers["User-Agent"] = f"pajbot/{constants.VERSION}"

        if redis is not None:
            self.cache = APIResponseCache(redis)

    @staticmethod
    def quote_path_param(param):
        return quote(param, safe="")

    @staticmethod
    def fill_in_url_scheme(url, default_scheme="https"):
        """Fill in the scheme part of a given URL string, e.g.
        with given inputs of url = "//example.com/abc" and
        default_scheme="https", the output would be
        "https://example.com/abc"

        If the given input URL already has a scheme, the scheme is not altered.
        """
        parsed_template = urlparse(url, scheme=default_scheme)
        return urlunparse(parsed_template)

    @staticmethod
    def parse_datetime(datetime_str):
        """Parses date strings in the format of 2015-09-11T23:01:11Z
        to a tz-aware datetime object."""
        naive_dt = datetime.datetime.strptime(datetime_str, "%Y-%m-%dT%H:%M:%SZ")
        return naive_dt.replace(tzinfo=datetime.timezone.utc)

    @staticmethod
    def join_base_and_list(base, path_segments):
        url = base
        for path_segment in path_segments:
            # str(endpoint) so numbers can be used as path segments too
            url = BaseAPI.join_base_and_string(url, BaseAPI.quote_path_param(str(path_segment)))

        return url

    @staticmethod
    def join_base_and_string(base, endpoint):
        base = base.rstrip("/")
        endpoint = endpoint.lstrip("/")
        return base + "/" + endpoint

    @staticmethod
    def join_base_and_endpoint(base, endpoint):
        # For use cases with no base and absolute endpoint URLs
        if base is None:
            return endpoint

        if isinstance(endpoint, list):
            return BaseAPI.join_base_and_list(base, endpoint)
        else:
            return BaseAPI.join_base_and_string(base, endpoint)

    def request(self, method, endpoint, params, headers, json=None, **request_options):
        full_url = self.join_base_and_endpoint(self.base_url, endpoint)
        response = self.session.request(
            method, full_url, params=params, headers=headers, json=json, timeout=self.timeout, **request_options
        )
        response.raise_for_status()
        return response

    def get(self, endpoint, params=None, headers=None, **request_options):
        return self.request("GET", endpoint, params, headers, **request_options).json()

    def get_response(self, endpoint, params=None, headers=None, **request_options):
        return self.request("GET", endpoint, params, headers, **request_options)

    def get_binary(self, endpoint, params=None, headers=None, **request_options):
        return self.request("GET", endpoint, params, headers, **request_options).content

    def post(self, endpoint, params=None, headers=None, json=None, **request_options):
        return self.request("POST", endpoint, params, headers, json, **request_options).json()

    def put(self, endpoint, params=None, headers=None, json=None, **request_options):
        return self.request("PUT", endpoint, params, headers, json, **request_options).json()

    def patch(self, endpoint, params=None, headers=None, json=None, **request_options):
        return self.request("PATCH", endpoint, params, headers, json, **request_options)
mit
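A short usage sketch of BaseAPI as read from the class above, assuming the pajbot package environment is importable; httpbin.org is just a stand-in host, and the optional redis-backed cache is skipped:

api = BaseAPI("https://httpbin.org")

# List endpoints are percent-encoded per segment, then joined onto the base,
# so GET https://httpbin.org/anything/users/pajlada is requested here.
data = api.get(["anything", "users", "pajlada"], params={"limit": 10})
print(data["url"])

# With no base URL, absolute endpoints pass through untouched.
raw = BaseAPI(None).get_response("https://httpbin.org/status/204")
print(raw.status_code)  # 204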
pcabido/socorro
alembic/versions/235c80dc2e12_fixes_bug_1047079_remove_processors_.py
13
2071
"""Fixes bug 1047079 - remove processors, jobs tables Revision ID: 235c80dc2e12 Revises: 556e11f2d00f Create Date: 2014-12-30 13:29:15.108296 """ # revision identifiers, used by Alembic. revision = '235c80dc2e12' down_revision = '556e11f2d00f' from alembic import op from socorro.lib import citexttype, jsontype, buildtype from socorro.lib.migrations import fix_permissions, load_stored_proc import sqlalchemy as sa from sqlalchemy import types from sqlalchemy.dialects import postgresql from sqlalchemy.sql import table, column def upgrade(): op.drop_table('jobs') op.drop_table('processors') op.alter_column('server_status', 'processors_count', nullable=True) def downgrade(): op.alter_column('server_status', 'processors_count', nullable=False) op.execute(""" CREATE TABLE processors ( id serial NOT NULL PRIMARY KEY, name varchar(255) NOT NULL UNIQUE, startdatetime timestamp with time zone NOT NULL, lastseendatetime timestamp with time zone ) """) op.execute(""" CREATE TABLE jobs ( id serial NOT NULL PRIMARY KEY, pathname character varying(1024) NOT NULL, uuid varchar(50) NOT NULL UNIQUE, owner integer, priority integer DEFAULT 0, queueddatetime timestamp with time zone, starteddatetime timestamp with time zone, completeddatetime timestamp with time zone, success boolean, message text, FOREIGN KEY (owner) REFERENCES processors (id) ) """) op.execute(""" CREATE INDEX jobs_owner_key ON jobs (owner) """) op.execute(""" CREATE INDEX jobs_owner_starteddatetime_key ON jobs (owner, starteddatetime) """) op.execute(""" CREATE INDEX jobs_owner_starteddatetime_priority_key ON jobs (owner, starteddatetime, priority DESC) """) op.execute(""" CREATE INDEX jobs_completeddatetime_queueddatetime_key ON jobs (completeddatetime, queueddatetime) """)
mpl-2.0
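Migrations like this are applied through Alembic's command interface; a minimal sketch using the public Python API (the ini path is an assumption, not from the repo):

from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")  # hypothetical path to the project's Alembic config
command.upgrade(cfg, "235c80dc2e12")  # drop the processors/jobs tables
command.downgrade(cfg, "-1")          # step back one revision to recreate them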
GaetanCambier/CouchPotatoServer
libs/requests/api.py
361
4344
# -*- coding: utf-8 -*-

"""
requests.api
~~~~~~~~~~~~

This module implements the Requests API.

:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.

"""

from . import sessions


def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.
    Returns :class:`Response <Response>` object.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) Float describing the timeout of the request in seconds.
    :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'http://httpbin.org/get')
      <Response [200]>
    """

    session = sessions.Session()
    return session.request(method=method, url=url, **kwargs)


def get(url, **kwargs):
    """Sends a GET request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """

    kwargs.setdefault('allow_redirects', True)
    return request('get', url, **kwargs)


def options(url, **kwargs):
    """Sends an OPTIONS request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """

    kwargs.setdefault('allow_redirects', True)
    return request('options', url, **kwargs)


def head(url, **kwargs):
    """Sends a HEAD request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """

    kwargs.setdefault('allow_redirects', False)
    return request('head', url, **kwargs)


def post(url, data=None, **kwargs):
    """Sends a POST request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """

    return request('post', url, data=data, **kwargs)


def put(url, data=None, **kwargs):
    """Sends a PUT request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """

    return request('put', url, data=data, **kwargs)


def patch(url, data=None, **kwargs):
    """Sends a PATCH request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """

    return request('patch', url, data=data, **kwargs)


def delete(url, **kwargs):
    """Sends a DELETE request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """

    return request('delete', url, **kwargs)
gpl-3.0
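All of the verb helpers above funnel into request(), which opens a fresh Session per call; a quick usage example against httpbin.org:

import requests

r = requests.get('http://httpbin.org/get', params={'q': 'test'})
print(r.status_code, r.json()['args'])  # 200 {'q': 'test'}

r = requests.post('http://httpbin.org/post', data={'key': 'value'})
r.raise_for_status()  # raises on 4xx/5xx

# head() defaults allow_redirects to False, unlike get() and options().
r = requests.head('http://httpbin.org/redirect/1')
print(r.status_code)  # 302; the redirect is reported, not followed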
ASCIT/donut
donut/modules/feedback/helpers.py
2
9848
from donut import email_utils
from donut.modules.feedback import email_templates
import flask
import pymysql.cursors
from donut.modules.feedback.groups import groupInt, groupName
import donut.modules.groups.helpers as groups
import donut.modules.newsgroups.helpers as newsgroups


def send_update_email(group, email, complaint_id):
    '''
    Sends an email to [email] of poster and group
    '''
    msg = email_templates.added_message.format(group,
                                               get_link(group, complaint_id))
    subject = "Received {} Feedback".format(group)
    try:
        email_utils.send_email(email, msg, subject, group=group)
        return True
    except:
        return False


def register_complaint(group, data, notification=True):
    '''
    Inputs a complaint into the database and returns the complaint id
    associated with this complaint
    data should be a dict with keys 'subject', 'msg' and optionally
    'name', 'email'
    if required fields are missing, returns false
    '''
    if not (data and data['subject'] and data['msg']):
        return False
    # Register complaint
    query = """
        INSERT INTO complaint_info (org, subject, resolved, ombuds, uuid)
        VALUES (%s, %s, FALSE, %s, UNHEX(REPLACE(UUID(), '-', '')))
    """
    if 'ombuds' not in data:
        data['ombuds'] = 0
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query,
                       (groupInt[group], data['subject'], data['ombuds']))
        complaint_id = cursor.lastrowid
    # Add email to db if applicable
    if data['email']:
        for email in data['email'].split(','):
            add_email(groupInt[group], complaint_id, email.strip(), False)
    # Add message to database
    add_msg(group, complaint_id, data['msg'], data['name'], notification)
    return complaint_id


def send_to_group(group, data, complaint_id=None):
    group_id = groups.get_group_id(groupName[group])
    data['group'] = group_id
    data['group_name'] = group
    data['poster'] = "{} Feedback".format(group)
    data['plain'] = data['msg']
    if complaint_id:
        data['plain'] += "\nLink to the issue: {}".format(
            get_link(group, complaint_id))
    data['msg'] = None
    newsgroups.send_email(data)


def add_email(group, complaint_id, email, notification=True):
    '''
    Adds an email to list of addresses subscribed to this complaint
    returns false if complaint_id is invalid
    '''
    if not get_subject(group, complaint_id):
        return False
    query = """
        INSERT INTO complaint_emails (complaint_id, email)
        VALUES (%s, %s)
    """
    with flask.g.pymysql_db.cursor() as cursor:
        try:
            cursor.execute(query, (complaint_id, email))
        except pymysql.err.IntegrityError:
            return False
    if notification:
        send_update_email(group, email, complaint_id)
    return True


def remove_email(group, complaint_id, email):
    '''
    Removes 'email' from the list of emails subscribed to this complaint
    returns False if complaint_id is invalid
    '''
    if not get_subject(group, complaint_id):
        return False
    query = 'DELETE FROM complaint_emails WHERE complaint_id = %s AND email = %s'
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, (complaint_id, email))
    return True


def add_msg(group, complaint_id, message, poster, notification=True):
    '''
    Adds a message to a complaint in the database
    and marks the complaint unresolved again
    if poster is None or an empty string, it will be replaced with
    "(anonymous)"
    If complaint_id is invalid, returns False
    '''
    subject = get_subject(group, complaint_id)
    if not subject:
        return False
    # Add the message
    query = """
        INSERT INTO complaint_messages (complaint_id, message, poster, time)
        VALUES (%s, %s, %s, NOW())
    """
    # Reopen the complaint: a new message marks it unresolved
    query2 = 'UPDATE complaint_info SET resolved = FALSE WHERE complaint_id = %s'
    if not poster:
        poster = '(anonymous)'
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, (complaint_id, message, poster))
        cursor.execute(query2, complaint_id)
    if notification:
        data = {'msg': message, 'subject': subject}
        send_to_group(group, data, complaint_id)
        query = 'SELECT email FROM complaint_emails WHERE complaint_id = %s'
        with flask.g.pymysql_db.cursor() as cursor:
            cursor.execute(query, complaint_id)
            res = cursor.fetchall()
        for row in res:
            send_update_email(group, row['email'], complaint_id)


def get_link(group, complaint_id):
    '''
    Gets a (fully qualified) link to the view page for this complaint id
    '''
    query = 'SELECT HEX(uuid) AS uuid FROM complaint_info WHERE complaint_id = %s'
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, complaint_id)
        res = cursor.fetchone()
    if not res:
        return None
    uuid = res['uuid']
    return flask.url_for(
        'feedback.feedback_view_complaint',
        group=group,
        id=uuid,
        _external=True)


def get_id(group, uuid):
    '''
    Returns the complaint_id associated with a uuid
    or false if the uuid is not found
    '''
    query = 'SELECT complaint_id FROM complaint_info WHERE org = %s AND uuid = UNHEX(%s)'
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, (groupInt[group], uuid))
        if not cursor.rowcount:
            return False
        return cursor.fetchone()['complaint_id']


def get_messages(group, complaint_id):
    '''
    Returns timestamps, posters, messages, and message_id's on this complaint
    in ascending order of timestamp
    '''
    query = """
        SELECT time, poster, message, message_id
        FROM complaint_messages
        WHERE complaint_id = %s
        ORDER BY time
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, (complaint_id))
        return cursor.fetchall()


def get_summary(group, complaint_id):
    '''
    Returns a dict with the following fields: subject, status
    '''
    query = 'SELECT subject, resolved FROM complaint_info WHERE complaint_id = %s'
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, complaint_id)
        return cursor.fetchone()


def get_subject(group, complaint_id):
    '''
    Returns the subject or None if complaint_id is invalid
    '''
    res = get_summary(group, complaint_id)
    return res['subject'] if res else None


def get_status(group, complaint_id):
    '''
    Returns the status of a post or None if complaint_id is invalid
    '''
    res = get_summary(group, complaint_id)
    return res['resolved'] if res else None


def set_resolved(group, complaint_id, status):
    '''
    Sets the status of this complaint to resolved/unresolved
    '''
    query = "UPDATE complaint_info SET resolved=%s WHERE complaint_id = %s"
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, (status, complaint_id))


def get_emails(group, complaint_id):
    '''
    Returns a list of subscribed emails for this complaint (which may be
    empty) or an empty list if complaint_id is invalid
    '''
    query = 'SELECT email FROM complaint_emails WHERE complaint_id = %s'
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, complaint_id)
        res = cursor.fetchall()
    return [row['email'] for row in res]


def get_ombuds(complaint_id):
    '''
    Returns whether the person has already talked to an
    ombuds/TA/instructor about their problem.
    '''
    query = 'SELECT ombuds FROM complaint_info WHERE complaint_id = %s'
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, complaint_id)
        return cursor.fetchone()['ombuds']


def set_ombuds(complaint_id, ombuds):
    '''
    Sets the status of whether the user has spoken to an ombuds/TA/instructor.
    '''
    query = "UPDATE complaint_info SET ombuds = %s WHERE complaint_id = %s"
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, [ombuds, complaint_id])


def get_all_fields(group, complaint_id):
    '''
    Returns a dict with emails, messages, subject, status
    Returns None if complaint_id is invalid
    '''
    if not get_subject(group, complaint_id):
        return None
    data = {
        'emails': get_emails(group, complaint_id),
        'messages': get_messages(group, complaint_id),
        'subject': get_subject(group, complaint_id),
        'resolved': get_status(group, complaint_id)
    }
    if group == 'arc':
        data['ombuds'] = get_ombuds(complaint_id)
    return data


def get_posts(group, view_unresolved):
    '''
    Returns posts and their associated list of messages.
    If view_unresolved is true, only returns unresolved posts.
    Will be an array of dicts with keys complaint_id, subject, resolved,
    uuid, message, poster, time
    Note that message and poster refer to the latest comment on this complaint
    '''
    query = """SELECT post.complaint_id AS complaint_id,
        post.subject AS subject, post.resolved AS resolved,
        post.uuid AS uuid, comment.message AS message,
        comment.poster AS poster, comment.time AS time
        FROM complaint_info post
        NATURAL JOIN complaint_messages comment
        INNER JOIN (
            SELECT complaint_id, max(time) AS time
            FROM complaint_messages
            GROUP BY complaint_id
        ) maxtime
        ON maxtime.time = comment.time
        AND maxtime.complaint_id = comment.complaint_id
        WHERE post.org = %s
    """
    if view_unresolved:
        query += " AND post.resolved = FALSE"
    query += " ORDER BY comment.time DESC"
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, groupInt[group])
        return cursor.fetchall()
mit
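A hypothetical request-context sketch of the complaint flow (not from the repo): the helpers assume flask.g.pymysql_db holds an open PyMySQL connection and that url_for can build external links.

complaint = {
    'subject': 'Problem set deadline',
    'msg': 'Could the deadline move to Friday?',
    'name': '',                     # stored as '(anonymous)'
    'email': '[email protected]',  # hypothetical subscriber address
    'ombuds': 0,
}
complaint_id = register_complaint('arc', complaint, notification=False)
print(get_link('arc', complaint_id))        # uuid-based public link
print(get_all_fields('arc', complaint_id))  # emails, messages, subject, resolved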
Microsoft/PTVS
Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/urllib3/packages/ordered_dict.py
2040
8935
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
    from thread import get_ident as _get_ident
except ImportError:
    from dummy_thread import get_ident as _get_ident

try:
    from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
    pass


class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.

    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.

        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            self.__root = root = []     # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.

        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])

    def update(*args, **kwds):
        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.

        If E is a dict instance, does:           for k in E: od[k] = E[k]
        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
        In either case, this is followed by:     for k, v in F.items(): od[k] = v

        '''
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    __update = update  # let subclasses override update without breaking __init__

    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.

        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).

        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.

        '''
        if isinstance(other, OrderedDict):
            return len(self) == len(other) and self.items() == other.items()
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other

    # -- the following methods are only used in Python 2.7 --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
apache-2.0
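A quick demonstration of the order-aware behavior the backport above provides. collections.OrderedDict in the standard library has the same semantics, so this demo runs on any modern Python without the backport itself.

# Insertion order, popitem direction, and order-sensitive equality,
# mirroring __iter__, popitem, and __eq__ in the class above.
from collections import OrderedDict

od = OrderedDict()
od['b'] = 1
od['a'] = 2
od['c'] = 3

print(list(od))                # ['b', 'a', 'c'] -- insertion order kept
print(od.popitem(last=False))  # ('b', 1) -- FIFO when last=False
print(od.popitem())            # ('c', 3) -- LIFO by default

# Equality is order-sensitive between two OrderedDicts, but
# order-insensitive against a plain dict.
print(OrderedDict([('a', 1), ('b', 2)]) == OrderedDict([('b', 2), ('a', 1)]))  # False
print(OrderedDict([('a', 1), ('b', 2)]) == {'b': 2, 'a': 1})                   # True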
marc-sensenich/ansible
test/units/plugins/httpapi/test_ftd.py
22
14894
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#

import json

from ansible.module_utils.six.moves.urllib.error import HTTPError
from units.compat import mock
from units.compat import unittest
from units.compat.builtins import BUILTINS
from units.compat.mock import mock_open, patch

from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.network.ftd.common import HTTPMethod, ResponseParams
from ansible.module_utils.network.ftd.fdm_swagger_client import SpecProp, FdmSwaggerParser
from ansible.module_utils.six import BytesIO, StringIO
from ansible.plugins.httpapi.ftd import HttpApi

EXPECTED_BASE_HEADERS = {
    'Accept': 'application/json',
    'Content-Type': 'application/json'
}


class FakeFtdHttpApiPlugin(HttpApi):
    def __init__(self, conn):
        super(FakeFtdHttpApiPlugin, self).__init__(conn)
        self.hostvars = {
            'token_path': '/testLoginUrl',
            'spec_path': '/testSpecUrl'
        }

    def get_option(self, var):
        return self.hostvars[var]


class TestFtdHttpApi(unittest.TestCase):

    def setUp(self):
        self.connection_mock = mock.Mock()
        self.ftd_plugin = FakeFtdHttpApiPlugin(self.connection_mock)
        self.ftd_plugin.access_token = 'ACCESS_TOKEN'
        self.ftd_plugin._load_name = 'httpapi'

    def test_login_should_request_tokens_when_no_refresh_token(self):
        self.connection_mock.send.return_value = self._connection_response(
            {'access_token': 'ACCESS_TOKEN', 'refresh_token': 'REFRESH_TOKEN'}
        )

        self.ftd_plugin.login('foo', 'bar')

        assert 'ACCESS_TOKEN' == self.ftd_plugin.access_token
        assert 'REFRESH_TOKEN' == self.ftd_plugin.refresh_token
        assert {'Authorization': 'Bearer ACCESS_TOKEN'} == self.ftd_plugin.connection._auth
        expected_body = json.dumps({'grant_type': 'password', 'username': 'foo', 'password': 'bar'})
        self.connection_mock.send.assert_called_once_with(mock.ANY, expected_body, headers=mock.ANY, method=mock.ANY)

    def test_login_should_update_tokens_when_refresh_token_exists(self):
        self.ftd_plugin.refresh_token = 'REFRESH_TOKEN'
        self.connection_mock.send.return_value = self._connection_response(
            {'access_token': 'NEW_ACCESS_TOKEN', 'refresh_token': 'NEW_REFRESH_TOKEN'}
        )

        self.ftd_plugin.login('foo', 'bar')

        assert 'NEW_ACCESS_TOKEN' == self.ftd_plugin.access_token
        assert 'NEW_REFRESH_TOKEN' == self.ftd_plugin.refresh_token
        assert {'Authorization': 'Bearer NEW_ACCESS_TOKEN'} == self.ftd_plugin.connection._auth
        expected_body = json.dumps({'grant_type': 'refresh_token', 'refresh_token': 'REFRESH_TOKEN'})
        self.connection_mock.send.assert_called_once_with(mock.ANY, expected_body, headers=mock.ANY, method=mock.ANY)

    def test_login_should_use_host_variable_when_set(self):
        temp_token_path = self.ftd_plugin.hostvars['token_path']
        self.ftd_plugin.hostvars['token_path'] = '/testFakeLoginUrl'
        self.connection_mock.send.return_value = self._connection_response(
            {'access_token': 'ACCESS_TOKEN', 'refresh_token': 'REFRESH_TOKEN'}
        )

        self.ftd_plugin.login('foo', 'bar')

        self.connection_mock.send.assert_called_once_with('/testFakeLoginUrl', mock.ANY, headers=mock.ANY,
                                                          method=mock.ANY)
        self.ftd_plugin.hostvars['token_path'] = temp_token_path

    def test_login_raises_exception_when_no_refresh_token_and_no_credentials(self):
        with self.assertRaises(AnsibleConnectionFailure) as res:
            self.ftd_plugin.login(None, None)
        assert 'Username and password are required' in str(res.exception)

    def test_login_raises_exception_when_invalid_response(self):
        self.connection_mock.send.return_value = self._connection_response(
            {'no_access_token': 'ACCESS_TOKEN'}
        )

        with self.assertRaises(ConnectionError) as res:
            self.ftd_plugin.login('foo', 'bar')

        assert 'Server returned response without token info during connection authentication' in str(res.exception)

    def test_login_raises_exception_when_http_error(self):
        self.connection_mock.send.side_effect = HTTPError('http://testhost.com', 400, '', {},
                                                          StringIO('{"message": "Failed to authenticate user"}'))

        with self.assertRaises(ConnectionError) as res:
            self.ftd_plugin.login('foo', 'bar')

        assert 'Failed to authenticate user' in str(res.exception)

    def test_logout_should_revoke_tokens(self):
        self.ftd_plugin.access_token = 'ACCESS_TOKEN_TO_REVOKE'
        self.ftd_plugin.refresh_token = 'REFRESH_TOKEN_TO_REVOKE'
        self.connection_mock.send.return_value = self._connection_response(None)

        self.ftd_plugin.logout()

        assert self.ftd_plugin.access_token is None
        assert self.ftd_plugin.refresh_token is None
        expected_body = json.dumps({'grant_type': 'revoke_token', 'access_token': 'ACCESS_TOKEN_TO_REVOKE',
                                    'token_to_revoke': 'REFRESH_TOKEN_TO_REVOKE'})
        self.connection_mock.send.assert_called_once_with(mock.ANY, expected_body, headers=mock.ANY, method=mock.ANY)

    def test_send_request_should_send_correct_request(self):
        exp_resp = {'id': '123', 'name': 'foo'}
        self.connection_mock.send.return_value = self._connection_response(exp_resp)

        resp = self.ftd_plugin.send_request('/test/{objId}', HTTPMethod.PUT,
                                            body_params={'name': 'foo'},
                                            path_params={'objId': '123'},
                                            query_params={'at': 0})

        assert {ResponseParams.SUCCESS: True, ResponseParams.STATUS_CODE: 200,
                ResponseParams.RESPONSE: exp_resp} == resp
        self.connection_mock.send.assert_called_once_with('/test/123?at=0', '{"name": "foo"}', method=HTTPMethod.PUT,
                                                          headers=EXPECTED_BASE_HEADERS)

    def test_send_request_should_return_empty_dict_when_no_response_data(self):
        self.connection_mock.send.return_value = self._connection_response(None)

        resp = self.ftd_plugin.send_request('/test', HTTPMethod.GET)

        assert {ResponseParams.SUCCESS: True, ResponseParams.STATUS_CODE: 200,
                ResponseParams.RESPONSE: {}} == resp
        self.connection_mock.send.assert_called_once_with('/test', None, method=HTTPMethod.GET,
                                                          headers=EXPECTED_BASE_HEADERS)

    def test_send_request_should_return_error_info_when_http_error_raises(self):
        self.connection_mock.send.side_effect = HTTPError('http://testhost.com', 500, '', {},
                                                          StringIO('{"errorMessage": "ERROR"}'))

        resp = self.ftd_plugin.send_request('/test', HTTPMethod.GET)

        assert {ResponseParams.SUCCESS: False, ResponseParams.STATUS_CODE: 500,
                ResponseParams.RESPONSE: {'errorMessage': 'ERROR'}} == resp

    def test_send_request_raises_exception_when_invalid_response(self):
        self.connection_mock.send.return_value = self._connection_response('nonValidJson')

        with self.assertRaises(ConnectionError) as res:
            self.ftd_plugin.send_request('/test', HTTPMethod.GET)

        assert 'Invalid JSON response' in str(res.exception)

    def test_handle_httperror_should_update_tokens_and_retry_on_auth_errors(self):
        self.ftd_plugin.refresh_token = 'REFRESH_TOKEN'
        self.connection_mock.send.return_value = self._connection_response(
            {'access_token': 'NEW_ACCESS_TOKEN', 'refresh_token': 'NEW_REFRESH_TOKEN'}
        )

        retry = self.ftd_plugin.handle_httperror(HTTPError('http://testhost.com', 401, '', {}, None))

        assert retry
        assert 'NEW_ACCESS_TOKEN' == self.ftd_plugin.access_token
        assert 'NEW_REFRESH_TOKEN' == self.ftd_plugin.refresh_token

    def test_handle_httperror_should_not_retry_on_non_auth_errors(self):
        assert not self.ftd_plugin.handle_httperror(HTTPError('http://testhost.com', 500, '', {}, None))

    def test_handle_httperror_should_not_retry_when_ignoring_http_errors(self):
        self.ftd_plugin._ignore_http_errors = True
        assert not self.ftd_plugin.handle_httperror(HTTPError('http://testhost.com', 401, '', {}, None))

    @patch('os.path.isdir', mock.Mock(return_value=False))
    def test_download_file(self):
        self.connection_mock.send.return_value = self._connection_response('File content')

        open_mock = mock_open()
        with patch('%s.open' % BUILTINS, open_mock):
            self.ftd_plugin.download_file('/files/1', '/tmp/test.txt')

        open_mock.assert_called_once_with('/tmp/test.txt', 'wb')
        open_mock().write.assert_called_once_with(b'File content')

    @patch('os.path.isdir', mock.Mock(return_value=True))
    def test_download_file_should_extract_filename_from_headers(self):
        filename = 'test_file.txt'
        response = mock.Mock()
        response.info.return_value = {'Content-Disposition': 'attachment; filename="%s"' % filename}
        dummy, response_data = self._connection_response('File content')
        self.connection_mock.send.return_value = response, response_data

        open_mock = mock_open()
        with patch('%s.open' % BUILTINS, open_mock):
            self.ftd_plugin.download_file('/files/1', '/tmp/')

        open_mock.assert_called_once_with('/tmp/%s' % filename, 'wb')
        open_mock().write.assert_called_once_with(b'File content')

    @patch('os.path.basename', mock.Mock(return_value='test.txt'))
    @patch('ansible.plugins.httpapi.ftd.encode_multipart_formdata',
           mock.Mock(return_value=('--Encoded data--', 'multipart/form-data')))
    def test_upload_file(self):
        self.connection_mock.send.return_value = self._connection_response({'id': '123'})

        open_mock = mock_open()
        with patch('%s.open' % BUILTINS, open_mock):
            resp = self.ftd_plugin.upload_file('/tmp/test.txt', '/files')

        assert {'id': '123'} == resp
        exp_headers = dict(EXPECTED_BASE_HEADERS)
        exp_headers['Content-Length'] = len('--Encoded data--')
        exp_headers['Content-Type'] = 'multipart/form-data'
        self.connection_mock.send.assert_called_once_with('/files', data='--Encoded data--',
                                                          headers=exp_headers, method=HTTPMethod.POST)
        open_mock.assert_called_once_with('/tmp/test.txt', 'rb')

    @patch('os.path.basename', mock.Mock(return_value='test.txt'))
    @patch('ansible.plugins.httpapi.ftd.encode_multipart_formdata',
           mock.Mock(return_value=('--Encoded data--', 'multipart/form-data')))
    def test_upload_file_raises_exception_when_invalid_response(self):
        self.connection_mock.send.return_value = self._connection_response('invalidJsonResponse')

        open_mock = mock_open()
        with patch('%s.open' % BUILTINS, open_mock):
            with self.assertRaises(ConnectionError) as res:
                self.ftd_plugin.upload_file('/tmp/test.txt', '/files')

        assert 'Invalid JSON response' in str(res.exception)

    @patch.object(FdmSwaggerParser, 'parse_spec')
    def test_get_operation_spec(self, parse_spec_mock):
        self.connection_mock.send.return_value = self._connection_response(None)
        parse_spec_mock.return_value = {
            SpecProp.OPERATIONS: {'testOp': 'Specification for testOp'}
        }

        assert 'Specification for testOp' == self.ftd_plugin.get_operation_spec('testOp')
        assert self.ftd_plugin.get_operation_spec('nonExistingTestOp') is None

    @patch.object(FdmSwaggerParser, 'parse_spec')
    def test_get_model_spec(self, parse_spec_mock):
        self.connection_mock.send.return_value = self._connection_response(None)
        parse_spec_mock.return_value = {
            SpecProp.MODELS: {'TestModel': 'Specification for TestModel'}
        }

        assert 'Specification for TestModel' == self.ftd_plugin.get_model_spec('TestModel')
        assert self.ftd_plugin.get_model_spec('NonExistingTestModel') is None

    @patch.object(FdmSwaggerParser, 'parse_spec')
    def test_get_operation_specs_by_model_name(self, parse_spec_mock):
        self.connection_mock.send.return_value = self._connection_response(None)
        operation1 = {'modelName': 'TestModel'}
        op_model_name_is_none = {'modelName': None}
        op_without_model_name = {'url': 'testUrl'}
        parse_spec_mock.return_value = {
            SpecProp.MODEL_OPERATIONS: {
                'TestModel': {
                    'testOp1': operation1,
                    'testOp2': 'spec2'
                },
                'TestModel2': {
                    'testOp10': 'spec10',
                    'testOp20': 'spec20'
                }
            },
            SpecProp.OPERATIONS: {
                'testOp1': operation1,
                'testOp10': {
                    'modelName': 'TestModel2'
                },
                'testOpWithoutModelName': op_without_model_name,
                'testOpModelNameIsNone': op_model_name_is_none
            }
        }

        assert {'testOp1': operation1, 'testOp2': 'spec2'} == self.ftd_plugin.get_operation_specs_by_model_name(
            'TestModel')
        assert None is self.ftd_plugin.get_operation_specs_by_model_name(
            'testOpModelNameIsNone')

        assert None is self.ftd_plugin.get_operation_specs_by_model_name(
            'testOpWithoutModelName')

        assert self.ftd_plugin.get_operation_specs_by_model_name('nonExistingOperation') is None

    @staticmethod
    def _connection_response(response, status=200):
        response_mock = mock.Mock()
        response_mock.getcode.return_value = status
        response_text = json.dumps(response) if type(response) is dict else response
        response_data = BytesIO(response_text.encode() if response_text else ''.encode())
        return response_mock, response_data
gpl-3.0
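Nearly every test above leans on the _connection_response() helper: a Mock standing in for the HTTP response object, paired with a BytesIO holding the body. Below is a pared-down, stdlib-only sketch of that stubbing pattern; fake_response is a hypothetical name mirroring the helper, and the asserted payload is invented.

# A minimal version of the fake-response pattern used in the tests above;
# runs with only the standard library.
import json
from io import BytesIO
from unittest import mock


def fake_response(payload, status=200):
    """Build a (response, body) pair shaped like the connection's result."""
    response_mock = mock.Mock()
    response_mock.getcode.return_value = status
    text = json.dumps(payload) if isinstance(payload, dict) else payload
    return response_mock, BytesIO(text.encode() if text else b'')


connection = mock.Mock()
connection.send.return_value = fake_response({'access_token': 'TOKEN'})

# Code under test would call connection.send(...) and read the body back:
response, body = connection.send('/testLoginUrl', '{}')
assert response.getcode() == 200
assert json.loads(body.read().decode())['access_token'] == 'TOKEN'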
mferenca/HMS-ecommerce
ecommerce/extensions/api/v2/tests/views/test_products.py
1
7818
from __future__ import unicode_literals

import datetime
import json

import pytz
from django.core.urlresolvers import reverse
from django.test import RequestFactory
from oscar.core.loading import get_model

from ecommerce.coupons.tests.mixins import CouponMixin
from ecommerce.courses.models import Course
from ecommerce.extensions.api.serializers import ProductSerializer
from ecommerce.extensions.api.v2.tests.views import JSON_CONTENT_TYPE, ProductSerializerMixin
from ecommerce.extensions.catalogue.tests.mixins import CourseCatalogTestMixin
from ecommerce.tests.testcases import TestCase

Benefit = get_model('offer', 'Benefit')
Catalog = get_model('catalogue', 'Catalog')
Product = get_model('catalogue', 'Product')
ProductClass = get_model('catalogue', 'ProductClass')
Voucher = get_model('voucher', 'Voucher')


class ProductViewSetBase(ProductSerializerMixin, CourseCatalogTestMixin, TestCase):
    def setUp(self):
        super(ProductViewSetBase, self).setUp()
        self.user = self.create_user(is_staff=True)
        self.client.login(username=self.user.username, password=self.password)
        self.course = Course.objects.create(id='edX/DemoX/Demo_Course', name='Test Course')

        # TODO Update the expiration date by 2099-12-31
        expires = datetime.datetime(2100, 1, 1, tzinfo=pytz.UTC)
        self.seat = self.course.create_or_update_seat('honor', False, 0, self.partner, expires=expires)


class ProductViewSetTests(ProductViewSetBase):
    def test_list(self):
        """ Verify a list of products is returned. """
        path = reverse('api:v2:product-list')
        response = self.client.get(path)
        self.assertEqual(response.status_code, 200)

        results = [self.serialize_product(p) for p in self.course.products.all()]
        expected = {'count': 2, 'next': None, 'previous': None, 'results': results}
        self.assertDictEqual(json.loads(response.content), expected)

        # If no products exist, the view should return an empty result set.
        Product.objects.all().delete()
        response = self.client.get(path)
        self.assertEqual(response.status_code, 200)

        expected = {'count': 0, 'next': None, 'previous': None, 'results': []}
        self.assertDictEqual(json.loads(response.content), expected)

    def test_retrieve(self):
        """ Verify a single product is returned. """
        path = reverse('api:v2:product-detail', kwargs={'pk': 999})
        response = self.client.get(path)
        self.assertEqual(response.status_code, 404)

        path = reverse('api:v2:product-detail', kwargs={'pk': self.seat.id})
        response = self.client.get(path)
        self.assertEqual(response.status_code, 200)
        self.assertDictEqual(json.loads(response.content), self.serialize_product(self.seat))

    def test_destroy(self):
        """ Verify the view does NOT allow products to be destroyed. """
        product_id = self.seat.id
        path = reverse('api:v2:product-detail', kwargs={'pk': product_id})
        response = self.client.delete(path)
        self.assertEqual(response.status_code, 405)
        self.assertTrue(Product.objects.filter(id=product_id).exists())

    def test_update(self):
        """ Verify the view allows individual products to be updated. """
        data = self.serialize_product(self.seat)
        data['title'] = 'Fake Seat Title'

        path = reverse('api:v2:product-detail', kwargs={'pk': self.seat.id})
        response = self.client.put(path, json.dumps(data), JSON_CONTENT_TYPE)
        self.assertEqual(response.status_code, 200, response.content)

        product = Product.objects.get(id=self.seat.id)
        self.assertEqual(product.title, data['title'])
        self.assertDictEqual(json.loads(response.content), self.serialize_product(product))

    def test_list_for_course(self):
        """ Verify the view supports listing products for a single course. """
        # Create another course and seat to confirm filtering.
        other_course = Course.objects.create(id='edX/DemoX/XYZ', name='Test Course 2')
        other_course.create_or_update_seat('honor', False, 0, self.partner)

        path = reverse('api:v2:course-product-list', kwargs={'parent_lookup_course_id': self.course.id})
        response = self.client.get(path)
        self.assertEqual(response.status_code, 200)

        results = [self.serialize_product(p) for p in self.course.products.all()]
        expected = {'count': 2, 'next': None, 'previous': None, 'results': results}
        self.assertDictEqual(json.loads(response.content), expected)

    def test_get_partner_products(self):
        """Verify the endpoint returns the list of products associated with a partner. """
        url = reverse(
            'api:v2:partner-product-list',
            kwargs={'parent_lookup_stockrecords__partner_id': self.partner.id}
        )
        response = self.client.get(url)
        expected_data = self.serialize_product(self.seat)
        self.assertEqual(response.status_code, 200)
        self.assertListEqual(json.loads(response.content)['results'], [expected_data])

    def test_no_partner_product(self):
        """Verify the endpoint returns an empty list if no products are associated with a partner. """
        Product.objects.all().delete()
        url = reverse(
            'api:v2:partner-product-list',
            kwargs={'parent_lookup_stockrecords__partner_id': self.partner.id}
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        expected = {
            'count': 0,
            'next': None,
            'previous': None,
            'results': []
        }
        self.assertDictEqual(json.loads(response.content), expected)


class ProductViewSetCouponTests(CouponMixin, ProductViewSetBase):
    def test_coupon_product_details(self):
        """Verify the endpoint returns all coupon information."""
        coupon = self.create_coupon()
        url = reverse('api:v2:product-detail', kwargs={'pk': coupon.id})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

        request = RequestFactory(SERVER_NAME=self.site.domain).get('/')
        request.user = self.user
        request.site = self.site
        expected = ProductSerializer(coupon, context={'request': request}).data
        self.assertDictEqual(response.data, expected)

    def test_coupon_voucher_serializer(self):
        """Verify that the vouchers of a coupon are properly serialized."""
        coupon = self.create_coupon()
        url = reverse('api:v2:product-detail', kwargs={'pk': coupon.id})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

        response_data = json.loads(response.content)
        voucher = response_data['attribute_values'][0]['value'][0]
        self.assertEqual(voucher['name'], 'Test coupon')
        self.assertEqual(voucher['usage'], Voucher.SINGLE_USE)
        self.assertEqual(voucher['benefit']['type'], Benefit.PERCENTAGE)
        self.assertEqual(voucher['benefit']['value'], 100.0)

    def test_product_filtering(self):
        """Verify products are filtered."""
        self.create_coupon()
        url = reverse('api:v2:product-list')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

        response_data = json.loads(response.content)
        self.assertEqual(response_data['count'], 3)

        filtered_url = '{}?product_class=CoUpOn'.format(url)
        response = self.client.get(filtered_url)
        self.assertEqual(response.status_code, 200)

        response_data = json.loads(response.content)
        self.assertEqual(response_data['count'], 1)
        self.assertEqual(response_data['results'][0]['product_class'], 'Coupon')
agpl-3.0
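The list tests above all assert against the same DRF-style pagination envelope (count/next/previous/results). A tiny stand-alone sketch of that shape check, with a hypothetical paginated() helper and invented payload; the real serialized products come from ProductSerializer.

# Illustration of the pagination-envelope assertion used throughout the
# list tests above; pure stdlib, no Django required.
import json


def paginated(results):
    """Wrap serialized results in the count/next/previous envelope."""
    return {'count': len(results), 'next': None, 'previous': None, 'results': results}


# A stand-in for response.content from a product-list endpoint:
fake_content = json.dumps(paginated([{'id': 1, 'title': 'Seat in Test Course'}]))

expected = paginated([{'id': 1, 'title': 'Seat in Test Course'}])
assert json.loads(fake_content) == expected
print('pagination envelope matches:', expected['count'], 'result(s)')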
wenottingham/ansible
lib/ansible/plugins/cache/memory.py
275
1466
# (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.plugins.cache.base import BaseCacheModule


class CacheModule(BaseCacheModule):

    def __init__(self, *args, **kwargs):
        self._cache = {}

    def get(self, key):
        return self._cache.get(key)

    def set(self, key, value):
        self._cache[key] = value

    def keys(self):
        return self._cache.keys()

    def contains(self, key):
        return key in self._cache

    def delete(self, key):
        del self._cache[key]

    def flush(self):
        self._cache = {}

    def copy(self):
        return self._cache.copy()

    def __getstate__(self):
        return self.copy()

    def __setstate__(self, data):
        self._cache = data
gpl-3.0
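The plugin above is a thin dict wrapper behind Ansible's cache interface. A minimal stand-in so the interface can be exercised without an Ansible install; MemoryCache is a hypothetical class (BaseCacheModule is dropped, since the plugin never calls into it), and the fact payload is invented.

# Stand-alone sketch of the memory cache plugin's behavior.
class MemoryCache(object):
    def __init__(self):
        self._cache = {}

    def get(self, key):
        return self._cache.get(key)

    def set(self, key, value):
        self._cache[key] = value

    def contains(self, key):
        return key in self._cache

    def delete(self, key):
        del self._cache[key]

    def flush(self):
        # Rebinding (rather than clearing) matches the plugin above.
        self._cache = {}


cache = MemoryCache()
cache.set('host1', {'ansible_facts': {'os': 'Linux'}})
assert cache.contains('host1')
print(cache.get('host1'))
cache.flush()
assert not cache.contains('host1')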
Maximilian-Reuter/SickRage
lib/hachoir_core/i18n.py
86
6241
# -*- coding: UTF-8 -*-
"""
Functions to manage internationalisation (i18n):
- initLocale(): setup locales and install Unicode compatible
  stdout and stderr ;
- getTerminalCharset(): guess terminal charset ;
- gettext(text) translate a string to current language. The function
  always returns Unicode string. You can also use the alias: _() ;
- ngettext(singular, plural, count): translate a sentence with singular
  and plural form. The function always returns Unicode string.

WARNING: Loading this module indirectly calls initLocale() which sets
         locale LC_ALL to ''. This is needed to get user preferred locale
         settings.
"""

import hachoir_core.config as config
import hachoir_core
import locale
from os import path
import sys
from codecs import BOM_UTF8, BOM_UTF16_LE, BOM_UTF16_BE

def _getTerminalCharset():
    """
    Function used by getTerminalCharset() to get terminal charset.

    @see getTerminalCharset()
    """
    # (1) Try locale.getpreferredencoding()
    try:
        charset = locale.getpreferredencoding()
        if charset:
            return charset
    except (locale.Error, AttributeError):
        pass

    # (2) Try locale.nl_langinfo(CODESET)
    try:
        charset = locale.nl_langinfo(locale.CODESET)
        if charset:
            return charset
    except (locale.Error, AttributeError):
        pass

    # (3) Try sys.stdout.encoding
    if hasattr(sys.stdout, "encoding") and sys.stdout.encoding:
        return sys.stdout.encoding

    # (4) Otherwise, returns "ASCII"
    return "ASCII"

def getTerminalCharset():
    """
    Guess terminal charset using different tests:
    1. Try locale.getpreferredencoding()
    2. Try locale.nl_langinfo(CODESET)
    3. Try sys.stdout.encoding
    4. Otherwise, returns "ASCII"

    WARNING: Call initLocale() before calling this function.
    """
    try:
        return getTerminalCharset.value
    except AttributeError:
        getTerminalCharset.value = _getTerminalCharset()
        return getTerminalCharset.value

class UnicodeStdout(object):
    def __init__(self, old_device, charset):
        self.device = old_device
        self.charset = charset

    def flush(self):
        self.device.flush()

    def write(self, text):
        if isinstance(text, unicode):
            text = text.encode(self.charset, 'replace')
        self.device.write(text)

    def writelines(self, lines):
        for text in lines:
            self.write(text)

def initLocale():
    # Only initialize locale once
    if initLocale.is_done:
        return getTerminalCharset()
    initLocale.is_done = True

    # Setup locales
    try:
        locale.setlocale(locale.LC_ALL, "")
    except (locale.Error, IOError):
        pass

    # Get the terminal charset
    charset = getTerminalCharset()

    # UnicodeStdout conflicts with the readline module
    if config.unicode_stdout and ('readline' not in sys.modules):
        # Replace stdout and stderr by unicode object supporting unicode string
        sys.stdout = UnicodeStdout(sys.stdout, charset)
        sys.stderr = UnicodeStdout(sys.stderr, charset)
    return charset
initLocale.is_done = False

def _dummy_gettext(text):
    return unicode(text)

def _dummy_ngettext(singular, plural, count):
    if 1 < abs(count) or not count:
        return unicode(plural)
    else:
        return unicode(singular)

def _initGettext():
    charset = initLocale()

    # Try to load gettext module
    if config.use_i18n:
        try:
            import gettext
            ok = True
        except ImportError:
            ok = False
    else:
        ok = False

    # gettext is not available or not needed: use dummy gettext functions
    if not ok:
        return (_dummy_gettext, _dummy_ngettext)

    # Gettext variables
    package = hachoir_core.PACKAGE
    locale_dir = path.join(path.dirname(__file__), "..", "locale")

    # Initialize gettext module
    gettext.bindtextdomain(package, locale_dir)
    gettext.textdomain(package)
    translate = gettext.gettext
    ngettext = gettext.ngettext

    # TODO: translate_unicode lambda function really sucks!
    # => find native function to do that
    unicode_gettext = lambda text: \
        unicode(translate(text), charset)
    unicode_ngettext = lambda singular, plural, count: \
        unicode(ngettext(singular, plural, count), charset)
    return (unicode_gettext, unicode_ngettext)

UTF_BOMS = (
    (BOM_UTF8, "UTF-8"),
    (BOM_UTF16_LE, "UTF-16-LE"),
    (BOM_UTF16_BE, "UTF-16-BE"),
)

# Set of valid characters for specific charset
CHARSET_CHARACTERS = (
    # U+00E0: LATIN SMALL LETTER A WITH GRAVE
    (set(u"©®éêè\xE0ç".encode("ISO-8859-1")), "ISO-8859-1"),
    (set(u"©®éêè\xE0ç€".encode("ISO-8859-15")), "ISO-8859-15"),
    (set(u"©®".encode("MacRoman")), "MacRoman"),
    (set(u"εδηιθκμοΡσςυΈί".encode("ISO-8859-7")), "ISO-8859-7"),
)

def guessBytesCharset(bytes, default=None):
    r"""
    >>> guessBytesCharset("abc")
    'ASCII'
    >>> guessBytesCharset("\xEF\xBB\xBFabc")
    'UTF-8'
    >>> guessBytesCharset("abc\xC3\xA9")
    'UTF-8'
    >>> guessBytesCharset("File written by Adobe Photoshop\xA8 4.0\0")
    'MacRoman'
    >>> guessBytesCharset("\xE9l\xE9phant")
    'ISO-8859-1'
    >>> guessBytesCharset("100 \xA4")
    'ISO-8859-15'
    >>> guessBytesCharset('Word \xb8\xea\xe4\xef\xf3\xe7 - Microsoft Outlook 97 - \xd1\xf5\xe8\xec\xdf\xf3\xe5\xe9\xf2 e-mail')
    'ISO-8859-7'
    """
    # Check for UTF BOM
    for bom_bytes, charset in UTF_BOMS:
        if bytes.startswith(bom_bytes):
            return charset

    # Pure ASCII?
    try:
        text = unicode(bytes, 'ASCII', 'strict')
        return 'ASCII'
    except UnicodeDecodeError:
        pass

    # Valid UTF-8?
    try:
        text = unicode(bytes, 'UTF-8', 'strict')
        return 'UTF-8'
    except UnicodeDecodeError:
        pass

    # Create a set of non-ASCII characters
    non_ascii_set = set(byte for byte in bytes if ord(byte) >= 128)
    for characters, charset in CHARSET_CHARACTERS:
        if characters.issuperset(non_ascii_set):
            return charset
    return default

# Initialize _(), gettext() and ngettext() functions
gettext, ngettext = _initGettext()
_ = gettext
gpl-3.0
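The first stage of guessBytesCharset() above is a byte-order-mark check. The record is Python 2; here is a small Python 3 rendering of just that stage (b'' literals instead of str), using the same codecs constants, so the idea can be run on a current interpreter.

# BOM-based charset detection, adapted from the UTF_BOMS loop above.
import codecs

UTF_BOMS = (
    (codecs.BOM_UTF8, "UTF-8"),
    (codecs.BOM_UTF16_LE, "UTF-16-LE"),
    (codecs.BOM_UTF16_BE, "UTF-16-BE"),
)


def guess_by_bom(data, default="ASCII"):
    """Return the charset implied by a leading byte-order mark, if any."""
    for bom, charset in UTF_BOMS:
        if data.startswith(bom):
            return charset
    return default


print(guess_by_bom(b'\xef\xbb\xbfabc'))   # UTF-8
print(guess_by_bom(b'\xff\xfea\x00'))     # UTF-16-LE
print(guess_by_bom(b'plain ascii'))       # ASCII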
bestvibes/neo4j-social-network
env/lib/python2.7/encodings/cp875.py
593
13110
""" Python Character Mapping Codec cp875 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP875.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp875', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> NULL u'\x01' # 0x01 -> START OF HEADING u'\x02' # 0x02 -> START OF TEXT u'\x03' # 0x03 -> END OF TEXT u'\x9c' # 0x04 -> CONTROL u'\t' # 0x05 -> HORIZONTAL TABULATION u'\x86' # 0x06 -> CONTROL u'\x7f' # 0x07 -> DELETE u'\x97' # 0x08 -> CONTROL u'\x8d' # 0x09 -> CONTROL u'\x8e' # 0x0A -> CONTROL u'\x0b' # 0x0B -> VERTICAL TABULATION u'\x0c' # 0x0C -> FORM FEED u'\r' # 0x0D -> CARRIAGE RETURN u'\x0e' # 0x0E -> SHIFT OUT u'\x0f' # 0x0F -> SHIFT IN u'\x10' # 0x10 -> DATA LINK ESCAPE u'\x11' # 0x11 -> DEVICE CONTROL ONE u'\x12' # 0x12 -> DEVICE CONTROL TWO u'\x13' # 0x13 -> DEVICE CONTROL THREE u'\x9d' # 0x14 -> CONTROL u'\x85' # 0x15 -> CONTROL u'\x08' # 0x16 -> BACKSPACE u'\x87' # 0x17 -> CONTROL u'\x18' # 0x18 -> CANCEL u'\x19' # 0x19 -> END OF MEDIUM u'\x92' # 0x1A -> CONTROL u'\x8f' # 0x1B -> CONTROL u'\x1c' # 0x1C -> FILE SEPARATOR u'\x1d' # 0x1D -> GROUP SEPARATOR u'\x1e' # 0x1E -> RECORD SEPARATOR u'\x1f' # 0x1F -> UNIT SEPARATOR u'\x80' # 0x20 -> CONTROL u'\x81' # 0x21 -> CONTROL u'\x82' # 0x22 -> CONTROL u'\x83' # 0x23 -> CONTROL u'\x84' # 0x24 -> CONTROL u'\n' # 0x25 -> LINE FEED u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK u'\x1b' # 0x27 -> ESCAPE u'\x88' # 0x28 -> CONTROL u'\x89' # 0x29 -> CONTROL u'\x8a' # 0x2A -> CONTROL u'\x8b' # 0x2B -> CONTROL u'\x8c' # 0x2C -> CONTROL u'\x05' # 0x2D -> ENQUIRY u'\x06' # 0x2E -> ACKNOWLEDGE u'\x07' # 0x2F -> BELL u'\x90' # 0x30 -> CONTROL u'\x91' # 0x31 -> CONTROL u'\x16' # 0x32 -> SYNCHRONOUS IDLE u'\x93' # 0x33 -> CONTROL u'\x94' # 0x34 -> CONTROL u'\x95' # 0x35 -> CONTROL u'\x96' # 0x36 -> CONTROL u'\x04' # 0x37 -> END OF TRANSMISSION u'\x98' # 0x38 -> CONTROL u'\x99' # 0x39 -> CONTROL u'\x9a' # 0x3A -> CONTROL u'\x9b' # 0x3B -> CONTROL u'\x14' # 0x3C -> DEVICE CONTROL FOUR u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE u'\x9e' # 0x3E -> CONTROL u'\x1a' # 0x3F -> SUBSTITUTE u' ' # 0x40 -> SPACE u'\u0391' # 0x41 -> GREEK CAPITAL LETTER ALPHA u'\u0392' # 0x42 -> GREEK CAPITAL LETTER BETA u'\u0393' # 0x43 -> GREEK CAPITAL LETTER GAMMA u'\u0394' # 0x44 -> GREEK CAPITAL LETTER DELTA u'\u0395' # 0x45 -> GREEK CAPITAL LETTER EPSILON u'\u0396' # 0x46 -> GREEK CAPITAL LETTER ZETA u'\u0397' # 0x47 -> GREEK CAPITAL LETTER ETA u'\u0398' # 0x48 -> GREEK CAPITAL LETTER THETA u'\u0399' # 0x49 -> GREEK CAPITAL LETTER IOTA u'[' # 0x4A -> LEFT SQUARE BRACKET u'.' # 0x4B -> FULL STOP u'<' # 0x4C -> LESS-THAN SIGN u'(' # 0x4D -> LEFT PARENTHESIS u'+' # 0x4E -> PLUS SIGN u'!' 
# 0x4F -> EXCLAMATION MARK u'&' # 0x50 -> AMPERSAND u'\u039a' # 0x51 -> GREEK CAPITAL LETTER KAPPA u'\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMDA u'\u039c' # 0x53 -> GREEK CAPITAL LETTER MU u'\u039d' # 0x54 -> GREEK CAPITAL LETTER NU u'\u039e' # 0x55 -> GREEK CAPITAL LETTER XI u'\u039f' # 0x56 -> GREEK CAPITAL LETTER OMICRON u'\u03a0' # 0x57 -> GREEK CAPITAL LETTER PI u'\u03a1' # 0x58 -> GREEK CAPITAL LETTER RHO u'\u03a3' # 0x59 -> GREEK CAPITAL LETTER SIGMA u']' # 0x5A -> RIGHT SQUARE BRACKET u'$' # 0x5B -> DOLLAR SIGN u'*' # 0x5C -> ASTERISK u')' # 0x5D -> RIGHT PARENTHESIS u';' # 0x5E -> SEMICOLON u'^' # 0x5F -> CIRCUMFLEX ACCENT u'-' # 0x60 -> HYPHEN-MINUS u'/' # 0x61 -> SOLIDUS u'\u03a4' # 0x62 -> GREEK CAPITAL LETTER TAU u'\u03a5' # 0x63 -> GREEK CAPITAL LETTER UPSILON u'\u03a6' # 0x64 -> GREEK CAPITAL LETTER PHI u'\u03a7' # 0x65 -> GREEK CAPITAL LETTER CHI u'\u03a8' # 0x66 -> GREEK CAPITAL LETTER PSI u'\u03a9' # 0x67 -> GREEK CAPITAL LETTER OMEGA u'\u03aa' # 0x68 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA u'\u03ab' # 0x69 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA u'|' # 0x6A -> VERTICAL LINE u',' # 0x6B -> COMMA u'%' # 0x6C -> PERCENT SIGN u'_' # 0x6D -> LOW LINE u'>' # 0x6E -> GREATER-THAN SIGN u'?' # 0x6F -> QUESTION MARK u'\xa8' # 0x70 -> DIAERESIS u'\u0386' # 0x71 -> GREEK CAPITAL LETTER ALPHA WITH TONOS u'\u0388' # 0x72 -> GREEK CAPITAL LETTER EPSILON WITH TONOS u'\u0389' # 0x73 -> GREEK CAPITAL LETTER ETA WITH TONOS u'\xa0' # 0x74 -> NO-BREAK SPACE u'\u038a' # 0x75 -> GREEK CAPITAL LETTER IOTA WITH TONOS u'\u038c' # 0x76 -> GREEK CAPITAL LETTER OMICRON WITH TONOS u'\u038e' # 0x77 -> GREEK CAPITAL LETTER UPSILON WITH TONOS u'\u038f' # 0x78 -> GREEK CAPITAL LETTER OMEGA WITH TONOS u'`' # 0x79 -> GRAVE ACCENT u':' # 0x7A -> COLON u'#' # 0x7B -> NUMBER SIGN u'@' # 0x7C -> COMMERCIAL AT u"'" # 0x7D -> APOSTROPHE u'=' # 0x7E -> EQUALS SIGN u'"' # 0x7F -> QUOTATION MARK u'\u0385' # 0x80 -> GREEK DIALYTIKA TONOS u'a' # 0x81 -> LATIN SMALL LETTER A u'b' # 0x82 -> LATIN SMALL LETTER B u'c' # 0x83 -> LATIN SMALL LETTER C u'd' # 0x84 -> LATIN SMALL LETTER D u'e' # 0x85 -> LATIN SMALL LETTER E u'f' # 0x86 -> LATIN SMALL LETTER F u'g' # 0x87 -> LATIN SMALL LETTER G u'h' # 0x88 -> LATIN SMALL LETTER H u'i' # 0x89 -> LATIN SMALL LETTER I u'\u03b1' # 0x8A -> GREEK SMALL LETTER ALPHA u'\u03b2' # 0x8B -> GREEK SMALL LETTER BETA u'\u03b3' # 0x8C -> GREEK SMALL LETTER GAMMA u'\u03b4' # 0x8D -> GREEK SMALL LETTER DELTA u'\u03b5' # 0x8E -> GREEK SMALL LETTER EPSILON u'\u03b6' # 0x8F -> GREEK SMALL LETTER ZETA u'\xb0' # 0x90 -> DEGREE SIGN u'j' # 0x91 -> LATIN SMALL LETTER J u'k' # 0x92 -> LATIN SMALL LETTER K u'l' # 0x93 -> LATIN SMALL LETTER L u'm' # 0x94 -> LATIN SMALL LETTER M u'n' # 0x95 -> LATIN SMALL LETTER N u'o' # 0x96 -> LATIN SMALL LETTER O u'p' # 0x97 -> LATIN SMALL LETTER P u'q' # 0x98 -> LATIN SMALL LETTER Q u'r' # 0x99 -> LATIN SMALL LETTER R u'\u03b7' # 0x9A -> GREEK SMALL LETTER ETA u'\u03b8' # 0x9B -> GREEK SMALL LETTER THETA u'\u03b9' # 0x9C -> GREEK SMALL LETTER IOTA u'\u03ba' # 0x9D -> GREEK SMALL LETTER KAPPA u'\u03bb' # 0x9E -> GREEK SMALL LETTER LAMDA u'\u03bc' # 0x9F -> GREEK SMALL LETTER MU u'\xb4' # 0xA0 -> ACUTE ACCENT u'~' # 0xA1 -> TILDE u's' # 0xA2 -> LATIN SMALL LETTER S u't' # 0xA3 -> LATIN SMALL LETTER T u'u' # 0xA4 -> LATIN SMALL LETTER U u'v' # 0xA5 -> LATIN SMALL LETTER V u'w' # 0xA6 -> LATIN SMALL LETTER W u'x' # 0xA7 -> LATIN SMALL LETTER X u'y' # 0xA8 -> LATIN SMALL LETTER Y u'z' # 0xA9 -> LATIN SMALL LETTER Z u'\u03bd' # 0xAA -> GREEK SMALL LETTER NU 
u'\u03be' # 0xAB -> GREEK SMALL LETTER XI u'\u03bf' # 0xAC -> GREEK SMALL LETTER OMICRON u'\u03c0' # 0xAD -> GREEK SMALL LETTER PI u'\u03c1' # 0xAE -> GREEK SMALL LETTER RHO u'\u03c3' # 0xAF -> GREEK SMALL LETTER SIGMA u'\xa3' # 0xB0 -> POUND SIGN u'\u03ac' # 0xB1 -> GREEK SMALL LETTER ALPHA WITH TONOS u'\u03ad' # 0xB2 -> GREEK SMALL LETTER EPSILON WITH TONOS u'\u03ae' # 0xB3 -> GREEK SMALL LETTER ETA WITH TONOS u'\u03ca' # 0xB4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA u'\u03af' # 0xB5 -> GREEK SMALL LETTER IOTA WITH TONOS u'\u03cc' # 0xB6 -> GREEK SMALL LETTER OMICRON WITH TONOS u'\u03cd' # 0xB7 -> GREEK SMALL LETTER UPSILON WITH TONOS u'\u03cb' # 0xB8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA u'\u03ce' # 0xB9 -> GREEK SMALL LETTER OMEGA WITH TONOS u'\u03c2' # 0xBA -> GREEK SMALL LETTER FINAL SIGMA u'\u03c4' # 0xBB -> GREEK SMALL LETTER TAU u'\u03c5' # 0xBC -> GREEK SMALL LETTER UPSILON u'\u03c6' # 0xBD -> GREEK SMALL LETTER PHI u'\u03c7' # 0xBE -> GREEK SMALL LETTER CHI u'\u03c8' # 0xBF -> GREEK SMALL LETTER PSI u'{' # 0xC0 -> LEFT CURLY BRACKET u'A' # 0xC1 -> LATIN CAPITAL LETTER A u'B' # 0xC2 -> LATIN CAPITAL LETTER B u'C' # 0xC3 -> LATIN CAPITAL LETTER C u'D' # 0xC4 -> LATIN CAPITAL LETTER D u'E' # 0xC5 -> LATIN CAPITAL LETTER E u'F' # 0xC6 -> LATIN CAPITAL LETTER F u'G' # 0xC7 -> LATIN CAPITAL LETTER G u'H' # 0xC8 -> LATIN CAPITAL LETTER H u'I' # 0xC9 -> LATIN CAPITAL LETTER I u'\xad' # 0xCA -> SOFT HYPHEN u'\u03c9' # 0xCB -> GREEK SMALL LETTER OMEGA u'\u0390' # 0xCC -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS u'\u03b0' # 0xCD -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS u'\u2018' # 0xCE -> LEFT SINGLE QUOTATION MARK u'\u2015' # 0xCF -> HORIZONTAL BAR u'}' # 0xD0 -> RIGHT CURLY BRACKET u'J' # 0xD1 -> LATIN CAPITAL LETTER J u'K' # 0xD2 -> LATIN CAPITAL LETTER K u'L' # 0xD3 -> LATIN CAPITAL LETTER L u'M' # 0xD4 -> LATIN CAPITAL LETTER M u'N' # 0xD5 -> LATIN CAPITAL LETTER N u'O' # 0xD6 -> LATIN CAPITAL LETTER O u'P' # 0xD7 -> LATIN CAPITAL LETTER P u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q u'R' # 0xD9 -> LATIN CAPITAL LETTER R u'\xb1' # 0xDA -> PLUS-MINUS SIGN u'\xbd' # 0xDB -> VULGAR FRACTION ONE HALF u'\x1a' # 0xDC -> SUBSTITUTE u'\u0387' # 0xDD -> GREEK ANO TELEIA u'\u2019' # 0xDE -> RIGHT SINGLE QUOTATION MARK u'\xa6' # 0xDF -> BROKEN BAR u'\\' # 0xE0 -> REVERSE SOLIDUS u'\x1a' # 0xE1 -> SUBSTITUTE u'S' # 0xE2 -> LATIN CAPITAL LETTER S u'T' # 0xE3 -> LATIN CAPITAL LETTER T u'U' # 0xE4 -> LATIN CAPITAL LETTER U u'V' # 0xE5 -> LATIN CAPITAL LETTER V u'W' # 0xE6 -> LATIN CAPITAL LETTER W u'X' # 0xE7 -> LATIN CAPITAL LETTER X u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z u'\xb2' # 0xEA -> SUPERSCRIPT TWO u'\xa7' # 0xEB -> SECTION SIGN u'\x1a' # 0xEC -> SUBSTITUTE u'\x1a' # 0xED -> SUBSTITUTE u'\xab' # 0xEE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xac' # 0xEF -> NOT SIGN u'0' # 0xF0 -> DIGIT ZERO u'1' # 0xF1 -> DIGIT ONE u'2' # 0xF2 -> DIGIT TWO u'3' # 0xF3 -> DIGIT THREE u'4' # 0xF4 -> DIGIT FOUR u'5' # 0xF5 -> DIGIT FIVE u'6' # 0xF6 -> DIGIT SIX u'7' # 0xF7 -> DIGIT SEVEN u'8' # 0xF8 -> DIGIT EIGHT u'9' # 0xF9 -> DIGIT NINE u'\xb3' # 0xFA -> SUPERSCRIPT THREE u'\xa9' # 0xFB -> COPYRIGHT SIGN u'\x1a' # 0xFC -> SUBSTITUTE u'\x1a' # 0xFD -> SUBSTITUTE u'\xbb' # 0xFE -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'\x9f' # 0xFF -> CONTROL ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
mit
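cp875 also ships with CPython's standard encodings package, so the charmap in the record above can be checked directly: per its decoding_table, bytes 0x41-0x43 map to the Greek capital letters alpha, beta, and gamma.

# Round-trip through the stdlib cp875 codec, matching the table above.
data = bytes(bytearray([0x41, 0x42, 0x43]))   # works on Python 2 and 3

text = data.decode('cp875')
print(text)                                   # Greek capital alpha, beta, gamma
assert text.encode('cp875') == data

# The codec is a straight charmap: decoding_table[byte] gives the character.
assert u'\u0391' == b'\x41'.decode('cp875')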
imsplitbit/nova
nova/exception.py
1
42268
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Nova base exception handling. Includes decorator for re-raising Nova-type exceptions. SHOULD include dedicated exception logging. """ import functools import sys from oslo.config import cfg import webob.exc from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import safe_utils LOG = logging.getLogger(__name__) exc_log_opts = [ cfg.BoolOpt('fatal_exception_format_errors', default=False, help='make exception message format errors fatal'), ] CONF = cfg.CONF CONF.register_opts(exc_log_opts) class ConvertedException(webob.exc.WSGIHTTPException): def __init__(self, code=0, title="", explanation=""): self.code = code self.title = title self.explanation = explanation super(ConvertedException, self).__init__() def _cleanse_dict(original): """Strip all admin_password, new_pass, rescue_pass keys from a dict.""" return dict((k, v) for k, v in original.iteritems() if not "_pass" in k) def wrap_exception(notifier=None, get_notifier=None): """This decorator wraps a method to catch any exceptions that may get thrown. It logs the exception as well as optionally sending it to the notification system. """ def inner(f): def wrapped(self, context, *args, **kw): # Don't store self or context in the payload, it now seems to # contain confidential information. try: return f(self, context, *args, **kw) except Exception as e: with excutils.save_and_reraise_exception(): if notifier or get_notifier: payload = dict(exception=e) call_dict = safe_utils.getcallargs(f, context, *args, **kw) cleansed = _cleanse_dict(call_dict) payload.update({'args': cleansed}) # If f has multiple decorators, they must use # functools.wraps to ensure the name is # propagated. event_type = f.__name__ (notifier or get_notifier()).error(context, event_type, payload) return functools.wraps(f)(wrapped) return inner class NovaException(Exception): """Base Nova Exception To correctly use this class, inherit from it and define a 'msg_fmt' property. That msg_fmt will get printf'd with the keyword arguments provided to the constructor. 
""" msg_fmt = _("An unknown exception occurred.") code = 500 headers = {} safe = False def __init__(self, message=None, **kwargs): self.kwargs = kwargs if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except AttributeError: pass if not message: try: message = self.msg_fmt % kwargs except Exception: exc_info = sys.exc_info() # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception(_('Exception in string format operation')) for name, value in kwargs.iteritems(): LOG.error("%s: %s" % (name, value)) if CONF.fatal_exception_format_errors: raise exc_info[0], exc_info[1], exc_info[2] else: # at least get the core message out if something happened message = self.msg_fmt super(NovaException, self).__init__(message) def format_message(self): # NOTE(mrodden): use the first argument to the python Exception object # which should be our full NovaException message, (see __init__) return self.args[0] class EncryptionFailure(NovaException): msg_fmt = _("Failed to encrypt text: %(reason)s") class DecryptionFailure(NovaException): msg_fmt = _("Failed to decrypt text: %(reason)s") class VirtualInterfaceCreateException(NovaException): msg_fmt = _("Virtual Interface creation failed") class VirtualInterfaceMacAddressException(NovaException): msg_fmt = _("5 attempts to create virtual interface" "with unique mac address failed") class GlanceConnectionFailed(NovaException): msg_fmt = _("Connection to glance host %(host)s:%(port)s failed: " "%(reason)s") class NotAuthorized(NovaException): ec2_code = 'AuthFailure' msg_fmt = _("Not authorized.") code = 403 class AdminRequired(NotAuthorized): msg_fmt = _("User does not have admin privileges") class PolicyNotAuthorized(NotAuthorized): msg_fmt = _("Policy doesn't allow %(action)s to be performed.") class ImageNotActive(NovaException): # NOTE(jruzicka): IncorrectState is used for volumes only in EC2, # but it still seems like the most appropriate option. ec2_code = 'IncorrectState' msg_fmt = _("Image %(image_id)s is not active.") class ImageNotAuthorized(NovaException): msg_fmt = _("Not authorized for image %(image_id)s.") class Invalid(NovaException): msg_fmt = _("Unacceptable parameters.") code = 400 class InvalidBDM(Invalid): msg_fmt = _("Block Device Mapping is Invalid.") class InvalidBDMSnapshot(InvalidBDM): msg_fmt = _("Block Device Mapping is Invalid: " "failed to get snapshot %(id)s.") class InvalidBDMVolume(InvalidBDM): msg_fmt = _("Block Device Mapping is Invalid: " "failed to get volume %(id)s.") class InvalidBDMImage(InvalidBDM): msg_fmt = _("Block Device Mapping is Invalid: " "failed to get image %(id)s.") class InvalidBDMBootSequence(InvalidBDM): msg_fmt = _("Block Device Mapping is Invalid: " "Boot sequence for the instance " "and image/block device mapping " "combination is not valid.") class InvalidBDMLocalsLimit(InvalidBDM): msg_fmt = _("Block Device Mapping is Invalid: " "You specified more local devices than the " "limit allows") class InvalidBDMEphemeralSize(InvalidBDM): msg_fmt = _("Ephemeral disks requested are larger than " "the instance type allows.") class InvalidBDMSwapSize(InvalidBDM): msg_fmt = _("Swap drive requested is larger than instance type allows.") class InvalidBDMFormat(InvalidBDM): msg_fmt = _("Block Device Mapping is Invalid: " "%(details)s") class InvalidBDMForLegacy(InvalidBDM): msg_fmt = _("Block Device Mapping cannot " "be converted to legacy format. 
") class InvalidAttribute(Invalid): msg_fmt = _("Attribute not supported: %(attr)s") class VolumeUnattached(Invalid): ec2_code = 'IncorrectState' msg_fmt = _("Volume %(volume_id)s is not attached to anything") class VolumeNotCreated(NovaException): msg_fmt = _("Volume %(volume_id)s did not finish being created" " even after we waited %(seconds)s seconds or %(attempts)s" " attempts.") class InvalidKeypair(Invalid): ec2_code = 'InvalidKeyPair.Format' msg_fmt = _("Keypair data is invalid") + ": %(reason)s" class InvalidRequest(Invalid): msg_fmt = _("The request is invalid.") class InvalidInput(Invalid): msg_fmt = _("Invalid input received") + ": %(reason)s" class InvalidVolume(Invalid): ec2_code = 'UnsupportedOperation' msg_fmt = _("Invalid volume") + ": %(reason)s" class InvalidVolumeAccessMode(Invalid): msg_fmt = _("Invalid volume access mode") + ": %(access_mode)s" class InvalidMetadata(Invalid): msg_fmt = _("Invalid metadata") + ": %(reason)s" class InvalidMetadataSize(Invalid): msg_fmt = _("Invalid metadata size") + ": %(reason)s" class InvalidPortRange(Invalid): ec2_code = 'InvalidParameterValue' msg_fmt = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s") class InvalidIpProtocol(Invalid): msg_fmt = _("Invalid IP protocol %(protocol)s.") class InvalidContentType(Invalid): msg_fmt = _("Invalid content type %(content_type)s.") class InvalidCidr(Invalid): msg_fmt = _("Invalid cidr %(cidr)s.") class InvalidUnicodeParameter(Invalid): msg_fmt = _("Invalid Parameter: " "Unicode is not supported by the current database.") # Cannot be templated as the error syntax varies. # msg needs to be constructed when raised. class InvalidParameterValue(Invalid): ec2_code = 'InvalidParameterValue' msg_fmt = _("%(err)s") class InvalidAggregateAction(Invalid): msg_fmt = _("Cannot perform action '%(action)s' on aggregate " "%(aggregate_id)s. Reason: %(reason)s.") class InvalidGroup(Invalid): msg_fmt = _("Group not valid. Reason: %(reason)s") class InvalidSortKey(Invalid): msg_fmt = _("Sort key supplied was not valid.") class InstanceInvalidState(Invalid): msg_fmt = _("Instance %(instance_uuid)s in %(attr)s %(state)s. 
Cannot " "%(method)s while the instance is in this state.") class InstanceNotRunning(Invalid): msg_fmt = _("Instance %(instance_id)s is not running.") class InstanceNotInRescueMode(Invalid): msg_fmt = _("Instance %(instance_id)s is not in rescue mode") class InstanceNotRescuable(Invalid): msg_fmt = _("Instance %(instance_id)s cannot be rescued: %(reason)s") class InstanceNotReady(Invalid): msg_fmt = _("Instance %(instance_id)s is not ready") class InstanceSuspendFailure(Invalid): msg_fmt = _("Failed to suspend instance") + ": %(reason)s" class InstanceResumeFailure(Invalid): msg_fmt = _("Failed to resume instance: %(reason)s.") class InstancePowerOnFailure(Invalid): msg_fmt = _("Failed to power on instance: %(reason)s.") class InstancePowerOffFailure(Invalid): msg_fmt = _("Failed to power off instance: %(reason)s.") class InstanceRebootFailure(Invalid): msg_fmt = _("Failed to reboot instance") + ": %(reason)s" class InstanceTerminationFailure(Invalid): msg_fmt = _("Failed to terminate instance") + ": %(reason)s" class InstanceDeployFailure(Invalid): msg_fmt = _("Failed to deploy instance") + ": %(reason)s" class MultiplePortsNotApplicable(Invalid): msg_fmt = _("Failed to launch instances") + ": %(reason)s" class ServiceUnavailable(Invalid): msg_fmt = _("Service is unavailable at this time.") class ComputeResourcesUnavailable(ServiceUnavailable): msg_fmt = _("Insufficient compute resources.") class HypervisorUnavailable(NovaException): msg_fmt = _("Connection to the hypervisor is broken on host: %(host)s") class ComputeServiceUnavailable(ServiceUnavailable): msg_fmt = _("Compute service of %(host)s is unavailable at this time.") class ComputeServiceInUse(NovaException): msg_fmt = _("Compute service of %(host)s is still in use.") class UnableToMigrateToSelf(Invalid): msg_fmt = _("Unable to migrate instance (%(instance_id)s) " "to current host (%(host)s).") class InvalidHypervisorType(Invalid): msg_fmt = _("The supplied hypervisor type of is invalid.") class DestinationHypervisorTooOld(Invalid): msg_fmt = _("The instance requires a newer hypervisor version than " "has been provided.") class DestinationDiskExists(Invalid): msg_fmt = _("The supplied disk path (%(path)s) already exists, " "it is expected not to exist.") class InvalidDevicePath(Invalid): msg_fmt = _("The supplied device path (%(path)s) is invalid.") class DevicePathInUse(Invalid): msg_fmt = _("The supplied device path (%(path)s) is in use.") code = 409 class DeviceIsBusy(Invalid): msg_fmt = _("The supplied device (%(device)s) is busy.") class InvalidCPUInfo(Invalid): msg_fmt = _("Unacceptable CPU info") + ": %(reason)s" class InvalidIpAddressError(Invalid): msg_fmt = _("%(address)s is not a valid IP v4/6 address.") class InvalidVLANTag(Invalid): msg_fmt = _("VLAN tag is not appropriate for the port group " "%(bridge)s. Expected VLAN tag is %(tag)s, " "but the one associated with the port group is %(pgroup)s.") class InvalidVLANPortGroup(Invalid): msg_fmt = _("vSwitch which contains the port group %(bridge)s is " "not associated with the desired physical adapter. 
" "Expected vSwitch is %(expected)s, but the one associated " "is %(actual)s.") class InvalidDiskFormat(Invalid): msg_fmt = _("Disk format %(disk_format)s is not acceptable") class ImageUnacceptable(Invalid): msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s") class InstanceUnacceptable(Invalid): msg_fmt = _("Instance %(instance_id)s is unacceptable: %(reason)s") class InvalidEc2Id(Invalid): msg_fmt = _("Ec2 id %(ec2_id)s is unacceptable.") class InvalidUUID(Invalid): msg_fmt = _("Expected a uuid but received %(uuid)s.") class InvalidID(Invalid): msg_fmt = _("Invalid ID received %(id)s.") class ConstraintNotMet(NovaException): msg_fmt = _("Constraint not met.") code = 412 class NotFound(NovaException): msg_fmt = _("Resource could not be found.") code = 404 class AgentBuildNotFound(NotFound): msg_fmt = _("No agent-build associated with id %(id)s.") class AgentBuildExists(NovaException): msg_fmt = _("Agent-build with hypervisor %(hypervisor)s os %(os)s " "architecture %(architecture)s exists.") class VolumeNotFound(NotFound): ec2_code = 'InvalidVolumeID.NotFound' msg_fmt = _("Volume %(volume_id)s could not be found.") class SnapshotNotFound(NotFound): ec2_code = 'InvalidSnapshotID.NotFound' msg_fmt = _("Snapshot %(snapshot_id)s could not be found.") class DiskNotFound(NotFound): msg_fmt = _("No disk at %(location)s") class VolumeDriverNotFound(NotFound): msg_fmt = _("Could not find a handler for %(driver_type)s volume.") class InvalidImageRef(Invalid): msg_fmt = _("Invalid image href %(image_href)s.") class AutoDiskConfigDisabledByImage(Invalid): msg_fmt = _("Requested image %(image)s " "has automatic disk resize disabled.") class ImageNotFound(NotFound): msg_fmt = _("Image %(image_id)s could not be found.") # NOTE(jruzicka): ImageNotFound is not a valid EC2 error code. class ImageNotFoundEC2(ImageNotFound): msg_fmt = _("Image %(image_id)s could not be found. The nova EC2 API " "assigns image ids dynamically when they are listed for the " "first time. Have you listed image ids since adding this " "image?") class ProjectNotFound(NotFound): msg_fmt = _("Project %(project_id)s could not be found.") class StorageRepositoryNotFound(NotFound): msg_fmt = _("Cannot find SR to read/write VDI.") class NetworkDuplicated(Invalid): msg_fmt = _("Network %(network_id)s is duplicated.") class NetworkInUse(NovaException): msg_fmt = _("Network %(network_id)s is still in use.") class NetworkNotCreated(NovaException): msg_fmt = _("%(req)s is required to create a network.") class NetworkNotFound(NotFound): msg_fmt = _("Network %(network_id)s could not be found.") class PortNotFound(NotFound): msg_fmt = _("Port id %(port_id)s could not be found.") class NetworkNotFoundForBridge(NetworkNotFound): msg_fmt = _("Network could not be found for bridge %(bridge)s") class NetworkNotFoundForUUID(NetworkNotFound): msg_fmt = _("Network could not be found for uuid %(uuid)s") class NetworkNotFoundForCidr(NetworkNotFound): msg_fmt = _("Network could not be found with cidr %(cidr)s.") class NetworkNotFoundForInstance(NetworkNotFound): msg_fmt = _("Network could not be found for instance %(instance_id)s.") class NoNetworksFound(NotFound): msg_fmt = _("No networks defined.") class NoMoreNetworks(NovaException): msg_fmt = _("No more available networks.") class NetworkNotFoundForProject(NotFound): msg_fmt = _("Either Network uuid %(network_uuid)s is not present or " "is not assigned to the project %(project_id)s.") class NetworkAmbiguous(Invalid): msg_fmt = _("More than one possible network found. 
Specify " "network ID(s) to select which one(s) to connect to,") class DatastoreNotFound(NotFound): msg_fmt = _("Could not find the datastore reference(s) which the VM uses.") class PortInUse(Invalid): msg_fmt = _("Port %(port_id)s is still in use.") class PortNotUsable(Invalid): msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s.") class PortNotFree(Invalid): msg_fmt = _("No free port available for instance %(instance)s.") class FixedIpExists(NovaException): msg_fmt = _("Fixed ip %(address)s already exists.") class FixedIpNotFound(NotFound): msg_fmt = _("No fixed IP associated with id %(id)s.") class FixedIpNotFoundForAddress(FixedIpNotFound): msg_fmt = _("Fixed ip not found for address %(address)s.") class FixedIpNotFoundForInstance(FixedIpNotFound): msg_fmt = _("Instance %(instance_uuid)s has zero fixed ips.") class FixedIpNotFoundForNetworkHost(FixedIpNotFound): msg_fmt = _("Network host %(host)s has zero fixed ips " "in network %(network_id)s.") class FixedIpNotFoundForSpecificInstance(FixedIpNotFound): msg_fmt = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.") class FixedIpNotFoundForNetwork(FixedIpNotFound): msg_fmt = _("Fixed IP address (%(address)s) does not exist in " "network (%(network_uuid)s).") class FixedIpAlreadyInUse(NovaException): msg_fmt = _("Fixed IP address %(address)s is already in use on instance " "%(instance_uuid)s.") class FixedIpAssociatedWithMultipleInstances(NovaException): msg_fmt = _("More than one instance is associated with fixed ip address " "'%(address)s'.") class FixedIpInvalid(Invalid): msg_fmt = _("Fixed IP address %(address)s is invalid.") class NoMoreFixedIps(NovaException): ec2_code = 'UnsupportedOperation' msg_fmt = _("Zero fixed ips available.") class NoFixedIpsDefined(NotFound): msg_fmt = _("Zero fixed ips could be found.") class FloatingIpExists(NovaException): msg_fmt = _("Floating ip %(address)s already exists.") class FloatingIpNotFound(NotFound): ec2_code = "UnsupportedOperation" msg_fmt = _("Floating ip not found for id %(id)s.") class FloatingIpDNSExists(Invalid): msg_fmt = _("The DNS entry %(name)s already exists in domain %(domain)s.") class FloatingIpNotFoundForAddress(FloatingIpNotFound): msg_fmt = _("Floating ip not found for address %(address)s.") class FloatingIpNotFoundForHost(FloatingIpNotFound): msg_fmt = _("Floating ip not found for host %(host)s.") class FloatingIpMultipleFoundForAddress(NovaException): msg_fmt = _("Multiple floating ips are found for address %(address)s.") class FloatingIpPoolNotFound(NotFound): msg_fmt = _("Floating ip pool not found.") safe = True class NoMoreFloatingIps(FloatingIpNotFound): msg_fmt = _("Zero floating ips available.") safe = True class FloatingIpAssociated(NovaException): ec2_code = "UnsupportedOperation" msg_fmt = _("Floating ip %(address)s is associated.") class FloatingIpNotAssociated(NovaException): msg_fmt = _("Floating ip %(address)s is not associated.") class NoFloatingIpsDefined(NotFound): msg_fmt = _("Zero floating ips exist.") class NoFloatingIpInterface(NotFound): ec2_code = "UnsupportedOperation" msg_fmt = _("Interface %(interface)s not found.") class CannotDisassociateAutoAssignedFloatingIP(NovaException): ec2_code = "UnsupportedOperation" msg_fmt = _("Cannot disassociate auto assigned floating ip") class KeypairNotFound(NotFound): ec2_code = 'InvalidKeyPair.NotFound' msg_fmt = _("Keypair %(name)s not found for user %(user_id)s") class ServiceNotFound(NotFound): msg_fmt = _("Service %(service_id)s could not be found.") class 
ServiceBinaryExists(NovaException): msg_fmt = _("Service with host %(host)s binary %(binary)s exists.") class ServiceTopicExists(NovaException): msg_fmt = _("Service with host %(host)s topic %(topic)s exists.") class HostNotFound(NotFound): msg_fmt = _("Host %(host)s could not be found.") class ComputeHostNotFound(HostNotFound): msg_fmt = _("Compute host %(host)s could not be found.") class HostBinaryNotFound(NotFound): msg_fmt = _("Could not find binary %(binary)s on host %(host)s.") class InvalidReservationExpiration(Invalid): msg_fmt = _("Invalid reservation expiration %(expire)s.") class InvalidQuotaValue(Invalid): msg_fmt = _("Change would make usage less than 0 for the following " "resources: %(unders)s") class QuotaNotFound(NotFound): msg_fmt = _("Quota could not be found") class QuotaExists(NovaException): msg_fmt = _("Quota exists for project %(project_id)s, " "resource %(resource)s") class QuotaResourceUnknown(QuotaNotFound): msg_fmt = _("Unknown quota resources %(unknown)s.") class ProjectUserQuotaNotFound(QuotaNotFound): msg_fmt = _("Quota for user %(user_id)s in project %(project_id)s " "could not be found.") class ProjectQuotaNotFound(QuotaNotFound): msg_fmt = _("Quota for project %(project_id)s could not be found.") class QuotaClassNotFound(QuotaNotFound): msg_fmt = _("Quota class %(class_name)s could not be found.") class QuotaUsageNotFound(QuotaNotFound): msg_fmt = _("Quota usage for project %(project_id)s could not be found.") class ReservationNotFound(QuotaNotFound): msg_fmt = _("Quota reservation %(uuid)s could not be found.") class OverQuota(NovaException): msg_fmt = _("Quota exceeded for resources: %(overs)s") class SecurityGroupNotFound(NotFound): msg_fmt = _("Security group %(security_group_id)s not found.") class SecurityGroupNotFoundForProject(SecurityGroupNotFound): msg_fmt = _("Security group %(security_group_id)s not found " "for project %(project_id)s.") class SecurityGroupNotFoundForRule(SecurityGroupNotFound): msg_fmt = _("Security group with rule %(rule_id)s not found.") class SecurityGroupExists(Invalid): ec2_code = 'InvalidGroup.Duplicate' msg_fmt = _("Security group %(security_group_name)s already exists " "for project %(project_id)s.") class SecurityGroupExistsForInstance(Invalid): msg_fmt = _("Security group %(security_group_id)s is already associated" " with the instance %(instance_id)s") class SecurityGroupNotExistsForInstance(Invalid): msg_fmt = _("Security group %(security_group_id)s is not associated with" " the instance %(instance_id)s") class SecurityGroupDefaultRuleNotFound(Invalid): msg_fmt = _("Security group default rule (%rule_id)s not found.") class SecurityGroupCannotBeApplied(Invalid): msg_fmt = _("Network requires port_security_enabled and subnet associated" " in order to apply security groups.") class SecurityGroupRuleExists(Invalid): ec2_code = 'InvalidPermission.Duplicate' msg_fmt = _("Rule already exists in group: %(rule)s") class NoUniqueMatch(NovaException): msg_fmt = _("No Unique Match Found.") code = 409 class MigrationNotFound(NotFound): msg_fmt = _("Migration %(migration_id)s could not be found.") class MigrationNotFoundByStatus(MigrationNotFound): msg_fmt = _("Migration not found for instance %(instance_id)s " "with status %(status)s.") class ConsolePoolNotFound(NotFound): msg_fmt = _("Console pool %(pool_id)s could not be found.") class ConsolePoolExists(NovaException): msg_fmt = _("Console pool with host %(host)s, console_type " "%(console_type)s and compute_host %(compute_host)s " "already exists.") class 
ConsolePoolNotFoundForHostType(NotFound): msg_fmt = _("Console pool of type %(console_type)s " "for compute host %(compute_host)s " "on proxy host %(host)s not found.") class ConsoleNotFound(NotFound): msg_fmt = _("Console %(console_id)s could not be found.") class ConsoleNotFoundForInstance(ConsoleNotFound): msg_fmt = _("Console for instance %(instance_uuid)s could not be found.") class ConsoleNotFoundInPoolForInstance(ConsoleNotFound): msg_fmt = _("Console for instance %(instance_uuid)s " "in pool %(pool_id)s could not be found.") class ConsoleTypeInvalid(Invalid): msg_fmt = _("Invalid console type %(console_type)s") class ConsoleTypeUnavailable(Invalid): msg_fmt = _("Unavailable console type %(console_type)s.") class InstanceTypeNotFound(NotFound): msg_fmt = _("Instance type %(instance_type_id)s could not be found.") class InstanceTypeNotFoundByName(InstanceTypeNotFound): msg_fmt = _("Instance type with name %(instance_type_name)s " "could not be found.") class FlavorNotFound(NotFound): msg_fmt = _("Flavor %(flavor_id)s could not be found.") class FlavorAccessNotFound(NotFound): msg_fmt = _("Flavor access not found for %(flavor_id)s / " "%(project_id)s combination.") class CellNotFound(NotFound): msg_fmt = _("Cell %(cell_name)s doesn't exist.") class CellExists(NovaException): msg_fmt = _("Cell with name %(name)s already exists.") class CellRoutingInconsistency(NovaException): msg_fmt = _("Inconsistency in cell routing: %(reason)s") class CellServiceAPIMethodNotFound(NotFound): msg_fmt = _("Service API method not found: %(detail)s") class CellTimeout(NotFound): msg_fmt = _("Timeout waiting for response from cell") class CellMaxHopCountReached(NovaException): msg_fmt = _("Cell message has reached maximum hop count: %(hop_count)s") class NoCellsAvailable(NovaException): msg_fmt = _("No cells available matching scheduling criteria.") class CellsUpdateUnsupported(NovaException): msg_fmt = _("Cannot update cells configuration file.") class InstanceUnknownCell(NotFound): msg_fmt = _("Cell is not known for instance %(instance_uuid)s") class SchedulerHostFilterNotFound(NotFound): msg_fmt = _("Scheduler Host Filter %(filter_name)s could not be found.") class InstanceTypeExtraSpecsNotFound(NotFound): msg_fmt = _("Instance Type %(instance_type_id)s has no extra specs with " "key %(extra_specs_key)s.") class FileNotFound(NotFound): msg_fmt = _("File %(file_path)s could not be found.") class NoFilesFound(NotFound): msg_fmt = _("Zero files could be found.") class SwitchNotFoundForNetworkAdapter(NotFound): msg_fmt = _("Virtual switch associated with the " "network adapter %(adapter)s not found.") class NetworkAdapterNotFound(NotFound): msg_fmt = _("Network adapter %(adapter)s could not be found.") class ClassNotFound(NotFound): msg_fmt = _("Class %(class_name)s could not be found: %(exception)s") class NotAllowed(NovaException): msg_fmt = _("Action not allowed.") class ImageRotationNotAllowed(NovaException): msg_fmt = _("Rotation is not allowed for snapshots") class RotationRequiredForBackup(NovaException): msg_fmt = _("Rotation param is required for backup image_type") class KeyPairExists(NovaException): ec2_code = 'InvalidKeyPair.Duplicate' msg_fmt = _("Key pair '%(key_name)s' already exists.") class InstanceExists(NovaException): msg_fmt = _("Instance %(name)s already exists.") class InstanceTypeExists(NovaException): msg_fmt = _("Instance Type with name %(name)s already exists.") class InstanceTypeIdExists(NovaException): msg_fmt = _("Instance Type with ID %(flavor_id)s already exists.") class 
FlavorAccessExists(NovaException): msg_fmt = _("Flavor access already exists for flavor %(flavor_id)s " "and project %(project_id)s combination.") class InvalidSharedStorage(NovaException): msg_fmt = _("%(path)s is not on shared storage: %(reason)s") class InvalidLocalStorage(NovaException): msg_fmt = _("%(path)s is not on local storage: %(reason)s") class MigrationError(NovaException): msg_fmt = _("Migration error") + ": %(reason)s" class MigrationPreCheckError(MigrationError): msg_fmt = _("Migration pre-check error") + ": %(reason)s" class MalformedRequestBody(NovaException): msg_fmt = _("Malformed message body: %(reason)s") # NOTE(johannes): NotFound should only be used when a 404 error is # appropriate to be returned class ConfigNotFound(NovaException): msg_fmt = _("Could not find config at %(path)s") class PasteAppNotFound(NovaException): msg_fmt = _("Could not load paste app '%(name)s' from %(path)s") class CannotResizeToSameFlavor(NovaException): msg_fmt = _("When resizing, instances must change flavor!") class ResizeError(NovaException): msg_fmt = _("Resize error: %(reason)s") class CannotResizeDisk(NovaException): msg_fmt = _("Server disk was unable to be resized because: %(reason)s") class InstanceTypeMemoryTooSmall(NovaException): msg_fmt = _("Instance type's memory is too small for requested image.") class InstanceTypeDiskTooSmall(NovaException): msg_fmt = _("Instance type's disk is too small for requested image.") class InsufficientFreeMemory(NovaException): msg_fmt = _("Insufficient free memory on compute node to start %(uuid)s.") class NoValidHost(NovaException): msg_fmt = _("No valid host was found. %(reason)s") class QuotaError(NovaException): ec2_code = 'ResourceLimitExceeded' msg_fmt = _("Quota exceeded") + ": code=%(code)s" code = 413 headers = {'Retry-After': 0} safe = True class TooManyInstances(QuotaError): msg_fmt = _("Quota exceeded for %(overs)s: Requested %(req)s," " but already used %(used)d of %(allowed)d %(resource)s") class FloatingIpLimitExceeded(QuotaError): msg_fmt = _("Maximum number of floating ips exceeded") class FixedIpLimitExceeded(QuotaError): msg_fmt = _("Maximum number of fixed ips exceeded") class MetadataLimitExceeded(QuotaError): msg_fmt = _("Maximum number of metadata items exceeds %(allowed)d") class OnsetFileLimitExceeded(QuotaError): msg_fmt = _("Personality file limit exceeded") class OnsetFilePathLimitExceeded(QuotaError): msg_fmt = _("Personality file path too long") class OnsetFileContentLimitExceeded(QuotaError): msg_fmt = _("Personality file content too long") class KeypairLimitExceeded(QuotaError): msg_fmt = _("Maximum number of key pairs exceeded") class SecurityGroupLimitExceeded(QuotaError): ec2_code = 'SecurityGroupLimitExceeded' msg_fmt = _("Maximum number of security groups or rules exceeded") class PortLimitExceeded(QuotaError): msg_fmt = _("Maximum number of ports exceeded") class AggregateError(NovaException): msg_fmt = _("Aggregate %(aggregate_id)s: action '%(action)s' " "caused an error: %(reason)s.") class AggregateNotFound(NotFound): msg_fmt = _("Aggregate %(aggregate_id)s could not be found.") class AggregateNameExists(NovaException): msg_fmt = _("Aggregate %(aggregate_name)s already exists.") class AggregateHostNotFound(NotFound): msg_fmt = _("Aggregate %(aggregate_id)s has no host %(host)s.") class AggregateMetadataNotFound(NotFound): msg_fmt = _("Aggregate %(aggregate_id)s has no metadata with " "key %(metadata_key)s.") class AggregateHostExists(NovaException): msg_fmt = _("Aggregate %(aggregate_id)s already has host 
%(host)s.") class InstanceTypeCreateFailed(NovaException): msg_fmt = _("Unable to create instance type") class InstancePasswordSetFailed(NovaException): msg_fmt = _("Failed to set admin password on %(instance)s " "because %(reason)s") safe = True class DuplicateVlan(NovaException): msg_fmt = _("Detected existing vlan with id %(vlan)d") class CidrConflict(NovaException): msg_fmt = _("There was a conflict when trying to complete your request.") code = 409 class InstanceNotFound(NotFound): ec2_code = 'InvalidInstanceID.NotFound' msg_fmt = _("Instance %(instance_id)s could not be found.") class InstanceInfoCacheNotFound(NotFound): msg_fmt = _("Info cache for instance %(instance_uuid)s could not be " "found.") class NodeNotFound(NotFound): msg_fmt = _("Node %(node_id)s could not be found.") class NodeNotFoundByUUID(NotFound): msg_fmt = _("Node with UUID %(node_uuid)s could not be found.") class MarkerNotFound(NotFound): msg_fmt = _("Marker %(marker)s could not be found.") class InvalidInstanceIDMalformed(Invalid): ec2_code = 'InvalidInstanceID.Malformed' msg_fmt = _("Invalid id: %(val)s (expecting \"i-...\").") class CouldNotFetchImage(NovaException): msg_fmt = _("Could not fetch image %(image_id)s") class CouldNotUploadImage(NovaException): msg_fmt = _("Could not upload image %(image_id)s") class TaskAlreadyRunning(NovaException): msg_fmt = _("Task %(task_name)s is already running on host %(host)s") class TaskNotRunning(NovaException): msg_fmt = _("Task %(task_name)s is not running on host %(host)s") class InstanceIsLocked(InstanceInvalidState): msg_fmt = _("Instance %(instance_uuid)s is locked") class ConfigDriveInvalidValue(Invalid): msg_fmt = _("Invalid value for Config Drive option: %(option)s") class ConfigDriveMountFailed(NovaException): msg_fmt = _("Could not mount vfat config drive. %(operation)s failed. " "Error: %(error)s") class ConfigDriveUnknownFormat(NovaException): msg_fmt = _("Unknown config drive format %(format)s. Select one of " "iso9660 or vfat.") class InterfaceAttachFailed(Invalid): msg_fmt = _("Failed to attach network adapter device to %(instance)s") class InterfaceDetachFailed(Invalid): msg_fmt = _("Failed to detach network adapter device from %(instance)s") class InstanceUserDataTooLarge(NovaException): msg_fmt = _("User data too large. User data must be no larger than " "%(maxsize)s bytes once base64 encoded. 
Your data is " "%(length)d bytes") class InstanceUserDataMalformed(NovaException): msg_fmt = _("User data needs to be valid base 64.") class UnexpectedTaskStateError(NovaException): msg_fmt = _("unexpected task state: expecting %(expected)s but " "the actual state is %(actual)s") class InstanceActionNotFound(NovaException): msg_fmt = _("Action for request_id %(request_id)s on instance" " %(instance_uuid)s not found") class InstanceActionEventNotFound(NovaException): msg_fmt = _("Event %(event)s not found for action id %(action_id)s") class UnexpectedVMStateError(NovaException): msg_fmt = _("unexpected VM state: expecting %(expected)s but " "the actual state is %(actual)s") class CryptoCAFileNotFound(FileNotFound): msg_fmt = _("The CA file for %(project)s could not be found") class CryptoCRLFileNotFound(FileNotFound): msg_fmt = _("The CRL file for %(project)s could not be found") class InstanceRecreateNotSupported(Invalid): msg_fmt = _('Instance recreate is not implemented by this virt driver.') class ServiceGroupUnavailable(NovaException): msg_fmt = _("The service from servicegroup driver %(driver)s is " "temporarily unavailable.") class DBNotAllowed(NovaException): msg_fmt = _('%(binary)s attempted direct database access which is ' 'not allowed by policy') class UnsupportedVirtType(Invalid): msg_fmt = _("Virtualization type '%(virt)s' is not supported by " "this compute driver") class UnsupportedHardware(Invalid): msg_fmt = _("Requested hardware '%(model)s' is not supported by " "the '%(virt)s' virt driver") class Base64Exception(NovaException): msg_fmt = _("Invalid Base 64 data for file %(path)s") class BuildAbortException(NovaException): msg_fmt = _("Build of instance %(instance_uuid)s aborted: %(reason)s") class RescheduledException(NovaException): msg_fmt = _("Build of instance %(instance_uuid)s was re-scheduled: " "%(reason)s") class ShadowTableExists(NovaException): msg_fmt = _("Shadow table with name %(name)s already exists.") class InstanceFaultRollback(NovaException): def __init__(self, inner_exception=None): message = _("Instance rollback performed due to: %s") self.inner_exception = inner_exception super(InstanceFaultRollback, self).__init__(message % inner_exception) class UnsupportedObjectError(NovaException): msg_fmt = _('Unsupported object type %(objtype)s') class OrphanedObjectError(NovaException): msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object') class IncompatibleObjectVersion(NovaException): msg_fmt = _('Version %(objver)s of %(objname)s is not supported') class ObjectActionError(NovaException): msg_fmt = _('Object action %(action)s failed because: %(reason)s') class CoreAPIMissing(NovaException): msg_fmt = _("Core API extensions are missing: %(missing_apis)s") class AgentError(NovaException): msg_fmt = _('Error during following call to agent: %(method)s') class AgentTimeout(AgentError): msg_fmt = _('Unable to contact guest agent. 
' 'The following call timed out: %(method)s') class AgentNotImplemented(AgentError): msg_fmt = _('Agent does not support the call: %(method)s') class InstanceGroupNotFound(NotFound): msg_fmt = _("Instance group %(group_uuid)s could not be found.") class InstanceGroupIdExists(NovaException): msg_fmt = _("Instance group %(group_uuid)s already exists.") class InstanceGroupMetadataNotFound(NotFound): msg_fmt = _("Instance group %(group_uuid)s has no metadata with " "key %(metadata_key)s.") class InstanceGroupMemberNotFound(NotFound): msg_fmt = _("Instance group %(group_uuid)s has no member with " "id %(instance_id)s.") class InstanceGroupPolicyNotFound(NotFound): msg_fmt = _("Instance group %(group_uuid)s has no policy %(policy)s.") class PluginRetriesExceeded(NovaException): msg_fmt = _("Number of retries to plugin (%(num_retries)d) exceeded.") class ImageDownloadModuleError(NovaException): msg_fmt = _("There was an error with the download module %(module)s. " "%(reason)s") class ImageDownloadModuleMetaDataError(ImageDownloadModuleError): msg_fmt = _("The metadata for this location will not work with this " "module %(module)s. %(reason)s.") class ImageDownloadModuleNotImplementedError(ImageDownloadModuleError): msg_fmt = _("The method %(method_name)s is not implemented.") class ImageDownloadModuleConfigurationError(ImageDownloadModuleError): msg_fmt = _("The module %(module)s is misconfigured: %(reason)s.") class PciDeviceWrongAddressFormat(NovaException): msg_fmt = _("The PCI address %(address)s has an incorrect format.") class PciDeviceNotFoundById(NotFound): msg_fmt = _("PCI device %(id)s not found") class PciDeviceNotFound(NovaException): msg_fmt = _("PCI Device %(node_id)s:%(address)s not found.") class PciDeviceInvalidStatus(NovaException): msg_fmt = _( "PCI Device %(compute_node_id)s:%(address)s is %(status)s " "instead of %(hopestatus)s") class PciDeviceInvalidOwner(NovaException): msg_fmt = _( "PCI Device %(compute_node_id)s:%(address)s is owned by %(owner)s " "instead of %(hopeowner)s") class PciDeviceRequestFailed(NovaException): msg_fmt = _( "PCI Device request (%requests)s failed") class PciDevicePoolEmpty(NovaException): msg_fmt = _( "Attempt to consume PCI Device %(compute_node_id)s:%(address)s " "from empty pool") class PciInvalidAlias(NovaException): msg_fmt = _("Invalid PCI alias definition: %(reason)s") class PciRequestAliasNotDefined(NovaException): msg_fmt = _("PCI alias %(alias)s is not defined") class MissingParameter(NovaException): ec2_code = 'MissingParameter' msg_fmt = _("Not enough parameters: %(reason)s") code = 400 class PciConfigInvalidWhitelist(Invalid): msg_fmt = _("Invalid PCI devices Whitelist config %(reason)s") class PciTrackerInvalidNodeId(NovaException): msg_fmt = _("Cannot change %(node_id)s to %(new_node_id)s") # Cannot be templated, msg needs to be constructed when raised. class InternalError(NovaException): ec2_code = 'InternalError' msg_fmt = "%(err)s" class PciDevicePrepareFailed(NovaException): msg_fmt = _("Failed to prepare PCI device %(id)s for instance " "%(instance_uuid)s: %(reason)s") class PciDeviceDetachFailed(NovaException): msg_fmt = _("Failed to detach PCI device %(dev)s: %(reason)s") class PciDeviceUnsupportedHypervisor(NovaException): msg_fmt = _("%(type)s hypervisor does not support PCI devices") class KeyManagerError(NovaException): msg_fmt = _("key manager error: %(reason)s")
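# --- Hedged illustration (not part of nova) ---
# A minimal, self-contained sketch of how the msg_fmt convention above is
# typically consumed: the base exception interpolates keyword arguments into
# the class's msg_fmt template. The _Sketch* classes are stand-ins invented
# for this example, not nova's real implementation.

class _SketchNovaException(Exception):
    msg_fmt = "An unknown exception occurred."
    code = 500

    def __init__(self, message=None, **kwargs):
        if message is None:
            # substitute the %(name)s placeholders with the supplied kwargs
            message = self.msg_fmt % kwargs
        super(_SketchNovaException, self).__init__(message)

class _SketchVolumeNotFound(_SketchNovaException):
    msg_fmt = "Volume %(volume_id)s could not be found."
    code = 404

# Usage:
#   str(_SketchVolumeNotFound(volume_id="vol-1234"))
#   -> 'Volume vol-1234 could not be found.'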
apache-2.0
qedi-r/home-assistant
homeassistant/components/knx/light.py
3
12148
"""Support for KNX/IP lights.""" from enum import Enum import voluptuous as vol from xknx.devices import Light as XknxLight from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_HS_COLOR, ATTR_WHITE_VALUE, PLATFORM_SCHEMA, SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_COLOR_TEMP, SUPPORT_WHITE_VALUE, Light, ) from homeassistant.const import CONF_ADDRESS, CONF_NAME from homeassistant.core import callback import homeassistant.helpers.config_validation as cv import homeassistant.util.color as color_util from . import ATTR_DISCOVER_DEVICES, DATA_KNX CONF_STATE_ADDRESS = "state_address" CONF_BRIGHTNESS_ADDRESS = "brightness_address" CONF_BRIGHTNESS_STATE_ADDRESS = "brightness_state_address" CONF_COLOR_ADDRESS = "color_address" CONF_COLOR_STATE_ADDRESS = "color_state_address" CONF_COLOR_TEMP_ADDRESS = "color_temperature_address" CONF_COLOR_TEMP_STATE_ADDRESS = "color_temperature_state_address" CONF_COLOR_TEMP_MODE = "color_temperature_mode" CONF_RGBW_ADDRESS = "rgbw_address" CONF_RGBW_STATE_ADDRESS = "rgbw_state_address" CONF_MIN_KELVIN = "min_kelvin" CONF_MAX_KELVIN = "max_kelvin" DEFAULT_NAME = "KNX Light" DEFAULT_COLOR = (0.0, 0.0) DEFAULT_BRIGHTNESS = 255 DEFAULT_COLOR_TEMP_MODE = "absolute" DEFAULT_WHITE_VALUE = 255 DEFAULT_MIN_KELVIN = 2700 # 370 mireds DEFAULT_MAX_KELVIN = 6000 # 166 mireds class ColorTempModes(Enum): """Color temperature modes for config validation.""" absolute = "DPT-7.600" relative = "DPT-5.001" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_ADDRESS): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_STATE_ADDRESS): cv.string, vol.Optional(CONF_BRIGHTNESS_ADDRESS): cv.string, vol.Optional(CONF_BRIGHTNESS_STATE_ADDRESS): cv.string, vol.Optional(CONF_COLOR_ADDRESS): cv.string, vol.Optional(CONF_COLOR_STATE_ADDRESS): cv.string, vol.Optional(CONF_COLOR_TEMP_ADDRESS): cv.string, vol.Optional(CONF_COLOR_TEMP_STATE_ADDRESS): cv.string, vol.Optional(CONF_COLOR_TEMP_MODE, default=DEFAULT_COLOR_TEMP_MODE): cv.enum( ColorTempModes ), vol.Optional(CONF_RGBW_ADDRESS): cv.string, vol.Optional(CONF_RGBW_STATE_ADDRESS): cv.string, vol.Optional(CONF_MIN_KELVIN, default=DEFAULT_MIN_KELVIN): vol.All( vol.Coerce(int), vol.Range(min=1) ), vol.Optional(CONF_MAX_KELVIN, default=DEFAULT_MAX_KELVIN): vol.All( vol.Coerce(int), vol.Range(min=1) ), } ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up lights for KNX platform.""" if discovery_info is not None: async_add_entities_discovery(hass, discovery_info, async_add_entities) else: async_add_entities_config(hass, config, async_add_entities) @callback def async_add_entities_discovery(hass, discovery_info, async_add_entities): """Set up lights for KNX platform configured via xknx.yaml.""" entities = [] for device_name in discovery_info[ATTR_DISCOVER_DEVICES]: device = hass.data[DATA_KNX].xknx.devices[device_name] entities.append(KNXLight(device)) async_add_entities(entities) @callback def async_add_entities_config(hass, config, async_add_entities): """Set up light for KNX platform configured within platform.""" group_address_tunable_white = None group_address_tunable_white_state = None group_address_color_temp = None group_address_color_temp_state = None if config[CONF_COLOR_TEMP_MODE] == ColorTempModes.absolute: group_address_color_temp = config.get(CONF_COLOR_TEMP_ADDRESS) group_address_color_temp_state = config.get(CONF_COLOR_TEMP_STATE_ADDRESS) elif config[CONF_COLOR_TEMP_MODE] == ColorTempModes.relative: 
group_address_tunable_white = config.get(CONF_COLOR_TEMP_ADDRESS) group_address_tunable_white_state = config.get(CONF_COLOR_TEMP_STATE_ADDRESS) light = XknxLight( hass.data[DATA_KNX].xknx, name=config[CONF_NAME], group_address_switch=config[CONF_ADDRESS], group_address_switch_state=config.get(CONF_STATE_ADDRESS), group_address_brightness=config.get(CONF_BRIGHTNESS_ADDRESS), group_address_brightness_state=config.get(CONF_BRIGHTNESS_STATE_ADDRESS), group_address_color=config.get(CONF_COLOR_ADDRESS), group_address_color_state=config.get(CONF_COLOR_STATE_ADDRESS), group_address_rgbw=config.get(CONF_RGBW_ADDRESS), group_address_rgbw_state=config.get(CONF_RGBW_STATE_ADDRESS), group_address_tunable_white=group_address_tunable_white, group_address_tunable_white_state=group_address_tunable_white_state, group_address_color_temperature=group_address_color_temp, group_address_color_temperature_state=group_address_color_temp_state, min_kelvin=config[CONF_MIN_KELVIN], max_kelvin=config[CONF_MAX_KELVIN], ) hass.data[DATA_KNX].xknx.devices.add(light) async_add_entities([KNXLight(light)]) class KNXLight(Light): """Representation of a KNX light.""" def __init__(self, device): """Initialize of KNX light.""" self.device = device self._min_kelvin = device.min_kelvin self._max_kelvin = device.max_kelvin self._min_mireds = color_util.color_temperature_kelvin_to_mired( self._max_kelvin ) self._max_mireds = color_util.color_temperature_kelvin_to_mired( self._min_kelvin ) @callback def async_register_callbacks(self): """Register callbacks to update hass after device was changed.""" async def after_update_callback(device): """Call after device was updated.""" await self.async_update_ha_state() self.device.register_device_updated_cb(after_update_callback) async def async_added_to_hass(self): """Store register state change callback.""" self.async_register_callbacks() @property def name(self): """Return the name of the KNX device.""" return self.device.name @property def available(self): """Return True if entity is available.""" return self.hass.data[DATA_KNX].connected @property def should_poll(self): """No polling needed within KNX.""" return False @property def brightness(self): """Return the brightness of this light between 0..255.""" if not self.device.supports_brightness: return None return self.device.current_brightness @property def hs_color(self): """Return the HS color value.""" rgb = None if self.device.supports_rgbw or self.device.supports_color: rgb, _ = self.device.current_color return color_util.color_RGB_to_hs(*rgb) if rgb else None @property def white_value(self): """Return the white value.""" white = None if self.device.supports_rgbw: _, white = self.device.current_color return white @property def color_temp(self): """Return the color temperature in mireds.""" if self.device.supports_color_temperature: kelvin = self.device.current_color_temperature if kelvin is not None: return color_util.color_temperature_kelvin_to_mired(kelvin) if self.device.supports_tunable_white: relative_ct = self.device.current_tunable_white if relative_ct is not None: # as KNX devices typically use Kelvin we use it as base for # calculating ct from percent return color_util.color_temperature_kelvin_to_mired( self._min_kelvin + ((relative_ct / 255) * (self._max_kelvin - self._min_kelvin)) ) return None @property def min_mireds(self): """Return the coldest color temp this light supports in mireds.""" return self._min_mireds @property def max_mireds(self): """Return the warmest color temp this light supports in mireds.""" return 
self._max_mireds @property def effect_list(self): """Return the list of supported effects.""" return None @property def effect(self): """Return the current effect.""" return None @property def is_on(self): """Return true if light is on.""" return self.device.state @property def supported_features(self): """Flag supported features.""" flags = 0 if self.device.supports_brightness: flags |= SUPPORT_BRIGHTNESS if self.device.supports_color: flags |= SUPPORT_COLOR | SUPPORT_BRIGHTNESS if self.device.supports_rgbw: flags |= SUPPORT_COLOR | SUPPORT_WHITE_VALUE if self.device.supports_color_temperature or self.device.supports_tunable_white: flags |= SUPPORT_COLOR_TEMP return flags async def async_turn_on(self, **kwargs): """Turn the light on.""" brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness) hs_color = kwargs.get(ATTR_HS_COLOR, self.hs_color) white_value = kwargs.get(ATTR_WHITE_VALUE, self.white_value) mireds = kwargs.get(ATTR_COLOR_TEMP, self.color_temp) update_brightness = ATTR_BRIGHTNESS in kwargs update_color = ATTR_HS_COLOR in kwargs update_white_value = ATTR_WHITE_VALUE in kwargs update_color_temp = ATTR_COLOR_TEMP in kwargs # always only go one path for turning on (avoid conflicting changes # and weird effects) if self.device.supports_brightness and (update_brightness and not update_color): # if we don't need to update the color, try updating brightness # directly if supported; don't do it if color also has to be # changed, as RGB color implicitly sets the brightness as well await self.device.set_brightness(brightness) elif (self.device.supports_rgbw or self.device.supports_color) and ( update_brightness or update_color or update_white_value ): # change RGB color, white value )if supported), and brightness # if brightness or hs_color was not yet set use the default value # to calculate RGB from as a fallback if brightness is None: brightness = DEFAULT_BRIGHTNESS if hs_color is None: hs_color = DEFAULT_COLOR if white_value is None and self.device.supports_rgbw: white_value = DEFAULT_WHITE_VALUE rgb = color_util.color_hsv_to_RGB(*hs_color, brightness * 100 / 255) await self.device.set_color(rgb, white_value) elif self.device.supports_color_temperature and update_color_temp: # change color temperature without ON telegram kelvin = int(color_util.color_temperature_mired_to_kelvin(mireds)) if kelvin > self._max_kelvin: kelvin = self._max_kelvin elif kelvin < self._min_kelvin: kelvin = self._min_kelvin await self.device.set_color_temperature(kelvin) elif self.device.supports_tunable_white and update_color_temp: # calculate relative_ct from Kelvin to fit typical KNX devices kelvin = min( self._max_kelvin, int(color_util.color_temperature_mired_to_kelvin(mireds)), ) relative_ct = int( 255 * (kelvin - self._min_kelvin) / (self._max_kelvin - self._min_kelvin) ) await self.device.set_tunable_white(relative_ct) else: # no color/brightness change requested, so just turn it on await self.device.set_on() async def async_turn_off(self, **kwargs): """Turn the light off.""" await self.device.set_off()
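# --- Hedged illustration (not part of the component) ---
# The tunable-white handling above maps a relative KNX value (0..255) onto
# the configured kelvin range and back. A standalone sketch of that math,
# using the component's defaults (2700 K .. 6000 K); the helper names are
# invented for this example.

def _relative_ct_to_kelvin(relative_ct, min_kelvin=2700, max_kelvin=6000):
    """Map a KNX relative value (0..255) to an absolute kelvin value."""
    return min_kelvin + (relative_ct / 255) * (max_kelvin - min_kelvin)

def _kelvin_to_relative_ct(kelvin, min_kelvin=2700, max_kelvin=6000):
    """Map kelvin back to the 0..255 scale sent to the KNX device."""
    kelvin = min(max_kelvin, max(min_kelvin, kelvin))  # clamp, as turn_on does
    return int(255 * (kelvin - min_kelvin) / (max_kelvin - min_kelvin))

# _kelvin_to_relative_ct(2700) -> 0, _kelvin_to_relative_ct(6000) -> 255,
# _relative_ct_to_kelvin(128) -> 4356.47... (mid-range)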
apache-2.0
swgillespie/coreclr
src/scripts/genXplatEventing.py
8
31274
#
## Licensed to the .NET Foundation under one or more agreements.
## The .NET Foundation licenses this file to you under the MIT license.
## See the LICENSE file in the project root for more information.
#
#
#USAGE:
#Add Events: modify <root>src/vm/ClrEtwAll.man
#Look at the Code in <root>/src/inc/genXplatLttng.py for using subroutines in this file
#

# Python 2 compatibility
from __future__ import print_function

import os
import xml.dom.minidom as DOM

stdprolog = """
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

/******************************************************************

DO NOT MODIFY. AUTOGENERATED FILE.
This file is generated using the logic from <root>/src/scripts/genXplatEventing.py

******************************************************************/
"""

stdprolog_cmake = """
#
#
#******************************************************************
#DO NOT MODIFY. AUTOGENERATED FILE.
#This file is generated using the logic from <root>/src/scripts/genXplatEventing.py
#******************************************************************
"""

lindent = " "

palDataTypeMapping = {
    #constructed types
    "win:null"          : " ",
    "win:Int64"         : "const __int64",
    "win:ULong"         : "const ULONG",
    "win:count"         : "*",
    "win:Struct"        : "const void",
    #actual spec
    "win:GUID"          : "const GUID",
    "win:AnsiString"    : "LPCSTR",
    "win:UnicodeString" : "PCWSTR",
    "win:Double"        : "const double",
    "win:Int32"         : "const signed int",
    "win:Boolean"       : "const BOOL",
    "win:UInt64"        : "const unsigned __int64",
    "win:UInt32"        : "const unsigned int",
    "win:UInt16"        : "const unsigned short",
    "win:UInt8"         : "const unsigned char",
    "win:Pointer"       : "const void*",
    "win:Binary"        : "const BYTE"
}

# A Template represents an ETW template and can contain 1 or more AbstractTemplates.
# An AbstractTemplate contains a FunctionSignature.
# A FunctionSignature consists of FunctionParameters representing each parameter in its signature.

class AbstractTemplate:
    def __init__(self, abstractTemplateName, abstractFnFrame):
        self.abstractTemplateName = abstractTemplateName
        self.AbstractFnFrame = abstractFnFrame

class Template:
    def __init__(self, templateName):
        self.template = templateName
        self.allAbstractTemplateTypes = []  # list of AbstractTemplateNames
        self.allAbstractTemplateLUT = {}    # dictionary of AbstractTemplate

    def append(self, abstractTemplateName, abstractFnFrame):
        self.allAbstractTemplateTypes.append(abstractTemplateName)
        self.allAbstractTemplateLUT[abstractTemplateName] = AbstractTemplate(abstractTemplateName, abstractFnFrame)

    def getFnFrame(self, abstractTemplateName):
        return self.allAbstractTemplateLUT[abstractTemplateName].AbstractFnFrame

    def getAbstractVarProps(self, abstractTemplateName):
        return self.allAbstractTemplateLUT[abstractTemplateName].AbstractVarProps

    def getFnParam(self, name):
        for subtemplate in self.allAbstractTemplateTypes:
            frame = self.getFnFrame(subtemplate)
            if frame.getParam(name):
                return frame.getParam(name)
        return None

class FunctionSignature:
    def __init__(self):
        self.LUT = {}        # dictionary of FunctionParameter
        self.paramlist = []  # list of parameters to maintain their order in signature

    def append(self, variable, fnparam):
        self.LUT[variable] = fnparam
        self.paramlist.append(variable)

    def getParam(self, variable):
        return self.LUT.get(variable)

    def getLength(self):
        return len(self.paramlist)

class FunctionParameter:
    def __init__(self, winType, name, count, prop):
        self.winType = winType  # ETW type as given in the manifest
        self.name = name        # parameter name as given in the manifest
        self.prop = prop        # any special property as determined by the manifest and developer
        # self.count indicates if the parameter is a pointer
        if count == "win:null":
            self.count = "win:null"
        elif count or winType == "win:GUID" or count == "win:count":
            # special case for GUIDs, consider them as structs
            self.count = "win:count"
        else:
            self.count = "win:null"

def getTopLevelElementsByTagName(Node, tag):
    dataNodes = []
    for element in Node.getElementsByTagName(tag):
        if element.parentNode == Node:
            dataNodes.append(element)
    return dataNodes

def bucketizeAbstractTemplates(template, fnPrototypes, var_Dependecies):
    # At this point we have the complete argument list; now break it into chunks of 10,
    # as an abstract template supports a maximum of 10 arguments.
    abstractTemplateName = template
    subevent_cnt = 1
    templateProp = Template(template)
    abstractFnFrame = FunctionSignature()

    for variable in fnPrototypes.paramlist:
        for dependency in var_Dependecies[variable]:
            if not abstractFnFrame.getParam(dependency):
                abstractFnFrame.append(dependency, fnPrototypes.getParam(dependency))

        frameCount = abstractFnFrame.getLength()
        if frameCount == 10:
            templateProp.append(abstractTemplateName, abstractFnFrame)
            abstractTemplateName = template + "_" + str(subevent_cnt)
            subevent_cnt += 1

            if len(var_Dependecies[variable]) > 1:
                #check if the frame's dependencies are all present
                depExists = True
                for depends in var_Dependecies[variable]:
                    if not abstractFnFrame.getParam(depends):
                        depExists = False
                        break
                if not depExists:
                    raise ValueError('Abstract Template: ' + abstractTemplateName + ' does not have all its dependencies in the frame; write the required logic here and test it. The parameter whose dependency is missing is: ' + variable)
                    #pseudo code:
                    # 1. add the missing dependencies to the frame of the current parameter
                    # 2. check if the frame has enough space; if so, continue adding missing dependencies
                    # 3. else save the current frame, start a new frame, and follow steps 1 and 2
                    # 4. add the current parameter and proceed

            #create a new fn frame
            abstractFnFrame = FunctionSignature()

    #subevent_cnt == 1 represents argumentless templates
    if abstractFnFrame.getLength() > 0 or subevent_cnt == 1:
        templateProp.append(abstractTemplateName, abstractFnFrame)

    return templateProp

ignoredXmlTemplateAttribes = frozenset(["map", "outType"])
usedXmlTemplateAttribes = frozenset(["name", "inType", "count", "length"])

def parseTemplateNodes(templateNodes):
    #return values
    allTemplates = {}

    for templateNode in templateNodes:
        template = templateNode.getAttribute('tid')
        var_Dependecies = {}
        fnPrototypes = FunctionSignature()
        dataNodes = getTopLevelElementsByTagName(templateNode, 'data')

        # Validate that no new attributes have been added to the manifest
        for dataNode in dataNodes:
            nodeMap = dataNode.attributes
            for attrib in nodeMap.values():
                attrib_name = attrib.name
                if attrib_name not in ignoredXmlTemplateAttribes and attrib_name not in usedXmlTemplateAttribes:
                    raise ValueError('unknown attribute: ' + attrib_name + ' in template: ' + template)

        for dataNode in dataNodes:
            variable = dataNode.getAttribute('name')
            wintype = dataNode.getAttribute('inType')

            #count and length are the same
            wincount = dataNode.getAttribute('count')
            winlength = dataNode.getAttribute('length')

            var_Props = None
            var_dependency = [variable]
            if winlength:
                if wincount:
                    raise Exception("both count and length property found on: " + variable + " in template: " + template)
                wincount = winlength

            if (wincount.isdigit() and int(wincount) == 1):
                wincount = ''

            if wincount:
                if (wincount.isdigit()):
                    var_Props = wincount
                elif fnPrototypes.getParam(wincount):
                    var_Props = wincount
                    var_dependency.insert(0, wincount)

            #construct the function signature
            if wintype == "win:GUID":
                var_Props = "sizeof(GUID)/sizeof(int)"

            var_Dependecies[variable] = var_dependency
            fnparam = FunctionParameter(wintype, variable, wincount, var_Props)
            fnPrototypes.append(variable, fnparam)

        structNodes = getTopLevelElementsByTagName(templateNode, 'struct')
        count = 0
        for structToBeMarshalled in structNodes:
            struct_len = "Arg" + str(count) + "_Struct_Len_"
            struct_pointer = "Arg" + str(count) + "_Struct_Pointer_"
            count += 1

            #populate the Property - used in codegen
            structname = structToBeMarshalled.getAttribute('name')
            countVarName = structToBeMarshalled.getAttribute('count')
            if not countVarName:
                raise ValueError('Struct ' + structname + ' in template: ' + template + ' does not have an attribute count')

            var_Props = countVarName + "*" + struct_len + "/sizeof(int)"
            var_Dependecies[struct_len] = [struct_len]
            var_Dependecies[struct_pointer] = [countVarName, struct_len, struct_pointer]

            fnparam_len = FunctionParameter("win:ULong", struct_len, "win:null", None)
            fnparam_pointer = FunctionParameter("win:Struct", struct_pointer, "win:count", var_Props)

            fnPrototypes.append(struct_len, fnparam_len)
            fnPrototypes.append(struct_pointer, fnparam_pointer)

        allTemplates[template] = bucketizeAbstractTemplates(template, fnPrototypes, var_Dependecies)

    return allTemplates

def generateClrallEvents(eventNodes, allTemplates):
    clrallEvents = []
    for eventNode in eventNodes:
        eventName = eventNode.getAttribute('symbol')
        templateName = eventNode.getAttribute('template')

        #generate EventEnabled
        clrallEvents.append("inline BOOL EventEnabled")
        clrallEvents.append(eventName)
        clrallEvents.append("() {return XplatEventLogger::IsEventLoggingEnabled() && EventXplatEnabled")
        clrallEvents.append(eventName + "();}\n\n")

        #generate FireEtw functions
        fnptype = []
        fnbody = []
        fnptype.append("inline ULONG FireEtw")
        fnptype.append(eventName)
        fnptype.append("(\n")
        fnbody.append(lindent)
        fnbody.append("if (!EventEnabled")
        fnbody.append(eventName)
        fnbody.append("()) {return ERROR_SUCCESS;}\n")
        line = []
        fnptypeline = []

        if templateName:
            for subTemplate in allTemplates[templateName].allAbstractTemplateTypes:
                fnSig = allTemplates[templateName].getFnFrame(subTemplate)

                for params in fnSig.paramlist:
                    fnparam = fnSig.getParam(params)
                    wintypeName = fnparam.winType
                    typewName = palDataTypeMapping[wintypeName]
                    winCount = fnparam.count
                    countw = palDataTypeMapping[winCount]

                    fnptypeline.append(lindent)
                    fnptypeline.append(typewName)
                    fnptypeline.append(countw)
                    fnptypeline.append(" ")
                    fnptypeline.append(fnparam.name)
                    fnptypeline.append(",\n")

                #fnsignature
                for params in fnSig.paramlist:
                    fnparam = fnSig.getParam(params)
                    line.append(fnparam.name)
                    line.append(",")

        #remove trailing commas
        if len(line) > 0:
            del line[-1]
        if len(fnptypeline) > 0:
            del fnptypeline[-1]

        fnptype.extend(fnptypeline)
        fnptype.append("\n)\n{\n")
        fnbody.append(lindent)
        fnbody.append("return FireEtXplat")
        fnbody.append(eventName)
        fnbody.append("(")
        fnbody.extend(line)
        fnbody.append(");\n")
        fnbody.append("}\n\n")

        clrallEvents.extend(fnptype)
        clrallEvents.extend(fnbody)

    return ''.join(clrallEvents)

def generateClrXplatEvents(eventNodes, allTemplates):
    clrallEvents = []
    for eventNode in eventNodes:
        eventName = eventNode.getAttribute('symbol')
        templateName = eventNode.getAttribute('template')

        #generate EventEnabled
        clrallEvents.append("extern \"C\" BOOL EventXplatEnabled")
        clrallEvents.append(eventName)
        clrallEvents.append("();\n")

        #generate FireEtw functions
        fnptype = []
        fnptypeline = []
        fnptype.append("extern \"C\" ULONG FireEtXplat")
        fnptype.append(eventName)
        fnptype.append("(\n")

        if templateName:
            for subTemplate in allTemplates[templateName].allAbstractTemplateTypes:
                fnSig = allTemplates[templateName].getFnFrame(subTemplate)

                for params in fnSig.paramlist:
                    fnparam = fnSig.getParam(params)
                    wintypeName = fnparam.winType
                    typewName = palDataTypeMapping[wintypeName]
                    winCount = fnparam.count
                    countw = palDataTypeMapping[winCount]

                    fnptypeline.append(lindent)
                    fnptypeline.append(typewName)
                    fnptypeline.append(countw)
                    fnptypeline.append(" ")
                    fnptypeline.append(fnparam.name)
                    fnptypeline.append(",\n")

        #remove trailing commas
        if len(fnptypeline) > 0:
            del fnptypeline[-1]

        fnptype.extend(fnptypeline)
        fnptype.append("\n);\n")
        clrallEvents.extend(fnptype)

    return ''.join(clrallEvents)

#generates the dummy header file which is used by the VM as entry point to the logging Functions
def generateclrEtwDummy(eventNodes, allTemplates):
    clretmEvents = []
    for eventNode in eventNodes:
        eventName = eventNode.getAttribute('symbol')
        templateName = eventNode.getAttribute('template')

        fnptype = []
        #generate FireEtw functions
        fnptype.append("#define FireEtw")
        fnptype.append(eventName)
        fnptype.append("(")
        line = []
        if templateName:
            for subTemplate in allTemplates[templateName].allAbstractTemplateTypes:
                fnSig = allTemplates[templateName].getFnFrame(subTemplate)

                for params in fnSig.paramlist:
                    fnparam = fnSig.getParam(params)
                    line.append(fnparam.name)
                    line.append(", ")

            #remove trailing commas
            if len(line) > 0:
                del line[-1]

        fnptype.extend(line)
        fnptype.append(") 0\n")
        clretmEvents.extend(fnptype)

    return ''.join(clretmEvents)

def generateClralltestEvents(sClrEtwAllMan):
    tree = DOM.parse(sClrEtwAllMan)

    clrtestEvents = []
    for providerNode in tree.getElementsByTagName('provider'):
        templateNodes = providerNode.getElementsByTagName('template')
        allTemplates = parseTemplateNodes(templateNodes)
        eventNodes = providerNode.getElementsByTagName('event')
        for eventNode in eventNodes:
            eventName = eventNode.getAttribute('symbol')
            templateName = eventNode.getAttribute('template')
            clrtestEvents.append(" EventXplatEnabled" + eventName + "();\n")
            clrtestEvents.append("Error |= FireEtXplat" + eventName + "(\n")
            line = []
            if templateName:
                for subTemplate in allTemplates[templateName].allAbstractTemplateTypes:
                    fnSig = allTemplates[templateName].getFnFrame(subTemplate)

                    for params in fnSig.paramlist:
                        argline = ''
                        fnparam = fnSig.getParam(params)
                        if fnparam.name.lower() == 'count':
                            argline = '2'
                        else:
                            if fnparam.winType == "win:Binary":
                                argline = 'win_Binary'
                            elif fnparam.winType == "win:Pointer" and fnparam.count == "win:count":
                                argline = "(const void**)&var11"
                            elif fnparam.winType == "win:Pointer":
                                argline = "(const void*)var11"
                            elif fnparam.winType == "win:AnsiString":
                                argline = '" Testing AniString "'
                            elif fnparam.winType == "win:UnicodeString":
                                argline = 'W(" Testing UnicodeString ")'
                            else:
                                if fnparam.count == "win:count":
                                    line.append("&")
                                argline = fnparam.winType.replace(":", "_")

                        line.append(argline)
                        line.append(",\n")

            #remove trailing commas
            if len(line) > 0:
                del line[-1]
                line.append("\n")
            line.append(");\n")
            clrtestEvents.extend(line)

    return ''.join(clrtestEvents)

def generateSanityTest(sClrEtwAllMan, testDir):
    if not testDir:
        return
    print('Generating Event Logging Tests')

    if not os.path.exists(testDir):
        os.makedirs(testDir)

    cmake_file = testDir + "/CMakeLists.txt"
    test_cpp = "clralltestevents.cpp"
    testinfo = testDir + "/testinfo.dat"

    Cmake_file = open(cmake_file, 'w')
    Test_cpp = open(testDir + "/" + test_cpp, 'w')
    Testinfo = open(testinfo, 'w')

    #CMake File:
    Cmake_file.write(stdprolog_cmake)
    Cmake_file.write("""
cmake_minimum_required(VERSION 2.8.12.2)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
set(SOURCES
""")
    Cmake_file.write(test_cpp)
    Cmake_file.write("""
   )
include_directories(${GENERATED_INCLUDE_DIR})
include_directories(${COREPAL_SOURCE_DIR}/inc/rt)

add_executable(eventprovidertest
               ${SOURCES}
               )
set(EVENT_PROVIDER_DEPENDENCIES "")
set(EVENT_PROVIDER_LINKER_OTPTIONS "")
if(FEATURE_EVENT_TRACE)
    add_definitions(-DFEATURE_EVENT_TRACE=1)
    list(APPEND EVENT_PROVIDER_DEPENDENCIES
         coreclrtraceptprovider
         eventprovider
         )
    list(APPEND EVENT_PROVIDER_LINKER_OTPTIONS
         ${EVENT_PROVIDER_DEPENDENCIES}
         )
endif(FEATURE_EVENT_TRACE)

add_dependencies(eventprovidertest ${EVENT_PROVIDER_DEPENDENCIES} coreclrpal)
target_link_libraries(eventprovidertest
                      coreclrpal
                      ${EVENT_PROVIDER_LINKER_OTPTIONS}
                      )
""")

    Testinfo.write("""
Copyright (c) Microsoft Corporation.  All rights reserved.
#

Version = 1.0
Section = EventProvider
Function = EventProvider
Name = PAL test for FireEtW* and EventEnabled* functions
TYPE = DEFAULT
EXE1 = eventprovidertest
Description = This is a sanity test to check that there are no crashes in Xplat eventing
""")

    #Test.cpp
    Test_cpp.write(stdprolog)
    Test_cpp.write("""
/*=====================================================================
**
** Source:   clralltestevents.cpp
**
** Purpose:  Ensure Correctness of Eventing code
**
**
**===================================================================*/
#include <palsuite.h>
#include <clrxplatevents.h>

typedef struct _Struct1 {
    ULONG          Data1;
    unsigned short Data2;
    unsigned short Data3;
    unsigned char  Data4[8];
} Struct1;

Struct1 var21[2] = { { 245, 13, 14, "deadbea" }, { 542, 0, 14, "deadflu" } };

Struct1* var11 = var21;
Struct1* win_Struct = var21;

GUID win_GUID ={ 245, 13, 14, "deadbea" };
double win_Double =34.04;
ULONG win_ULong = 34;
BOOL win_Boolean = FALSE;
unsigned __int64 win_UInt64 = 114;
unsigned int win_UInt32 = 4;
unsigned short win_UInt16 = 12;
unsigned char win_UInt8 = 9;
int win_Int32 = 12;
BYTE* win_Binary =(BYTE*)var21 ;

int __cdecl main(int argc, char **argv)
{
    /* Initialize the PAL. */
    if(0 != PAL_Initialize(argc, argv))
    {
        return FAIL;
    }

    ULONG Error = ERROR_SUCCESS;
#if defined(FEATURE_EVENT_TRACE)
    Trace("\\n Starting functional eventing APIs tests \\n");
""")

    Test_cpp.write(generateClralltestEvents(sClrEtwAllMan))
    Test_cpp.write("""

    /* Shutdown the PAL. */
    if (Error != ERROR_SUCCESS)
    {
        Fail("One or more eventing Apis failed\\n ");
        return FAIL;
    }
    Trace("\\n All eventing APIs were fired successfully \\n");
#endif //defined(FEATURE_EVENT_TRACE)
    PAL_Terminate();
    return PASS;
}
""")
    Cmake_file.close()
    Test_cpp.close()
    Testinfo.close()

def generateEtmDummyHeader(sClrEtwAllMan, clretwdummy):
    if not clretwdummy:
        return

    print('    Generating Dummy Event Headers')
    tree = DOM.parse(sClrEtwAllMan)
    incDir = os.path.dirname(os.path.realpath(clretwdummy))
    if not os.path.exists(incDir):
        os.makedirs(incDir)

    Clretwdummy = open(clretwdummy, 'w')
    Clretwdummy.write(stdprolog + "\n")

    for providerNode in tree.getElementsByTagName('provider'):
        templateNodes = providerNode.getElementsByTagName('template')
        allTemplates = parseTemplateNodes(templateNodes)
        eventNodes = providerNode.getElementsByTagName('event')
        #pal: create etmdummy.h
        Clretwdummy.write(generateclrEtwDummy(eventNodes, allTemplates) + "\n")

    Clretwdummy.close()

def generatePlformIndependentFiles(sClrEtwAllMan, incDir, etmDummyFile):
    generateEtmDummyHeader(sClrEtwAllMan, etmDummyFile)
    tree = DOM.parse(sClrEtwAllMan)

    if not incDir:
        return

    print('    Generating Event Headers')
    if not os.path.exists(incDir):
        os.makedirs(incDir)

    clrallevents = incDir + "/clretwallmain.h"
    clrxplatevents = incDir + "/clrxplatevents.h"

    Clrallevents = open(clrallevents, 'w')
    Clrxplatevents = open(clrxplatevents, 'w')

    Clrallevents.write(stdprolog + "\n")
    Clrxplatevents.write(stdprolog + "\n")

    Clrallevents.write("\n#include \"clrxplatevents.h\"\n\n")

    for providerNode in tree.getElementsByTagName('provider'):
        templateNodes = providerNode.getElementsByTagName('template')
        allTemplates = parseTemplateNodes(templateNodes)
        eventNodes = providerNode.getElementsByTagName('event')
        #vm header:
        Clrallevents.write(generateClrallEvents(eventNodes, allTemplates) + "\n")

        #pal: create clrallevents.h
        Clrxplatevents.write(generateClrXplatEvents(eventNodes, allTemplates) + "\n")

    Clrxplatevents.close()
    Clrallevents.close()

class EventExclusions:
    def __init__(self):
        self.nostack = set()
        self.explicitstack = set()
        self.noclrinstance = set()

def parseExclusionList(exclusionListFile):
    ExclusionFile = open(exclusionListFile, 'r')
    exclusionInfo = EventExclusions()

    for line in ExclusionFile:
        line = line.strip()

        #remove comments
        if not line or line.startswith('#'):
            continue

        tokens = line.split(':')
        #entries starting with nomac are ignored
        if "nomac" in tokens:
            continue

        if len(tokens) > 5:
            raise Exception("Invalid entry " + line + " in " + exclusionListFile)

        eventProvider = tokens[2]
        eventTask = tokens[1]
        eventSymbol = tokens[4]

        if eventProvider == '':
            eventProvider = "*"
        if eventTask == '':
            eventTask = "*"
        if eventSymbol == '':
            eventSymbol = "*"
        entry = eventProvider + ":" + eventTask + ":" + eventSymbol

        if tokens[0].lower() == "nostack":
            exclusionInfo.nostack.add(entry)
        if tokens[0].lower() == "stack":
            exclusionInfo.explicitstack.add(entry)
        if tokens[0].lower() == "noclrinstanceid":
            exclusionInfo.noclrinstance.add(entry)

    ExclusionFile.close()
    return exclusionInfo

def getStackWalkBit(eventProvider, taskName, eventSymbol, stackSet):
    for entry in stackSet:
        tokens = entry.split(':')

        if len(tokens) != 3:
            raise Exception("Error, possible error in the script which introduced the entry " + entry)

        eventCond = tokens[0] == eventProvider or tokens[0] == "*"
        taskCond = tokens[1] == taskName or tokens[1] == "*"
        symbolCond = tokens[2] == eventSymbol or tokens[2] == "*"

        if eventCond and taskCond and symbolCond:
            return False
    return True

#Add the miscellaneous checks here
def checkConsistency(sClrEtwAllMan, exclusionListFile):
    tree = DOM.parse(sClrEtwAllMan)
    exclusionInfo = parseExclusionList(exclusionListFile)

    for providerNode in tree.getElementsByTagName('provider'):
        stackSupportSpecified = {}
        eventNodes = providerNode.getElementsByTagName('event')
        templateNodes = providerNode.getElementsByTagName('template')
        eventProvider = providerNode.getAttribute('name')
        allTemplates = parseTemplateNodes(templateNodes)

        for eventNode in eventNodes:
            taskName = eventNode.getAttribute('task')
            eventSymbol = eventNode.getAttribute('symbol')
            eventTemplate = eventNode.getAttribute('template')
            eventValue = int(eventNode.getAttribute('value'))

            clrInstanceBit = getStackWalkBit(eventProvider, taskName, eventSymbol, exclusionInfo.noclrinstance)
            sLookupFieldName = "ClrInstanceID"
            sLookupFieldType = "win:UInt16"

            if clrInstanceBit and allTemplates.get(eventTemplate):
                # check for the event template and look for a field named ClrInstanceId of type win:UInt16
                fnParam = allTemplates[eventTemplate].getFnParam(sLookupFieldName)

                if not (fnParam and fnParam.winType == sLookupFieldType):
                    raise Exception(exclusionListFile + ":No " + sLookupFieldName + " field of type " + sLookupFieldType + " for event symbol " + eventSymbol)

            # If some versions of an event are on the nostack/stack lists,
            # and some versions are not on either the nostack or stack list,
            # then the developer likely forgot to specify one of the versions

            eventStackBitFromNoStackList = getStackWalkBit(eventProvider, taskName, eventSymbol, exclusionInfo.nostack)
            eventStackBitFromExplicitStackList = getStackWalkBit(eventProvider, taskName, eventSymbol, exclusionInfo.explicitstack)
            sStackSpecificityError = exclusionListFile + ": Error processing event :" + eventSymbol + "(ID" + str(eventValue) + "): This file must contain either ALL versions of this event or NO versions of this event. Currently some, but not all, versions of this event are present\n"

            if not stackSupportSpecified.get(eventValue):
                # Haven't checked this event before.  Remember whether a preference is stated
                if (not eventStackBitFromNoStackList) or (not eventStackBitFromExplicitStackList):
                    stackSupportSpecified[eventValue] = True
                else:
                    stackSupportSpecified[eventValue] = False
            else:
                # We've checked this event before.
                if stackSupportSpecified[eventValue]:
                    # When we last checked, a preference was previously specified, so it better be specified here
                    if eventStackBitFromNoStackList and eventStackBitFromExplicitStackList:
                        raise Exception(sStackSpecificityError)
                else:
                    # When we last checked, a preference was not previously specified, so it better not be specified here
                    if (not eventStackBitFromNoStackList) or (not eventStackBitFromExplicitStackList):
                        raise Exception(sStackSpecificityError)

import argparse
import sys

def main(argv):

    #parse the command line
    parser = argparse.ArgumentParser(description="Generates the Code required to instrument LTTng logging mechanism")

    required = parser.add_argument_group('required arguments')
    required.add_argument('--man', type=str, required=True,
                          help='full path to manifest containing the description of events')
    required.add_argument('--exc', type=str, required=True,
                          help='full path to exclusion list')
    required.add_argument('--inc', type=str, default=None,
                          help='full path to directory where the header files will be generated')
    required.add_argument('--dummy', type=str, default=None,
                          help='full path to file that will have dummy definitions of FireEtw functions')
    required.add_argument('--testdir', type=str, default=None,
                          help='full path to directory where the test assets will be deployed')
    args, unknown = parser.parse_known_args(argv)
    if unknown:
        print('Unknown argument(s): ', ', '.join(unknown))
        # the original referenced an undefined const.UnknownArguments here;
        # return a nonzero exit code instead
        return 1

    sClrEtwAllMan = args.man
    exclusionListFile = args.exc
    incdir = args.inc
    etmDummyFile = args.dummy
    testDir = args.testdir

    checkConsistency(sClrEtwAllMan, exclusionListFile)
    generatePlformIndependentFiles(sClrEtwAllMan, incdir, etmDummyFile)
    generateSanityTest(sClrEtwAllMan, testDir)

if __name__ == '__main__':
    return_code = main(sys.argv[1:])
    sys.exit(return_code)
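# --- Hedged usage sketch (not part of the build system) ---
# The script is driven from the command line via the argparse options defined
# in main(); the paths below are illustrative placeholders, not verified
# build inputs:
#
#   python genXplatEventing.py \
#       --man src/vm/ClrEtwAll.man \
#       --exc <path-to-exclusion-list> \
#       --inc <output-include-dir> \
#       --dummy <output-include-dir>/etmdummy.h \
#       --testdir <output-test-dir>
#
# A minimal standalone illustration of the 10-parameter bucketing rule that
# bucketizeAbstractTemplates implements (a simplification that ignores the
# dependency handling):

def _bucket_sketch(params, frame_size=10):
    """Split a parameter list into frames of at most frame_size entries."""
    return [params[i:i + frame_size] for i in range(0, len(params), frame_size)]

# _bucket_sketch(list(range(25))) -> three frames of sizes 10, 10 and 5,
# mirroring how a 25-argument template becomes three abstract templates.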
mit
firebitsbr/pwn_plug_sources
src/metagoofil/pdfminer/pdffont.py
32
26471
#!/usr/bin/env python2
import sys
import struct
try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO
from cmapdb import CMapDB, CMapParser, FileUnicodeMap, CMap
from encodingdb import EncodingDB, name2unicode
from psparser import PSStackParser
from psparser import PSSyntaxError, PSEOF
from psparser import LIT, KWD, STRICT
from psparser import PSLiteral, literal_name
from pdftypes import PDFException, resolve1
from pdftypes import int_value, float_value, num_value
from pdftypes import str_value, list_value, dict_value, stream_value
from fontmetrics import FONT_METRICS
from utils import apply_matrix_norm, nunpack, choplist

def get_widths(seq):
    widths = {}
    r = []
    for v in seq:
        if isinstance(v, list):
            if r:
                char1 = r[-1]
                for (i, w) in enumerate(v):
                    widths[char1 + i] = w
                r = []
        elif isinstance(v, int):
            r.append(v)
            if len(r) == 3:
                (char1, char2, w) = r
                for i in xrange(char1, char2 + 1):
                    widths[i] = w
                r = []
    return widths
#assert get_widths([1]) == {}
#assert get_widths([1,2,3]) == {1:3, 2:3}
#assert get_widths([1,[2,3],6,[7,8]]) == {1:2,2:3, 6:7,7:8}

def get_widths2(seq):
    widths = {}
    r = []
    for v in seq:
        if isinstance(v, list):
            if r:
                char1 = r[-1]
                for (i, (w, vx, vy)) in enumerate(choplist(3, v)):
                    widths[char1 + i] = (w, (vx, vy))
                r = []
        elif isinstance(v, int):
            r.append(v)
            if len(r) == 5:
                (char1, char2, w, vx, vy) = r
                for i in xrange(char1, char2 + 1):
                    widths[i] = (w, (vx, vy))
                r = []
    return widths
#assert get_widths2([1]) == {}
#assert get_widths2([1,2,3,4,5]) == {1:(3,(4,5)), 2:(3,(4,5))}
#assert get_widths2([1,[2,3,4,5],6,[7,8,9]]) == {1:(2,(3,4)), 6:(7,(8,9))}

##  FontMetricsDB
##
class FontMetricsDB(object):

    @classmethod
    def get_metrics(klass, fontname):
        return FONT_METRICS[fontname]

##  Type1FontHeaderParser
##
class Type1FontHeaderParser(PSStackParser):

    KEYWORD_BEGIN = KWD('begin')
    KEYWORD_END = KWD('end')
    KEYWORD_DEF = KWD('def')
    KEYWORD_PUT = KWD('put')
    KEYWORD_DICT = KWD('dict')
    KEYWORD_ARRAY = KWD('array')
    KEYWORD_READONLY = KWD('readonly')
    KEYWORD_FOR = KWD('for')

    def __init__(self, data):
        PSStackParser.__init__(self, data)
        self._cid2unicode = {}
        return

    def get_encoding(self):
        while 1:
            try:
                (cid, name) = self.nextobject()
            except PSEOF:
                break
            try:
                self._cid2unicode[cid] = name2unicode(name)
            except KeyError:
                pass
        return self._cid2unicode

    def do_keyword(self, pos, token):
        if token is self.KEYWORD_PUT:
            ((_, key), (_, value)) = self.pop(2)
            if (isinstance(key, int) and isinstance(value, PSLiteral)):
                self.add_results((key, literal_name(value)))
        return

##  CFFFont
##  (Format specified in Adobe Technical Note: #5176
##   "The Compact Font Format Specification")
##
NIBBLES = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', 'e', 'e-', None, '-')

def getdict(data):
    d = {}
    fp = StringIO(data)
    stack = []
    while 1:
        c = fp.read(1)
        if not c:
            break
        b0 = ord(c)
        if b0 <= 21:
            d[b0] = stack
            stack = []
            continue
        if b0 == 30:
            s = ''
            loop = True
            while loop:
                b = ord(fp.read(1))
                for n in (b >> 4, b & 15):
                    if n == 15:
                        loop = False
                    else:
                        s += NIBBLES[n]
            value = float(s)
        elif 32 <= b0 and b0 <= 246:
            value = b0 - 139
        else:
            b1 = ord(fp.read(1))
            if 247 <= b0 and b0 <= 250:
                value = ((b0 - 247) << 8) + b1 + 108
            elif 251 <= b0 and b0 <= 254:
                value = -((b0 - 251) << 8) - b1 - 108
            else:
                b2 = ord(fp.read(1))
                if 128 <= b1:
                    b1 -= 256
                if b0 == 28:
                    value = b1 << 8 | b2
                else:
                    value = b1 << 24 | b2 << 16 | struct.unpack('>H', fp.read(2))[0]
        stack.append(value)
    return d

class CFFFont(object):

    STANDARD_STRINGS = (
        '.notdef', 'space', 'exclam', 'quotedbl', 'numbersign',
        'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft',
        'parenright', 'asterisk', 'plus', 'comma', 'hyphen', 'period',
        'slash', 'zero', 'one', 'two', 'three', 'four', 'five', 'six',
        'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal',
        'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G',
        'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
        'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash',
        'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a',
        'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
        'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
        'braceleft', 'bar', 'braceright', 'asciitilde', 'exclamdown',
        'cent', 'sterling', 'fraction', 'yen', 'florin', 'section',
        'currency', 'quotesingle', 'quotedblleft', 'guillemotleft',
        'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash',
        'dagger', 'daggerdbl', 'periodcentered', 'paragraph', 'bullet',
        'quotesinglbase', 'quotedblbase', 'quotedblright',
        'guillemotright', 'ellipsis', 'perthousand', 'questiondown',
        'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve',
        'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut',
        'ogonek', 'caron', 'emdash', 'AE', 'ordfeminine', 'Lslash',
        'Oslash', 'OE', 'ordmasculine', 'ae', 'dotlessi', 'lslash',
        'oslash', 'oe', 'germandbls', 'onesuperior', 'logicalnot', 'mu',
        'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn',
        'onequarter', 'divide', 'brokenbar', 'degree', 'thorn',
        'threequarters', 'twosuperior', 'registered', 'minus', 'eth',
        'multiply', 'threesuperior', 'copyright', 'Aacute',
        'Acircumflex', 'Adieresis', 'Agrave', 'Aring', 'Atilde',
        'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave',
        'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde',
        'Oacute', 'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde',
        'Scaron', 'Uacute', 'Ucircumflex', 'Udieresis', 'Ugrave',
        'Yacute', 'Ydieresis', 'Zcaron', 'aacute', 'acircumflex',
        'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla', 'eacute',
        'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex',
        'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex',
        'odieresis', 'ograve', 'otilde', 'scaron', 'uacute',
        'ucircumflex', 'udieresis', 'ugrave', 'yacute', 'ydieresis',
        'zcaron', 'exclamsmall', 'Hungarumlautsmall', 'dollaroldstyle',
        'dollarsuperior', 'ampersandsmall', 'Acutesmall',
        'parenleftsuperior', 'parenrightsuperior', 'twodotenleader',
        'onedotenleader', 'zerooldstyle', 'oneoldstyle', 'twooldstyle',
        'threeoldstyle', 'fouroldstyle', 'fiveoldstyle', 'sixoldstyle',
        'sevenoldstyle', 'eightoldstyle', 'nineoldstyle',
        'commasuperior', 'threequartersemdash', 'periodsuperior',
        'questionsmall', 'asuperior', 'bsuperior', 'centsuperior',
        'dsuperior', 'esuperior', 'isuperior', 'lsuperior', 'msuperior',
        'nsuperior', 'osuperior', 'rsuperior', 'ssuperior', 'tsuperior',
        'ff', 'ffi', 'ffl', 'parenleftinferior', 'parenrightinferior',
        'Circumflexsmall', 'hyphensuperior', 'Gravesmall', 'Asmall',
        'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall',
        'Hsmall', 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall',
        'Nsmall', 'Osmall', 'Psmall', 'Qsmall', 'Rsmall', 'Ssmall',
        'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', 'Ysmall',
        'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall',
        'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall',
        'Zcaronsmall', 'Dieresissmall', 'Brevesmall', 'Caronsmall',
        'Dotaccentsmall', 'Macronsmall', 'figuredash', 'hypheninferior',
        'Ogoneksmall', 'Ringsmall', 'Cedillasmall', 'questiondownsmall',
        'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths',
        'onethird', 'twothirds', 'zerosuperior', 'foursuperior',
        'fivesuperior', 'sixsuperior', 'sevensuperior', 'eightsuperior',
        'ninesuperior', 'zeroinferior', 'oneinferior', 'twoinferior',
        'threeinferior', 'fourinferior', 'fiveinferior', 'sixinferior',
        'seveninferior', 'eightinferior', 'nineinferior',
        'centinferior', 'dollarinferior', 'periodinferior',
        'commainferior', 'Agravesmall', 'Aacutesmall',
        'Acircumflexsmall', 'Atildesmall', 'Adieresissmall',
        'Aringsmall', 'AEsmall', 'Ccedillasmall', 'Egravesmall',
        'Eacutesmall', 'Ecircumflexsmall', 'Edieresissmall',
        'Igravesmall', 'Iacutesmall', 'Icircumflexsmall',
        'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall',
        'Oacutesmall', 'Ocircumflexsmall', 'Otildesmall',
        'Odieresissmall', 'OEsmall', 'Oslashsmall', 'Ugravesmall',
        'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall',
        'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000',
        '001.001', '001.002', '001.003', 'Black', 'Bold', 'Book',
        'Light', 'Medium', 'Regular', 'Roman', 'Semibold',
    )

    class INDEX(object):

        def __init__(self, fp):
            self.fp = fp
            self.offsets = []
            (count, offsize) = struct.unpack('>HB', self.fp.read(3))
            for i in xrange(count + 1):
                self.offsets.append(nunpack(self.fp.read(offsize)))
            self.base = self.fp.tell() - 1
            self.fp.seek(self.base + self.offsets[-1])
            return

        def __repr__(self):
            return '<INDEX: size=%d>' % len(self)

        def __len__(self):
            return len(self.offsets) - 1

        def __getitem__(self, i):
            self.fp.seek(self.base + self.offsets[i])
            return self.fp.read(self.offsets[i + 1] - self.offsets[i])

        def __iter__(self):
            return iter(self[i] for i in xrange(len(self)))

    def __init__(self, name, fp):
        self.name = name
        self.fp = fp
        # Header
        (_major, _minor, hdrsize, offsize) = struct.unpack('BBBB', self.fp.read(4))
        self.fp.read(hdrsize - 4)
        # Name INDEX
        self.name_index = self.INDEX(self.fp)
        # Top DICT INDEX
        self.dict_index = self.INDEX(self.fp)
        # String INDEX
        self.string_index = self.INDEX(self.fp)
        # Global Subr INDEX
        self.subr_index = self.INDEX(self.fp)
        # Top DICT DATA
        self.top_dict = getdict(self.dict_index[0])
        (charset_pos,) = self.top_dict.get(15, [0])
        (encoding_pos,) = self.top_dict.get(16, [0])
        (charstring_pos,) = self.top_dict.get(17, [0])
        # CharStrings
        self.fp.seek(charstring_pos)
        self.charstring = self.INDEX(self.fp)
        self.nglyphs = len(self.charstring)
        # Encodings
        self.code2gid = {}
        self.gid2code = {}
        self.fp.seek(encoding_pos)
        format = self.fp.read(1)
        if format == '\x00':
            # Format 0
            (n,) = struct.unpack('B', self.fp.read(1))
            for (code, gid) in enumerate(struct.unpack('B' * n, self.fp.read(n))):
                self.code2gid[code] = gid
                self.gid2code[gid] = code
        elif format == '\x01':
            # Format 1
            (n,) = struct.unpack('B', self.fp.read(1))
            code = 0
            for i in xrange(n):
                (first, nleft) = struct.unpack('BB', self.fp.read(2))
                for gid in xrange(first, first + nleft + 1):
                    self.code2gid[code] = gid
                    self.gid2code[gid] = code
                    code += 1
        else:
            raise ValueError('unsupported encoding format: %r' % format)
        # Charsets
        self.name2gid = {}
        self.gid2name = {}
        self.fp.seek(charset_pos)
        format = self.fp.read(1)
        if format == '\x00':
            # Format 0
            n = self.nglyphs - 1
            for (gid, sid) in enumerate(struct.unpack('>' + 'H' * n, self.fp.read(2 * n))):
                gid += 1
                name = self.getstr(sid)
                self.name2gid[name] = gid
                self.gid2name[gid] = name
        elif format == '\x01':
            # Format 1
            (n,) = struct.unpack('B', self.fp.read(1))
            sid = 0
            for i in xrange(n):
                (first, nleft) = struct.unpack('BB', self.fp.read(2))
                for gid in xrange(first, first + nleft + 1):
                    name = self.getstr(sid)
                    self.name2gid[name] = gid
                    self.gid2name[gid] = name
                    sid += 1
        elif format == '\x02':
            # Format 2
            assert 0
        else:
            raise ValueError('unsupported charset format:
%r' % format) #print self.code2gid #print self.name2gid #assert 0 return def getstr(self, sid): if sid < len(self.STANDARD_STRINGS): return self.STANDARD_STRINGS[sid] return self.string_index[sid-len(self.STANDARD_STRINGS)] ## TrueTypeFont ## class TrueTypeFont(object): class CMapNotFound(Exception): pass def __init__(self, name, fp): self.name = name self.fp = fp self.tables = {} self.fonttype = fp.read(4) (ntables, _1, _2, _3) = struct.unpack('>HHHH', fp.read(8)) for _ in xrange(ntables): (name, tsum, offset, length) = struct.unpack('>4sLLL', fp.read(16)) self.tables[name] = (offset, length) return def create_unicode_map(self): if 'cmap' not in self.tables: raise TrueTypeFont.CMapNotFound (base_offset, length) = self.tables['cmap'] fp = self.fp fp.seek(base_offset) (version, nsubtables) = struct.unpack('>HH', fp.read(4)) subtables = [] for i in xrange(nsubtables): subtables.append(struct.unpack('>HHL', fp.read(8))) char2gid = {} # Only supports subtable type 0, 2 and 4. for (_1, _2, st_offset) in subtables: fp.seek(base_offset+st_offset) (fmttype, fmtlen, fmtlang) = struct.unpack('>HHH', fp.read(6)) if fmttype == 0: char2gid.update(enumerate(struct.unpack('>256B', fp.read(256)))) elif fmttype == 2: subheaderkeys = struct.unpack('>256H', fp.read(512)) firstbytes = [0]*8192 for (i,k) in enumerate(subheaderkeys): firstbytes[k/8] = i nhdrs = max(subheaderkeys)/8 + 1 hdrs = [] for i in xrange(nhdrs): (firstcode,entcount,delta,offset) = struct.unpack('>HHhH', fp.read(8)) hdrs.append((i,firstcode,entcount,delta,fp.tell()-2+offset)) for (i,firstcode,entcount,delta,pos) in hdrs: if not entcount: continue first = firstcode + (firstbytes[i] << 8) fp.seek(pos) for c in xrange(entcount): gid = struct.unpack('>H', fp.read(2)) if gid: gid += delta char2gid[first+c] = gid elif fmttype == 4: (segcount, _1, _2, _3) = struct.unpack('>HHHH', fp.read(8)) segcount /= 2 ecs = struct.unpack('>%dH' % segcount, fp.read(2*segcount)) fp.read(2) scs = struct.unpack('>%dH' % segcount, fp.read(2*segcount)) idds = struct.unpack('>%dh' % segcount, fp.read(2*segcount)) pos = fp.tell() idrs = struct.unpack('>%dH' % segcount, fp.read(2*segcount)) for (ec,sc,idd,idr) in zip(ecs, scs, idds, idrs): if idr: fp.seek(pos+idr) for c in xrange(sc, ec+1): char2gid[c] = (struct.unpack('>H', fp.read(2))[0] + idd) & 0xffff else: for c in xrange(sc, ec+1): char2gid[c] = (c + idd) & 0xffff else: assert 0 # create unicode map unicode_map = FileUnicodeMap() for (char,gid) in char2gid.iteritems(): unicode_map.add_cid2unichr(gid, char) return unicode_map ## Fonts ## class PDFFontError(PDFException): pass class PDFUnicodeNotDefined(PDFFontError): pass LITERAL_STANDARD_ENCODING = LIT('StandardEncoding') LITERAL_TYPE1C = LIT('Type1C') # PDFFont class PDFFont(object): def __init__(self, descriptor, widths, default_width=None): self.descriptor = descriptor self.widths = widths self.fontname = resolve1(descriptor.get('FontName', 'unknown')) if isinstance(self.fontname, PSLiteral): self.fontname = literal_name(self.fontname) self.flags = int_value(descriptor.get('Flags', 0)) self.ascent = num_value(descriptor.get('Ascent', 0)) self.descent = num_value(descriptor.get('Descent', 0)) self.italic_angle = num_value(descriptor.get('ItalicAngle', 0)) self.default_width = default_width or num_value(descriptor.get('MissingWidth', 0)) self.leading = num_value(descriptor.get('Leading', 0)) self.bbox = list_value(descriptor.get('FontBBox', (0,0,0,0))) self.hscale = self.vscale = .001 return def __repr__(self): return '<PDFFont>' def is_vertical(self): return 
False def is_multibyte(self): return False def decode(self, bytes): return map(ord, bytes) def get_ascent(self): return self.ascent * self.vscale def get_descent(self): return self.descent * self.vscale def get_width(self): w = self.bbox[2]-self.bbox[0] if w == 0: w = -self.default_width return w * self.hscale def get_height(self): h = self.bbox[3]-self.bbox[1] if h == 0: h = self.ascent - self.descent return h * self.vscale def char_width(self, cid): return self.widths.get(cid, self.default_width) * self.hscale def char_disp(self, cid): return 0 def string_width(self, s): return sum( self.char_width(cid) for cid in self.decode(s) ) # PDFSimpleFont class PDFSimpleFont(PDFFont): def __init__(self, descriptor, widths, spec): # Font encoding is specified either by a name of # built-in encoding or a dictionary that describes # the differences. if 'Encoding' in spec: encoding = resolve1(spec['Encoding']) else: encoding = LITERAL_STANDARD_ENCODING if isinstance(encoding, dict): name = literal_name(encoding.get('BaseEncoding', LITERAL_STANDARD_ENCODING)) diff = list_value(encoding.get('Differences', None)) self.cid2unicode = EncodingDB.get_encoding(name, diff) else: self.cid2unicode = EncodingDB.get_encoding(literal_name(encoding)) self.unicode_map = None if 'ToUnicode' in spec: strm = stream_value(spec['ToUnicode']) self.unicode_map = FileUnicodeMap() CMapParser(self.unicode_map, StringIO(strm.get_data())).run() PDFFont.__init__(self, descriptor, widths) return def to_unichr(self, cid): if self.unicode_map: try: return self.unicode_map.get_unichr(cid) except KeyError: pass try: return self.cid2unicode[cid] except KeyError: raise PDFUnicodeNotDefined(None, cid) # PDFType1Font class PDFType1Font(PDFSimpleFont): def __init__(self, rsrcmgr, spec): try: self.basefont = literal_name(spec['BaseFont']) except KeyError: if STRICT: raise PDFFontError('BaseFont is missing') self.basefont = 'unknown' try: (descriptor, widths) = FontMetricsDB.get_metrics(self.basefont) except KeyError: descriptor = dict_value(spec.get('FontDescriptor', {})) firstchar = int_value(spec.get('FirstChar', 0)) lastchar = int_value(spec.get('LastChar', 255)) widths = list_value(spec.get('Widths', [0]*256)) widths = dict( (i+firstchar,w) for (i,w) in enumerate(widths) ) PDFSimpleFont.__init__(self, descriptor, widths, spec) if 'Encoding' not in spec and 'FontFile' in descriptor: # try to recover the missing encoding info from the font file. 
self.fontfile = stream_value(descriptor.get('FontFile')) length1 = int_value(self.fontfile['Length1']) data = self.fontfile.get_data()[:length1] parser = Type1FontHeaderParser(StringIO(data)) self.cid2unicode = parser.get_encoding() return def __repr__(self): return '<PDFType1Font: basefont=%r>' % self.basefont # PDFTrueTypeFont class PDFTrueTypeFont(PDFType1Font): def __repr__(self): return '<PDFTrueTypeFont: basefont=%r>' % self.basefont # PDFType3Font class PDFType3Font(PDFSimpleFont): def __init__(self, rsrcmgr, spec): firstchar = int_value(spec.get('FirstChar', 0)) lastchar = int_value(spec.get('LastChar', 0)) widths = list_value(spec.get('Widths', [0]*256)) widths = dict( (i+firstchar,w) for (i,w) in enumerate(widths)) if 'FontDescriptor' in spec: descriptor = dict_value(spec['FontDescriptor']) else: descriptor = {'Ascent':0, 'Descent':0, 'FontBBox':spec['FontBBox']} PDFSimpleFont.__init__(self, descriptor, widths, spec) self.matrix = tuple(list_value(spec.get('FontMatrix'))) (_,self.descent,_,self.ascent) = self.bbox (self.hscale,self.vscale) = apply_matrix_norm(self.matrix, (1,1)) return def __repr__(self): return '<PDFType3Font>' # PDFCIDFont class PDFCIDFont(PDFFont): def __init__(self, rsrcmgr, spec): try: self.basefont = literal_name(spec['BaseFont']) except KeyError: if STRICT: raise PDFFontError('BaseFont is missing') self.basefont = 'unknown' self.cidsysteminfo = dict_value(spec.get('CIDSystemInfo', {})) self.cidcoding = '%s-%s' % (self.cidsysteminfo.get('Registry', 'unknown'), self.cidsysteminfo.get('Ordering', 'unknown')) try: name = literal_name(spec['Encoding']) except KeyError: if STRICT: raise PDFFontError('Encoding is unspecified') name = 'unknown' try: self.cmap = CMapDB.get_cmap(name) except CMapDB.CMapNotFound, e: if STRICT: raise PDFFontError(e) self.cmap = CMap() try: descriptor = dict_value(spec['FontDescriptor']) except KeyError: if STRICT: raise PDFFontError('FontDescriptor is missing') descriptor = {} ttf = None if 'FontFile2' in descriptor: self.fontfile = stream_value(descriptor.get('FontFile2')) ttf = TrueTypeFont(self.basefont, StringIO(self.fontfile.get_data())) self.unicode_map = None if 'ToUnicode' in spec: strm = stream_value(spec['ToUnicode']) self.unicode_map = FileUnicodeMap() CMapParser(self.unicode_map, StringIO(strm.get_data())).run() elif self.cidcoding == 'Adobe-Identity': if ttf: try: self.unicode_map = ttf.create_unicode_map() except TrueTypeFont.CMapNotFound: pass else: try: self.unicode_map = CMapDB.get_unicode_map(self.cidcoding, self.cmap.is_vertical()) except CMapDB.CMapNotFound, e: pass self.vertical = self.cmap.is_vertical() if self.vertical: # writing mode: vertical widths = get_widths2(list_value(spec.get('W2', []))) self.disps = dict( (cid,(vx,vy)) for (cid,(_,(vx,vy))) in widths.iteritems() ) (vy,w) = spec.get('DW2', [880, -1000]) self.default_disp = (None,vy) widths = dict( (cid,w) for (cid,(w,_)) in widths.iteritems() ) default_width = w else: # writing mode: horizontal self.disps = {} self.default_disp = 0 widths = get_widths(list_value(spec.get('W', []))) default_width = spec.get('DW', 1000) PDFFont.__init__(self, descriptor, widths, default_width=default_width) return def __repr__(self): return '<PDFCIDFont: basefont=%r, cidcoding=%r>' % (self.basefont, self.cidcoding) def is_vertical(self): return self.vertical def is_multibyte(self): return True def decode(self, bytes): return self.cmap.decode(bytes) def char_disp(self, cid): "Returns an integer for horizontal fonts, a tuple for vertical fonts." 
return self.disps.get(cid, self.default_disp) def to_unichr(self, cid): try: if not self.unicode_map: raise KeyError(cid) return self.unicode_map.get_unichr(cid) except KeyError: raise PDFUnicodeNotDefined(self.cidcoding, cid) # main def main(argv): for fname in argv[1:]: fp = file(fname, 'rb') #font = TrueTypeFont(fname, fp) font = CFFFont(fname, fp) print font fp.close() return if __name__ == '__main__': sys.exit(main(sys.argv))
gpl-3.0
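The commented-out assertions inside get_widths and get_widths2 above double as a specification of the PDF /W and /W2 width-array expansion. A minimal usage sketch, assuming the module above is importable as pdffont (the expected values are exactly the in-file asserts):

from pdffont import get_widths, get_widths2

# /W mixes "first [w1 w2 ...]" runs with "first last w" ranges.
assert get_widths([1, 2, 3]) == {1: 3, 2: 3}
assert get_widths([1, [2, 3], 6, [7, 8]]) == {1: 2, 2: 3, 6: 7, 7: 8}

# /W2 carries (width, (vx, vy)) triples for vertical writing mode.
assert get_widths2([1, 2, 3, 4, 5]) == {1: (3, (4, 5)), 2: (3, (4, 5))}
assert get_widths2([1, [2, 3, 4, 5], 6, [7, 8, 9]]) == {1: (2, (3, 4)), 6: (7, (8, 9))}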
liorvh/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/watchlist_unittest.py
124
2754
# Copyright (C) 2010 Apple Inc. All rights reserved.
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

'''Unit tests for watchlist.py.'''

import unittest2 as unittest

import watchlist


class MockErrorHandler(object):
    def __init__(self, handle_style_error):
        self.turned_off_filtering = False
        self._handle_style_error = handle_style_error

    def turn_off_line_filtering(self):
        self.turned_off_filtering = True

    def __call__(self, line_number, category, confidence, message):
        self._handle_style_error(self, line_number, category, confidence, message)
        return True


class WatchListTest(unittest.TestCase):
    def test_basic_error_message(self):
        def handle_style_error(mock_error_handler, line_number, category, confidence, message):
            mock_error_handler.had_error = True
            self.assertEqual(0, line_number)
            self.assertEqual('watchlist/general', category)

        error_handler = MockErrorHandler(handle_style_error)
        error_handler.had_error = False

        checker = watchlist.WatchListChecker('watchlist', error_handler)
        # "DEFINTIONS" is deliberately misspelled ("DEFINITIONS") so that the
        # checker reports an error for the unknown section.
        checker.check(['{"DEFINTIONS": {}}'])
        self.assertTrue(error_handler.had_error)
        self.assertTrue(error_handler.turned_off_filtering)
bsd-3-clause
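MockErrorHandler above illustrates the error-handler protocol the style checkers expect: a callable taking (line_number, category, confidence, message), plus a turn_off_line_filtering() hook. A minimal sketch driving the same checker with a plain closure; the malformed input is hypothetical, and the expectation that it is reported under 'watchlist/general' mirrors the test above:

import watchlist

errors = []
handler = MockErrorHandler(
    lambda mock, line_number, category, confidence, message:
        errors.append((line_number, category, message)))

checker = watchlist.WatchListChecker('watchlist', handler)
checker.check(['not valid json'])  # hypothetical malformed input
print(errors)  # expect an entry with category 'watchlist/general'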
ArtemTeleshev/raspberrypi-linux
tools/perf/scripts/python/sctop.py
1996
2102
# system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall.  If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds.  The default interval is
# 3 seconds.

import os, sys, thread, time

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

usage = "perf script -s sctop.py [comm] [interval]\n";

for_comm = None
default_interval = 3
interval = default_interval

if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval

syscalls = autodict()

def trace_begin():
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass

def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, id, args):
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        syscalls[id] = 1

def syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    raw_syscalls__sys_enter(**locals())

def print_syscall_totals(interval):
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",

        print "%-40s  %10s\n" % ("event", "count"),
        print "%-40s  %10s\n" % ("----------------------------------------", \
                                 "----------"),

        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40s  %10d\n" % (syscall_name(id), val),
            except TypeError:
                pass
        syscalls.clear()
        time.sleep(interval)
gpl-2.0
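sctop's counting loop relies on perf's autodict plus a try/except TypeError to initialize missing keys. A standalone sketch of the same periodic-totals pattern using only the standard library, with no perf context assumed:

import collections
import threading
import time

counts = collections.defaultdict(int)   # stands in for perf's autodict()

def print_totals(interval):
    while True:
        time.sleep(interval)
        # Highest counts first, then reset for the next window.
        for name, n in sorted(counts.items(), key=lambda kv: kv[1], reverse=True):
            print("%-40s  %10d" % (name, n))
        counts.clear()

# Like trace_begin() above: start the display loop in a background thread,
# then bump counters from the event callbacks.
threading.Thread(target=print_totals, args=(3,)).start()
counts["read"] += 1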
Conjuror/fxos-certsuite
mcts/web-platform-tests/tests/tools/pywebsocket/src/mod_pywebsocket/handshake/hybi.py
139
17070
# Copyright 2012, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """This file provides the opening handshake processor for the WebSocket protocol (RFC 6455). Specification: http://tools.ietf.org/html/rfc6455 """ # Note: request.connection.write is used in this module, even though mod_python # document says that it should be used only in connection handlers. # Unfortunately, we have no other options. For example, request.write is not # suitable because it doesn't allow direct raw bytes writing. import base64 import logging import os import re from mod_pywebsocket import common from mod_pywebsocket.extensions import get_extension_processor from mod_pywebsocket.extensions import is_compression_extension from mod_pywebsocket.handshake._base import check_request_line from mod_pywebsocket.handshake._base import format_header from mod_pywebsocket.handshake._base import get_mandatory_header from mod_pywebsocket.handshake._base import HandshakeException from mod_pywebsocket.handshake._base import parse_token_list from mod_pywebsocket.handshake._base import validate_mandatory_header from mod_pywebsocket.handshake._base import validate_subprotocol from mod_pywebsocket.handshake._base import VersionException from mod_pywebsocket.stream import Stream from mod_pywebsocket.stream import StreamOptions from mod_pywebsocket import util # Used to validate the value in the Sec-WebSocket-Key header strictly. RFC 4648 # disallows non-zero padding, so the character right before == must be any of # A, Q, g and w. _SEC_WEBSOCKET_KEY_REGEX = re.compile('^[+/0-9A-Za-z]{21}[AQgw]==$') # Defining aliases for values used frequently. _VERSION_HYBI08 = common.VERSION_HYBI08 _VERSION_HYBI08_STRING = str(_VERSION_HYBI08) _VERSION_LATEST = common.VERSION_HYBI_LATEST _VERSION_LATEST_STRING = str(_VERSION_LATEST) _SUPPORTED_VERSIONS = [ _VERSION_LATEST, _VERSION_HYBI08, ] def compute_accept(key): """Computes value for the Sec-WebSocket-Accept header from value of the Sec-WebSocket-Key header. 
""" accept_binary = util.sha1_hash( key + common.WEBSOCKET_ACCEPT_UUID).digest() accept = base64.b64encode(accept_binary) return (accept, accept_binary) class Handshaker(object): """Opening handshake processor for the WebSocket protocol (RFC 6455).""" def __init__(self, request, dispatcher): """Construct an instance. Args: request: mod_python request. dispatcher: Dispatcher (dispatch.Dispatcher). Handshaker will add attributes such as ws_resource during handshake. """ self._logger = util.get_class_logger(self) self._request = request self._dispatcher = dispatcher def _validate_connection_header(self): connection = get_mandatory_header( self._request, common.CONNECTION_HEADER) try: connection_tokens = parse_token_list(connection) except HandshakeException, e: raise HandshakeException( 'Failed to parse %s: %s' % (common.CONNECTION_HEADER, e)) connection_is_valid = False for token in connection_tokens: if token.lower() == common.UPGRADE_CONNECTION_TYPE.lower(): connection_is_valid = True break if not connection_is_valid: raise HandshakeException( '%s header doesn\'t contain "%s"' % (common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE)) def do_handshake(self): self._request.ws_close_code = None self._request.ws_close_reason = None # Parsing. check_request_line(self._request) validate_mandatory_header( self._request, common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE) self._validate_connection_header() self._request.ws_resource = self._request.uri unused_host = get_mandatory_header(self._request, common.HOST_HEADER) self._request.ws_version = self._check_version() # This handshake must be based on latest hybi. We are responsible to # fallback to HTTP on handshake failure as latest hybi handshake # specifies. try: self._get_origin() self._set_protocol() self._parse_extensions() # Key validation, response generation. key = self._get_key() (accept, accept_binary) = compute_accept(key) self._logger.debug( '%s: %r (%s)', common.SEC_WEBSOCKET_ACCEPT_HEADER, accept, util.hexify(accept_binary)) self._logger.debug('Protocol version is RFC 6455') # Setup extension processors. processors = [] if self._request.ws_requested_extensions is not None: for extension_request in self._request.ws_requested_extensions: processor = get_extension_processor(extension_request) # Unknown extension requests are just ignored. if processor is not None: processors.append(processor) self._request.ws_extension_processors = processors # List of extra headers. The extra handshake handler may add header # data as name/value pairs to this list and pywebsocket appends # them to the WebSocket handshake. self._request.extra_headers = [] # Extra handshake handler may modify/remove processors. self._dispatcher.do_extra_handshake(self._request) processors = filter(lambda processor: processor is not None, self._request.ws_extension_processors) # Ask each processor if there are extensions on the request which # cannot co-exist. When processor decided other processors cannot # co-exist with it, the processor marks them (or itself) as # "inactive". The first extension processor has the right to # make the final call. for processor in reversed(processors): if processor.is_active(): processor.check_consistency_with_other_processors( processors) processors = filter(lambda processor: processor.is_active(), processors) accepted_extensions = [] # We need to take into account of mux extension here. 
# If mux extension exists: # - Remove processors of extensions for logical channel, # which are processors located before the mux processor # - Pass extension requests for logical channel to mux processor # - Attach the mux processor to the request. It will be referred # by dispatcher to see whether the dispatcher should use mux # handler or not. mux_index = -1 for i, processor in enumerate(processors): if processor.name() == common.MUX_EXTENSION: mux_index = i break if mux_index >= 0: logical_channel_extensions = [] for processor in processors[:mux_index]: logical_channel_extensions.append(processor.request()) processor.set_active(False) self._request.mux_processor = processors[mux_index] self._request.mux_processor.set_extensions( logical_channel_extensions) processors = filter(lambda processor: processor.is_active(), processors) stream_options = StreamOptions() for index, processor in enumerate(processors): if not processor.is_active(): continue extension_response = processor.get_extension_response() if extension_response is None: # Rejected. continue accepted_extensions.append(extension_response) processor.setup_stream_options(stream_options) if not is_compression_extension(processor.name()): continue # Inactivate all of the following compression extensions. for j in xrange(index + 1, len(processors)): if is_compression_extension(processors[j].name()): processors[j].set_active(False) if len(accepted_extensions) > 0: self._request.ws_extensions = accepted_extensions self._logger.debug( 'Extensions accepted: %r', map(common.ExtensionParameter.name, accepted_extensions)) else: self._request.ws_extensions = None self._request.ws_stream = self._create_stream(stream_options) if self._request.ws_requested_protocols is not None: if self._request.ws_protocol is None: raise HandshakeException( 'do_extra_handshake must choose one subprotocol from ' 'ws_requested_protocols and set it to ws_protocol') validate_subprotocol(self._request.ws_protocol) self._logger.debug( 'Subprotocol accepted: %r', self._request.ws_protocol) else: if self._request.ws_protocol is not None: raise HandshakeException( 'ws_protocol must be None when the client didn\'t ' 'request any subprotocol') self._send_handshake(accept) except HandshakeException, e: if not e.status: # Fallback to 400 bad request by default. 
e.status = common.HTTP_STATUS_BAD_REQUEST raise e def _get_origin(self): if self._request.ws_version is _VERSION_HYBI08: origin_header = common.SEC_WEBSOCKET_ORIGIN_HEADER else: origin_header = common.ORIGIN_HEADER origin = self._request.headers_in.get(origin_header) if origin is None: self._logger.debug('Client request does not have origin header') self._request.ws_origin = origin def _check_version(self): version = get_mandatory_header(self._request, common.SEC_WEBSOCKET_VERSION_HEADER) if version == _VERSION_HYBI08_STRING: return _VERSION_HYBI08 if version == _VERSION_LATEST_STRING: return _VERSION_LATEST if version.find(',') >= 0: raise HandshakeException( 'Multiple versions (%r) are not allowed for header %s' % (version, common.SEC_WEBSOCKET_VERSION_HEADER), status=common.HTTP_STATUS_BAD_REQUEST) raise VersionException( 'Unsupported version %r for header %s' % (version, common.SEC_WEBSOCKET_VERSION_HEADER), supported_versions=', '.join(map(str, _SUPPORTED_VERSIONS))) def _set_protocol(self): self._request.ws_protocol = None protocol_header = self._request.headers_in.get( common.SEC_WEBSOCKET_PROTOCOL_HEADER) if protocol_header is None: self._request.ws_requested_protocols = None return self._request.ws_requested_protocols = parse_token_list( protocol_header) self._logger.debug('Subprotocols requested: %r', self._request.ws_requested_protocols) def _parse_extensions(self): extensions_header = self._request.headers_in.get( common.SEC_WEBSOCKET_EXTENSIONS_HEADER) if not extensions_header: self._request.ws_requested_extensions = None return if self._request.ws_version is common.VERSION_HYBI08: allow_quoted_string=False else: allow_quoted_string=True try: self._request.ws_requested_extensions = common.parse_extensions( extensions_header, allow_quoted_string=allow_quoted_string) except common.ExtensionParsingException, e: raise HandshakeException( 'Failed to parse Sec-WebSocket-Extensions header: %r' % e) self._logger.debug( 'Extensions requested: %r', map(common.ExtensionParameter.name, self._request.ws_requested_extensions)) def _validate_key(self, key): if key.find(',') >= 0: raise HandshakeException('Request has multiple %s header lines or ' 'contains illegal character \',\': %r' % (common.SEC_WEBSOCKET_KEY_HEADER, key)) # Validate key_is_valid = False try: # Validate key by quick regex match before parsing by base64 # module. Because base64 module skips invalid characters, we have # to do this in advance to make this server strictly reject illegal # keys. 
if _SEC_WEBSOCKET_KEY_REGEX.match(key): decoded_key = base64.b64decode(key) if len(decoded_key) == 16: key_is_valid = True except TypeError, e: pass if not key_is_valid: raise HandshakeException( 'Illegal value for header %s: %r' % (common.SEC_WEBSOCKET_KEY_HEADER, key)) return decoded_key def _get_key(self): key = get_mandatory_header( self._request, common.SEC_WEBSOCKET_KEY_HEADER) decoded_key = self._validate_key(key) self._logger.debug( '%s: %r (%s)', common.SEC_WEBSOCKET_KEY_HEADER, key, util.hexify(decoded_key)) return key def _create_stream(self, stream_options): return Stream(self._request, stream_options) def _create_handshake_response(self, accept): response = [] response.append('HTTP/1.1 101 Switching Protocols\r\n') # WebSocket headers response.append(format_header( common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE)) response.append(format_header( common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE)) response.append(format_header( common.SEC_WEBSOCKET_ACCEPT_HEADER, accept)) if self._request.ws_protocol is not None: response.append(format_header( common.SEC_WEBSOCKET_PROTOCOL_HEADER, self._request.ws_protocol)) if (self._request.ws_extensions is not None and len(self._request.ws_extensions) != 0): response.append(format_header( common.SEC_WEBSOCKET_EXTENSIONS_HEADER, common.format_extensions(self._request.ws_extensions))) # Headers not specific for WebSocket for name, value in self._request.extra_headers: response.append(format_header(name, value)) response.append('\r\n') return ''.join(response) def _send_handshake(self, accept): raw_response = self._create_handshake_response(accept) self._request.connection.write(raw_response) self._logger.debug('Sent server\'s opening handshake: %r', raw_response) # vi:sts=4 sw=4 et
mpl-2.0
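compute_accept above implements the RFC 6455 key/accept exchange: SHA-1 over the client's Sec-WebSocket-Key concatenated with a fixed GUID, then base64. A self-contained sketch with the test vector from RFC 6455 section 1.3; the GUID below is the spec constant that common.WEBSOCKET_ACCEPT_UUID is expected to hold:

import base64
import hashlib

WEBSOCKET_ACCEPT_UUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'

def compute_accept(key):
    # SHA-1 of key + GUID, base64-encoded, per RFC 6455.
    return base64.b64encode(hashlib.sha1(key + WEBSOCKET_ACCEPT_UUID).digest())

# Test vector from RFC 6455 section 1.3:
assert (compute_accept('dGhlIHNhbXBsZSBub25jZQ==') ==
        's3pPLMBiTxaQ9kYGzzhZRbK+xOo=')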
openstack/nova
nova/virt/storage_users.py
9
3738
# Copyright 2012 Michael Still and Canonical Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import time

from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils

from nova import utils

LOG = logging.getLogger(__name__)

CONF = cfg.CONF

TWENTY_FOUR_HOURS = 3600 * 24


# NOTE(morganfainberg): Due to circular import dependencies, the use of the
# CONF.instances_path needs to be wrapped so that it can be resolved at the
# appropriate time. Because compute.manager imports this file, we end up in
# a rather ugly dependency loop without moving this into a wrapped function.
# This issue mostly stems from the use of a decorator for the lock
# synchronize and the implications of how decorators wrap the wrapped function
# or method. If this needs to be used outside of compute.manager, it should
# be refactored to eliminate this circular dependency loop.
# config option import is avoided here since it is
# explicitly imported from compute.manager and may cause issues with
# defining options after config has been processed with the
# wrapped-function style used here.


def register_storage_use(storage_path, hostname):
    """Identify the id of this instance storage."""

    LOCK_PATH = os.path.join(CONF.instances_path, 'locks')

    @utils.synchronized('storage-registry-lock', external=True,
                        lock_path=LOCK_PATH)
    def do_register_storage_use(storage_path, hostname):
        # NOTE(mikal): this is required to determine if the instance storage
        # is shared, which is something that the image cache manager needs to
        # know. I can imagine other uses as well though.
        d = {}
        id_path = os.path.join(storage_path, 'compute_nodes')
        if os.path.exists(id_path):
            with open(id_path) as f:
                try:
                    d = jsonutils.loads(f.read())
                except ValueError:
                    LOG.warning("Cannot decode JSON from %(id_path)s",
                                {"id_path": id_path})

        d[hostname] = time.time()

        with open(id_path, 'w') as f:
            f.write(jsonutils.dumps(d))

    return do_register_storage_use(storage_path, hostname)


def get_storage_users(storage_path):
    """Get a list of all the users of this storage path."""
    # See comments above method register_storage_use

    LOCK_PATH = os.path.join(CONF.instances_path, 'locks')

    @utils.synchronized('storage-registry-lock', external=True,
                        lock_path=LOCK_PATH)
    def do_get_storage_users(storage_path):
        d = {}
        id_path = os.path.join(storage_path, 'compute_nodes')
        if os.path.exists(id_path):
            with open(id_path) as f:
                try:
                    d = jsonutils.loads(f.read())
                except ValueError:
                    LOG.warning("Cannot decode JSON from %(id_path)s",
                                {"id_path": id_path})

        recent_users = []
        for node in d:
            if time.time() - d[node] < TWENTY_FOUR_HOURS:
                recent_users.append(node)

        return recent_users

    return do_get_storage_users(storage_path)
apache-2.0
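The compute_nodes registry written above is a flat JSON object mapping hostname to a last-touched timestamp, and get_storage_users keeps only entries newer than 24 hours. A minimal sketch of that filter, independent of nova (the registry contents mirror what register_storage_use dumps):

import time

TWENTY_FOUR_HOURS = 3600 * 24

# Example registry, as register_storage_use would have written it:
registry = {'node1': time.time(),
            'node2': time.time() - 2 * TWENTY_FOUR_HOURS}

recent = [node for node, seen in registry.items()
          if time.time() - seen < TWENTY_FOUR_HOURS]
print(recent)  # only 'node1' survives the 24h window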
google-code/android-scripting
python/src/Demo/pysvr/pysvr.py
51
3430
#! /usr/bin/env python

"""A multi-threaded telnet-like server that gives a Python prompt.

This is really a prototype for the same thing in C.

Usage: pysvr.py [port]

For security reasons, it only accepts requests from the current host.
This can still be insecure, but restricts violations from people who
can log in on your machine.  Use with caution!

"""

import sys, os, string, getopt, thread, socket, traceback

PORT = 4000                             # Default port


def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], "")
        if len(args) > 1:
            raise getopt.error, "Too many arguments."
    except getopt.error, msg:
        usage(msg)
    for o, a in opts:
        pass
    if args:
        try:
            port = string.atoi(args[0])
        except ValueError, msg:
            usage(msg)
    else:
        port = PORT
    main_thread(port)


def usage(msg=None):
    sys.stdout = sys.stderr
    if msg:
        print msg
    print "\n", __doc__,
    sys.exit(2)


def main_thread(port):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("", port))
    sock.listen(5)
    print "Listening on port", port, "..."
    while 1:
        (conn, addr) = sock.accept()
        if addr[0] != conn.getsockname()[0]:
            conn.close()
            print "Refusing connection from non-local host", addr[0], "."
            continue
        thread.start_new_thread(service_thread, (conn, addr))
        del conn, addr


def service_thread(conn, addr):
    (caddr, cport) = addr
    print "Thread %s has connection from %s.\n" % (str(thread.get_ident()),
                                                   caddr),
    stdin = conn.makefile("r")
    stdout = conn.makefile("w", 0)
    run_interpreter(stdin, stdout)
    print "Thread %s is done.\n" % str(thread.get_ident()),


def run_interpreter(stdin, stdout):
    globals = {}
    try:
        str(sys.ps1)
    except:
        sys.ps1 = ">>> "
    source = ""
    while 1:
        stdout.write(sys.ps1)
        line = stdin.readline()
        if line[:2] == '\377\354':
            line = ""
        if not line and not source:
            break
        if line[-2:] == '\r\n':
            line = line[:-2] + '\n'
        source = source + line
        try:
            code = compile_command(source)
        except SyntaxError, err:
            source = ""
            traceback.print_exception(SyntaxError, err, None, file=stdout)
            continue
        if not code:
            continue
        source = ""
        try:
            run_command(code, stdin, stdout, globals)
        except SystemExit, how:
            if how:
                try:
                    how = str(how)
                except:
                    how = ""
            stdout.write("Exit %s\n" % how)
            break
    stdout.write("\nGoodbye.\n")


def run_command(code, stdin, stdout, globals):
    save = sys.stdin, sys.stdout, sys.stderr
    try:
        sys.stdout = sys.stderr = stdout
        sys.stdin = stdin
        try:
            exec code in globals
        except SystemExit, how:
            raise SystemExit, how, sys.exc_info()[2]
        except:
            type, value, tb = sys.exc_info()
            if tb:
                tb = tb.tb_next
            traceback.print_exception(type, value, tb)
            del tb
    finally:
        sys.stdin, sys.stdout, sys.stderr = save


from code import compile_command

main()
apache-2.0
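The local-only policy in main_thread rests on a single check: the peer's address must equal the address the client reached this host on. A minimal sketch of that accept loop with a stub handler; the handler is hypothetical, where pysvr's real one spawns the interpreter:

import socket
import thread

def serve(port):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("", port))
    sock.listen(5)
    while 1:
        conn, addr = sock.accept()
        # Same test as pysvr: refuse peers that are not this host.
        if addr[0] != conn.getsockname()[0]:
            conn.close()
            continue
        thread.start_new_thread(handle, (conn,))

def handle(conn):
    conn.sendall("hello from local-only server\n")
    conn.close()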
aparrish/cashclones
generate.py
1
10326
import rdflib from rdflib import URIRef from rdflib.namespace import RDFS from jinja2 import Template import random import urllib import json import time import re from altuniverse import alternate_universe def get_random_class(g): return random.choice(list(g.subjects(RDFS.subClassOf, None))) def get_label_string(g, thing): return g.preferredLabel(thing, lang="en")[0][1] def get_property(subj, prop): query = """ SELECT ?prop WHERE { <%s> %s ?prop } """ % (subj, prop) qstr = urllib.urlencode( {'query': query, 'output': 'json', 'default-graph-uri': 'http://dbpedia.org'}) resp = urllib.urlopen("http://dbpedia.org/sparql?" + qstr) obj = json.loads(resp.read()) if len(obj['results']['bindings']) > 0: return obj['results']['bindings'][0]['prop']['value'] else: return None def schema_convert(url, val): from dateutil.parser import parse months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] if url == "http://www.w3.org/2001/XMLSchema#date": val = re.sub("\+.*$", "", val) dt = parse(val) retval = "%s %d, %d" % (months[dt.month-1], dt.day, dt.year) elif url == "http://www.w3.org/2001/XMLSchema#gYear": val = re.sub("\+.*$", "", val) dt = parse(val) retval = str(dt.year) elif url == "http://www.w3.org/2001/XMLSchema#gMonthDay": val = re.sub("\+.*$", "", val) dt = parse(val) retval = "%s %d" % (months[dt.month-1], dt.day) else: retval = val return retval def get_random_property(subj): filter_terms = ["ID","id","Id","image","Image","gray","dorlands","wiki", "lat","long","color","info","Info","homepage","map","Map", "updated","Updated","logo","Logo","pushpin","label","Label", "photo","Photo"] query = """ SELECT ?property ?propertyLabel ?propertyVal ?propertyValLabel WHERE { <%s> ?property ?propertyVal. ?property rdfs:label ?propertyLabel. FILTER(lang(?propertyLabel) = "en"). OPTIONAL { ?propertyVal rdfs:label ?propertyValLabel. FILTER(lang(?propertyValLabel) = "en"). } FILTER(regex(?property, "..")). FILTER(!regex(?property, "(%s)")). FILTER(?property != <http://dbpedia.org/ontology/wikiPageRevisionID>). FILTER(?property != <http://dbpedia.org/ontology/wikiPageID>). FILTER(?property != <http://dbpedia.org/ontology/abstract>). FILTER(?property != <http://dbpedia.org/ontology/wikiPageExternalLink>). FILTER(?property != <http://dbpedia.org/ontology/filename>). FILTER(?property != <http://dbpedia.org/property/imageSize>). FILTER(?property != <http://dbpedia.org/property/imagesize>). FILTER(?property != <http://dbpedia.org/property/logoImage>). FILTER(?property != <http://dbpedia.org/property/webpage>). FILTER(?property != <http://dbpedia.org/property/name>). FILTER(?property != <http://dbpedia.org/property/image>). FILTER(?property != <http://dbpedia.org/ontology/thumbnail>). FILTER(?property != <http://dbpedia.org/property/graypage>). FILTER(?property != <http://dbpedia.org/ontology/grayPage>). FILTER(?property != <http://dbpedia.org/property/imageCaption>). FILTER(?property != <http://dbpedia.org/property/id>). FILTER(?property != <http://dbpedia.org/property/photo>). FILTER(?property != <http://dbpedia.org/property/caption>). FILTER(?property != <http://dbpedia.org/ontology/graySubject>). FILTER(?property != <http://dbpedia.org/property/graysubject>). FILTER(?property != <http://dbpedia.org/property/website>). FILTER(?property != <http://dbpedia.org/property/imageName>). FILTER(?property != <http://dbpedia.org/ontology/dorlandsSuffix>). FILTER(?property != <http://dbpedia.org/property/dorlandssuf>). FILTER(?property != <http://dbpedia.org/property/signature>). 
FILTER(?property != <http://dbpedia.org/ontology/viafId>). FILTER(?property != <http://dbpedia.org/property/pixels>). FILTER(?property != <http://dbpedia.org/property/mapCaption>). FILTER(?property != <http://dbpedia.org/property/picture>). FILTER(?property != <http://dbpedia.org/property/imageFlag>). FILTER(?property != <http://dbpedia.org/property/neurolexid>). FILTER(?property != <http://dbpedia.org/property/gnd>). FILTER(?property != <http://dbpedia.org/ontology/dorlandsPrefix>). FILTER(?property != <http://dbpedia.org/property/dorlandspre>). FILTER(?property != <http://dbpedia.org/property/imageWidth>). FILTER(?property != <http://dbpedia.org/property/verifiedrevid>). } """ % (subj, '|'.join(filter_terms)) qstr = urllib.urlencode({'query': query, 'output': 'json', 'default-graph-uri': 'http://dbpedia.org'}) resp = urllib.urlopen("http://dbpedia.org/sparql?" + qstr) obj = json.loads(resp.read()) properties = dict() for prop in obj['results']['bindings']: purl = prop['property']['value'] plabel = prop['propertyLabel']['value'] if 'propertyValLabel' in prop: pval = prop['propertyValLabel']['value'] else: pval = schema_convert(prop['propertyVal'].get('datatype', ''), prop['propertyVal']['value']) if pval.startswith("List of"): continue if plabel not in properties: properties[(purl, plabel)] = set() properties[(purl, plabel)].add(pval) chosen = random.choice(properties.items()) return {'url': chosen[0][0], 'label': chosen[0][1], 'value': random.choice(list(chosen[1]))} def get_random_neighboring_property(subj, prop): query = """ SELECT DISTINCT ?t ?tlabel WHERE { <%s> <http://purl.org/dc/terms/subject> ?val. ?s ?prop ?val. ?s <%s> ?t. OPTIONAL { ?t rdfs:label ?tlabel. FILTER(lang(?tlabel) = "en"). } } limit 1000""" % (subj, prop) qstr = urllib.urlencode({'query': query, 'output': 'json', 'default-graph-uri': 'http://dbpedia.org'}) resp = urllib.urlopen("http://dbpedia.org/sparql?" + qstr) obj = json.loads(resp.read()) alternates = list() for prop in obj['results']['bindings']: if 'tlabel' in prop: if prop['tlabel']['value'].startswith("List of"): continue alternates.append(prop['tlabel']['value']) else: val = schema_convert(prop['t'].get('datatype', ''), prop['t']['value']) alternates.append(val) return random.choice(alternates) cache = {} def get_subject_count(class_): if class_ in cache: return cache[class_] query = """ SELECT count(*) WHERE { ?subject rdf:type <%s>. ?subject rdfs:label ?label. ?subject foaf:name ?name. FILTER(lang(?label) = "en"). } """ % class_ qstr = urllib.urlencode({'query': query, 'output': 'json', 'default-graph-uri': 'http://dbpedia.org'}) resp = urllib.urlopen("http://dbpedia.org/sparql?" + qstr) obj = json.loads(resp.read()) result = int(obj['results']['bindings'][0]['callret-0']['value']) cache[class_] = result return result def get_random_subject(class_, count): query = """ SELECT * WHERE { ?subject rdf:type <%s>. ?subject rdfs:label ?label. ?subject foaf:name ?name. FILTER(lang(?label) = "en"). FILTER(!STRSTARTS(?label, "List of")). FILTER EXISTS {?subject foaf:depiction ?url} } offset %d limit 1""" % (class_, random.randrange(count)) qstr = urllib.urlencode({'query': query, 'output': 'json', 'default-graph-uri': 'http://dbpedia.org'}) resp = urllib.urlopen("http://dbpedia.org/sparql?" 
+ qstr) obj = json.loads(resp.read()) info = dict([(k, v['value']) for k, v \ in obj['results']['bindings'][0].iteritems() \ if not(k.startswith("List of"))]) return info def get_random_resource(g): while True: class_ = get_random_class(g) class_str = get_label_string(g, class_) count = get_subject_count(class_) if count > 0: try: return get_random_subject(class_, count) except IndexError as e: continue def get_subj_from_wikilink(href): query = """ SELECT * WHERE { ?subject rdfs:label ?label. ?subject foaf:isPrimaryTopicOf <%s>. FILTER(lang(?label) = "en"). } """ % href qstr = urllib.urlencode({'query': query, 'output': 'json', 'default-graph-uri': 'http://dbpedia.org'}) resp = urllib.urlopen("http://dbpedia.org/sparql?" + qstr) obj = json.loads(resp.read()) try: info = dict([(k, v['value']) for k, v \ in obj['results']['bindings'][0].iteritems() \ if not(k.startswith("List of"))]) except IndexError: return None return info def generate(subj=None): g = rdflib.Graph() g.parse("dbpedia_3.9.owl") while True: if subj is None: subj = get_random_resource(g) try: prop_dict = get_random_property(subj['subject']) alt_prop = get_random_neighboring_property(subj['subject'], prop_dict['url']).strip() except IndexError as e: continue real_prop = prop_dict['value'].strip() if real_prop.startswith('http') or alt_prop.startswith('http'): continue if real_prop.lower() == alt_prop.lower(): continue output = alternate_universe(subj['label'], prop_dict['label'], real_prop, alt_prop) output = output.replace("\n", "") if len(output) <= 115: return output + " " + get_property(subj['subject'], 'foaf:isPrimaryTopicOf') elif len(output) <= 140: return output if __name__ == '__main__': pool = [s.strip() for s in open("pool.txt").readlines()] while True: print "---" if random.randrange(4) > 0: subj = get_subj_from_wikilink('http://en.wikipedia.org' + random.choice(pool)) print generate(subj) else: print generate() time.sleep(1)
mit
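Every query helper above repeats the same pattern: urlencode a SPARQL SELECT with output=json against the DBpedia endpoint, then read results['bindings'] from the response. Factored out as one sketch, using the same endpoint and parameters as the code above; the example query is illustrative:

import json
import urllib

def dbpedia_select(query):
    qstr = urllib.urlencode({'query': query,
                             'output': 'json',
                             'default-graph-uri': 'http://dbpedia.org'})
    resp = urllib.urlopen("http://dbpedia.org/sparql?" + qstr)
    return json.loads(resp.read())['results']['bindings']

rows = dbpedia_select('SELECT ?s ?label WHERE { ?s rdfs:label ?label. '
                      'FILTER(lang(?label) = "en") } LIMIT 5')
for row in rows:
    print(row['label']['value'])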
PerilousApricot/bigcouch
couchjs/scons/scons-local-2.0.1/SCons/Tool/wix.py
61
3563
"""SCons.Tool.wix Tool-specific initialization for wix, the Windows Installer XML Tool. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/wix.py 5134 2010/08/16 23:02:40 bdeegan" import SCons.Builder import SCons.Action import os def generate(env): """Add Builders and construction variables for WiX to an Environment.""" if not exists(env): return env['WIXCANDLEFLAGS'] = ['-nologo'] env['WIXCANDLEINCLUDE'] = [] env['WIXCANDLECOM'] = '$WIXCANDLE $WIXCANDLEFLAGS -I $WIXCANDLEINCLUDE -o ${TARGET} ${SOURCE}' env['WIXLIGHTFLAGS'].append( '-nologo' ) env['WIXLIGHTCOM'] = "$WIXLIGHT $WIXLIGHTFLAGS -out ${TARGET} ${SOURCES}" object_builder = SCons.Builder.Builder( action = '$WIXCANDLECOM', suffix = '.wxiobj', src_suffix = '.wxs') linker_builder = SCons.Builder.Builder( action = '$WIXLIGHTCOM', src_suffix = '.wxiobj', src_builder = object_builder) env['BUILDERS']['WiX'] = linker_builder def exists(env): env['WIXCANDLE'] = 'candle.exe' env['WIXLIGHT'] = 'light.exe' # try to find the candle.exe and light.exe tools and # add the install directory to light libpath. #for path in os.environ['PATH'].split(os.pathsep): for path in os.environ['PATH'].split(os.pathsep): if not path: continue # workaround for some weird python win32 bug. if path[0] == '"' and path[-1:]=='"': path = path[1:-1] # normalize the path path = os.path.normpath(path) # search for the tools in the PATH environment variable try: if env['WIXCANDLE'] in os.listdir(path) and\ env['WIXLIGHT'] in os.listdir(path): env.PrependENVPath('PATH', path) env['WIXLIGHTFLAGS'] = [ os.path.join( path, 'wixui.wixlib' ), '-loc', os.path.join( path, 'WixUI_en-us.wxl' ) ] return 1 except OSError: pass # ignore this, could be a stale PATH entry. return None # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
apache-2.0
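Once the tool is loaded, the registered 'WiX' builder chains both steps: candle compiles .wxs sources into .wxiobj objects through the object_builder registered as src_builder, and light links the result into the target. A hypothetical SConstruct using it, with illustrative file names (Environment is provided by SCons in an SConstruct):

# SConstruct (sketch)
env = Environment(tools=['default', 'wix'])

# candle.exe turns installer.wxs into installer.wxiobj, then
# light.exe links the object into the .msi target.
env.WiX('installer.msi', ['installer.wxs'])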
lizardsystem/lizard-wbconfiguration
lizard_wbconfiguration/migrations/0003_auto__del_field_dbfconfiguration_filepath__add_field_dbfconfiguration_.py
1
35057
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):

        # Deleting field 'DBFConfiguration.filepath'
        db.delete_column('lizard_wbconfiguration_dbfconfiguration', 'filepath')

        # Adding field 'DBFConfiguration.owner'
        db.add_column('lizard_wbconfiguration_dbfconfiguration', 'owner',
                      self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True),
                      keep_default=False)

        # Adding field 'DBFConfiguration.save_to'
        db.add_column('lizard_wbconfiguration_dbfconfiguration', 'save_to',
                      self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):

        # Adding field 'DBFConfiguration.filepath'
        db.add_column('lizard_wbconfiguration_dbfconfiguration', 'filepath',
                      self.gf('django.db.models.fields.files.FileField')(default=datetime.date(2011, 12, 8), max_length=100),
                      keep_default=False)

        # Deleting field 'DBFConfiguration.owner'
        db.delete_column('lizard_wbconfiguration_dbfconfiguration', 'owner')

        # Deleting field 'DBFConfiguration.save_to'
        db.delete_column('lizard_wbconfiguration_dbfconfiguration', 'save_to')

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'lizard_area.area': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Area', '_ormbases': ['lizard_area.Communique']},
            'area_class': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'communique_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_area.Communique']", 'unique': 'True', 'primary_key': 'True'}),
            'data_administrator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.DataAdministrator']"}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'})
        },
        'lizard_area.areacode': {
            'Meta': {'object_name': 'AreaCode'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        },
        'lizard_area.areatype': {
            'Meta': {'object_name': 'AreaType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        },
        'lizard_area.basin': {
            'Meta': {'object_name': 'Basin'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        },
        'lizard_area.communique': {
            'Meta': {'object_name': 'Communique', '_ormbases': ['lizard_geo.GeoObject']},
            'area_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.AreaType']", 'null': 'True', 'blank': 'True'}),
            'basin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Basin']", 'null': 'True', 'blank': 'True'}),
            'code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.AreaCode']", 'null': 'True', 'blank': 'True'}),
            'geoobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_geo.GeoObject']", 'unique': 'True', 'primary_key': 'True'}),
            'municipality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Municipality']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'province': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Province']", 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Status']", 'null': 'True', 'blank': 'True'}),
            'watermanagementarea': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.WaterManagementArea']", 'null': 'True', 'blank': 'True'})
        },
        'lizard_area.dataadministrator': {
            'Meta': {'object_name': 'DataAdministrator'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        },
        'lizard_area.municipality': {
            'Meta': {'object_name': 'Municipality'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        },
        'lizard_area.province': {
            'Meta': {'object_name': 'Province'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        },
        'lizard_area.status': {
            'Meta': {'object_name': 'Status'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        },
        'lizard_area.watermanagementarea': {
            'Meta': {'object_name': 'WaterManagementArea'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        },
        'lizard_fewsnorm.fewsnormsource': {
            'Meta': {'object_name': 'FewsNormSource'},
            'database_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
        },
        'lizard_fewsnorm.geolocationcache': {
            'Meta': {'ordering': "('ident', 'name')", 'object_name': 'GeoLocationCache', '_ormbases': ['lizard_geo.GeoObject']},
            'fews_norm_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_fewsnorm.FewsNormSource']"}),
            'geoobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_geo.GeoObject']", 'unique': 'True', 'primary_key': 'True'}),
            'icon': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'module': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_fewsnorm.ModuleCache']", 'null': 'True', 'through': "orm['lizard_fewsnorm.TimeSeriesCache']", 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_fewsnorm.ParameterCache']", 'null': 'True', 'through': "orm['lizard_fewsnorm.TimeSeriesCache']", 'blank': 'True'}),
            'shortname': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'timestep': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_fewsnorm.TimeStepCache']", 'null': 'True', 'through': "orm['lizard_fewsnorm.TimeSeriesCache']", 'blank': 'True'}),
            'tooltip': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'lizard_fewsnorm.modulecache': {
            'Meta': {'ordering': "('ident',)", 'object_name': 'ModuleCache'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'lizard_fewsnorm.parametercache': {
            'Meta': {'ordering': "('ident',)", 'object_name': 'ParameterCache'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'lizard_fewsnorm.timeseriescache': {
            'Meta': {'object_name': 'TimeSeriesCache'},
            'geolocationcache': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_fewsnorm.GeoLocationCache']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modulecache': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_fewsnorm.ModuleCache']"}),
            'parametercache': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_fewsnorm.ParameterCache']"}),
            'timestepcache': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_fewsnorm.TimeStepCache']"})
        },
        'lizard_fewsnorm.timestepcache': {
            'Meta': {'ordering': "('ident',)", 'object_name': 'TimeStepCache'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'lizard_geo.geoobject': {
            'Meta': {'object_name': 'GeoObject'},
            'geo_object_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_geo.GeoObjectGroup']"}),
            'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '80'})
        },
        'lizard_geo.geoobjectgroup': {
            'Meta': {'object_name': 'GeoObjectGroup'},
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'source_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'lizard_wbconfiguration.areaconfiguration': {
            'Meta': {'object_name': 'AreaConfiguration'},
            'area': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_area.Area']", 'unique': 'True'}),
            'bottom_height': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'concentr_chloride_precipitation': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'concentr_chloride_seepage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'herfstp': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'incr_concentr_nitrogyn_precipitation': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'incr_concentr_nitrogyn_seepage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'incr_concentr_phosphate_precipitation': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'incr_concentr_phosphate_seepage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'ini_con_cl': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'init_water_level': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'kwel': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'kwel_is_ts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'lentep': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'marge_bov': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'marge_ond': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'max_intake': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'max_outtake': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'min_concentr_nitrogyn_precipitation': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'min_concentr_nitrogyn_seepage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'min_concentr_phopshate_seepage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'min_concentr_phosphate_precipitation': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'nutc_inc_1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'nutc_inc_2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'nutc_inc_3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'nutc_inc_4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'nutc_min_1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'nutc_min_2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'nutc_min_3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'nutc_min_4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'peilh_issp': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'sp_is_ts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'start_dt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'start_hp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'start_lp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'start_wp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'start_zp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'surface': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '1', 'blank': 'True'}),
            'ts_concentr_chloride_1': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_concentr_chloride_1'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
            'ts_concentr_chloride_2': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_concentr_chloride_2'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
            'ts_evaporation': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_evaporation'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
            'ts_kwel': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_kwel'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
            'ts_precipitation': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_precipitation'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
            'ts_sp': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sp'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
            'ts_water_level': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_water_level'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
            'ts_wegz': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_wegz'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
            'wegz': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'wegz_is_ts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'winterp': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'zomerp': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'})
        },
        'lizard_wbconfiguration.areafield': {
            'Meta': {'object_name': 'AreaField'},
            'app_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'code': ('django.db.models.fields.CharField', [], {'max_length': '256', 'primary_key': 'True'}),
            'field_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'model_name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'lizard_wbconfiguration.areagridconfiguration': {
            'Meta': {'object_name': 'AreaGridConfiguration'},
            'app_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'lizard_wbconfiguration.areagridfieldconfiguration': {
            'Meta': {'object_name': 'AreaGridFieldConfiguration'},
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'field_name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_wbconfiguration.AreaField']", 'max_length': '128'}),
            'field_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'grid': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_wbconfiguration.AreaGridConfiguration']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sequence': ('django.db.models.fields.IntegerField', [], {}),
            'ts_parameter': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'lizard_wbconfiguration.bucket': {
            'Meta': {'ordering': "['id']", 'object_name': 'Bucket'},
            'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_wbconfiguration.AreaConfiguration']"}),
            'bottom_crop_evaporation_factor': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'bottom_drainage_fraction': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'bottom_indraft_fraction': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'bottom_init_water_level': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'bottom_max_water_level': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'bottom_min_crop_evaporation_factor': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'bottom_min_water_level': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'bottom_porosity': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'bucket_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_wbconfiguration.BucketsType']", 'null': 'True', 'blank': 'True'}),
            'code': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'concentr_chloride_drainage_indraft': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'concentr_chloride_flow_off': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'crop_evaporation_factor': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'drainage_fraction': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'incr_concentr_nitrogen_drainage_indraft': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'incr_concentr_nitrogen_flow_off': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'incr_concentr_phosphate_drainage_indraft': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'incr_concentr_phosphate_flow_off': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'indraft_fraction': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'init_water_level': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'is_computed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'kwelwegz': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'kwelwegz_is_ts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'label_drainaige_indraft': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'label_flow_off': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'man_water_level': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'min_concentr_nitrogen_drainage_indraft': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'min_concentr_nitrogen_flow_off': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'min_concentr_phosphate_drainage_indraft': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'min_concentr_phosphate_flow_off': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'min_crop_evaporation_factor': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'min_water_level': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'porosity': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'replace_impact_by_nutricalc': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'surface': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '1', 'blank': 'True'}),
            'ts_drainageindraft': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_drainageindraf_bucket'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
            'ts_flowoff': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_flowoff_bucket'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
            'ts_kwelwegz': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_kwelwegz_bucket'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
            'ts_referenceoverflow': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_referenceoverflow_bucket'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"})
        },
        'lizard_wbconfiguration.bucketstype': {
            'Meta': {'object_name': 'BucketsType'},
            'bucket_type': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'lizard_wbconfiguration.dbfconfiguration': {
            'Meta': {'object_name': 'DBFConfiguration'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'owner': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'save_to': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
        },
        'lizard_wbconfiguration.parametermapping': {
            'Meta': {'object_name': 'ParameterMapping'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ident_wbconfiguration': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
            'parametercache': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_fewsnorm.ParameterCache']"})
        },
        'lizard_wbconfiguration.structure': {
            'Meta': {'ordering': "['id']", 'object_name': 'Structure'},
            'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_wbconfiguration.AreaConfiguration']"}),
            'code': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'concentr_chloride': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'deb_is_ts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'deb_wint': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'deb_zomer': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_out': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
            'incr_concentr_nitrogen': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'incr_concentr_phosphate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'is_computed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'min_concentr_nitrogen': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'min_concentr_phosphate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'ts_debiet': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_debiet'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"})
        },
        'lizard_wbconfiguration.wbconfigurationdbfmapping': {
            'Meta': {'ordering': "['id']", 'object_name': 'WBConfigurationDBFMapping'},
            'dbffield_decimals': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'dbffield_length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'dbffield_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'dbffield_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'wbfield_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        }
    }

    complete_apps = ['lizard_wbconfiguration']
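# Illustrative note (not part of the original migration): South schema
# migrations in this series are generated and applied from the manage.py
# command line; a minimal sketch, assuming a standard Django 1.x + South setup:
#
#   python manage.py schemamigration lizard_wbconfiguration --auto
#   python manage.py migrate lizard_wbconfiguration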
gpl-3.0
boomsbloom/dtm-fmri
DTM/for_gensim/lib/python2.7/site-packages/boto/gs/cors.py
153
7717
# Copyright 2012 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

import types
from boto.gs.user import User
from boto.exception import InvalidCorsError
from xml.sax import handler

# Relevant tags for the CORS XML document.
CORS_CONFIG = 'CorsConfig'
CORS = 'Cors'
ORIGINS = 'Origins'
ORIGIN = 'Origin'
METHODS = 'Methods'
METHOD = 'Method'
HEADERS = 'ResponseHeaders'
HEADER = 'ResponseHeader'
MAXAGESEC = 'MaxAgeSec'


class Cors(handler.ContentHandler):
    """Encapsulates the CORS configuration XML document"""

    def __init__(self):
        # List of CORS elements found within a CorsConfig element.
        self.cors = []
        # List of collections (e.g. Methods, ResponseHeaders, Origins)
        # found within a CORS element. We use a list of lists here
        # instead of a dictionary because the collections need to be
        # preserved in the order in which they appear in the input XML
        # document (and Python dictionary keys are inherently unordered).
        # The elements on this list are two element tuples of the form
        # (collection name, [list of collection contents]).
        self.collections = []
        # Lists of elements within a collection. Again a list is needed to
        # preserve ordering but also because the same element may appear
        # multiple times within a collection.
        self.elements = []
        # Dictionary mapping supported collection names to element types
        # which may be contained within each.
        self.legal_collections = {
            ORIGINS: [ORIGIN],
            METHODS: [METHOD],
            HEADERS: [HEADER],
            MAXAGESEC: []
        }
        # List of supported element types within any collection, used for
        # checking validity of a parsed element name.
        self.legal_elements = [ORIGIN, METHOD, HEADER]

        self.parse_level = 0
        self.collection = None
        self.element = None

    def validateParseLevel(self, tag, level):
        """Verify parse level for a given tag."""
        if self.parse_level != level:
            raise InvalidCorsError('Invalid tag %s at parse level %d' %
                                   (tag, self.parse_level))

    def startElement(self, name, attrs, connection):
        """SAX XML logic for parsing a newly found element."""
        if name == CORS_CONFIG:
            self.validateParseLevel(name, 0)
            self.parse_level += 1
        elif name == CORS:
            self.validateParseLevel(name, 1)
            self.parse_level += 1
        elif name in self.legal_collections:
            self.validateParseLevel(name, 2)
            self.parse_level += 1
            self.collection = name
        elif name in self.legal_elements:
            self.validateParseLevel(name, 3)
            # Make sure this tag is found inside a collection tag.
            if self.collection is None:
                raise InvalidCorsError('Tag %s found outside collection' % name)
            # Make sure this tag is allowed for the current collection tag.
            if name not in self.legal_collections[self.collection]:
                raise InvalidCorsError('Tag %s not allowed in %s collection' %
                                       (name, self.collection))
            self.element = name
        else:
            raise InvalidCorsError('Unsupported tag ' + name)

    def endElement(self, name, value, connection):
        """SAX XML logic for parsing the end of an element."""
        if name == CORS_CONFIG:
            self.validateParseLevel(name, 1)
            self.parse_level -= 1
        elif name == CORS:
            self.validateParseLevel(name, 2)
            self.parse_level -= 1
            # Terminating a CORS element, save any collections we found
            # and re-initialize collections list.
            self.cors.append(self.collections)
            self.collections = []
        elif name in self.legal_collections:
            self.validateParseLevel(name, 3)
            if name != self.collection:
                raise InvalidCorsError('Mismatched start and end tags (%s/%s)' %
                                       (self.collection, name))
            self.parse_level -= 1
            if not self.legal_collections[name]:
                # If this collection doesn't contain any sub-elements, store
                # a tuple of name and this tag's element value.
                self.collections.append((name, value.strip()))
            else:
                # Otherwise, we're terminating a collection of sub-elements,
                # so store a tuple of name and list of contained elements.
                self.collections.append((name, self.elements))
                self.elements = []
            self.collection = None
        elif name in self.legal_elements:
            self.validateParseLevel(name, 3)
            # Make sure this tag is found inside a collection tag.
            if self.collection is None:
                raise InvalidCorsError('Tag %s found outside collection' % name)
            # Make sure this end tag is allowed for the current collection tag.
            if name not in self.legal_collections[self.collection]:
                raise InvalidCorsError('Tag %s not allowed in %s collection' %
                                       (name, self.collection))
            if name != self.element:
                raise InvalidCorsError('Mismatched start and end tags (%s/%s)' %
                                       (self.element, name))
            # Terminating an element tag, add it to the list of elements
            # for the current collection.
            self.elements.append((name, value.strip()))
            self.element = None
        else:
            raise InvalidCorsError('Unsupported end tag ' + name)

    def to_xml(self):
        """Convert CORS object into XML string representation."""
        s = '<' + CORS_CONFIG + '>'
        for collections in self.cors:
            s += '<' + CORS + '>'
            for (collection, elements_or_value) in collections:
                assert collection is not None
                s += '<' + collection + '>'
                # If collection elements have type string, append the atomic
                # value, otherwise append the sequence of values in named tags.
                if isinstance(elements_or_value, str):
                    s += elements_or_value
                else:
                    for (name, value) in elements_or_value:
                        assert name is not None
                        assert value is not None
                        s += '<' + name + '>' + value + '</' + name + '>'
                s += '</' + collection + '>'
            s += '</' + CORS + '>'
        s += '</' + CORS_CONFIG + '>'
        return s
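# Illustrative usage (not part of the original module): parsing a CORS document
# with this handler and round-tripping it back to XML. A minimal sketch that
# assumes boto's generic SAX adapter, boto.handler.XmlHandler; the XML payload
# is a made-up example.
if __name__ == '__main__':
    import xml.sax

    import boto.handler

    SAMPLE_CORS_XML = (
        '<CorsConfig><Cors>'
        '<Origins><Origin>http://example.com</Origin></Origins>'
        '<Methods><Method>GET</Method></Methods>'
        '<MaxAgeSec>3600</MaxAgeSec>'
        '</Cors></CorsConfig>')

    cors = Cors()
    xml.sax.parseString(SAMPLE_CORS_XML, boto.handler.XmlHandler(cors, None))
    # The collections come back in document order (Origins, Methods,
    # MaxAgeSec) and to_xml() re-serializes them the same way.
    print(cors.to_xml())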
mit
kapilrastogi/Impala
tests/comparison/discrepancy_searcher.py
2
33095
#!/usr/bin/env impala-python
# Copyright (c) 2014 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

'''This module will run random queries against existing databases and compare the
   results.
'''

from copy import deepcopy
from decimal import Decimal
from itertools import izip
from logging import getLogger
from math import isinf, isnan
from os import getenv, symlink, unlink
from os.path import join as join_path
from random import choice, randint
from string import ascii_lowercase, digits
from subprocess import call
from tempfile import gettempdir
from threading import current_thread, Thread
from time import time

from db_types import BigInt
from db_connection import (
    DbCursor, IMPALA, HIVE, MYSQL, ORACLE, POSTGRESQL)
from model_translator import SqlWriter
from query_flattener import QueryFlattener
from query_generator import QueryGenerator

LOG = getLogger(__name__)


class QueryResultComparator(object):
  '''Used for comparing the results of a Query across two databases'''

  # Used when comparing FLOAT values
  EPSILON = 0.1

  # The DECIMAL values will be rounded before comparison
  DECIMAL_PLACES = 2

  def __init__(self, query_profile, ref_conn, test_conn, query_timeout_seconds,
               flatten_dialect=None):
    '''test/ref_conn arguments should be an instance of DbConnection'''
    ref_cursor = ref_conn.cursor()
    test_cursor = test_conn.cursor()

    self.ref_conn = ref_conn
    self.ref_sql_writer = SqlWriter.create(
        dialect=ref_conn.db_type, nulls_order_asc=query_profile.nulls_order_asc())
    self.test_conn = test_conn
    self.test_sql_writer = SqlWriter.create(dialect=test_conn.db_type)

    self.query_executor = QueryExecutor(
        [ref_cursor, test_cursor],
        [self.ref_sql_writer, self.test_sql_writer],
        query_timeout_seconds=query_timeout_seconds,
        flatten_dialect=flatten_dialect)

  @property
  def ref_db_type(self):
    return self.ref_conn.db_type

  def compare_query_results(self, query):
    '''Execute the query, compare the data, and return a ComparisonResult, which
       summarizes the outcome.
    '''
    comparison_result = ComparisonResult(query, self.ref_db_type)

    (ref_sql, ref_exception, ref_data_set, ref_cursor_description), \
        (test_sql, test_exception, test_data_set, test_cursor_description) = \
        self.query_executor.fetch_query_results(query)

    comparison_result.ref_sql = ref_sql
    comparison_result.test_sql = test_sql

    if ref_exception:
      comparison_result.exception = ref_exception
      error_message = str(ref_exception)
      if 'Year is out of valid range: 1400..10000' in error_message:
        # This comes from Postgresql. Overflow errors will be ignored.
        comparison_result.exception = TypeOverflow(error_message)
      LOG.debug('%s encountered an error running query: %s',
                self.ref_conn.db_type, ref_exception, exc_info=True)
      return comparison_result

    if test_exception:
      # "known errors" will be ignored
      error_message = str(test_exception)
      known_error = None
      if 'Expressions in the ORDER BY clause must not be constant' in error_message \
          or 'Expressions in the PARTITION BY clause must not be consta' in error_message:
        # It's too much work to avoid this bug. Just ignore it if it comes up.
        known_error = KnownError('https://issues.cloudera.org/browse/IMPALA-1354')
      elif 'GROUP BY expression must not contain aggregate functions' in error_message \
          or 'select list expression not produced by aggregation output' in error_message:
        known_error = KnownError('https://issues.cloudera.org/browse/IMPALA-1423')
      elif ('max(' in error_message or 'min(' in error_message) \
          and 'only supported with an UNBOUNDED PRECEDING start bound' in error_message:
        # This analytic isn't supported and ignoring this here is much easier than not
        # generating the query...
        known_error = KnownError('MAX UNBOUNDED PRECISION')
      elif 'IN and/or EXISTS subquery predicates are not supported in binary predicates' \
          in error_message:
        known_error = KnownError('https://issues.cloudera.org/browse/IMPALA-1418')
      elif 'Unsupported predicate with subquery' in error_message:
        known_error = KnownError('https://issues.cloudera.org/browse/IMPALA-1950')
      elif 'RIGHT OUTER JOIN type with no equi-join' in error_message:
        known_error = KnownError('https://issues.cloudera.org/browse/IMPALA-3063')
      elif 'Operation is in ERROR_STATE' in error_message:
        known_error = KnownError('Mem limit exceeded')
      if known_error:
        comparison_result.exception = known_error
      else:
        comparison_result.exception = test_exception
        LOG.debug('%s encountered an error running query: %s',
                  self.test_conn.db_type, test_exception, exc_info=True)
      return comparison_result

    comparison_result.ref_row_count = len(ref_data_set)
    comparison_result.test_row_count = len(test_data_set)
    comparison_result.query_resulted_in_data = (comparison_result.test_row_count > 0
                                                or comparison_result.ref_row_count > 0)
    if comparison_result.ref_row_count != comparison_result.test_row_count:
      return comparison_result

    # Standardize data (round FLOATs) in each column, and sort the data set
    for data_set in (ref_data_set, test_data_set):
      for row_idx, row in enumerate(data_set):
        data_set[row_idx] = []
        for col_idx, col in enumerate(row):
          data_set[row_idx].append(self.standardize_data(
              col, ref_cursor_description[col_idx], test_cursor_description[col_idx]))
      # TODO: If the query has an ORDER BY clause, sorting should only be done within
      #       subsets of rows that have the same order by values.
      data_set.sort(cmp=self.row_sort_cmp)

    found_data = False  # Will be set to True if the result contains non-zero/NULL data
    for row_idx, (ref_row, test_row) in enumerate(izip(ref_data_set, test_data_set)):
      for col_idx, (ref_val, test_val) in enumerate(izip(ref_row, test_row)):
        if ref_val or test_val:  # Ignores zeros, ex "SELECT COUNT(*) ... WHERE FALSE"
          found_data = True
        if self.vals_are_equal(ref_val, test_val):
          continue
        if isinstance(test_val, int) \
            and isinstance(ref_val, (int, float, Decimal)) \
            and abs(ref_val) > BigInt.MAX:
          # Impala will return incorrect results if the val is greater than max BigInt
          comparison_result.exception = KnownError(
              'https://issues.cloudera.org/browse/IMPALA-865')
        elif isinstance(test_val, float) \
            and (isinf(test_val) or isnan(test_val)):
          # In some cases, Impala gives NaNs and Infs instead of NULLs
          comparison_result.exception = KnownError(
              'https://issues.cloudera.org/browse/IMPALA-724')
        comparison_result.ref_row = ref_row
        comparison_result.test_row = test_row
        comparison_result.mismatch_at_row_number = row_idx + 1
        comparison_result.mismatch_at_col_number = col_idx + 1
        return comparison_result

    comparison_result.query_resulted_in_data = found_data

    return comparison_result

  def standardize_data(self, data, ref_col_description, test_col_description):
    '''Return a val that is suitable for comparison.'''
    # For float data we need to round otherwise differences in precision will cause
    # errors.
    if isinstance(data, float):
      return round(data, self.DECIMAL_PLACES)
    if isinstance(data, Decimal):
      if ref_col_description[5] is not None and test_col_description[5] is not None:
        return round(data, min(ref_col_description[5], test_col_description[5]))
    return data

  def row_sort_cmp(self, ref_row, test_row):
    '''Comparison used for sorting.'''
    for ref_val, test_val in izip(ref_row, test_row):
      if ref_val is None and test_val is not None:
        return -1
      if ref_val is not None and test_val is None:
        return 1
      result = cmp(ref_val, test_val)
      if result:
        return result
    return 0

  def vals_are_equal(self, ref, test):
    '''Compares if two values are equal in two cells. Floats are considered equal if
       the difference between them is very small.
    '''
    if ref == test:
      return True
    # For some reason Postgresql will return Decimals when using some aggregate
    # functions such as AVG().
    if isinstance(ref, (float, Decimal)) and isinstance(test, float):
      return self.floats_are_equal(ref, test)
    LOG.debug("Values differ, reference: %s (%s), test: %s (%s)",
              ref, type(ref), test, type(test))
    return False

  def floats_are_equal(self, ref, test):
    '''Compare two floats.'''
    ref = round(ref, self.DECIMAL_PLACES)
    test = round(test, self.DECIMAL_PLACES)
    diff = abs(ref - test)
    if ref * test == 0:
      return diff < self.EPSILON
    result = diff / (abs(ref) + abs(test)) < self.EPSILON
    if not result:
      LOG.debug("Floats differ, diff: %s, |reference|: %s, |test|: %s",
                diff, abs(ref), abs(test))
    return result
class QueryExecutor(object):
  '''Concurrently executes queries'''

  # If the number of rows * cols is greater than this val, then the comparison will
  # be aborted. Raising this value also raises the risk of python being OOM killed. At
  # 10M python would get OOM killed occasionally even on a physical machine with 32GB
  # ram.
  TOO_MUCH_DATA = 1000 * 1000

  def __init__(self, cursors, sql_writers, query_timeout_seconds, flatten_dialect=None):
    '''cursors should be a list of db_connection.Cursors.

       sql_writers should be a list of model_translator.SqlWriters, with translators in
       the same order as cursors in "cursors".
    '''
    self.query_timeout_seconds = query_timeout_seconds
    self.cursors = cursors
    self.sql_writers = sql_writers
    self.query_logs = list()
    # SQL dialect for which the queries should be flattened
    self.flatten_dialect = flatten_dialect

    for cursor in cursors:
      # A list of all queries attempted
      query_log_path = gettempdir() + '/test_query_log_%s_%s.sql' \
          % (cursor.db_type.lower(), time())
      self.query_logs.append(open(query_log_path, 'w'))
      link = gettempdir() + '/test_query_log_%s.sql' % cursor.db_type.lower()
      try:
        unlink(link)
      except OSError as e:
        if not 'No such file' in str(e):
          raise e
      try:
        symlink(query_log_path, link)
      except OSError as e:
        # TODO: Figure out what the error message is where there is a race condition
        #       and ignore it.
        raise e

    # In case the query will be executed as a "CREATE TABLE <name> AS ..." or
    # "CREATE VIEW <name> AS ...", this will be the value of "<name>".
    self._table_or_view_name = None

  def set_impala_query_options(self, cursor):
    opts = """
        SET MEM_LIMIT={mem_limit};
        SET BATCH_SIZE={batch_size};
        SET DISABLE_CODEGEN={disable_codegen};
        SET DISABLE_OUTERMOST_TOPN={disable_outermost_topn};
        SET DISABLE_ROW_RUNTIME_FILTERING={disable_row_runtime_filtering};
        SET DISABLE_STREAMING_PREAGGREGATIONS={disable_streaming_preaggregations};
        SET DISABLE_UNSAFE_SPILLS={disable_unsafe_spills};
        SET EXEC_SINGLE_NODE_ROWS_THRESHOLD={exec_single_node_rows_threshold};
        SET MAX_BLOCK_MGR_MEMORY={max_block_mgr_memory};
        SET MAX_IO_BUFFERS={max_io_buffers};
        SET MAX_SCAN_RANGE_LENGTH={max_scan_range_length};
        SET NUM_NODES={num_nodes};
        SET NUM_SCANNER_THREADS={num_scanner_threads};
        SET OPTIMIZE_PARTITION_KEY_SCANS={optimize_partition_key_scans};
        SET RUNTIME_BLOOM_FILTER_SIZE={runtime_bloom_filter_size};
        SET RUNTIME_FILTER_MODE={runtime_filter_mode};
        SET RUNTIME_FILTER_WAIT_TIME_MS={runtime_filter_wait_time_ms};
        SET SCAN_NODE_CODEGEN_THRESHOLD={scan_node_codegen_threshold}""".format(
            mem_limit=randint(1024 ** 3, 10 * 1024 ** 3),
            batch_size=randint(1, 4096),
            disable_codegen=choice((0, 1)),
            disable_outermost_topn=choice((0, 1)),
            disable_row_runtime_filtering=choice((0, 1)),
            disable_streaming_preaggregations=choice((0, 1)),
            disable_unsafe_spills=choice((0, 1)),
            exec_single_node_rows_threshold=randint(1, 100000000),
            max_block_mgr_memory=randint(1, 100000000),
            max_io_buffers=randint(1, 100000000),
            max_scan_range_length=randint(1, 100000000),
            num_nodes=randint(3, 3),
            num_scanner_threads=randint(1, 100),
            optimize_partition_key_scans=choice((0, 1)),
            random_replica=choice((0, 1)),
            replica_preference=choice(("CACHE_LOCAL", "DISK_LOCAL", "REMOTE")),
            runtime_bloom_filter_size=randint(4096, 16777216),
            runtime_filter_mode=choice(("OFF", "LOCAL", "GLOBAL")),
            runtime_filter_wait_time_ms=randint(1, 100000000),
            scan_node_codegen_threshold=randint(1, 100000000))
    LOG.debug(opts)
    for opt in opts.strip().split(";"):
      cursor.execute(opt)
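  # Illustrative note (not part of the original module): each call above fuzzes
  # one random combination of Impala query options before a query runs, e.g.
  # (made-up values):
  #
  #   SET MEM_LIMIT=2147483648;
  #   SET BATCH_SIZE=977;
  #   SET DISABLE_CODEGEN=1;
  #
  # The full option set is written to the debug log, so a result mismatch can
  # be correlated with the option settings that were active for that query.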
  def fetch_query_results(self, query):
    '''Concurrently execute the query using each cursor and return a list of tuples
       containing the result information for each cursor. The tuple format is
       (<exception or None>, <data set or None>).

       If query_timeout_seconds is reached and the connection is killable then the
       query will be cancelled and the connection reset. Otherwise the query will
       continue to run in the background.

       "query" should be an instance of query.Query.
    '''
    if query.execution != 'RAW':
      self._table_or_view_name = self._create_random_table_name()

    query_threads = list()
    for sql_writer, cursor, log_file \
        in izip(self.sql_writers, self.cursors, self.query_logs):
      if cursor.db_type == IMPALA:
        self.set_impala_query_options(cursor)
      query_thread = Thread(
          target=self._fetch_sql_results,
          args=[query, cursor, sql_writer, log_file],
          name='Query execution thread {0}'.format(current_thread().name))
      query_thread.daemon = True
      query_thread.sql = ''
      query_thread.data_set = None
      query_thread.cursor_description = None
      query_thread.exception = None
      query_thread.start()
      query_threads.append(query_thread)

    end_time = time() + self.query_timeout_seconds
    for query_thread, cursor in izip(query_threads, self.cursors):
      join_time = end_time - time()
      if join_time > 0:
        query_thread.join(join_time)
      if query_thread.is_alive():
        # Kill connection and reconnect to return cursor to initial state.
        if cursor.conn.supports_kill:
          LOG.debug('Attempting to kill connection')
          cursor.conn.kill()
          LOG.debug('Killed connection')
        try:
          # XXX: Sometimes this takes a very long time causing the program to appear to
          #      hang. Maybe this should be done in another thread so a timeout can be
          #      applied?
          cursor.close()
        except Exception as e:
          LOG.info('Error closing cursor: %s', e)
        cursor.reconnect()
        query_thread.exception = QueryTimeout(
            'Query timed out after %s seconds' % self.query_timeout_seconds)

    return [(query_thread.sql,
             query_thread.exception,
             query_thread.data_set,
             query_thread.cursor_description)
            for query_thread in query_threads]
  def _fetch_sql_results(self, query, cursor, sql_writer, log_file):
    '''Execute the query using the cursor and set the result or exception on the local
       thread.
    '''
    try:
      log_file.write('/***** Start Query *****/\n')
      if sql_writer.DIALECT == self.flatten_dialect:
        # Converts the query model for the flattened version of the data. This is for
        # testing of Impala nested types support.
        query = deepcopy(query)
        QueryFlattener().flatten(query)
      if query.execution == 'CREATE_TABLE_AS':
        setup_sql = sql_writer.write_create_table_as(query, self._table_or_view_name)
        query_sql = 'SELECT * FROM ' + self._table_or_view_name
      elif query.execution == 'VIEW':
        setup_sql = sql_writer.write_create_view(query, self._table_or_view_name)
        query_sql = 'SELECT * FROM ' + self._table_or_view_name
      else:
        setup_sql = None
        query_sql = sql_writer.write_query(query)

      if setup_sql:
        LOG.debug("Executing on %s:\n%s", cursor.db_type, setup_sql)
        current_thread().sql = setup_sql + ';\n'
        log_file.write(setup_sql + ';\n')
        log_file.flush()
        cursor.execute(setup_sql)

      LOG.debug("Executing on %s:\n%s", cursor.db_type, query_sql)
      current_thread().sql += query_sql
      log_file.write(query_sql + ';\n')
      log_file.write('/***** End Query *****/\n')
      log_file.flush()
      cursor.execute(query_sql)

      col_count = len(cursor.description)
      batch_size = max(10000 / col_count, 1)
      row_limit = self.TOO_MUCH_DATA / col_count
      data_set = list()
      current_thread().data_set = data_set
      current_thread().cursor_description = cursor.description
      LOG.debug("Fetching results from %s", cursor.db_type)
      while True:
        batch = cursor.fetchmany(batch_size)
        data_set.extend(batch)
        if len(batch) < batch_size:
          if cursor.db_type == IMPALA:
            impala_log = cursor.get_log()
            if 'Expression overflowed, returning NULL' in impala_log:
              raise TypeOverflow('Numeric overflow; data may not match')
          break
        if len(data_set) > row_limit:
          raise DataLimitExceeded('Too much data')
    except Exception as e:
      current_thread().exception = e
    finally:
      if query.execution == 'CREATE_TABLE_AS':
        cursor.drop_table(self._table_or_view_name)
      elif query.execution == 'VIEW':
        cursor.drop_view(self._table_or_view_name)

  def _create_random_table_name(self):
    char_choices = ascii_lowercase
    chars = list()
    for idx in xrange(4):  # will result in ~1M combinations
      if idx == 1:
        char_choices += '_' + digits
      chars.append(choice(char_choices))
    return 'qgen_' + ''.join(chars)
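# Illustrative sketch (not part of the original module): the watchdog pattern
# used by fetch_query_results() above, in miniature -- run the work on a daemon
# thread and stop waiting once a deadline passes. The names are made up.
def _demo_thread_timeout(work, timeout_seconds=1.0):
  worker = Thread(target=work)
  worker.daemon = True
  worker.start()
  worker.join(timeout_seconds)
  return not worker.is_alive()  # True if the work finished before the deadline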
class ComparisonResult(object):
  '''Represents a result.'''

  def __init__(self, query, ref_db_type):
    self.query = query
    self.ref_db_type = ref_db_type
    self.ref_sql = None
    self.test_sql = None
    self.query_resulted_in_data = False
    self.ref_row_count = None
    self.test_row_count = None
    self.mismatch_at_row_number = None
    self.mismatch_at_col_number = None
    self.ref_row = None  # The reference row where the mismatch happened
    self.test_row = None  # The test row where the mismatch happened
    self.exception = None
    self._error_message = None

  @property
  def error(self):
    if not self._error_message:
      if self.exception:
        self._error_message = str(self.exception)
      elif (self.ref_row_count or self.test_row_count) and \
          self.ref_row_count != self.test_row_count:
        self._error_message = 'Row counts do not match: %s Impala rows vs %s %s rows' \
            % (self.test_row_count, self.ref_db_type, self.ref_row_count)
      elif self.mismatch_at_row_number is not None:
        # Write a row like "[a, b, <<c>>, d]" where c is a bad value
        test_row = '[' + ', '.join(
            '<<' + str(val) + '>>' if idx == self.mismatch_at_col_number - 1
            else str(val)
            for idx, val in enumerate(self.test_row)) + ']'
        ref_row = '[' + ', '.join(
            '<<' + str(val) + '>>' if idx == self.mismatch_at_col_number - 1
            else str(val)
            for idx, val in enumerate(self.ref_row)) + ']'
        self._error_message = \
            'Column %s in row %s does not match: %s Impala row vs %s %s row' \
            % (self.mismatch_at_col_number, self.mismatch_at_row_number,
               test_row, ref_row, self.ref_db_type)
    return self._error_message

  @property
  def is_known_error(self):
    return isinstance(self.exception, KnownError)

  @property
  def query_timed_out(self):
    return isinstance(self.exception, QueryTimeout)


QueryTimeout = type('QueryTimeout', (Exception, ), {})
TypeOverflow = type('TypeOverflow', (Exception, ), {})
DataLimitExceeded = type('DataLimitExceeded', (Exception, ), {})


class KnownError(Exception):

  def __init__(self, jira_url):
    Exception.__init__(self, 'Known issue: ' + jira_url)
    self.jira_url = jira_url


class FrontendExceptionSearcher(object):

  def __init__(self, query_profile, ref_conn, test_conn):
    '''query_profile should be an instance of one of the profiles in query_profile.py'''
    self.query_profile = query_profile
    self.ref_conn = ref_conn
    self.test_conn = test_conn
    self.ref_sql_writer = SqlWriter.create(dialect=ref_conn.db_type)
    self.test_sql_writer = SqlWriter.create(dialect=test_conn.db_type)
    with ref_conn.cursor() as ref_cursor:
      with test_conn.cursor() as test_cursor:
        self.common_tables = DbCursor.describe_common_tables([ref_cursor, test_cursor])
        if not self.common_tables:
          raise Exception("Unable to find a common set of tables in both databases")

  def search(self, number_of_test_queries):
    query_generator = QueryGenerator(self.query_profile)

    def on_ref_db_error(e, sql):
      LOG.warn("Error generating explain plan for reference db:\n%s\n%s" % (e, sql))

    def on_test_db_error(e, sql):
      LOG.error("Error generating explain plan for test db:\n%s" % sql)
      raise e

    for idx in xrange(number_of_test_queries):
      LOG.info("Explaining query #%s" % (idx + 1))
      query = query_generator.create_query(self.common_tables)
      if not self._explain_query(self.ref_conn, self.ref_sql_writer, query,
                                 on_ref_db_error):
        continue
      self._explain_query(self.test_conn, self.test_sql_writer, query,
                          on_test_db_error)

  def _explain_query(self, conn, writer, query, exception_handler):
    sql = writer.write_query(query)
    try:
      with conn.cursor() as cursor:
        cursor.execute("EXPLAIN %s" % sql)
      return True
    except Exception as e:
      exception_handler(e, sql)
      return False
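# Illustrative sketch (not part of the original module): the EXPLAIN-only probe
# used by FrontendExceptionSearcher above, reduced to a free function. "cursor"
# is assumed to be any DB-API style cursor; only query planning is exercised,
# never query execution.
def _demo_explain_probe(cursor, sql):
  try:
    cursor.execute("EXPLAIN %s" % sql)
    return True
  except Exception:
    return False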
class QueryResultDiffSearcher(object):
  '''This class uses the query generator (query_generator.py) along with the query
     profile (query_profile.py) to randomly generate queries, then executes the queries
     on the reference and test databases, then compares the results.
  '''

  # Sometimes things get into a bad state and the same error loops forever
  ABORT_ON_REPEAT_ERROR_COUNT = 2

  def __init__(self, query_profile, ref_conn, test_conn):
    '''query_profile should be an instance of one of the profiles in query_profile.py'''
    self.query_profile = query_profile
    self.ref_conn = ref_conn
    self.test_conn = test_conn
    with ref_conn.cursor() as ref_cursor:
      with test_conn.cursor() as test_cursor:
        self.common_tables = DbCursor.describe_common_tables([ref_cursor, test_cursor])
        if not self.common_tables:
          raise Exception("Unable to find a common set of tables in both databases")

  def search(self, number_of_test_queries, stop_on_result_mismatch, stop_on_crash,
             query_timeout_seconds):
    '''Returns an instance of SearchResults, which is a summary report. This method
       oversees the generation, execution, and comparison of queries.

       number_of_test_queries should be an integer indicating the maximum number of
       queries to generate and execute.
    '''
    start_time = time()
    query_result_comparator = QueryResultComparator(
        self.query_profile, self.ref_conn, self.test_conn, query_timeout_seconds)
    query_generator = QueryGenerator(self.query_profile)
    query_count = 0
    queries_resulted_in_data_count = 0
    mismatch_count = 0
    query_timeout_count = 0
    known_error_count = 0
    test_crash_count = 0
    last_error = None
    repeat_error_count = 0
    while number_of_test_queries > query_count:
      query = query_generator.create_query(self.common_tables)
      query.execution = self.query_profile.get_query_execution()
      query_count += 1
      LOG.info('Running query #%s', query_count)
      result = query_result_comparator.compare_query_results(query)
      if result.query_resulted_in_data:
        queries_resulted_in_data_count += 1
      if isinstance(result.exception, DataLimitExceeded) \
          or isinstance(result.exception, TypeOverflow):
        continue
      if result.error:
        # TODO: These first two come from psycopg2, the postgres driver. Maybe we should
        #       try a different driver? Or maybe the usage of the driver isn't correct.
        #       Anyhow ignore these failures.
        if 'division by zero' in result.error \
            or 'out of range' in result.error:
          LOG.debug('Ignoring error: %s', result.error)
          query_count -= 1
          continue
        if result.is_known_error:
          known_error_count += 1
        elif result.query_timed_out:
          query_timeout_count += 1
        else:
          mismatch_count += 1
          print('---Test Query---\n')
          print(result.test_sql + '\n')
          print('---Reference Query---\n')
          print(result.ref_sql + '\n')
          print('---Error---\n')
          print(result.error + '\n')
          print('------\n')
        if 'Could not connect' in result.error \
            or "Couldn't open transport for" in result.error:
          if stop_on_crash:
            break
          # Assume Impala crashed and try restarting
          test_crash_count += 1
          LOG.info('Restarting Impala')
          call([join_path(getenv('IMPALA_HOME'), 'bin/start-impala-cluster.py'),
                '--log_dir=%s' % getenv('LOG_DIR', "/tmp/")])
          self.test_conn.reconnect()
          query_result_comparator.test_cursor = self.test_conn.cursor()
          result = query_result_comparator.compare_query_results(query)
          if result.error:
            LOG.info('Restarting Impala')
            call([join_path(getenv('IMPALA_HOME'), 'bin/start-impala-cluster.py'),
                  '--log_dir=%s' % getenv('LOG_DIR', "/tmp/")])
            self.test_conn.reconnect()
            query_result_comparator.test_cursor = self.test_conn.cursor()
          else:
            break

        if stop_on_result_mismatch and \
            not (result.is_known_error or result.query_timed_out):
          break

        if last_error == result.error \
            and not (result.is_known_error or result.query_timed_out):
          repeat_error_count += 1
          if repeat_error_count == self.ABORT_ON_REPEAT_ERROR_COUNT:
            break
        else:
          last_error = result.error
          repeat_error_count = 0
      else:
        if result.query_resulted_in_data:
          LOG.info('Results matched (%s rows)', result.test_row_count)
        else:
          LOG.info('Query did not produce meaningful data')
        last_error = None
        repeat_error_count = 0

    return SearchResults(
        query_count,
        queries_resulted_in_data_count,
        mismatch_count,
        query_timeout_count,
        known_error_count,
        test_crash_count,
        time() - start_time)
self.test_crash_count = test_crash_count self.run_time_in_seconds = run_time_in_seconds def __str__(self): '''Returns the string representation of the results.''' mins, secs = divmod(self.run_time_in_seconds, 60) hours, mins = divmod(mins, 60) hours = int(hours) mins = int(mins) if hours: run_time = '%s hour and %s minutes' % (hours, mins) else: secs = int(secs) run_time = '%s seconds' % secs if mins: run_time = '%s mins and ' % mins + run_time summary_params = self.__dict__ summary_params['run_time'] = run_time return ( '%(mismatch_count)s mismatches found after running %(query_count)s queries in ' '%(run_time)s.\n' '%(queries_resulted_in_data_count)s of %(query_count)s queries produced results.' '\n' '%(test_crash_count)s crashes occurred.\n' '%(known_error_count)s queries were excluded from the mismatch count because ' 'they are known errors.\n' '%(query_timeout_count)s queries timed out and were excluded from all counts.') \ % summary_params if __name__ == '__main__': import sys from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser import cli_options from query_profile import PROFILES parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) cli_options.add_logging_options(parser) cli_options.add_db_name_option(parser) cli_options.add_cluster_options(parser) cli_options.add_connection_option_groups(parser) cli_options.add_timeout_option(parser) parser.add_argument('--test-db-type', default=IMPALA, choices=(HIVE, IMPALA, MYSQL, ORACLE, POSTGRESQL), help='The type of the test database to use. Ex: IMPALA.') parser.add_argument('--ref-db-type', default=POSTGRESQL, choices=(MYSQL, ORACLE, POSTGRESQL), help='The type of the ref database to use. Ex: POSTGRESQL.') parser.add_argument('--stop-on-mismatch', default=False, action='store_true', help='Exit immediately upon find a discrepancy in a query result.') parser.add_argument('--stop-on-crash', default=False, action='store_true', help='Exit immediately if Impala crashes.') parser.add_argument('--query-count', default=1000000, type=int, help='Exit after running the given number of queries.') parser.add_argument('--exclude-types', default='', help='A comma separated list of data types to exclude while generating queries.') parser.add_argument('--explain-only', action='store_true', help="Don't run the queries only explain them to see if there was an error in " "planning.") profiles = dict() for profile in PROFILES: profile_name = profile.__name__ if profile_name.endswith('Profile'): profile_name = profile_name[:-1 * len('Profile')] profiles[profile_name.lower()] = profile parser.add_argument('--profile', default='default', choices=(sorted(profiles.keys())), help='Determines the mix of SQL features to use during query generation.') # TODO: Seed the random query generator for repeatable queries? args = parser.parse_args() cli_options.configure_logging(args.log_level, debug_log_file=args.debug_log_file) cluster = cli_options.create_cluster(args) ref_conn = cli_options.create_connection(args, args.ref_db_type, db_name=args.db_name) if args.test_db_type == IMPALA: test_conn = cluster.impala.connect(db_name=args.db_name) elif args.test_db_type == HIVE: test_conn = cluster.hive.connect(db_name=args.db_name) else: test_conn = cli_options.create_connection( args, args.test_db_type, db_name=args.db_name) # Create an instance of profile class (e.g. 
DefaultProfile) query_profile = profiles[args.profile]() if args.explain_only: searcher = FrontendExceptionSearcher(query_profile, ref_conn, test_conn) searcher.search(args.query_count) else: diff_searcher = QueryResultDiffSearcher(query_profile, ref_conn, test_conn) query_timeout_seconds = args.timeout search_results = diff_searcher.search( args.query_count, args.stop_on_mismatch, args.stop_on_crash, query_timeout_seconds) print(search_results) sys.exit(search_results.mismatch_count)
apache-2.0
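SearchResults.__str__ in the file above builds its human-readable run time with a cascade of divmod calls. Here is a minimal standalone sketch of that formatting logic, assuming nothing beyond builtins; format_run_time is an illustrative name, not part of the source:

def format_run_time(run_time_in_seconds):
    # Same divmod cascade as SearchResults.__str__: seconds -> (mins, secs),
    # then minutes -> (hours, mins).
    mins, secs = divmod(run_time_in_seconds, 60)
    hours, mins = divmod(mins, 60)
    hours, mins = int(hours), int(mins)
    if hours:
        return '%s hour and %s minutes' % (hours, mins)
    run_time = '%s seconds' % int(secs)
    if mins:
        run_time = '%s mins and ' % mins + run_time
    return run_time

print(format_run_time(3725))  # '1 hour and 2 minutes'
print(format_run_time(95))    # '1 mins and 35 seconds'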
arun6582/django
django/db/models/sql/compiler.py
2
56499
import re from itertools import chain from django.core.exceptions import EmptyResultSet, FieldError from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import OrderBy, Random, RawSQL, Ref from django.db.models.query_utils import QueryWrapper, select_related_descend from django.db.models.sql.constants import ( CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE, ) from django.db.models.sql.query import Query, get_order_dir from django.db.transaction import TransactionManagementError from django.db.utils import DatabaseError FORCE = object() class SQLCompiler: def __init__(self, query, connection, using): self.query = query self.connection = connection self.using = using self.quote_cache = {'*': '*'} # The select, klass_info, and annotations are needed by QuerySet.iterator() # these are set as a side-effect of executing the query. Note that we calculate # separately a list of extra select columns needed for grammatical correctness # of the query, but these columns are not included in self.select. self.select = None self.annotation_col_map = None self.klass_info = None self.ordering_parts = re.compile(r'(.*)\s(ASC|DESC)(.*)') def setup_query(self): if all(self.query.alias_refcount[a] == 0 for a in self.query.tables): self.query.get_initial_alias() self.select, self.klass_info, self.annotation_col_map = self.get_select() self.col_count = len(self.select) def pre_sql_setup(self): """ Do any necessary class setup immediately prior to producing SQL. This is for things that can't necessarily be done in __init__ because we might not have all the pieces in place at that time. """ self.setup_query() order_by = self.get_order_by() self.where, self.having = self.query.where.split_having() extra_select = self.get_extra_select(order_by, self.select) group_by = self.get_group_by(self.select + extra_select, order_by) return extra_select, order_by, group_by def get_group_by(self, select, order_by): """ Return a list of 2-tuples of form (sql, params). The logic of what exactly the GROUP BY clause contains is hard to describe in other words than "if it passes the test suite, then it is correct". """ # Some examples: # SomeModel.objects.annotate(Count('somecol')) # GROUP BY: all fields of the model # # SomeModel.objects.values('name').annotate(Count('somecol')) # GROUP BY: name # # SomeModel.objects.annotate(Count('somecol')).values('name') # GROUP BY: all cols of the model # # SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # SomeModel.objects.values('name').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # In fact, the self.query.group_by is the minimal set to GROUP BY. It # can't be ever restricted to a smaller set, but additional columns in # HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately # the end result is that it is impossible to force the query to have # a chosen GROUP BY clause - you can almost do this by using the form: # .values(*wanted_cols).annotate(AnAggregate()) # but any later annotations, extra selects, values calls that # refer some column outside of the wanted_cols, order_by, or even # filter calls can alter the GROUP BY clause. # The query.group_by is either None (no GROUP BY at all), True # (group by select fields), or a list of expressions to be added # to the group by. 
if self.query.group_by is None: return [] expressions = [] if self.query.group_by is not True: # If the group by is set to a list (by .values() call most likely), # then we need to add everything in it to the GROUP BY clause. # Backwards compatibility hack for setting query.group_by. Remove # when we have public API way of forcing the GROUP BY clause. # Converts string references to expressions. for expr in self.query.group_by: if not hasattr(expr, 'as_sql'): expressions.append(self.query.resolve_ref(expr)) else: expressions.append(expr) # Note that even if the group_by is set, it is only the minimal # set to group by. So, we need to add cols in select, order_by, and # having into the select in any case. for expr, _, _ in select: cols = expr.get_group_by_cols() for col in cols: expressions.append(col) for expr, (sql, params, is_ref) in order_by: if expr.contains_aggregate: continue # We can skip References to select clause, as all expressions in # the select clause are already part of the group by. if is_ref: continue expressions.extend(expr.get_source_expressions()) having_group_by = self.having.get_group_by_cols() if self.having else () for expr in having_group_by: expressions.append(expr) result = [] seen = set() expressions = self.collapse_group_by(expressions, having_group_by) for expr in expressions: sql, params = self.compile(expr) if (sql, tuple(params)) not in seen: result.append((sql, params)) seen.add((sql, tuple(params))) return result def collapse_group_by(self, expressions, having): # If the DB can group by primary key, then group by the primary key of # query's main model. Note that for PostgreSQL the GROUP BY clause must # include the primary key of every table, but for MySQL it is enough to # have the main table's primary key. if self.connection.features.allows_group_by_pk: # The logic here is: if the main model's primary key is in the # query, then set new_expressions to that field. If that happens, # then also add having expressions to group by. pk = None for expr in expressions: # Is this a reference to query's base table primary key? If the # expression isn't a Col-like, then skip the expression. if (getattr(expr, 'target', None) == self.query.model._meta.pk and getattr(expr, 'alias', None) == self.query.tables[0]): pk = expr break if pk: # MySQLism: Columns in HAVING clause must be added to the GROUP BY. expressions = [pk] + [expr for expr in expressions if expr in having] elif self.connection.features.allows_group_by_selected_pks: # Filter out all expressions associated with a table's primary key # present in the grouped columns. This is done by identifying all # tables that have their primary key included in the grouped # columns and removing non-primary key columns referring to them. pks = {expr for expr in expressions if hasattr(expr, 'target') and expr.target.primary_key} aliases = {expr.alias for expr in pks} expressions = [ expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases ] return expressions def get_select(self): """ Return three values: - a list of 3-tuples of (expression, (sql, params), alias) - a klass_info structure, - a dictionary of annotations The (sql, params) is what the expression will produce, and alias is the "AS alias" for the column (possibly None). The klass_info structure contains the following information: - Which model to instantiate - Which columns for that model are present in the query (by position of the select clause). 
- related_klass_infos: [f, klass_info] to descent into The annotations is a dictionary of {'attname': column position} values. """ select = [] klass_info = None annotations = {} select_idx = 0 for alias, (sql, params) in self.query.extra_select.items(): annotations[alias] = select_idx select.append((RawSQL(sql, params), alias)) select_idx += 1 assert not (self.query.select and self.query.default_cols) if self.query.default_cols: select_list = [] for c in self.get_default_columns(): select_list.append(select_idx) select.append((c, None)) select_idx += 1 klass_info = { 'model': self.query.model, 'select_fields': select_list, } # self.query.select is a special case. These columns never go to # any model. for col in self.query.select: select.append((col, None)) select_idx += 1 for alias, annotation in self.query.annotation_select.items(): annotations[alias] = select_idx select.append((annotation, alias)) select_idx += 1 if self.query.select_related: related_klass_infos = self.get_related_selections(select) klass_info['related_klass_infos'] = related_klass_infos def get_select_from_parent(klass_info): for ki in klass_info['related_klass_infos']: if ki['from_parent']: ki['select_fields'] = (klass_info['select_fields'] + ki['select_fields']) get_select_from_parent(ki) get_select_from_parent(klass_info) ret = [] for col, alias in select: try: sql, params = self.compile(col, select_format=True) except EmptyResultSet: # Select a predicate that's always False. sql, params = '0', () ret.append((col, (sql, params), alias)) return ret, klass_info, annotations def get_order_by(self): """ Return a list of 2-tuples of form (expr, (sql, params, is_ref)) for the ORDER BY clause. The order_by clause can alter the select clause (for example it can add aliases to clauses that do not yet have one, or it can add totally new select clauses). """ if self.query.extra_order_by: ordering = self.query.extra_order_by elif not self.query.default_ordering: ordering = self.query.order_by else: ordering = (self.query.order_by or self.query.get_meta().ordering or []) if self.query.standard_ordering: asc, desc = ORDER_DIR['ASC'] else: asc, desc = ORDER_DIR['DESC'] order_by = [] for field in ordering: if hasattr(field, 'resolve_expression'): if not isinstance(field, OrderBy): field = field.asc() if not self.query.standard_ordering: field.reverse_ordering() order_by.append((field, False)) continue if field == '?': # random order_by.append((OrderBy(Random()), False)) continue col, order = get_order_dir(field, asc) descending = True if order == 'DESC' else False if col in self.query.annotation_select: # Reference to expression in SELECT clause order_by.append(( OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending), True)) continue if col in self.query.annotations: # References to an expression which is masked out of the SELECT clause order_by.append(( OrderBy(self.query.annotations[col], descending=descending), False)) continue if '.' in field: # This came in through an extra(order_by=...) addition. Pass it # on verbatim. table, col = col.split('.', 1) order_by.append(( OrderBy( RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []), descending=descending ), False)) continue if not self.query._extra or col not in self.query._extra: # 'col' is of the form 'field' or 'field1__field2' or # '-field1__field2__field', etc. 
order_by.extend(self.find_ordering_name( field, self.query.get_meta(), default_order=asc)) else: if col not in self.query.extra_select: order_by.append(( OrderBy(RawSQL(*self.query.extra[col]), descending=descending), False)) else: order_by.append(( OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending), True)) result = [] seen = set() for expr, is_ref in order_by: if self.query.combinator: src = expr.get_source_expressions()[0] # Relabel order by columns to raw numbers if this is a combined # query; necessary since the columns can't be referenced by the # fully qualified name and the simple column names may collide. for idx, (sel_expr, _, col_alias) in enumerate(self.select): if is_ref and col_alias == src.refs: src = src.source elif col_alias: continue if src == sel_expr: expr.set_source_expressions([RawSQL('%d' % (idx + 1), ())]) break else: raise DatabaseError('ORDER BY term does not match any column in the result set.') resolved = expr.resolve_expression( self.query, allow_joins=True, reuse=None) sql, params = self.compile(resolved) # Don't add the same column twice, but the order direction is # not taken into account so we strip it. When this entire method # is refactored into expressions, then we can check each part as we # generate it. without_ordering = self.ordering_parts.search(sql).group(1) if (without_ordering, tuple(params)) in seen: continue seen.add((without_ordering, tuple(params))) result.append((resolved, (sql, params, is_ref))) return result def get_extra_select(self, order_by, select): extra_select = [] select_sql = [t[1] for t in select] if self.query.distinct and not self.query.distinct_fields: for expr, (sql, params, is_ref) in order_by: without_ordering = self.ordering_parts.search(sql).group(1) if not is_ref and (without_ordering, params) not in select_sql: extra_select.append((expr, (without_ordering, params), None)) return extra_select def quote_name_unless_alias(self, name): """ A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL). 
""" if name in self.quote_cache: return self.quote_cache[name] if ((name in self.query.alias_map and name not in self.query.table_map) or name in self.query.extra_select or ( name in self.query.external_aliases and name not in self.query.table_map)): self.quote_cache[name] = name return name r = self.connection.ops.quote_name(name) self.quote_cache[name] = r return r def compile(self, node, select_format=False): vendor_impl = getattr(node, 'as_' + self.connection.vendor, None) if vendor_impl: sql, params = vendor_impl(self, self.connection) else: sql, params = node.as_sql(self, self.connection) if select_format is FORCE or (select_format and not self.query.subquery): return node.output_field.select_format(self, sql, params) return sql, params def get_combinator_sql(self, combinator, all): features = self.connection.features compilers = [ query.get_compiler(self.using, self.connection) for query in self.query.combined_queries ] if not features.supports_slicing_ordering_in_compound: for query, compiler in zip(self.query.combined_queries, compilers): if query.low_mark or query.high_mark: raise DatabaseError('LIMIT/OFFSET not allowed in subqueries of compound statements.') if compiler.get_order_by(): raise DatabaseError('ORDER BY not allowed in subqueries of compound statements.') parts = (compiler.as_sql() for compiler in compilers) combinator_sql = self.connection.ops.set_operators[combinator] if all and combinator == 'union': combinator_sql += ' ALL' braces = '({})' if features.supports_slicing_ordering_in_compound else '{}' sql_parts, args_parts = zip(*((braces.format(sql), args) for sql, args in parts)) result = [' {} '.format(combinator_sql).join(sql_parts)] params = [] for part in args_parts: params.extend(part) return result, params def as_sql(self, with_limits=True, with_col_aliases=False): """ Create the SQL for this query. Return the SQL string and list of parameters. If 'with_limits' is False, any limit/offset information is not included in the query. """ refcounts_before = self.query.alias_refcount.copy() try: extra_select, order_by, group_by = self.pre_sql_setup() distinct_fields = self.get_distinct() # This must come after 'select', 'ordering', and 'distinct' -- see # docstring of get_from_clause() for details. 
from_, f_params = self.get_from_clause() for_update_part = None where, w_params = self.compile(self.where) if self.where is not None else ("", []) having, h_params = self.compile(self.having) if self.having is not None else ("", []) combinator = self.query.combinator features = self.connection.features if combinator: if not getattr(features, 'supports_select_{}'.format(combinator)): raise DatabaseError('{} not supported on this database backend.'.format(combinator)) result, params = self.get_combinator_sql(combinator, self.query.combinator_all) else: result = ['SELECT'] params = [] if self.query.distinct: result.append(self.connection.ops.distinct_sql(distinct_fields)) out_cols = [] col_idx = 1 for _, (s_sql, s_params), alias in self.select + extra_select: if alias: s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias)) elif with_col_aliases: s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx) col_idx += 1 params.extend(s_params) out_cols.append(s_sql) result.append(', '.join(out_cols)) result.append('FROM') result.extend(from_) params.extend(f_params) if self.query.select_for_update and self.connection.features.has_select_for_update: if self.connection.get_autocommit(): raise TransactionManagementError('select_for_update cannot be used outside of a transaction.') nowait = self.query.select_for_update_nowait skip_locked = self.query.select_for_update_skip_locked # If it's a NOWAIT/SKIP LOCKED query but the backend # doesn't support it, raise a DatabaseError to prevent a # possible deadlock. if nowait and not self.connection.features.has_select_for_update_nowait: raise DatabaseError('NOWAIT is not supported on this database backend.') elif skip_locked and not self.connection.features.has_select_for_update_skip_locked: raise DatabaseError('SKIP LOCKED is not supported on this database backend.') for_update_part = self.connection.ops.for_update_sql(nowait=nowait, skip_locked=skip_locked) if for_update_part and self.connection.features.for_update_after_from: result.append(for_update_part) if where: result.append('WHERE %s' % where) params.extend(w_params) grouping = [] for g_sql, g_params in group_by: grouping.append(g_sql) params.extend(g_params) if grouping: if distinct_fields: raise NotImplementedError('annotate() + distinct(fields) is not implemented.') if not order_by: order_by = self.connection.ops.force_no_ordering() result.append('GROUP BY %s' % ', '.join(grouping)) if having: result.append('HAVING %s' % having) params.extend(h_params) if order_by: ordering = [] for _, (o_sql, o_params, _) in order_by: ordering.append(o_sql) params.extend(o_params) result.append('ORDER BY %s' % ', '.join(ordering)) if with_limits: if self.query.high_mark is not None: result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark)) if self.query.low_mark: if self.query.high_mark is None: val = self.connection.ops.no_limit_value() if val: result.append('LIMIT %d' % val) result.append('OFFSET %d' % self.query.low_mark) if for_update_part and not self.connection.features.for_update_after_from: result.append(for_update_part) return ' '.join(result), tuple(params) finally: # Finally do cleanup - get rid of the joins we created above. self.query.reset_refcounts(refcounts_before) def get_default_columns(self, start_alias=None, opts=None, from_parent=None): """ Compute the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. 
via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. Return a list of strings, quoted appropriately for use in SQL directly, as well as a set of aliases used in the select statement (if 'as_pairs' is True, return a list of (alias, col_name) pairs instead of strings as the first component and None as the second component). """ result = [] if opts is None: opts = self.query.get_meta() only_load = self.deferred_to_columns() if not start_alias: start_alias = self.query.get_initial_alias() # The 'seen_models' is used to optimize checking the needed parent # alias for a given field. This also includes None -> start_alias to # be used by local fields. seen_models = {None: start_alias} for field in opts.concrete_fields: model = field.model._meta.concrete_model # A proxy model will have a different model and concrete_model. We # will assign None if the field belongs to this model. if model == opts.model: model = None if from_parent and model is not None and issubclass( from_parent._meta.concrete_model, model._meta.concrete_model): # Avoid loading data for already loaded parents. # We end up here in the case select_related() resolution # proceeds from parent model to child model. In that case the # parent model data is already present in the SELECT clause, # and we want to avoid reloading the same data again. continue if field.model in only_load and field.attname not in only_load[field.model]: continue alias = self.query.join_parent_model(opts, model, start_alias, seen_models) column = field.get_col(alias) result.append(column) return result def get_distinct(self): """ Return a quoted list of fields to use in DISTINCT ON part of the query. This method can alter the tables in the query, and thus it must be called before get_from_clause(). """ qn = self.quote_name_unless_alias qn2 = self.connection.ops.quote_name result = [] opts = self.query.get_meta() for name in self.query.distinct_fields: parts = name.split(LOOKUP_SEP) _, targets, alias, joins, path, _ = self._setup_joins(parts, opts, None) targets, alias, _ = self.query.trim_joins(targets, joins, path) for target in targets: if name in self.query.annotation_select: result.append(name) else: result.append("%s.%s" % (qn(alias), qn2(target.column))) return result def find_ordering_name(self, name, opts, alias=None, default_order='ASC', already_seen=None): """ Return the table alias (the name might be ambiguous, the alias will not be) and column name for ordering by the given 'name' parameter. The 'name' is of the form 'field1__field2__...__fieldN'. """ name, order = get_order_dir(name, default_order) descending = True if order == 'DESC' else False pieces = name.split(LOOKUP_SEP) field, targets, alias, joins, path, opts = self._setup_joins(pieces, opts, alias) # If we get to this point and the field is a relation to another model, # append the default ordering for that model unless the attribute name # of the field is specified. if field.is_relation and opts.ordering and getattr(field, 'attname', None) != name: # Firstly, avoid infinite loops. 
if not already_seen: already_seen = set() join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins) if join_tuple in already_seen: raise FieldError('Infinite loop caused by ordering.') already_seen.add(join_tuple) results = [] for item in opts.ordering: results.extend(self.find_ordering_name(item, opts, alias, order, already_seen)) return results targets, alias, _ = self.query.trim_joins(targets, joins, path) return [(OrderBy(t.get_col(alias), descending=descending), False) for t in targets] def _setup_joins(self, pieces, opts, alias): """ Helper method for get_order_by() and get_distinct(). get_ordering() and get_distinct() must produce same target columns on same input, as the prefixes of get_ordering() and get_distinct() must match. Executing SQL where this is not true is an error. """ if not alias: alias = self.query.get_initial_alias() field, targets, opts, joins, path = self.query.setup_joins( pieces, opts, alias) alias = joins[-1] return field, targets, alias, joins, path, opts def get_from_clause(self): """ Return a list of strings that are joined together to go after the "FROM" part of the query, as well as a list any extra parameters that need to be included. Subclasses, can override this to create a from-clause via a "select". This should only be called after any SQL construction methods that might change the tables that are needed. This means the select columns, ordering, and distinct must be done first. """ result = [] params = [] for alias in self.query.tables: if not self.query.alias_refcount[alias]: continue try: from_clause = self.query.alias_map[alias] except KeyError: # Extra tables can end up in self.tables, but not in the # alias_map if they aren't in a join. That's OK. We skip them. continue clause_sql, clause_params = self.compile(from_clause) result.append(clause_sql) params.extend(clause_params) for t in self.query.extra_tables: alias, _ = self.query.table_alias(t) # Only add the alias if it's not already present (the table_alias() # call increments the refcount, so an alias refcount of one means # this is the only reference). if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1: result.append(', %s' % self.quote_name_unless_alias(alias)) return result, params def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1, requested=None, restricted=None): """ Fill in the information needed for a select_related query. The current depth is measured as the number of connections away from the root model (for example, cur_depth=1 means we are looking at models with direct connections to the root model). """ def _get_field_choices(): direct_choices = (f.name for f in opts.fields if f.is_relation) reverse_choices = ( f.field.related_query_name() for f in opts.related_objects if f.field.unique ) return chain(direct_choices, reverse_choices) related_klass_infos = [] if not restricted and self.query.max_depth and cur_depth > self.query.max_depth: # We've recursed far enough; bail out. return related_klass_infos if not opts: opts = self.query.get_meta() root_alias = self.query.get_initial_alias() only_load = self.query.get_loaded_field_names() # Setup for the case when only particular related fields should be # included in the related selection. 
fields_found = set() if requested is None: if isinstance(self.query.select_related, dict): requested = self.query.select_related restricted = True else: restricted = False def get_related_klass_infos(klass_info, related_klass_infos): klass_info['related_klass_infos'] = related_klass_infos for f in opts.fields: field_model = f.model._meta.concrete_model fields_found.add(f.name) if restricted: next = requested.get(f.name, {}) if not f.is_relation: # If a non-related field is used like a relation, # or if a single non-relational field is given. if next or f.name in requested: raise FieldError( "Non-relational field given in select_related: '%s'. " "Choices are: %s" % ( f.name, ", ".join(_get_field_choices()) or '(none)', ) ) else: next = False if not select_related_descend(f, restricted, requested, only_load.get(field_model)): continue klass_info = { 'model': f.remote_field.model, 'field': f, 'reverse': False, 'from_parent': False, } related_klass_infos.append(klass_info) select_fields = [] _, _, _, joins, _ = self.query.setup_joins( [f.name], opts, root_alias) alias = joins[-1] columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next_klass_infos = self.get_related_selections( select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted) get_related_klass_infos(klass_info, next_klass_infos) if restricted: related_fields = [ (o.field, o.related_model) for o in opts.related_objects if o.field.unique and not o.many_to_many ] for f, model in related_fields: if not select_related_descend(f, restricted, requested, only_load.get(model), reverse=True): continue related_field_name = f.related_query_name() fields_found.add(related_field_name) _, _, _, joins, _ = self.query.setup_joins([related_field_name], opts, root_alias) alias = joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model klass_info = { 'model': model, 'field': f, 'reverse': True, 'from_parent': from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( start_alias=alias, opts=model._meta, from_parent=opts.model) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next = requested.get(f.related_query_name(), {}) next_klass_infos = self.get_related_selections( select, model._meta, alias, cur_depth + 1, next, restricted) get_related_klass_infos(klass_info, next_klass_infos) fields_not_found = set(requested.keys()).difference(fields_found) if fields_not_found: invalid_fields = ("'%s'" % s for s in fields_not_found) raise FieldError( 'Invalid field name(s) given in select_related: %s. ' 'Choices are: %s' % ( ', '.join(invalid_fields), ', '.join(_get_field_choices()) or '(none)', ) ) return related_klass_infos def deferred_to_columns(self): """ Convert the self.deferred_loading data structure to mapping of table names to sets of column names which are to be loaded. Return the dictionary. 
""" columns = {} self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb) return columns def get_converters(self, expressions): converters = {} for i, expression in enumerate(expressions): if expression: backend_converters = self.connection.ops.get_db_converters(expression) field_converters = expression.get_db_converters(self.connection) if backend_converters or field_converters: converters[i] = (backend_converters + field_converters, expression) return converters def apply_converters(self, row, converters): row = list(row) for pos, (convs, expression) in converters.items(): value = row[pos] for converter in convs: value = converter(value, expression, self.connection, self.query.context) row[pos] = value return tuple(row) def results_iter(self, results=None): """Return an iterator over the results from executing this query.""" if results is None: results = self.execute_sql(MULTI) fields = [s[0] for s in self.select[0:self.col_count]] converters = self.get_converters(fields) for rows in results: for row in rows: if converters: row = self.apply_converters(row, converters) yield row def has_results(self): """ Backends (e.g. NoSQL) can override this in order to use optimized versions of "query has any results." """ # This is always executed on a query clone, so we can modify self.query self.query.add_extra({'a': 1}, None, None, None, None, None) self.query.set_extra_mask(['a']) return bool(self.execute_sql(SINGLE)) def execute_sql(self, result_type=MULTI, chunked_fetch=False): """ Run the query against the database and return the result(s). The return value is a single data item if result_type is SINGLE, or an iterator over the results if the result_type is MULTI. result_type is either MULTI (use fetchmany() to retrieve all rows), SINGLE (only retrieve a single row), or None. In this last case, the cursor is returned if any query is executed, since it's used by subclasses such as InsertQuery). It's possible, however, that no query is needed, as the filters describe an empty set. In that case, None is returned, to avoid any unnecessary database interaction. """ if not result_type: result_type = NO_RESULTS try: sql, params = self.as_sql() if not sql: raise EmptyResultSet except EmptyResultSet: if result_type == MULTI: return iter([]) else: return if chunked_fetch: cursor = self.connection.chunked_cursor() else: cursor = self.connection.cursor() try: cursor.execute(sql, params) except Exception: # Might fail for server-side cursors (e.g. connection closed) cursor.close() raise if result_type == CURSOR: # Caller didn't specify a result_type, so just give them back the # cursor to process (and close). return cursor if result_type == SINGLE: try: val = cursor.fetchone() if val: return val[0:self.col_count] return val finally: # done with the cursor cursor.close() if result_type == NO_RESULTS: cursor.close() return result = cursor_iter( cursor, self.connection.features.empty_fetchmany_value, self.col_count ) if not chunked_fetch and not self.connection.features.can_use_chunked_reads: try: # If we are using non-chunked reads, we return the same data # structure as normally, but ensure it is all read into memory # before going any further. Use chunked_fetch if requested. 
return list(result) finally: # done with the cursor cursor.close() return result def as_subquery_condition(self, alias, columns, compiler): qn = compiler.quote_name_unless_alias qn2 = self.connection.ops.quote_name for index, select_col in enumerate(self.query.select): lhs_sql, lhs_params = self.compile(select_col) rhs = '%s.%s' % (qn(alias), qn2(columns[index])) self.query.where.add( QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND') sql, params = self.as_sql() return 'EXISTS (%s)' % sql, params class SQLInsertCompiler(SQLCompiler): def __init__(self, *args, **kwargs): self.return_id = False super().__init__(*args, **kwargs) def field_as_sql(self, field, val): """ Take a field and a value intended to be saved on that field, and return placeholder SQL and accompanying params. Check for raw values, expressions, and fields with get_placeholder() defined in that order. When field is None, consider the value raw and use it as the placeholder, with no corresponding parameters returned. """ if field is None: # A field value of None means the value is raw. sql, params = val, [] elif hasattr(val, 'as_sql'): # This is an expression, let's compile it. sql, params = self.compile(val) elif hasattr(field, 'get_placeholder'): # Some fields (e.g. geo fields) need special munging before # they can be inserted. sql, params = field.get_placeholder(val, self, self.connection), [val] else: # Return the common case for the placeholder sql, params = '%s', [val] # The following hook is only used by Oracle Spatial, which sometimes # needs to yield 'NULL' and [] as its placeholder and params instead # of '%s' and [None]. The 'NULL' placeholder is produced earlier by # OracleOperations.get_geom_placeholder(). The following line removes # the corresponding None parameter. See ticket #10888. params = self.connection.ops.modify_insert_params(sql, params) return sql, params def prepare_value(self, field, value): """ Prepare a value to be used in a query by resolving it if it is an expression and otherwise calling the field's get_db_prep_save(). """ if hasattr(value, 'resolve_expression'): value = value.resolve_expression(self.query, allow_joins=False, for_save=True) # Don't allow values containing Col expressions. They refer to # existing columns on a row, but in the case of insert the row # doesn't exist yet. if value.contains_column_references: raise ValueError( 'Failed to insert expression "%s" on %s. F() expressions ' 'can only be used to update, not to insert.' % (value, field) ) if value.contains_aggregate: raise FieldError("Aggregate functions are not allowed in this query") else: value = field.get_db_prep_save(value, connection=self.connection) return value def pre_save_val(self, field, obj): """ Get the given field's value off the given obj. pre_save() is used for things like auto_now on DateTimeField. Skip it if this is a raw query. """ if self.query.raw: return getattr(obj, field.attname) return field.pre_save(obj, add=True) def assemble_as_sql(self, fields, value_rows): """ Take a sequence of N fields and a sequence of M rows of values, and generate placeholder SQL and parameters for each field and value. Return a pair containing: * a sequence of M rows of N SQL placeholder strings, and * a sequence of M rows of corresponding parameter values. Each placeholder string may contain any number of '%s' interpolation strings, and each parameter row will contain exactly as many params as the total number of '%s's in the corresponding placeholder row. 
""" if not value_rows: return [], [] # list of (sql, [params]) tuples for each object to be saved # Shape: [n_objs][n_fields][2] rows_of_fields_as_sql = ( (self.field_as_sql(field, v) for field, v in zip(fields, row)) for row in value_rows ) # tuple like ([sqls], [[params]s]) for each object to be saved # Shape: [n_objs][2][n_fields] sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql) # Extract separate lists for placeholders and params. # Each of these has shape [n_objs][n_fields] placeholder_rows, param_rows = zip(*sql_and_param_pair_rows) # Params for each field are still lists, and need to be flattened. param_rows = [[p for ps in row for p in ps] for row in param_rows] return placeholder_rows, param_rows def as_sql(self): # We don't need quote_name_unless_alias() here, since these are all # going to be column names (so we can avoid the extra overhead). qn = self.connection.ops.quote_name opts = self.query.get_meta() result = ['INSERT INTO %s' % qn(opts.db_table)] has_fields = bool(self.query.fields) fields = self.query.fields if has_fields else [opts.pk] result.append('(%s)' % ', '.join(qn(f.column) for f in fields)) if has_fields: value_rows = [ [self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields] for obj in self.query.objs ] else: # An empty object. value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs] fields = [None] # Currently the backends just accept values when generating bulk # queries and generate their own placeholders. Doing that isn't # necessary and it should be possible to use placeholders and # expressions in bulk inserts too. can_bulk = (not self.return_id and self.connection.features.has_bulk_insert) placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows) if self.return_id and self.connection.features.can_return_id_from_insert: if self.connection.features.can_return_ids_from_bulk_insert: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) params = param_rows else: result.append("VALUES (%s)" % ", ".join(placeholder_rows[0])) params = [param_rows[0]] col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column)) r_fmt, r_params = self.connection.ops.return_insert_id() # Skip empty r_fmt to allow subclasses to customize behavior for # 3rd party backends. Refs #19096. 
if r_fmt: result.append(r_fmt % col) params += [r_params] return [(" ".join(result), tuple(chain.from_iterable(params)))] if can_bulk: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) return [(" ".join(result), tuple(p for ps in param_rows for p in ps))] else: return [ (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals) for p, vals in zip(placeholder_rows, param_rows) ] def execute_sql(self, return_id=False): assert not ( return_id and len(self.query.objs) != 1 and not self.connection.features.can_return_ids_from_bulk_insert ) self.return_id = return_id with self.connection.cursor() as cursor: for sql, params in self.as_sql(): cursor.execute(sql, params) if not (return_id and cursor): return if self.connection.features.can_return_ids_from_bulk_insert and len(self.query.objs) > 1: return self.connection.ops.fetch_returned_insert_ids(cursor) if self.connection.features.can_return_id_from_insert: assert len(self.query.objs) == 1 return self.connection.ops.fetch_returned_insert_id(cursor) return self.connection.ops.last_insert_id( cursor, self.query.get_meta().db_table, self.query.get_meta().pk.column ) class SQLDeleteCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ assert len([t for t in self.query.tables if self.query.alias_refcount[t] > 0]) == 1, \ "Can only delete from one table at a time." qn = self.quote_name_unless_alias result = ['DELETE FROM %s' % qn(self.query.tables[0])] where, params = self.compile(self.query.where) if where: result.append('WHERE %s' % where) return ' '.join(result), tuple(params) class SQLUpdateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ self.pre_sql_setup() if not self.query.values: return '', () qn = self.quote_name_unless_alias values, update_params = [], [] for field, model, val in self.query.values: if hasattr(val, 'resolve_expression'): val = val.resolve_expression(self.query, allow_joins=False, for_save=True) if val.contains_aggregate: raise FieldError("Aggregate functions are not allowed in this query") elif hasattr(val, 'prepare_database_save'): if field.remote_field: val = field.get_db_prep_save( val.prepare_database_save(field), connection=self.connection, ) else: raise TypeError( "Tried to update field %s with a model instance, %r. " "Use a value compatible with %s." % (field, val, field.__class__.__name__) ) else: val = field.get_db_prep_save(val, connection=self.connection) # Getting the placeholder for the field. if hasattr(field, 'get_placeholder'): placeholder = field.get_placeholder(val, self, self.connection) else: placeholder = '%s' name = field.column if hasattr(val, 'as_sql'): sql, params = self.compile(val) values.append('%s = %s' % (qn(name), sql)) update_params.extend(params) elif val is not None: values.append('%s = %s' % (qn(name), placeholder)) update_params.append(val) else: values.append('%s = NULL' % qn(name)) table = self.query.tables[0] result = [ 'UPDATE %s SET' % qn(table), ', '.join(values), ] where, params = self.compile(self.query.where) if where: result.append('WHERE %s' % where) return ' '.join(result), tuple(update_params + params) def execute_sql(self, result_type): """ Execute the specified update. Return the number of rows affected by the primary update query. The "primary update query" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available. 
""" cursor = super().execute_sql(result_type) try: rows = cursor.rowcount if cursor else 0 is_empty = cursor is None finally: if cursor: cursor.close() for query in self.query.get_related_updates(): aux_rows = query.get_compiler(self.using).execute_sql(result_type) if is_empty and aux_rows: rows = aux_rows is_empty = False return rows def pre_sql_setup(self): """ If the update depends on results from other tables, munge the "where" conditions to match the format required for (portable) SQL updates. If multiple updates are required, pull out the id values to update at this point so that they don't change as a result of the progressive updates. """ refcounts_before = self.query.alias_refcount.copy() # Ensure base table is in the query self.query.get_initial_alias() count = self.query.count_active_tables() if not self.query.related_updates and count == 1: return query = self.query.clone(klass=Query) query.select_related = False query.clear_ordering(True) query._extra = {} query.select = [] query.add_fields([query.get_meta().pk.name]) super().pre_sql_setup() must_pre_select = count > 1 and not self.connection.features.update_can_self_select # Now we adjust the current query: reset the where clause and get rid # of all the tables we don't need (since they're in the sub-select). self.query.where = self.query.where_class() if self.query.related_updates or must_pre_select: # Either we're using the idents in multiple update queries (so # don't want them to change), or the db backend doesn't support # selecting from the updating table (e.g. MySQL). idents = [] for rows in query.get_compiler(self.using).execute_sql(MULTI): idents.extend(r[0] for r in rows) self.query.add_filter(('pk__in', idents)) self.query.related_ids = idents else: # The fast path. Filters and updates in one query. self.query.add_filter(('pk__in', query)) self.query.reset_refcounts(refcounts_before) class SQLAggregateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ sql, params = [], [] for annotation in self.query.annotation_select.values(): ann_sql, ann_params = self.compile(annotation, select_format=FORCE) sql.append(ann_sql) params.extend(ann_params) self.col_count = len(self.query.annotation_select) sql = ', '.join(sql) params = tuple(params) sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery) params = params + self.query.sub_params return sql, params def cursor_iter(cursor, sentinel, col_count): """ Yield blocks of rows from a cursor and ensure the cursor is closed when done. """ try: for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)), sentinel): yield [r[0:col_count] for r in rows] finally: cursor.close()
bsd-3-clause
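cursor_iter at the bottom of the compiler module streams result rows with the two-argument form of iter(), calling fetchmany() until it returns the backend's empty value. A minimal sketch of that sentinel pattern with a stand-in cursor (FakeCursor is hypothetical, not a Django class):

class FakeCursor:
    """Stands in for a DB-API cursor; only fetchmany() is modeled."""
    def __init__(self, rows):
        self.rows = list(rows)
    def fetchmany(self, size):
        chunk, self.rows = self.rows[:size], self.rows[size:]
        return chunk

cursor = FakeCursor([(1,), (2,), (3,), (4,), (5,)])
# iter(callable, sentinel) keeps calling the lambda until its return value
# equals the sentinel ([] here), mirroring how cursor_iter consumes
# GET_ITERATOR_CHUNK_SIZE-row blocks until empty_fetchmany_value appears.
for chunk in iter(lambda: cursor.fetchmany(2), []):
    print(chunk)
# prints [(1,), (2,)], then [(3,), (4,)], then [(5,)]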
arthurfurlan/django-shortim
src/shortim/migrations/0004_auto__add_field_shorturl_collect_tries.py
1
2388
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'ShortURL.collect_tries' db.add_column('shortim_shorturl', 'collect_tries', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False) def backwards(self, orm): # Deleting field 'ShortURL.collect_tries' db.delete_column('shortim_shorturl', 'collect_tries') models = { 'shortim.shorturl': { 'Meta': {'ordering': "['-id']", 'object_name': 'ShortURL'}, 'canonical_url': ('django.db.models.fields.URLField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}), 'collect_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'collect_tries': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mime': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}), 'remote_user': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}), 'removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'title': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'db_index': 'True'}) }, 'shortim.shorturlhit': { 'Meta': {'ordering': "['-date']", 'object_name': 'ShortURLHit'}, 'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'remote_user': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}), 'shorturl': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hits'", 'to': "orm['shortim.ShortURL']"}) } } complete_apps = ['shortim']
gpl-3.0
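The self.gf(...) calls in the migration above resolve field classes from dotted-path strings. A minimal sketch of that style of lookup, using only the standard library; import_dotted is an illustrative name, and South's real implementation differs in its details:

from importlib import import_module

def import_dotted(path):
    # Split 'pkg.module.Attr' into a module path and an attribute name,
    # import the module, and pull the attribute off it.
    module_path, _, attr = path.rpartition('.')
    return getattr(import_module(module_path), attr)

print(import_dotted('os.path.join'))  # <function join ...>
# In the migration's context the argument would be a path such as
# 'django.db.models.fields.IntegerField'.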
kbidarkar/robottelo
tests/foreman/rhai/test_rhai.py
2
6626
"""Tests for Red Hat Access Insights :Requirement: Rhai :CaseAutomation: Automated :CaseLevel: Acceptance :CaseComponent: UI :TestType: Functional :CaseImportance: High :Upstream: No """ import time from fauxfactory import gen_string from nailgun import entities from robottelo import manifests from robottelo.api.utils import upload_manifest from robottelo.constants import DEFAULT_SUBSCRIPTION_NAME from robottelo.constants import DISTRO_RHEL6, DISTRO_RHEL7 from robottelo.decorators import run_in_one_thread, skip_if_not_set from robottelo.test import UITestCase from robottelo.ui.locators import locators from robottelo.ui.navigator import Navigator from robottelo.ui.session import Session from robottelo.vm import VirtualMachine @run_in_one_thread class RHAITestCase(UITestCase): @classmethod def setUpClass(cls): # noqa super(RHAITestCase, cls).setUpClass() # Create a new organization with prefix 'insights' org = entities.Organization( name='insights_{0}'.format(gen_string('alpha', 6)) ).create() # Upload manifest with manifests.clone() as manifest: upload_manifest(org.id, manifest.content) # Create activation key using default CV and library environment activation_key = entities.ActivationKey( auto_attach=True, content_view=org.default_content_view.id, environment=org.library.id, name=gen_string('alpha'), organization=org, ).create() # Walk through the list of subscriptions. # Find the "Red Hat Employee Subscription" and attach it to the # recently-created activation key. for subs in entities.Subscription(organization=org).search(): if subs.read_json()['product_name'] == DEFAULT_SUBSCRIPTION_NAME: # 'quantity' must be 1, not subscription['quantity']. Greater # values produce this error: "RuntimeError: Error: Only pools # with multi-entitlement product subscriptions can be added to # the activation key with a quantity greater than one." activation_key.add_subscriptions(data={ 'quantity': 1, 'subscription_id': subs.id, }) break cls.org_label = org.label cls.ak_name = activation_key.name cls.org_name = org.name @skip_if_not_set('clients') def test_positive_register_client_to_rhai(self): """Check client registration to redhat-access-insights service. 
:id: f3aefdb3-ac99-402d-afd9-e53e9ee1e8d7 :expectedresults: Registered client should appear in the Systems sub-menu of Red Hat Access Insights """ # Register a VM to Access Insights Service with VirtualMachine(distro=DISTRO_RHEL6) as vm: try: vm.configure_rhai_client(self.ak_name, self.org_label, DISTRO_RHEL6) with Session(self) as session: # view clients registered to Red Hat Access Insights session.nav.go_to_select_org(self.org_name) Navigator(self.browser).go_to_insights_systems() result = self.rhai.view_registered_systems() self.assertIn("1", result, 'Registered clients are not listed') finally: vm.get('/var/log/redhat-access-insights/' 'redhat-access-insights.log', './insights_client_registration.log') def test_negative_org_not_selected(self): """Verify that a user attempting to access RHAI is directed to select an Organization if there is no organization selected :id: 6ddfdb29-eeb5-41a4-8851-ad19130b112c :expectedresults: 'Organization Selection Required' message must be displayed if the user tries to view Access Insights overview without selecting an org """ with Session(self) as session: # Given that the user does not specify any Organization session.nav.go_to_select_org("Any Organization") session.nav.go_to_insights_overview() # 'Organization Selection Required' message must be present result = session.nav.wait_until_element( locators['insights.org_selection_msg']).text self.assertIn("Organization Selection Required", result) @skip_if_not_set('clients') def test_positive_unregister_client_from_rhai(self): """Verify that unregistering a system from RHAI works correctly; afterwards the system should not be able to use the service. :id: 580f9704-8c6d-4f63-b027-68a6ac97af77 :expectedresults: Once the system is unregistered from the RHAI web interface, the unregistered system should return `1` on running the service 'redhat-access-insights' """ # Register a VM to Access Insights Service with VirtualMachine(distro=DISTRO_RHEL7) as vm: try: vm.configure_rhai_client(self.ak_name, self.org_label, DISTRO_RHEL7) with Session(self) as session: session.nav.go_to_select_org(self.org_name) Navigator(self.browser).go_to_insights_systems() # Click on the unregister icon 'X' in the table against the # registered system listed. strategy, value = locators['insights.unregister_system'] session.nav.click( (strategy, value % vm.hostname), wait_for_ajax=True, ajax_timeout=40, ) # Confirm selection for clicking on 'Yes' to unregister the # system session.nav.click( locators['insights.unregister_button'] ) self.browser.refresh() time.sleep(60) self.browser.refresh() result = vm.run('redhat-access-insights') self.assertEqual(result.return_code, 1, "System has not been unregistered") finally: vm.get('/var/log/redhat-access-insights/' 'redhat-access-insights.log', './insights_unregister.log')
gpl-3.0
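test_positive_unregister_client_from_rhai above waits for the UI with a fixed refresh/sleep(60)/refresh sequence. Purely as an illustration (nothing below comes from robottelo), a generic poll-until helper shows the usual alternative to fixed sleeps:

import time

def wait_until(predicate, timeout=120, interval=5):
    """Poll predicate() until it returns truthy or the timeout elapses."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# Hypothetical usage against the test's final assertion:
# wait_until(lambda: vm.run('redhat-access-insights').return_code == 1)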
mancoast/CPythonPyc_test
cpython/221_test_cl.py
21
3925
#! /usr/bin/env python """Whimpy test script for the cl module Roger E. Masse """ import cl from test_support import verbose clattrs = ['ADDED_ALGORITHM_ERROR', 'ALAW', 'ALGORITHM_ID', 'ALGORITHM_VERSION', 'AUDIO', 'AWARE_ERROR', 'AWARE_MPEG_AUDIO', 'AWARE_MULTIRATE', 'AWCMP_CONST_QUAL', 'AWCMP_FIXED_RATE', 'AWCMP_INDEPENDENT', 'AWCMP_JOINT_STEREO', 'AWCMP_LOSSLESS', 'AWCMP_MPEG_LAYER_I', 'AWCMP_MPEG_LAYER_II', 'AWCMP_STEREO', 'Algorithm', 'AlgorithmNumber', 'AlgorithmType', 'AudioFormatName', 'BAD_ALGORITHM_NAME', 'BAD_ALGORITHM_TYPE', 'BAD_BLOCK_SIZE', 'BAD_BOARD', 'BAD_BUFFERING', 'BAD_BUFFERLENGTH_NEG', 'BAD_BUFFERLENGTH_ODD', 'BAD_BUFFER_EXISTS', 'BAD_BUFFER_HANDLE', 'BAD_BUFFER_POINTER', 'BAD_BUFFER_QUERY_SIZE', 'BAD_BUFFER_SIZE', 'BAD_BUFFER_SIZE_POINTER', 'BAD_BUFFER_TYPE', 'BAD_COMPRESSION_SCHEME', 'BAD_COMPRESSOR_HANDLE', 'BAD_COMPRESSOR_HANDLE_POINTER', 'BAD_FRAME_SIZE', 'BAD_FUNCTIONALITY', 'BAD_FUNCTION_POINTER', 'BAD_HEADER_SIZE', 'BAD_INITIAL_VALUE', 'BAD_INTERNAL_FORMAT', 'BAD_LICENSE', 'BAD_MIN_GT_MAX', 'BAD_NO_BUFFERSPACE', 'BAD_NUMBER_OF_BLOCKS', 'BAD_PARAM', 'BAD_PARAM_ID_POINTER', 'BAD_PARAM_TYPE', 'BAD_POINTER', 'BAD_PVBUFFER', 'BAD_SCHEME_POINTER', 'BAD_STREAM_HEADER', 'BAD_STRING_POINTER', 'BAD_TEXT_STRING_PTR', 'BEST_FIT', 'BIDIRECTIONAL', 'BITRATE_POLICY', 'BITRATE_TARGET', 'BITS_PER_COMPONENT', 'BLENDING', 'BLOCK_SIZE', 'BOTTOM_UP', 'BUFFER_NOT_CREATED', 'BUF_DATA', 'BUF_FRAME', 'BytesPerPixel', 'BytesPerSample', 'CHANNEL_POLICY', 'CHROMA_THRESHOLD', 'CODEC', 'COMPONENTS', 'COMPRESSED_BUFFER_SIZE', 'COMPRESSION_RATIO', 'COMPRESSOR', 'CONTINUOUS_BLOCK', 'CONTINUOUS_NONBLOCK', 'CompressImage', 'DATA', 'DECOMPRESSOR', 'DecompressImage', 'EDGE_THRESHOLD', 'ENABLE_IMAGEINFO', 'END_OF_SEQUENCE', 'ENUM_VALUE', 'EXACT_COMPRESSION_RATIO', 'EXTERNAL_DEVICE', 'FLOATING_ENUM_VALUE', 'FLOATING_RANGE_VALUE', 'FRAME', 'FRAME_BUFFER_SIZE', 'FRAME_BUFFER_SIZE_ZERO', 'FRAME_RATE', 'FRAME_TYPE', 'G711_ALAW', 'G711_ULAW', 'GRAYSCALE', 'GetAlgorithmName', 'HDCC', 'HDCC_SAMPLES_PER_TILE', 'HDCC_TILE_THRESHOLD', 'HEADER_START_CODE', 'IMAGE_HEIGHT', 'IMAGE_WIDTH', 'INTERNAL_FORMAT', 'INTERNAL_IMAGE_HEIGHT', 'INTERNAL_IMAGE_WIDTH', 'INTRA', 'JPEG', 'JPEG_ERROR', 'JPEG_NUM_PARAMS', 'JPEG_QUALITY_FACTOR', 'JPEG_QUANTIZATION_TABLES', 'JPEG_SOFTWARE', 'JPEG_STREAM_HEADERS', 'KEYFRAME', 'LAST_FRAME_INDEX', 'LAYER', 'LUMA_THRESHOLD', 'MAX_NUMBER_OF_AUDIO_ALGORITHMS', 'MAX_NUMBER_OF_ORIGINAL_FORMATS', 'MAX_NUMBER_OF_PARAMS', 'MAX_NUMBER_OF_VIDEO_ALGORITHMS', 'MONO', 'MPEG_VIDEO', 'MVC1', 'MVC2', 'MVC2_BLENDING', 'MVC2_BLENDING_OFF', 'MVC2_BLENDING_ON', 'MVC2_CHROMA_THRESHOLD', 'MVC2_EDGE_THRESHOLD', 'MVC2_ERROR', 'MVC2_LUMA_THRESHOLD', 'NEXT_NOT_AVAILABLE', 'NOISE_MARGIN', 'NONE', 'NUMBER_OF_FRAMES', 'NUMBER_OF_PARAMS', 'ORIENTATION', 'ORIGINAL_FORMAT', 'OpenCompressor', 'OpenDecompressor', 'PARAM_OUT_OF_RANGE', 'PREDICTED', 'PREROLL', 'ParamID', 'ParamNumber', 'ParamType', 'QUALITY_FACTOR', 'QUALITY_LEVEL', 'QueryAlgorithms', 'QueryMaxHeaderSize', 'QueryScheme', 'QuerySchemeFromName', 'RANGE_VALUE', 'RGB', 'RGB332', 'RGB8', 'RGBA', 'RGBX', 'RLE', 'RLE24', 'RTR', 'RTR1', 'RTR_QUALITY_LEVEL', 'SAMPLES_PER_TILE', 'SCHEME_BUSY', 'SCHEME_NOT_AVAILABLE', 'SPEED', 'STEREO_INTERLEAVED', 'STREAM_HEADERS', 'SetDefault', 'SetMax', 'SetMin', 'TILE_THRESHOLD', 'TOP_DOWN', 'ULAW', 'UNCOMPRESSED', 'UNCOMPRESSED_AUDIO', 'UNCOMPRESSED_VIDEO', 'UNKNOWN_SCHEME', 'VIDEO', 'VideoFormatName', 'Y', 'YCbCr', 'YCbCr422', 'YCbCr422DC', 'YCbCr422HC', 'YUV', 'YUV422', 'YUV422DC', 'YUV422HC', '__doc__', '__name__', 
'cvt_type', 'error'] # This is a very unobtrusive test for the existence of the cl # module and all its attributes. def main(): # touch all the attributes of cl without doing anything if verbose: print 'Touching cl module attributes...' for attr in clattrs: if verbose: print 'touching: ', attr getattr(cl, attr) main()
gpl-3.0
mancoast/CPythonPyc_test
cpython/272_test_slice.py
113
4413
# tests for slice objects; in particular the indices method. import unittest from test import test_support from cPickle import loads, dumps import sys class SliceTest(unittest.TestCase): def test_constructor(self): self.assertRaises(TypeError, slice) self.assertRaises(TypeError, slice, 1, 2, 3, 4) def test_repr(self): self.assertEqual(repr(slice(1, 2, 3)), "slice(1, 2, 3)") def test_hash(self): # Verify clearing of SF bug #800796 self.assertRaises(TypeError, hash, slice(5)) self.assertRaises(TypeError, slice(5).__hash__) def test_cmp(self): s1 = slice(1, 2, 3) s2 = slice(1, 2, 3) s3 = slice(1, 2, 4) self.assertEqual(s1, s2) self.assertNotEqual(s1, s3) class Exc(Exception): pass class BadCmp(object): def __eq__(self, other): raise Exc __hash__ = None # Silence Py3k warning s1 = slice(BadCmp()) s2 = slice(BadCmp()) self.assertRaises(Exc, cmp, s1, s2) self.assertEqual(s1, s1) s1 = slice(1, BadCmp()) s2 = slice(1, BadCmp()) self.assertEqual(s1, s1) self.assertRaises(Exc, cmp, s1, s2) s1 = slice(1, 2, BadCmp()) s2 = slice(1, 2, BadCmp()) self.assertEqual(s1, s1) self.assertRaises(Exc, cmp, s1, s2) def test_members(self): s = slice(1) self.assertEqual(s.start, None) self.assertEqual(s.stop, 1) self.assertEqual(s.step, None) s = slice(1, 2) self.assertEqual(s.start, 1) self.assertEqual(s.stop, 2) self.assertEqual(s.step, None) s = slice(1, 2, 3) self.assertEqual(s.start, 1) self.assertEqual(s.stop, 2) self.assertEqual(s.step, 3) class AnyClass: pass obj = AnyClass() s = slice(obj) self.assertTrue(s.stop is obj) def test_indices(self): self.assertEqual(slice(None ).indices(10), (0, 10, 1)) self.assertEqual(slice(None, None, 2).indices(10), (0, 10, 2)) self.assertEqual(slice(1, None, 2).indices(10), (1, 10, 2)) self.assertEqual(slice(None, None, -1).indices(10), (9, -1, -1)) self.assertEqual(slice(None, None, -2).indices(10), (9, -1, -2)) self.assertEqual(slice(3, None, -2).indices(10), (3, -1, -2)) # issue 3004 tests self.assertEqual(slice(None, -9).indices(10), (0, 1, 1)) self.assertEqual(slice(None, -10).indices(10), (0, 0, 1)) self.assertEqual(slice(None, -11).indices(10), (0, 0, 1)) self.assertEqual(slice(None, -10, -1).indices(10), (9, 0, -1)) self.assertEqual(slice(None, -11, -1).indices(10), (9, -1, -1)) self.assertEqual(slice(None, -12, -1).indices(10), (9, -1, -1)) self.assertEqual(slice(None, 9).indices(10), (0, 9, 1)) self.assertEqual(slice(None, 10).indices(10), (0, 10, 1)) self.assertEqual(slice(None, 11).indices(10), (0, 10, 1)) self.assertEqual(slice(None, 8, -1).indices(10), (9, 8, -1)) self.assertEqual(slice(None, 9, -1).indices(10), (9, 9, -1)) self.assertEqual(slice(None, 10, -1).indices(10), (9, 9, -1)) self.assertEqual( slice(-100, 100 ).indices(10), slice(None).indices(10) ) self.assertEqual( slice(100, -100, -1).indices(10), slice(None, None, -1).indices(10) ) self.assertEqual(slice(-100L, 100L, 2L).indices(10), (0, 10, 2)) self.assertEqual(range(10)[::sys.maxint - 1], [0]) self.assertRaises(OverflowError, slice(None).indices, 1L<<100) def test_setslice_without_getslice(self): tmp = [] class X(object): def __setslice__(self, i, j, k): tmp.append((i, j, k)) x = X() with test_support.check_py3k_warnings(): x[1:2] = 42 self.assertEqual(tmp, [(1, 2, 42)]) def test_pickle(self): s = slice(10, 20, 3) for protocol in (0,1,2): t = loads(dumps(s, protocol)) self.assertEqual(s, t) self.assertEqual(s.indices(15), t.indices(15)) self.assertNotEqual(id(s), id(t)) def test_main(): test_support.run_unittest(SliceTest) if __name__ == "__main__": test_main()
gpl-3.0
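A quick standalone illustration (not part of the file above; plain Python, no special setup assumed) of the indices() contract those tests pin down: for any slice s and sequence length n, s.indices(n) yields a normalized (start, stop, step) triple such that range(start, stop, step) enumerates exactly the indices the slice selects. Note the triple is meant for range(), not for re-slicing: a negative step can normalize stop to -1, which would be reinterpreted as "last element" if sliced with directly.

seq = list(range(10))
for s in (slice(None, -9), slice(3, None, -2), slice(-100, 100)):
    start, stop, step = s.indices(len(seq))
    # range() over the normalized triple visits the same items the slice does
    assert seq[s] == [seq[i] for i in range(start, stop, step)]
    print(s, '->', (start, stop, step))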
RCHG/blog_backup
_vendor/bundle/ruby/2.1.0/gems/pygments.rb-0.6.0/vendor/pygments-main/scripts/get_vimkw.py
38
1478
from __future__ import print_function

import re

r_line = re.compile(r"^(syn keyword vimCommand contained|syn keyword vimOption "
                    r"contained|syn keyword vimAutoEvent contained)\s+(.*)")
r_item = re.compile(r"(\w+)(?:\[(\w+)\])?")

def getkw(input, output):
    out = file(output, 'w')

    output_info = {'command': [], 'option': [], 'auto': []}
    for line in file(input):
        m = r_line.match(line)
        if m:
            # Decide which output gets mapped to d
            if 'vimCommand' in m.group(1):
                d = output_info['command']
            elif 'AutoEvent' in m.group(1):
                d = output_info['auto']
            else:
                d = output_info['option']

            # Extract all the shortened versions
            for i in r_item.finditer(m.group(2)):
                d.append('(%r,%r)' %
                         (i.group(1), "%s%s" % (i.group(1), i.group(2) or '')))

    output_info['option'].append("('nnoremap','nnoremap')")
    output_info['option'].append("('inoremap','inoremap')")
    output_info['option'].append("('vnoremap','vnoremap')")

    for a, b in output_info.items():
        b.sort()
        print('%s=[%s]' % (a, ','.join(b)), file=out)

def is_keyword(w, keywords):
    for i in range(len(w), 0, -1):
        if w[:i] in keywords:
            return keywords[w[:i]][:len(w)] == w
    return False

if __name__ == "__main__":
    getkw("/usr/share/vim/vim73/syntax/vim.vim", "temp.py")
mit
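A short sketch of how the is_keyword() helper above resolves Vim-style abbreviations, using a hypothetical one-entry mapping (the real mapping is the one getkw() generates from vim.vim, where the syntax file abbreviates 'function' as fu[nction]):

keywords = {'fu': 'function'}           # hypothetical entry for fu[nction]
print(is_keyword('fu', keywords))       # True  - the abbreviation itself
print(is_keyword('func', keywords))     # True  - a longer prefix of 'function'
print(is_keyword('function', keywords)) # True  - the full command name
print(is_keyword('fux', keywords))      # False - not a prefix of 'function'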
cernops/nova
nova/db/sqlalchemy/migrate_repo/versions/273_sqlite_foreign_keys.py
79
4690
# Copyright 2014 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate import ForeignKeyConstraint, UniqueConstraint from oslo_db.sqlalchemy import utils from sqlalchemy import MetaData, schema, Table FKEYS = [ ('fixed_ips', 'instance_uuid', 'instances', 'uuid', 'fixed_ips_instance_uuid_fkey'), ('block_device_mapping', 'instance_uuid', 'instances', 'uuid', 'block_device_mapping_instance_uuid_fkey'), ('instance_info_caches', 'instance_uuid', 'instances', 'uuid', 'instance_info_caches_instance_uuid_fkey'), ('instance_metadata', 'instance_uuid', 'instances', 'uuid', 'instance_metadata_instance_uuid_fkey'), ('instance_system_metadata', 'instance_uuid', 'instances', 'uuid', 'instance_system_metadata_ibfk_1'), ('instance_type_projects', 'instance_type_id', 'instance_types', 'id', 'instance_type_projects_ibfk_1'), ('iscsi_targets', 'volume_id', 'volumes', 'id', 'iscsi_targets_volume_id_fkey'), ('reservations', 'usage_id', 'quota_usages', 'id', 'reservations_ibfk_1'), ('security_group_instance_association', 'instance_uuid', 'instances', 'uuid', 'security_group_instance_association_instance_uuid_fkey'), ('security_group_instance_association', 'security_group_id', 'security_groups', 'id', 'security_group_instance_association_ibfk_1'), ('virtual_interfaces', 'instance_uuid', 'instances', 'uuid', 'virtual_interfaces_instance_uuid_fkey'), ('compute_nodes', 'service_id', 'services', 'id', 'fk_compute_nodes_service_id'), ('instance_actions', 'instance_uuid', 'instances', 'uuid', 'fk_instance_actions_instance_uuid'), ('instance_faults', 'instance_uuid', 'instances', 'uuid', 'fk_instance_faults_instance_uuid'), ('migrations', 'instance_uuid', 'instances', 'uuid', 'fk_migrations_instance_uuid'), ] UNIQUES = [ ('compute_nodes', 'uniq_compute_nodes0host0hypervisor_hostname', ['host', 'hypervisor_hostname']), ('fixed_ips', 'uniq_fixed_ips0address0deleted', ['address', 'deleted']), ('instance_info_caches', 'uniq_instance_info_caches0instance_uuid', ['instance_uuid']), ('instance_type_projects', 'uniq_instance_type_projects0instance_type_id0project_id0deleted', ['instance_type_id', 'project_id', 'deleted']), ('pci_devices', 'uniq_pci_devices0compute_node_id0address0deleted', ['compute_node_id', 'address', 'deleted']), ('virtual_interfaces', 'uniq_virtual_interfaces0address0deleted', ['address', 'deleted']), ] def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine if migrate_engine.name == 'sqlite': # SQLite is also missing this one index if not utils.index_exists(migrate_engine, 'fixed_ips', 'address'): utils.add_index(migrate_engine, 'fixed_ips', 'address', ['address']) for src_table, src_column, dst_table, dst_column, name in FKEYS: src_table = Table(src_table, meta, autoload=True) if name in set(fk.name for fk in src_table.foreign_keys): continue src_column = src_table.c[src_column] dst_table = Table(dst_table, meta, autoload=True) dst_column = dst_table.c[dst_column] fkey = ForeignKeyConstraint(columns=[src_column], refcolumns=[dst_column], name=name) fkey.create() # SQLAlchemy 
versions < 1.0.0 don't reflect unique constraints # for SQLite correctly causing sqlalchemy-migrate to recreate # some tables with missing unique constraints. Re-add some # potentially missing unique constraints as a workaround. for table_name, name, column_names in UNIQUES: table = Table(table_name, meta, autoload=True) if name in set(c.name for c in table.constraints if isinstance(c, schema.UniqueConstraint)): continue uc = UniqueConstraint(*column_names, table=table, name=name) uc.create()
apache-2.0
engdan77/edoAutoHomeMobile
twisted/runner/procmontap.py
65
2298
# -*- test-case-name: twisted.runner.test.test_procmontap -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Support for creating a service which runs a process monitor.
"""

from twisted.python import usage
from twisted.runner.procmon import ProcessMonitor


class Options(usage.Options):
    """
    Define the options accepted by the I{twistd procmon} plugin.
    """

    synopsis = "[procmon options] commandline"

    optParameters = [["threshold", "t", 1, "How long a process has to live "
                      "before the death is considered instant, in seconds.",
                      float],
                     ["killtime", "k", 5, "How long a process being killed "
                      "has to get its affairs in order before it gets killed "
                      "with an unmaskable signal.",
                      float],
                     ["minrestartdelay", "m", 1, "The minimum time (in "
                      "seconds) to wait before attempting to restart a "
                      "process", float],
                     ["maxrestartdelay", "M", 3600, "The maximum time (in "
                      "seconds) to wait before attempting to restart a "
                      "process", float]]

    optFlags = []

    longdesc = """\
procmon runs processes, monitors their progress, and restarts them when they
die.

procmon will not attempt to restart a process that appears to die instantly;
with each "instant" death (less than 1 second, by default), it will delay
approximately twice as long before restarting it. A successful run will reset
the counter.

E.g. twistd procmon sleep 10"""

    def parseArgs(self, *args):
        """
        Grab the command line that is going to be started and monitored.
        """
        self['args'] = args

    def postOptions(self):
        """
        Check for dependencies.
        """
        if len(self["args"]) < 1:
            raise usage.UsageError("Please specify a process commandline")


def makeService(config):
    s = ProcessMonitor()

    s.threshold = config["threshold"]
    s.killTime = config["killtime"]
    s.minRestartDelay = config["minrestartdelay"]
    s.maxRestartDelay = config["maxrestartdelay"]

    s.addProcess(" ".join(config["args"]), config["args"])
    return s
mit
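For context, a minimal sketch of driving the plugin's pieces directly from Python rather than via twistd (assuming only that twisted.runner is importable; the option values are examples):

from twisted.runner.procmontap import Options, makeService

config = Options()
config.parseOptions(['--threshold', '2', '--killtime', '10', 'sleep', '10'])
service = makeService(config)   # a ProcessMonitor set up to run 'sleep 10'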
mandeepdhami/neutron
neutron/db/migration/alembic_migrations/firewall_init_ops.py
61
3536
# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Initial schema operations for firewall service plugin from alembic import op import sqlalchemy as sa action_types = sa.Enum('allow', 'deny', name='firewallrules_action') def upgrade(): op.create_table( 'firewall_policies', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=1024), nullable=True), sa.Column('shared', sa.Boolean(), nullable=True), sa.Column('audited', sa.Boolean(), nullable=True), sa.PrimaryKeyConstraint('id')) op.create_table( 'firewalls', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=1024), nullable=True), sa.Column('shared', sa.Boolean(), nullable=True), sa.Column('admin_state_up', sa.Boolean(), nullable=True), sa.Column('status', sa.String(length=16), nullable=True), sa.Column('firewall_policy_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['firewall_policy_id'], ['firewall_policies.id'], name='firewalls_ibfk_1'), sa.PrimaryKeyConstraint('id')) op.create_table( 'firewall_rules', sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=1024), nullable=True), sa.Column('firewall_policy_id', sa.String(length=36), nullable=True), sa.Column('shared', sa.Boolean(), nullable=True), sa.Column('protocol', sa.String(length=40), nullable=True), sa.Column('ip_version', sa.Integer(), nullable=False), sa.Column('source_ip_address', sa.String(length=46), nullable=True), sa.Column('destination_ip_address', sa.String(length=46), nullable=True), sa.Column('source_port_range_min', sa.Integer(), nullable=True), sa.Column('source_port_range_max', sa.Integer(), nullable=True), sa.Column('destination_port_range_min', sa.Integer(), nullable=True), sa.Column('destination_port_range_max', sa.Integer(), nullable=True), sa.Column('action', action_types, nullable=True), sa.Column('enabled', sa.Boolean(), nullable=True), sa.Column('position', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['firewall_policy_id'], ['firewall_policies.id'], name='firewall_rules_ibfk_1'), sa.PrimaryKeyConstraint('id'))
apache-2.0
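The migration above only defines upgrade(); a matching downgrade, were one needed, would drop the tables in reverse dependency order so the foreign keys do not block the drops. A hypothetical sketch, not part of the source:

def downgrade():
    op.drop_table('firewall_rules')      # references firewall_policies
    op.drop_table('firewalls')           # references firewall_policies
    op.drop_table('firewall_policies')
    # clean up the named Enum type on backends that create one (PostgreSQL)
    action_types.drop(op.get_bind(), checkfirst=True)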
0x7678/youtube-dl
youtube_dl/extractor/slideshare.py
16
2025
from __future__ import unicode_literals

import re
import json

from .common import InfoExtractor
from ..compat import (
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
)


class SlideshareIE(InfoExtractor):
    _VALID_URL = r'https?://www\.slideshare\.net/[^/]+?/(?P<title>.+?)($|\?)'

    _TEST = {
        'url': 'http://www.slideshare.net/Dataversity/keynote-presentation-managing-scale-and-complexity',
        'info_dict': {
            'id': '25665706',
            'ext': 'mp4',
            'title': 'Managing Scale and Complexity',
            'description': 'This was a keynote presentation at the NoSQL Now! 2013 Conference & Expo (http://www.nosqlnow.com). This presentation was given by Adrian Cockcroft from Netflix.',
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        page_title = mobj.group('title')
        webpage = self._download_webpage(url, page_title)
        slideshare_obj = self._search_regex(
            r'var slideshare_object = ({.*?}); var user_info =',
            webpage, 'slideshare object')
        info = json.loads(slideshare_obj)
        if info['slideshow']['type'] != 'video':
            raise ExtractorError('Webpage type is "%s": only video extraction is supported for Slideshare' % info['slideshow']['type'], expected=True)

        doc = info['doc']
        bucket = info['jsplayer']['video_bucket']
        ext = info['jsplayer']['video_extension']
        video_url = compat_urlparse.urljoin(bucket, doc + '-SD.' + ext)
        description = self._html_search_regex(
            r'<p\s+(?:style="[^"]*"\s+)?class=".*?description.*?"[^>]*>(.*?)</p>', webpage,
            'description', fatal=False)

        return {
            '_type': 'video',
            'id': info['slideshow']['id'],
            'title': info['slideshow']['title'],
            'ext': ext,
            'url': video_url,
            'thumbnail': info['slideshow']['pin_image_url'],
            'description': description,
        }
unlicense
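As a sanity check on _VALID_URL above, the named group captures the slug portion of the deck URL. Runnable standalone with the pattern copied out of the class:

import re

_VALID_URL = r'https?://www\.slideshare\.net/[^/]+?/(?P<title>.+?)($|\?)'
url = 'http://www.slideshare.net/Dataversity/keynote-presentation-managing-scale-and-complexity'
mobj = re.match(_VALID_URL, url)
print(mobj.group('title'))  # keynote-presentation-managing-scale-and-complexity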
foreni-packages/golismero
misc/old_tests/plugin_tests/ui/test.py
8
4271
#!/usr/bin/env python # -*- coding: utf-8 -*- __license__ = """ GoLismero 2.0 - The web knife - Copyright (C) 2011-2014 Golismero project site: https://github.com/golismero Golismero project mail: [email protected] This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """ from golismero.api.audit import get_audit_count from golismero.api.config import Config from golismero.api.data import Data from golismero.api.data.db import Database from golismero.api.plugin import UIPlugin from golismero.main.console import colorize from golismero.messaging.codes import MessageType, MessageCode, MessagePriority from golismero.messaging.message import Message import time import warnings #------------------------------------------------------------------------------ class TestUIPlugin(UIPlugin): """ Test UI plugin. """ #-------------------------------------------------------------------------- def run(self, info): if not isinstance(info, Data): raise TypeError("Expected Data, got %r instead" % type(info)) print "-" * 79 print "ID: %s" % info.identity print "Data: %r" % info history = Database.get_plugin_history(info.identity) if history: print "History:" for plugin_id in history: print " " + plugin_id print #-------------------------------------------------------------------------- def recv_msg(self, message): if not isinstance(message, Message): raise TypeError("Expected Message, got %r instead" % type(message)) print "-" * 79 print "Message:" print " Timestamp: %s" % time.ctime(message.timestamp) print " Audit: %s" % message.audit_name print " Plugin: %s" % message.plugin_id print " Type: %s" % MessageType.get_name_from_value(message.message_type) print " Code: %s" % MessageCode.get_name_from_value_and_type(message.message_code, message.message_type) print " Priority: %s" % MessagePriority.get_name_from_value(message.priority) print " Payload: %r" % (message.message_info,) print if message.message_type == MessageType.MSG_TYPE_CONTROL: if message.message_code == MessageCode.MSG_CONTROL_STOP_AUDIT: if get_audit_count() == 1: Config._context.send_msg( message_type = MessageType.MSG_TYPE_CONTROL, message_code = MessageCode.MSG_CONTROL_STOP, message_info = True, priority = MessagePriority.MSG_PRIORITY_LOW ) elif message.message_code == MessageCode.MSG_CONTROL_LOG: (text, level, is_error) = message.message_info if is_error: print colorize(text, "magenta") else: print colorize(text, "cyan") elif message.message_code == MessageCode.MSG_CONTROL_ERROR: (description, traceback) = message.message_info print colorize(description, "magenta") print colorize(traceback, "magenta") elif message.message_code == MessageCode.MSG_CONTROL_WARNING: for w in message.message_info: formatted = warnings.formatwarning(w.message, w.category, w.filename, w.lineno, w.line) print colorize(formatted, "yellow") #-------------------------------------------------------------------------- def get_accepted_types(self): pass
gpl-2.0
micropython/micropython
examples/asmled.py
15
1679
# flash LED #1 using inline assembler
# this version is overly verbose and uses word stores
@micropython.asm_thumb
def flash_led(r0):
    movw(r1, (stm.GPIOA + stm.GPIO_BSRRL) & 0xFFFF)
    movt(r1, ((stm.GPIOA + stm.GPIO_BSRRL) >> 16) & 0x7FFF)
    movw(r2, 1 << 13)
    movt(r2, 0)
    movw(r3, 0)
    movt(r3, 1 << 13)

    b(loop_entry)

    label(loop1)

    # turn LED on
    str(r2, [r1, 0])

    # delay for a bit
    movw(r4, 5599900 & 0xFFFF)
    movt(r4, (5599900 >> 16) & 0xFFFF)
    label(delay_on)
    sub(r4, r4, 1)
    cmp(r4, 0)
    bgt(delay_on)

    # turn LED off
    str(r3, [r1, 0])

    # delay for a bit
    movw(r4, 5599900 & 0xFFFF)
    movt(r4, (5599900 >> 16) & 0xFFFF)
    label(delay_off)
    sub(r4, r4, 1)
    cmp(r4, 0)
    bgt(delay_off)

    # loop r0 times
    sub(r0, r0, 1)
    label(loop_entry)
    cmp(r0, 0)
    bgt(loop1)

# flash LED #2 using inline assembler
# this version uses half-word stores, and the convenience assembler operation 'movwt'
@micropython.asm_thumb
def flash_led_v2(r0):
    # get the GPIOA address in r1
    movwt(r1, stm.GPIOA)
    # get the bit mask for PA14 (the pin LED #2 is on)
    movw(r2, 1 << 14)

    b(loop_entry)

    label(loop1)

    # turn LED on
    strh(r2, [r1, stm.GPIO_BSRRL])

    # delay for a bit
    movwt(r4, 5599900)
    label(delay_on)
    sub(r4, r4, 1)
    cmp(r4, 0)
    bgt(delay_on)

    # turn LED off
    strh(r2, [r1, stm.GPIO_BSRRH])

    # delay for a bit
    movwt(r4, 5599900)
    label(delay_off)
    sub(r4, r4, 1)
    cmp(r4, 0)
    bgt(delay_off)

    # loop r0 times
    sub(r0, r0, 1)
    label(loop_entry)
    cmp(r0, 0)
    bgt(loop1)

flash_led(5)
flash_led_v2(5)
mit
saadqc/Sub-Merge
VideoPlayer.py
1
7603
#! /usr/bin/python # # Qt example for VLC Python bindings # Copyright (C) 2009-2010 the VideoLAN team # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA. # import sys import vlc from PyQt4 import QtGui, QtCore from Messenger import Messenger class Player(QtGui.QMainWindow): """A simple Media Player using VLC and Qt """ def __init__(self, master=None): QtGui.QMainWindow.__init__(self, master) self.setWindowTitle("Media Player") # creating a basic vlc instance self.instance = vlc.Instance() # creating an empty vlc media player self.mediaplayer = self.instance.media_player_new() self.createUI() self.isPaused = False def createUI(self): """Set up the user interface, signals & slots """ self.widget = QtGui.QWidget(self) self.setCentralWidget(self.widget) # In this widget, the video will be drawn if sys.platform == "darwin": # for MacOS self.videoframe = QtGui.QMacCocoaViewContainer(0) else: self.videoframe = QtGui.QFrame() self.palette = self.videoframe.palette() self.palette.setColor (QtGui.QPalette.Window, QtGui.QColor(0,0,0)) self.videoframe.setPalette(self.palette) self.videoframe.setAutoFillBackground(True) self.positionslider = QtGui.QSlider(QtCore.Qt.Horizontal, self) self.positionslider.setToolTip("Position") self.positionslider.setMaximum(1000) self.connect(self.positionslider, QtCore.SIGNAL("sliderMoved(int)"), self.setPosition) self.hbuttonbox = QtGui.QHBoxLayout() self.playbutton = QtGui.QPushButton("Play") self.hbuttonbox.addWidget(self.playbutton) self.connect(self.playbutton, QtCore.SIGNAL("clicked()"), self.PlayPause) self.stopbutton = QtGui.QPushButton("Stop") self.hbuttonbox.addWidget(self.stopbutton) self.connect(self.stopbutton, QtCore.SIGNAL("clicked()"), self.Stop) self.hbuttonbox.addStretch(1) self.volumeslider = QtGui.QSlider(QtCore.Qt.Horizontal, self) self.volumeslider.setMaximum(100) self.volumeslider.setValue(self.mediaplayer.audio_get_volume()) self.volumeslider.setToolTip("Volume") self.hbuttonbox.addWidget(self.volumeslider) self.connect(self.volumeslider, QtCore.SIGNAL("valueChanged(int)"), self.setVolume) self.vboxlayout = QtGui.QVBoxLayout() self.vboxlayout.addWidget(self.videoframe) self.vboxlayout.addWidget(self.positionslider) self.vboxlayout.addLayout(self.hbuttonbox) self.widget.setLayout(self.vboxlayout) self.timer = QtCore.QTimer(self) self.timer.setInterval(200) self.connect(self.timer, QtCore.SIGNAL("timeout()"), self.updateUI) def PlayPause(self): """Toggle play/pause status """ if self.mediaplayer.is_playing(): self.mediaplayer.pause() self.playbutton.setText("Play") self.isPaused = True else: if self.mediaplayer.play() == -1: self.OpenFile() return self.mediaplayer.play() self.playbutton.setText("Pause") self.timer.start() self.isPaused = False def isPaused(self): """ :return: """ return not self.mediaplayer.is_playing() def closeEvent(self, QCloseEvent): """ Stop media player on window close event :param 
QCloseEvent: :return: """ self.mediaplayer.stop() self.mediaplayer.release() Messenger.main_window.player = None def Stop(self): """Stop player """ self.mediaplayer.stop() self.playbutton.setText("Play") def OpenFile(self, filename=None): """Open a media file in a MediaPlayer """ # create the media if sys.version < '3': filename = unicode(filename) self.media = self.instance.media_new(filename) # put the media in the media player self.mediaplayer.set_media(self.media) # parse the metadata of the file self.media.parse() # set the title of the track as window title self.setWindowTitle(self.media.get_meta(0)) # the media player has to be 'connected' to the QFrame # (otherwise a video would be displayed in it's own window) # this is platform specific! # you have to give the id of the QFrame (or similar object) to # vlc, different platforms have different functions for this if sys.platform.startswith('linux'): # for Linux using the X Server self.mediaplayer.set_xwindow(self.videoframe.winId()) elif sys.platform == "win32": # for Windows self.mediaplayer.set_hwnd(self.videoframe.winId()) elif sys.platform == "darwin": # for MacOS self.mediaplayer.set_nsobject(self.videoframe.winId()) self.PlayPause() def setVolume(self, Volume): """Set the volume """ self.mediaplayer.audio_set_volume(Volume) def setPosition(self, position): """Set the position """ # setting the position to where the slider was dragged self.mediaplayer.set_position(position / 1000.0) # the vlc MediaPlayer needs a float value between 0 and 1, Qt # uses integer variables, so you need a factor; the higher the # factor, the more precise are the results # (1000 should be enough) def setTime(self, position): """ Set position of video in ms :param position: :return: """ self.mediaplayer.set_time(position) def getPosition(self): """ Get Video Position """ return self.mediaplayer.get_time() def getLength(self): """ Get video length :return: """ return self.mediaplayer.get_length() def getVideoSize(self): """ Return video size :return: """ video_size = self.mediaplayer.video_get_size() return video_size def updateUI(self): """updates the user interface""" # setting the slider to the desired position self.positionslider.setValue(self.mediaplayer.get_position() * 1000) if not self.mediaplayer.is_playing(): # no need to call this function if nothing is played self.timer.stop() if not self.isPaused: # after the video finished, the play button stills shows # "Pause", not the desired behavior of a media player # this will fix it self.Stop()
gpl-3.0
bluedynamics/node.ext.python
src/node/ext/python/parser.py
1
23088
import os import _ast import ast import types import copy import exceptions from odict import odict from zope.component import provideHandler from node.ext.directory.interfaces import IFileAddedEvent from utils import get_dotted_name_from_astnode from node.ext.python.interfaces import ( CODESECTION_STARTTOKEN, CODESECTION_ENDTOKEN, Call, IModule, IFunction, IDocstring, IImport, IAttribute, IDecorator, ) from node.ext.python.nodes import ( Module, Docstring, ProtectedSection, Import, Attribute, Decorator, Function, Class, Block, ) CODESECTION_STARTTOKEN = '##code-section ' CODESECTION_ENDTOKEN = '##/code-section ' POSITION_INSERT = 0 POSITION_AFTER = 1 POSITION_BEFORE = -1 class BaseParser(object): def __init__(self, model): self.model = model def __call__(self): raise NotImplemented(u'BaseParser does not implement ``__call__``') def _createastchild(self, astnode): if hasattr(astnode, 'lineno'): if astnode.lineno - 1 in self.model.readlines: return if isinstance(astnode, _ast.Import) \ or isinstance(astnode, _ast.ImportFrom): import_ = Import(None, [], astnode, self.model.buffer) import_.readlines = self.model.readlines self.model[str(import_.uuid)] = import_ elif isinstance(astnode, _ast.FunctionDef): function = Function(None, astnode, self.model.buffer) function.readlines = self.model.readlines self.model[str(function.uuid)] = function for childastnode in astnode.body: function.parser._createastchild(childastnode) function.initdecorators() elif isinstance(astnode, _ast.ClassDef): class_ = Class(None, astnode, self.model.buffer) class_.readlines = self.model.readlines self.model[str(class_.uuid)] = class_ for childastnode in astnode.body: class_.parser._createastchild(childastnode) class_.initdecorators() elif isinstance(astnode, _ast.Expr) \ and isinstance(astnode.value, _ast.Str): docstring = Docstring(None, astnode, self.model.buffer) docstring.readlines = self.model.readlines self.model[str(docstring.uuid)] = docstring elif isinstance(astnode, _ast.Assign): if not IFunction.providedBy(self.model): attribute = Attribute([], None, astnode, self.model.buffer) attribute.readlines = self.model.readlines self.model[str(attribute.uuid)] = attribute def _marklines(self, *args): for arg in args: if not arg in self.model.readlines: self.model.readlines.append(arg) def _findbodyend(self, node): if not hasattr(node, '_fields'): return for fieldname in node._fields: fields = getattr(node, fieldname) if type(fields) is not types.ListType: fields = [fields] for field in fields: if hasattr(field, 'lineno'): if field.lineno > self.model.bufend: self.model.bufend = field.lineno self._findbodyend(field) def _checkbodyendsmultilined(self): pointer = self.model.bufend buflen = len(self.model.buffer) source = '' while True: if buflen - 1 <= pointer: break line = self.model.buffer[pointer].strip() source = '%s\n%s' % (source, line) try: compile(source, '<string>', 'exec') break except SyntaxError, e: pointer += 1 self.model.bufend = pointer def _checkbodyendsprotected(self): pointer = self.model.bufend if pointer < len(self.model.buffer) - 1: next = self.model.buffer[pointer].strip() if next.startswith(CODESECTION_ENDTOKEN): self.model.bufend += 1 def _findnodeposition(self, startlineno, endlineno, indent): values = [v for v in self.model.values() \ if not IDecorator.providedBy(v)] if not values: return self.model, POSITION_INSERT last = None for child in values: # inrange case if child.startlineno <= startlineno \ and child.endlineno >= endlineno: return child.parser._findnodeposition(startlineno, 
endlineno, indent) # before case if endlineno < child.startlineno: return child, POSITION_BEFORE last = child # after case - indent check if last.indent == indent: return last, POSITION_AFTER return self.model, POSITION_AFTER def _findindent(self, lines): indent = None for line in lines: if not line.strip(): continue curindent = 0 for char in line: if char != u' ': break curindent += 1 if indent is None or curindent < indent: indent = curindent if indent is None: return None return indent / 4 # XXX improve def _cutline(self, line): return line[self.model.indent * 4:] # XXX improve def _resolvearg(self, arg): if isinstance(arg, _ast.Str): return repr(arg.s) elif isinstance(arg, _ast.Num): return arg.n elif isinstance(arg, _ast.Name): return arg.id elif isinstance(arg, _ast.Call): args = list() for a in arg.args: args.append(self._resolvearg(a)) kwargs = odict() for keyword in arg.keywords: kwargs[keyword.arg] = self._resolvearg(keyword.value) try: return Call(name=arg.func.id, args=args, kwargs=kwargs) except AttributeError: return Call(name=arg.func.attr, args=args, kwargs=kwargs) elif isinstance(arg, _ast.Tuple) or isinstance(arg, _ast.List): ret = list() for a in arg.elts: ret.append(self._resolvearg(a)) if isinstance(arg, _ast.Tuple): ret = tuple(ret) return ret elif isinstance(arg, _ast.Dict): ret = dict() pointer = 0 for key in arg.keys: key = self._resolvearg(key) ret[key] = self._resolvearg(arg.values[pointer]) pointer += 1 return ret def parsedecorators(self, astnode): for dec in astnode.decorator_list: decorator = Decorator(None, dec) decorator.buffer = self.model.buffer decorator.readlines = self.model.readlines self.model._decorators.append(decorator) def parse_module_handler(obj, event): """Called, if ``Module`` is created and added to ``Directory`` node. """ obj.parser() provideHandler(parse_module_handler, [IModule, IFileAddedEvent]) class ModuleParser(BaseParser): def __call__(self): path = self.model.filepath self.model._buffer = list() if not os.path.exists(path): return if self.model._do_parse: self._parse() def _parse(self): file = open(self.model.filepath, 'r') cont = file.read() # Leading and trailing blank lines cause problems in the builtin # "compile" function, so we strip them. In order to provide correct # line numbers we store the offset - we use in case of an Exception... before = len(cont.split(os.linesep)) cont = cont.lstrip() after = len(cont.split(os.linesep)) cont = cont.rstrip() self.model._buffer = cont.split(os.linesep) offset = before - after file.close() self.model.readlines = list() self._extractencoding() self.model.bufstart = 0 self.model.bufend = len(self.model._buffer) self.model.bufoffset = offset try: self.model.astnode = ast.parse( os.linesep.join(self.model.buffer).strip(), self.model.filepath) except SyntaxError, e: # Since the python source files are being stripped we have to # add an offset to the line number we get thrown from compile() ex = exceptions.SyntaxError((e[0], \ (e[1][0], e[1][1] + offset, e[1][2], e[1][3]))) # <- don't read that raise ex except TypeError, e: # We don't have to modify TypeErrors since they don't contain # line numbers. 
raise e children = self._protectedsections() for node in children: self._marklines(*range(node.bufstart, node.bufend)) # for i in xrange(len(self.model.astnode.body)): # astnode = self.model.astnode.body for astnode in self.model.astnode.body: self._createastchild(astnode) self._markastrelated(self.model) children += self._parsecodeblocks() self._hookchildren(children) def _extractencoding(self): if len(self.model.buffer) == 0: return line = self.model.buffer[0].strip() if line.startswith(u'# -*- coding:') \ and line.endswith(u'-*-'): encoding = line[14:len(line) - 3].strip() self.model.encoding = unicode(encoding) self.model.readlines.append(0) def _markastrelated(self, node): for child in node.values(): if IDocstring.providedBy(child) \ or IImport.providedBy(child) \ or IAttribute.providedBy(child) \ or IDecorator.providedBy(child): self._marklines(*range(child.bufstart, child.bufend)) else: self._marklines(*range(child.bufstart, child.defendlineno)) self._markastrelated(child) def _protectedsections(self): i = 0 currentnode = None in_protected_section = False allnodes = list() for line in self.model.buffer: line_strip = line.strip() if line_strip.startswith('#'): if line_strip.startswith(CODESECTION_STARTTOKEN): if in_protected_section: print "WARNING: Nested protected sections" continue # Protected section is starting here in_protected_section = True name = line_strip[len(CODESECTION_STARTTOKEN):] node = ProtectedSection(name, self.model.buffer) node.sectionname = name node.readlines = self.model.readlines node.bufstart = i currentnode = node elif line_strip.startswith(CODESECTION_ENDTOKEN): if not in_protected_section: raise RuntimeError, \ "ERROR: Protected section closed without open" if line_strip != CODESECTION_ENDTOKEN + \ currentnode.__name__: # Protected section is continuing here currentnode.lines.append(line) continue # Protected section is ending here currentnode.bufend = i + 1 allnodes.append(currentnode) in_protected_section = False currentnode = None i += 1 if in_protected_section: raise RuntimeError, \ "ERROR: Protected section did not close" return allnodes def _parsecodeblocks(self): blocks = list() start = end = 0 curline = 0 for line in self.model.buffer: if curline in self.model.readlines: if start != end: blocks += self._createcodeblocks(start, end) start = end + 1 else: start = curline + 1 curline += 1 end = curline blocks += self._createcodeblocks(start, end) return blocks def _createcodeblocks(self, start, end): lines = self.model.buffer[start:end] if not ''.join(lines).strip(): return [] previndent = None pointer = 0 ret = [] for line in lines: pointer += 1 if not line.strip() or line.strip().startswith('#'): continue if previndent is None: previndent = self._findindent([self.model.buffer[start]]) curindent = self._findindent([line]) if curindent >= previndent: continue elif curindent < previndent: block = Block(None, self.model.buffer) block.readlines = self.model.readlines block.bufstart = start block.bufend = start + pointer - 1 ret.append(block) start = start + pointer - 1 previndent = curindent block = Block(None, self.model.buffer) block.readlines = self.model.readlines block.bufstart = start block.bufend = end ret.append(block) return ret def _hookchildren(self, children): for child in children: if not child.__name__: child.__name__ = str(child.uuid) child.__parent__ = self.model node, position = self._findnodeposition(child.startlineno, child.endlineno, child.indent) child.__parent__ = None if position == POSITION_INSERT: node[child.__name__] = child 
elif position == POSITION_BEFORE: node.__parent__.insertbefore(child, node) elif position == POSITION_AFTER: try: node.__parent__.insertafter(child, node) except AttributeError: #XXX: handle this problem correctly #Currently I added the message, so that the user #knows how to workaround if isinstance(child,Block): code='\n'.join(child.buffer) raise ValueError( 'This should not have happened, the parser has \n\ currently problems when a function or class ends\n \ with a comment\n \ So please check if your block has a comment at its end\n \ and remove it if necessary\n\ see the code: \n%s' % code) else: raise class ImportParser(BaseParser): def __call__(self): astnode = self.model.astnode if isinstance(astnode, _ast.ImportFrom): self.model.fromimport = unicode(astnode.module) for name in astnode.names: asname = name.asname is not None and unicode(name.asname) or None self.model.names.append([unicode(name.name), asname]) self.model._fromimport_orgin = copy.deepcopy(self.model.fromimport) self.model._names_orgin = copy.deepcopy(self.model.names) def _definitionends(self, bufno): if len(self.model.buffer) < bufno: return True if len(self.model.buffer) <= bufno + 1: return True line = self.model.buffer[bufno + 1].strip() for term in [u'from ', u'import ', u'if ', u'for ', u'while ', u'try ', u'with ', u'class ', u'def ', u'@', u'#', u'"""', u'\'\'\'']: if line.startswith(term): return True if line == u'' or line.find(u'=') != -1: return True return False class AttributeParser(BaseParser): def __call__(self): astnode = self.model.astnode for target in astnode.targets: if isinstance(target, _ast.Tuple): for name in target.elts: self.model.targets.append(name.id) elif isinstance(target, _ast.Subscript): self.model.targets.append(get_dotted_name_from_astnode(target.value)) else: try: self.model.targets.append(target.id) except AttributeError: self.model.targets.append(target.value.id) self.model._targets_orgin = copy.deepcopy(self.model.targets) self._findattributeend() self._extractvalue() self._parseastargs(astnode) self.model._args_orgin = copy.deepcopy(self.model.args) self.model._kwargs_orgin = copy.deepcopy(self.model.kwargs) def _findattributeend(self): pointer = self.model.bufstart buflen = len(self.model.buffer) source = '' while True: #if pointer + 1 == buflen: if pointer == buflen: break line = self.model.buffer[pointer].strip() source = '%s\n%s' % (source, line) try: compile(source, '<string>', 'exec') pointer += 1 break except SyntaxError, e: pointer += 1 self.model.bufend = pointer def _extractvalue(self): lines = self.model.buffer[self.model.bufstart:self.model.bufend] if not lines: lines.append(self.model.buffer[self.model.bufstart]) lines[0] = lines[0][lines[0].find('=') + 1:].strip() for i in range(1, len(lines)): lines[i] = self._cutline(lines[i]) self.model.value = '\n'.join(lines) self.model._value_orgin = '\n'.join(lines) def _parseastargs(self, astnode): if not hasattr(astnode.value, 'args'): return for arg in astnode.value.args: self.model.args.append(self._resolvearg(arg)) for keyword in astnode.value.keywords: self.model.kwargs[keyword.arg] = self._resolvearg(keyword.value) class DecoratorParser(BaseParser): def __call__(self): astnode = self.model.astnode if isinstance(astnode, _ast.Name) or isinstance(astnode, _ast.Attribute): #the case where the decorator has no parameters if not getattr(astnode, 'id', None): # XXX: added by phil because sometimes astnode.id is None astnode.id = get_dotted_name_from_astnode(astnode) self.model.decoratorname = astnode.id 
self.model._decoratorname_orgin = astnode.id return #the decorator has parameters self.model.is_callable=True if not getattr(astnode.func, 'id', None): astnode.func.id=get_dotted_name_from_astnode(astnode.func) self.model.decoratorname = astnode.func.id self.model._decoratorname_orgin = astnode.func.id self._parseastargs(astnode) self.model._args_orgin = copy.deepcopy(self.model.args) self.model._kwargs_orgin = copy.deepcopy(self.model.kwargs) def _parseastargs(self, astnode): for arg in astnode.args: self.model.args.append(self._resolvearg(arg)) for keyword in astnode.keywords: self.model.kwargs[keyword.arg] = self._resolvearg(keyword.value) def _definitionends(self, bufno): if len(self.model.buffer) <= bufno: return True line = self.model.buffer[bufno + 1].strip() for term in [u'class ', u'def ', u'@']: if line.startswith(term): return True return False class FunctionParser(BaseParser): def __call__(self): astnode = self.model.astnode self.model.functionname = astnode.name self._findbodyend(astnode) self._checkbodyendsmultilined() self._checkbodyendsprotected() self._parseastargs(astnode) self.model._args_orgin = copy.deepcopy(self.model.args) self.model._kwargs_orgin = copy.deepcopy(self.model.kwargs) self.parsedecorators(astnode) def _parseastargs(self, astnode): all = list() for arg in astnode.args.args: all.append(self._resolvearg(arg)) args = all[:len(all) - len(astnode.args.defaults)] kwargs = all[len(all) - len(astnode.args.defaults):] for arg in astnode.args.args: resolved = self._resolvearg(arg) if resolved in args: self.model.args.append(resolved) pointer = 0 for kwarg in astnode.args.defaults: self.model.kwargs[kwargs[pointer]] = self._resolvearg(kwarg) pointer += 1 if astnode.args.vararg: self.model.args.append('*%s' % astnode.args.vararg) if astnode.args.kwarg: self.model.kwargs['**%s' % astnode.args.kwarg] = None def _definitionends(self, bufno): if len(self.model.buffer) <= bufno: return True line = self.model.buffer[bufno].strip() if line.find(u'#') > 0: line = line[0:line.find(u'#')].strip() if line.endswith(u'\\') \ or line.endswith(u','): return False if line.endswith(u':'): return True return False class ClassParser(BaseParser): def __call__(self): astnode = self.model.astnode self.model.classname = astnode.name self._findbodyend(astnode) self._checkbodyendsmultilined() self._checkbodyendsprotected() def base_name(astnode): name = list() while True: if isinstance(astnode, _ast.Attribute): name.append(astnode.attr) astnode = astnode.value else: name.append(astnode.id) break name.reverse() return '.'.join(name) self.model.bases = [base_name(base) for base in astnode.bases] self.model._bases_orgin = copy.deepcopy(self.model.bases) self.parsedecorators(astnode) def _definitionends(self, bufno): if len(self.model.buffer) <= bufno: return True line = self.model.buffer[bufno].strip() if line.find(u'#') > 0: line = line[0:line.find(u'#')].strip() if line.endswith(u'\\') \ or line.endswith(u','): return False if line.endswith(u':'): return True return False
bsd-3-clause
kouaw/CouchPotatoServer
couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ndr.py
9
3029
# encoding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, qualities, ) class NDRIE(InfoExtractor): IE_NAME = 'ndr' IE_DESC = 'NDR.de - Mediathek' _VALID_URL = r'https?://www\.ndr\.de/.+?(?P<id>\d+)\.html' _TESTS = [ { 'url': 'http://www.ndr.de/fernsehen/media/dienordreportage325.html', 'md5': '4a4eeafd17c3058b65f0c8f091355855', 'note': 'Video file', 'info_dict': { 'id': '325', 'ext': 'mp4', 'title': 'Blaue Bohnen aus Blocken', 'description': 'md5:190d71ba2ccddc805ed01547718963bc', 'duration': 1715, }, }, { 'url': 'http://www.ndr.de/info/audio51535.html', 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8', 'note': 'Audio file', 'info_dict': { 'id': '51535', 'ext': 'mp3', 'title': 'La Valette entgeht der Hinrichtung', 'description': 'md5:22f9541913a40fe50091d5cdd7c9f536', 'duration': 884, } } ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') page = self._download_webpage(url, video_id, 'Downloading page') title = self._og_search_title(page).strip() description = self._og_search_description(page) if description: description = description.strip() duration = int_or_none(self._html_search_regex(r'duration: (\d+),\n', page, 'duration', fatal=False)) formats = [] mp3_url = re.search(r'''\{src:'(?P<audio>[^']+)', type:"audio/mp3"},''', page) if mp3_url: formats.append({ 'url': mp3_url.group('audio'), 'format_id': 'mp3', }) thumbnail = None video_url = re.search(r'''3: \{src:'(?P<video>.+?)\.hi\.mp4', type:"video/mp4"},''', page) if video_url: thumbnails = re.findall(r'''\d+: \{src: "([^"]+)"(?: \|\| '[^']+')?, quality: '([^']+)'}''', page) if thumbnails: quality_key = qualities(['xs', 's', 'm', 'l', 'xl']) largest = max(thumbnails, key=lambda thumb: quality_key(thumb[1])) thumbnail = 'http://www.ndr.de' + largest[0] for format_id in 'lo', 'hi', 'hq': formats.append({ 'url': '%s.%s.mp4' % (video_url.group('video'), format_id), 'format_id': format_id, }) if not formats: raise ExtractorError('No media links available for %s' % video_id) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'formats': formats, }
gpl-3.0
solashirai/edx-platform
common/djangoapps/student/tests/test_roles.py
147
7798
""" Tests of student.roles """ import ddt from django.test import TestCase from courseware.tests.factories import UserFactory, StaffFactory, InstructorFactory from student.tests.factories import AnonymousUserFactory from student.roles import ( GlobalStaff, CourseRole, CourseStaffRole, CourseInstructorRole, OrgStaffRole, OrgInstructorRole, RoleCache, CourseBetaTesterRole ) from opaque_keys.edx.locations import SlashSeparatedCourseKey class RolesTestCase(TestCase): """ Tests of student.roles """ def setUp(self): super(RolesTestCase, self).setUp() self.course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall') self.course_loc = self.course_key.make_usage_key('course', '2012_Fall') self.anonymous_user = AnonymousUserFactory() self.student = UserFactory() self.global_staff = UserFactory(is_staff=True) self.course_staff = StaffFactory(course_key=self.course_key) self.course_instructor = InstructorFactory(course_key=self.course_key) def test_global_staff(self): self.assertFalse(GlobalStaff().has_user(self.student)) self.assertFalse(GlobalStaff().has_user(self.course_staff)) self.assertFalse(GlobalStaff().has_user(self.course_instructor)) self.assertTrue(GlobalStaff().has_user(self.global_staff)) def test_group_name_case_sensitive(self): uppercase_course_id = "ORG/COURSE/NAME" lowercase_course_id = uppercase_course_id.lower() uppercase_course_key = SlashSeparatedCourseKey.from_deprecated_string(uppercase_course_id) lowercase_course_key = SlashSeparatedCourseKey.from_deprecated_string(lowercase_course_id) role = "role" lowercase_user = UserFactory() CourseRole(role, lowercase_course_key).add_users(lowercase_user) uppercase_user = UserFactory() CourseRole(role, uppercase_course_key).add_users(uppercase_user) self.assertTrue(CourseRole(role, lowercase_course_key).has_user(lowercase_user)) self.assertFalse(CourseRole(role, uppercase_course_key).has_user(lowercase_user)) self.assertFalse(CourseRole(role, lowercase_course_key).has_user(uppercase_user)) self.assertTrue(CourseRole(role, uppercase_course_key).has_user(uppercase_user)) def test_course_role(self): """ Test that giving a user a course role enables access appropriately """ self.assertFalse( CourseStaffRole(self.course_key).has_user(self.student), "Student has premature access to {}".format(self.course_key) ) CourseStaffRole(self.course_key).add_users(self.student) self.assertTrue( CourseStaffRole(self.course_key).has_user(self.student), "Student doesn't have access to {}".format(unicode(self.course_key)) ) # remove access and confirm CourseStaffRole(self.course_key).remove_users(self.student) self.assertFalse( CourseStaffRole(self.course_key).has_user(self.student), "Student still has access to {}".format(self.course_key) ) def test_org_role(self): """ Test that giving a user an org role enables access appropriately """ self.assertFalse( OrgStaffRole(self.course_key.org).has_user(self.student), "Student has premature access to {}".format(self.course_key.org) ) OrgStaffRole(self.course_key.org).add_users(self.student) self.assertTrue( OrgStaffRole(self.course_key.org).has_user(self.student), "Student doesn't have access to {}".format(unicode(self.course_key.org)) ) # remove access and confirm OrgStaffRole(self.course_key.org).remove_users(self.student) if hasattr(self.student, '_roles'): del self.student._roles self.assertFalse( OrgStaffRole(self.course_key.org).has_user(self.student), "Student still has access to {}".format(self.course_key.org) ) def test_org_and_course_roles(self): """ Test that Org and course roles don't interfere with 
course roles or vice versa """ OrgInstructorRole(self.course_key.org).add_users(self.student) CourseInstructorRole(self.course_key).add_users(self.student) self.assertTrue( OrgInstructorRole(self.course_key.org).has_user(self.student), "Student doesn't have access to {}".format(unicode(self.course_key.org)) ) self.assertTrue( CourseInstructorRole(self.course_key).has_user(self.student), "Student doesn't have access to {}".format(unicode(self.course_key)) ) # remove access and confirm OrgInstructorRole(self.course_key.org).remove_users(self.student) self.assertFalse( OrgInstructorRole(self.course_key.org).has_user(self.student), "Student still has access to {}".format(self.course_key.org) ) self.assertTrue( CourseInstructorRole(self.course_key).has_user(self.student), "Student doesn't have access to {}".format(unicode(self.course_key)) ) # ok now keep org role and get rid of course one OrgInstructorRole(self.course_key.org).add_users(self.student) CourseInstructorRole(self.course_key).remove_users(self.student) self.assertTrue( OrgInstructorRole(self.course_key.org).has_user(self.student), "Student lost has access to {}".format(self.course_key.org) ) self.assertFalse( CourseInstructorRole(self.course_key).has_user(self.student), "Student doesn't have access to {}".format(unicode(self.course_key)) ) def test_get_user_for_role(self): """ test users_for_role """ role = CourseStaffRole(self.course_key) role.add_users(self.student) self.assertGreater(len(role.users_with_role()), 0) def test_add_users_doesnt_add_duplicate_entry(self): """ Tests that calling add_users multiple times before a single call to remove_users does not result in the user remaining in the group. """ role = CourseStaffRole(self.course_key) role.add_users(self.student) self.assertTrue(role.has_user(self.student)) # Call add_users a second time, then remove just once. role.add_users(self.student) role.remove_users(self.student) self.assertFalse(role.has_user(self.student)) @ddt.ddt class RoleCacheTestCase(TestCase): IN_KEY = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall') NOT_IN_KEY = SlashSeparatedCourseKey('edX', 'toy', '2013_Fall') ROLES = ( (CourseStaffRole(IN_KEY), ('staff', IN_KEY, 'edX')), (CourseInstructorRole(IN_KEY), ('instructor', IN_KEY, 'edX')), (OrgStaffRole(IN_KEY.org), ('staff', None, 'edX')), (OrgInstructorRole(IN_KEY.org), ('instructor', None, 'edX')), (CourseBetaTesterRole(IN_KEY), ('beta_testers', IN_KEY, 'edX')), ) def setUp(self): super(RoleCacheTestCase, self).setUp() self.user = UserFactory() @ddt.data(*ROLES) @ddt.unpack def test_only_in_role(self, role, target): role.add_users(self.user) cache = RoleCache(self.user) self.assertTrue(cache.has_role(*target)) for other_role, other_target in self.ROLES: if other_role == role: continue self.assertFalse(cache.has_role(*other_target)) @ddt.data(*ROLES) @ddt.unpack def test_empty_cache(self, role, target): cache = RoleCache(self.user) self.assertFalse(cache.has_role(*target))
agpl-3.0
tadeo/xhtml2pdf
xhtml2pdf/w3c/cssParser.py
53
39118
#!/usr/bin/env python ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ##~ Copyright (C) 2002-2004 TechGame Networks, LLC. ##~ ##~ This library is free software; you can redistribute it and/or ##~ modify it under the terms of the BSD style License as found in the ##~ LICENSE file included with this distribution. ## ## Modified by Dirk Holtwick <[email protected]>, 2007-2008 ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ """CSS-2.1 parser. The CSS 2.1 Specification this parser was derived from can be found at http://www.w3.org/TR/CSS21/ Primary Classes: * CSSParser Parses CSS source forms into results using a Builder Pattern. Must provide concrete implemenation of CSSBuilderAbstract. * CSSBuilderAbstract Outlines the interface between CSSParser and it's rule-builder. Compose CSSParser with a concrete implementation of the builder to get usable results from the CSS parser. Dependencies: python 2.3 (or greater) re """ #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #~ Imports #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ import re import cssSpecial #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #~ Definitions #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def isAtRuleIdent(src, ident): return re.match(r'^@' + ident + r'\s*', src) def stripAtRuleIdent(src): return re.sub(r'^@[a-z\-]+\s*', '', src) class CSSSelectorAbstract(object): """Outlines the interface between CSSParser and it's rule-builder for selectors. CSSBuilderAbstract.selector and CSSBuilderAbstract.combineSelectors must return concrete implementations of this abstract. See css.CSSMutableSelector for an example implementation. """ def addHashId(self, hashId): raise NotImplementedError('Subclass responsibility') def addClass(self, class_): raise NotImplementedError('Subclass responsibility') def addAttribute(self, attrName): raise NotImplementedError('Subclass responsibility') def addAttributeOperation(self, attrName, op, attrValue): raise NotImplementedError('Subclass responsibility') def addPseudo(self, name): raise NotImplementedError('Subclass responsibility') def addPseudoFunction(self, name, value): raise NotImplementedError('Subclass responsibility') class CSSBuilderAbstract(object): """Outlines the interface between CSSParser and it's rule-builder. Compose CSSParser with a concrete implementation of the builder to get usable results from the CSS parser. 
See css.CSSBuilder for an example implementation """ def setCharset(self, charset): raise NotImplementedError('Subclass responsibility') #~ css results ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def beginStylesheet(self): raise NotImplementedError('Subclass responsibility') def stylesheet(self, elements): raise NotImplementedError('Subclass responsibility') def endStylesheet(self): raise NotImplementedError('Subclass responsibility') def beginInline(self): raise NotImplementedError('Subclass responsibility') def inline(self, declarations): raise NotImplementedError('Subclass responsibility') def endInline(self): raise NotImplementedError('Subclass responsibility') def ruleset(self, selectors, declarations): raise NotImplementedError('Subclass responsibility') #~ css namespaces ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def resolveNamespacePrefix(self, nsPrefix, name): raise NotImplementedError('Subclass responsibility') #~ css @ directives ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def atCharset(self, charset): raise NotImplementedError('Subclass responsibility') def atImport(self, import_, mediums, cssParser): raise NotImplementedError('Subclass responsibility') def atNamespace(self, nsPrefix, uri): raise NotImplementedError('Subclass responsibility') def atMedia(self, mediums, ruleset): raise NotImplementedError('Subclass responsibility') def atPage(self, page, pseudopage, declarations): raise NotImplementedError('Subclass responsibility') def atFontFace(self, declarations): raise NotImplementedError('Subclass responsibility') def atIdent(self, atIdent, cssParser, src): return src, NotImplemented #~ css selectors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def combineSelectors(self, selectorA, combiner, selectorB): """Return value must implement CSSSelectorAbstract""" raise NotImplementedError('Subclass responsibility') def selector(self, name): """Return value must implement CSSSelectorAbstract""" raise NotImplementedError('Subclass responsibility') #~ css declarations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def property(self, name, value, important=False): raise NotImplementedError('Subclass responsibility') def combineTerms(self, termA, combiner, termB): raise NotImplementedError('Subclass responsibility') def termIdent(self, value): raise NotImplementedError('Subclass responsibility') def termNumber(self, value, units=None): raise NotImplementedError('Subclass responsibility') def termRGB(self, value): raise NotImplementedError('Subclass responsibility') def termURI(self, value): raise NotImplementedError('Subclass responsibility') def termString(self, value): raise NotImplementedError('Subclass responsibility') def termUnicodeRange(self, value): raise NotImplementedError('Subclass responsibility') def termFunction(self, name, value): raise NotImplementedError('Subclass responsibility') def termUnknown(self, src): raise NotImplementedError('Subclass responsibility') #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #~ CSS Parser #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ class CSSParseError(Exception): src = None ctxsrc = None fullsrc = None inline = False srcCtxIdx = None srcFullIdx = None ctxsrcFullIdx = None def __init__(self, msg, src, ctxsrc=None): Exception.__init__(self, msg) self.src = src self.ctxsrc = ctxsrc or src if self.ctxsrc: self.srcCtxIdx = self.ctxsrc.find(self.src) if self.srcCtxIdx < 0: del self.srcCtxIdx def __str__(self): if self.ctxsrc: return Exception.__str__(self) + ':: (' + repr(self.ctxsrc[:self.srcCtxIdx]) + ', ' + repr(self.ctxsrc[self.srcCtxIdx:self.srcCtxIdx+20]) + ')' 
        else:
            return Exception.__str__(self) + ':: ' + repr(self.src[:40])

    def setFullCSSSource(self, fullsrc, inline=False):
        self.fullsrc = fullsrc
        if inline:
            self.inline = inline
        if self.fullsrc:
            self.srcFullIdx = self.fullsrc.find(self.src)
            if self.srcFullIdx < 0:
                del self.srcFullIdx
            self.ctxsrcFullIdx = self.fullsrc.find(self.ctxsrc)
            if self.ctxsrcFullIdx < 0:
                del self.ctxsrcFullIdx

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

class CSSParser(object):
    """CSS-2.1 parser dependent only upon the re module.

    Implemented directly from http://www.w3.org/TR/CSS21/grammar.html
    Tested with some existing CSS stylesheets for portability.

    CSS Parsing API:
        * setCSSBuilder()
            To set your concrete implementation of CSSBuilderAbstract

        * parseFile()
            Use to parse external stylesheets using a file-like object

            >>> cssFile = open('test.css', 'r')
            >>> stylesheets = myCSSParser.parseFile(cssFile)

        * parse()
            Use to parse embedded stylesheets using source string

            >>> cssSrc = '''
                body,body.body {
                    font: 110%, "Times New Roman", Arial, Verdana, Helvetica, serif;
                    background: White;
                    color: Black;
                }
                a {text-decoration: underline;}
            '''
            >>> stylesheets = myCSSParser.parse(cssSrc)

        * parseInline()
            Use to parse inline stylesheets using attribute source string

            >>> style = 'font: 110%, "Times New Roman", Arial, Verdana, Helvetica, serif; background: White; color: Black'
            >>> stylesheets = myCSSParser.parseInline(style)

        * parseAttributes()
            Use to parse attribute string values into inline stylesheets

            >>> stylesheets = myCSSParser.parseAttributes(
                    font='110%, "Times New Roman", Arial, Verdana, Helvetica, serif',
                    background='White',
                    color='Black')

        * parseSingleAttr()
            Use to parse a single string value into a CSS expression

            >>> fontValue = myCSSParser.parseSingleAttr('110%, "Times New Roman", Arial, Verdana, Helvetica, serif')
    """

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #~ Constants / Variables / Etc.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    ParseError = CSSParseError

    AttributeOperators = ['=', '~=', '|=', '&=', '^=', '!=', '<>']
    SelectorQualifiers = ('#', '.', '[', ':')
    SelectorCombiners = ['+', '>']
    ExpressionOperators = ('/', '+', ',')

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #~ Regular expressions
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    if True: # makes the following code foldable
        _orRule = lambda *args: '|'.join(args)
        _reflags = re.I | re.M | re.U

        i_hex = '[0-9a-fA-F]'
        i_nonascii = u'[\200-\377]'
        i_unicode = '\\\\(?:%s){1,6}\s?' % i_hex
        i_escape = _orRule(i_unicode, u'\\\\[ -~\200-\377]')
        # i_nmstart = _orRule('[A-Za-z_]', i_nonascii, i_escape)
        i_nmstart = _orRule('\-[^0-9]|[A-Za-z_]', i_nonascii, i_escape) # XXX Added hyphen, http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier
        i_nmchar = _orRule('[-0-9A-Za-z_]', i_nonascii, i_escape)
        i_ident = '((?:%s)(?:%s)*)' % (i_nmstart, i_nmchar)
        re_ident = re.compile(i_ident, _reflags)
        i_element_name = '((?:%s)|\*)' % (i_ident[1:-1],)
        re_element_name = re.compile(i_element_name, _reflags)
        i_namespace_selector = '((?:%s)|\*|)\|(?!=)' % (i_ident[1:-1],)
        re_namespace_selector = re.compile(i_namespace_selector, _reflags)
        i_class = '\\.' + i_ident
        re_class = re.compile(i_class, _reflags)
        i_hash = '#((?:%s)+)' % i_nmchar
        re_hash = re.compile(i_hash, _reflags)
        i_rgbcolor = '(#%s{6}|#%s{3})' % (i_hex, i_hex)
        re_rgbcolor = re.compile(i_rgbcolor, _reflags)
        i_nl = u'\n|\r\n|\r|\f'
        i_escape_nl = u'\\\\(?:%s)' % i_nl
        i_string_content = _orRule(u'[\t !#$%&(-~]', i_escape_nl, i_nonascii, i_escape)
        i_string1 = u'\"((?:%s|\')*)\"' % i_string_content
        i_string2 = u'\'((?:%s|\")*)\'' % i_string_content
        i_string = _orRule(i_string1, i_string2)
        re_string = re.compile(i_string, _reflags)
        i_uri = (u'url\\(\s*(?:(?:%s)|((?:%s)+))\s*\\)'
                 % (i_string, _orRule('[!#$%&*-~]', i_nonascii, i_escape)))
        # XXX For now
        # i_uri = u'(url\\(.*?\\))'
        re_uri = re.compile(i_uri, _reflags)
        i_num = u'(([-+]?[0-9]+(?:\\.[0-9]+)?)|([-+]?\\.[0-9]+))' # XXX Added outer parentheses, because e.g. .5em was not parsed correctly
        re_num = re.compile(i_num, _reflags)
        i_unit = '(%%|%s)?' % i_ident
        re_unit = re.compile(i_unit, _reflags)
        i_function = i_ident + '\\('
        re_function = re.compile(i_function, _reflags)
        i_functionterm = u'[-+]?' + i_function
        re_functionterm = re.compile(i_functionterm, _reflags)
        i_unicoderange1 = "(?:U\\+%s{1,6}-%s{1,6})" % (i_hex, i_hex)
        i_unicoderange2 = "(?:U\\+\?{1,6}|{h}(\?{0,5}|{h}(\?{0,4}|{h}(\?{0,3}|{h}(\?{0,2}|{h}(\??|{h}))))))"
        i_unicoderange = i_unicoderange1 # u'(%s|%s)' % (i_unicoderange1, i_unicoderange2)
        re_unicoderange = re.compile(i_unicoderange, _reflags)

        # i_comment = u'(?:\/\*[^*]*\*+([^/*][^*]*\*+)*\/)|(?://.*)'
        # gabriel: only the C convention for comments is allowed in CSS
        i_comment = u'(?:\/\*[^*]*\*+([^/*][^*]*\*+)*\/)'
        re_comment = re.compile(i_comment, _reflags)
        i_important = u'!\s*(important)'
        re_important = re.compile(i_important, _reflags)
        del _orRule

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #~ Public
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def __init__(self, cssBuilder=None):
        self.setCSSBuilder(cssBuilder)

    #~ CSS Builder to delegate to ~~~~~~~~~~~~~~~~~~~~~~~~

    def getCSSBuilder(self):
        """A concrete instance implementing CSSBuilderAbstract"""
        return self._cssBuilder

    def setCSSBuilder(self, cssBuilder):
        """A concrete instance implementing CSSBuilderAbstract"""
        self._cssBuilder = cssBuilder

    cssBuilder = property(getCSSBuilder, setCSSBuilder)

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #~ Public CSS Parsing API
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def parseFile(self, srcFile, closeFile=False):
        """Parses CSS file-like objects using the current cssBuilder.
        Use for external stylesheets."""
        try:
            result = self.parse(srcFile.read())
        finally:
            if closeFile:
                srcFile.close()
        return result

    def parse(self, src):
        """Parses CSS string source using the current cssBuilder.
        Use for embedded stylesheets."""
        self.cssBuilder.beginStylesheet()
        try:
            # XXX Some simple preprocessing
            src = cssSpecial.cleanupCSS(src)

            try:
                src, stylesheet = self._parseStylesheet(src)
            except self.ParseError, err:
                err.setFullCSSSource(src)
                raise
        finally:
            self.cssBuilder.endStylesheet()
        return stylesheet

    def parseInline(self, src):
        """Parses CSS inline source string using the current cssBuilder.
        Use to parse a tag's 'style'-like attribute."""
        self.cssBuilder.beginInline()
        try:
            try:
                src, properties = self._parseDeclarationGroup(src.strip(), braces=False)
            except self.ParseError, err:
                err.setFullCSSSource(src, inline=True)
                raise

            result = self.cssBuilder.inline(properties)
        finally:
            self.cssBuilder.endInline()
        return result

    def parseAttributes(self, attributes={}, **kwAttributes):
        """Parses CSS attribute source strings, and returns an inline stylesheet.
        Use to parse a tag's highly CSS-based attributes like 'font'.

        See also: parseSingleAttr
        """
        if attributes:
            kwAttributes.update(attributes)

        self.cssBuilder.beginInline()
        try:
            properties = []
            try:
                for propertyName, src in kwAttributes.iteritems():
                    src, property = self._parseDeclarationProperty(src.strip(), propertyName)
                    properties.append(property)
            except self.ParseError, err:
                err.setFullCSSSource(src, inline=True)
                raise

            result = self.cssBuilder.inline(properties)
        finally:
            self.cssBuilder.endInline()
        return result

    def parseSingleAttr(self, attrValue):
        """Parses a single CSS attribute source string, and returns the built CSS expression.
        Use to parse a tag's highly CSS-based attributes like 'font'.

        See also: parseAttributes
        """
        results = self.parseAttributes(temp=attrValue)
        if 'temp' in results[1]:
            return results[1]['temp']
        else:
            return results[0]['temp']

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #~ Internal _parse methods
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def _parseStylesheet(self, src):
        """stylesheet
        : [ CHARSET_SYM S* STRING S* ';' ]?
          [S|CDO|CDC]* [ import [S|CDO|CDC]* ]*
          [ [ ruleset | media | page | font_face ] [S|CDO|CDC]* ]*
        ;
        """
        # Get rid of the comments
        src = self.re_comment.sub(u'', src)

        # [ CHARSET_SYM S* STRING S* ';' ]?
        src = self._parseAtCharset(src)

        # [S|CDO|CDC]*
        src = self._parseSCDOCDC(src)

        # [ import [S|CDO|CDC]* ]*
        src, stylesheetImports = self._parseAtImports(src)

        # [ namespace [S|CDO|CDC]* ]*
        src = self._parseAtNamespace(src)

        stylesheetElements = []

        # [ [ ruleset | atkeywords ] [S|CDO|CDC]* ]*
        while src: # due to ending with ]*
            if src.startswith('@'):
                # @media, @page, @font-face
                src, atResults = self._parseAtKeyword(src)
                if atResults is not None:
                    stylesheetElements.extend(atResults)
            else:
                # ruleset
                src, ruleset = self._parseRuleset(src)
                stylesheetElements.append(ruleset)

            # [S|CDO|CDC]*
            src = self._parseSCDOCDC(src)

        stylesheet = self.cssBuilder.stylesheet(stylesheetElements, stylesheetImports)
        return src, stylesheet

    def _parseSCDOCDC(self, src):
        """[S|CDO|CDC]*"""
        while 1:
            src = src.lstrip()
            if src.startswith('<!--'):
                src = src[4:]
            elif src.startswith('-->'):
                src = src[3:]
            else:
                break
        return src

    #~ CSS @ directives ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def _parseAtCharset(self, src):
        """[ CHARSET_SYM S* STRING S* ';' ]?"""
        if isAtRuleIdent(src, 'charset'):
            ctxsrc = src
            src = stripAtRuleIdent(src)
            charset, src = self._getString(src)
            src = src.lstrip()
            if src[:1] != ';':
                raise self.ParseError('@charset expected a terminating \';\'', src, ctxsrc)
            src = src[1:].lstrip()

            self.cssBuilder.atCharset(charset)
        return src

    def _parseAtImports(self, src):
        """[ import [S|CDO|CDC]* ]*"""
        result = []
        while isAtRuleIdent(src, 'import'):
            ctxsrc = src
            src = stripAtRuleIdent(src)

            import_, src = self._getStringOrURI(src)
            if import_ is None:
                raise self.ParseError('Import expecting string or url', src, ctxsrc)

            mediums = []
            medium, src = self._getIdent(src.lstrip())
            while medium is not None:
                mediums.append(medium)
                if src[:1] == ',':
                    src = src[1:].lstrip()
                    medium, src = self._getIdent(src)
                else:
                    break

            # XXX If no medium is given, then "all" is appropriate
then "all" is appropriate if not mediums: mediums = ["all"] if src[:1] != ';': raise self.ParseError('@import expected a terminating \';\'', src, ctxsrc) src = src[1:].lstrip() stylesheet = self.cssBuilder.atImport(import_, mediums, self) if stylesheet is not None: result.append(stylesheet) src = self._parseSCDOCDC(src) return src, result def _parseAtNamespace(self, src): """namespace : @namespace S* [IDENT S*]? [STRING|URI] S* ';' S* """ src = self._parseSCDOCDC(src) while isAtRuleIdent(src, 'namespace'): ctxsrc = src src = stripAtRuleIdent(src) namespace, src = self._getStringOrURI(src) if namespace is None: nsPrefix, src = self._getIdent(src) if nsPrefix is None: raise self.ParseError('@namespace expected an identifier or a URI', src, ctxsrc) namespace, src = self._getStringOrURI(src.lstrip()) if namespace is None: raise self.ParseError('@namespace expected a URI', src, ctxsrc) else: nsPrefix = None src = src.lstrip() if src[:1] != ';': raise self.ParseError('@namespace expected a terminating \';\'', src, ctxsrc) src = src[1:].lstrip() self.cssBuilder.atNamespace(nsPrefix, namespace) src = self._parseSCDOCDC(src) return src def _parseAtKeyword(self, src): """[media | page | font_face | unknown_keyword]""" ctxsrc = src if isAtRuleIdent(src, 'media'): src, result = self._parseAtMedia(src) elif isAtRuleIdent(src, 'page'): src, result = self._parseAtPage(src) elif isAtRuleIdent(src, 'font-face'): src, result = self._parseAtFontFace(src) # XXX added @import, was missing! elif isAtRuleIdent(src, 'import'): src, result = self._parseAtImports(src) elif isAtRuleIdent(src, 'frame'): src, result = self._parseAtFrame(src) elif src.startswith('@'): src, result = self._parseAtIdent(src) else: raise self.ParseError('Unknown state in atKeyword', src, ctxsrc) return src, result def _parseAtMedia(self, src): """media : MEDIA_SYM S* medium [ ',' S* medium ]* '{' S* ruleset* '}' S* ; """ ctxsrc = src src = src[len('@media '):].lstrip() mediums = [] while src and src[0] != '{': medium, src = self._getIdent(src) if medium is None: raise self.ParseError('@media rule expected media identifier', src, ctxsrc) mediums.append(medium) if src[0] == ',': src = src[1:].lstrip() else: src = src.lstrip() if not src.startswith('{'): raise self.ParseError('Ruleset opening \'{\' not found', src, ctxsrc) src = src[1:].lstrip() stylesheetElements = [] #while src and not src.startswith('}'): # src, ruleset = self._parseRuleset(src) # stylesheetElements.append(ruleset) # src = src.lstrip() # Containing @ where not found and parsed while src and not src.startswith('}'): if src.startswith('@'): # @media, @page, @font-face src, atResults = self._parseAtKeyword(src) if atResults is not None: stylesheetElements.extend(atResults) else: # ruleset src, ruleset = self._parseRuleset(src) stylesheetElements.append(ruleset) src = src.lstrip() if not src.startswith('}'): raise self.ParseError('Ruleset closing \'}\' not found', src, ctxsrc) else: src = src[1:].lstrip() result = self.cssBuilder.atMedia(mediums, stylesheetElements) return src, result def _parseAtPage(self, src): """page : PAGE_SYM S* IDENT? pseudo_page? 
          S* '{' S* declaration [ ';' S* declaration ]* '}' S*
        ;
        """
        ctxsrc = src
        src = src[len('@page '):].lstrip()
        page, src = self._getIdent(src)
        if src[:1] == ':':
            pseudopage, src = self._getIdent(src[1:])
            page = page + '_' + pseudopage
        else:
            pseudopage = None

        #src, properties = self._parseDeclarationGroup(src.lstrip())

        # Also handle nested @ rules, which were previously not found and parsed
        stylesheetElements = []
        src = src.lstrip()
        properties = []

        # XXX Extended for PDF use
        if not src.startswith('{'):
            raise self.ParseError('Ruleset opening \'{\' not found', src, ctxsrc)
        else:
            src = src[1:].lstrip()

        while src and not src.startswith('}'):
            if src.startswith('@'):
                # @media, @page, @font-face
                src, atResults = self._parseAtKeyword(src)
                if atResults is not None:
                    stylesheetElements.extend(atResults)
            else:
                src, nproperties = self._parseDeclarationGroup(src.lstrip(), braces=False)
                properties += nproperties
            src = src.lstrip()

        result = [self.cssBuilder.atPage(page, pseudopage, properties)]

        return src[1:].lstrip(), result

    def _parseAtFrame(self, src):
        """ XXX Proprietary for PDF """
        ctxsrc = src
        src = src[len('@frame '):].lstrip()
        box, src = self._getIdent(src)
        src, properties = self._parseDeclarationGroup(src.lstrip())
        result = [self.cssBuilder.atFrame(box, properties)]
        return src.lstrip(), result

    def _parseAtFontFace(self, src):
        ctxsrc = src
        src = src[len('@font-face '):].lstrip()
        src, properties = self._parseDeclarationGroup(src)
        result = [self.cssBuilder.atFontFace(properties)]
        return src, result

    def _parseAtIdent(self, src):
        ctxsrc = src
        atIdent, src = self._getIdent(src[1:])
        if atIdent is None:
            raise self.ParseError('At-rule expected an identifier for the rule', src, ctxsrc)

        src, result = self.cssBuilder.atIdent(atIdent, self, src)

        if result is NotImplemented:
            # An at-rule consists of everything up to and including the next
            # semicolon (;) or the next block, whichever comes first

            semiIdx = src.find(';')
            if semiIdx < 0:
                semiIdx = None
            blockIdx = src[:semiIdx].find('{')
            if blockIdx < 0:
                blockIdx = None

            if semiIdx is not None and semiIdx < blockIdx:
                src = src[semiIdx+1:].lstrip()
            elif blockIdx is None:
                # consume the rest of the content since we didn't find a block or a semicolon
                src = ''
            elif blockIdx is not None:
                # expecting a block...
                src = src[blockIdx:]
                try:
                    # try to parse it as a declarations block
                    src, declarations = self._parseDeclarationGroup(src)
                except self.ParseError:
                    # try to parse it as a stylesheet block
                    src, stylesheet = self._parseStylesheet(src)
            else:
                raise self.ParseError('Unable to ignore @-rule block', src, ctxsrc)

        return src.lstrip(), result

    #~ ruleset - see selector and declaration groups ~~~~

    def _parseRuleset(self, src):
        """ruleset
        : selector [ ',' S* selector ]*
          '{' S* declaration [ ';' S* declaration ]* '}' S*
        ;
        """
        src, selectors = self._parseSelectorGroup(src)
        src, properties = self._parseDeclarationGroup(src.lstrip())
        result = self.cssBuilder.ruleset(selectors, properties)
        return src, result

    #~ selector parsing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def _parseSelectorGroup(self, src):
        selectors = []
        while src[:1] not in ('{', '}', ']', '(', ')', ';', ''):
            src, selector = self._parseSelector(src)
            if selector is None:
                break
            selectors.append(selector)
            if src.startswith(','):
                src = src[1:].lstrip()
        return src, selectors

    def _parseSelector(self, src):
        """selector
        : simple_selector [ combinator simple_selector ]*
        ;
        """
        src, selector = self._parseSimpleSelector(src)
        srcLen = len(src) # XXX
        while src[:1] not in ('', ',', ';', '{', '}', '[', ']', '(', ')'):
            for combiner in self.SelectorCombiners:
                if src.startswith(combiner):
                    src = src[len(combiner):].lstrip()
                    break
            else:
                combiner = ' '
            src, selectorB = self._parseSimpleSelector(src)

            # XXX Fix a bug that occurred here e.g. : .1 {...}
            if len(src) >= srcLen:
                src = src[1:]
                while src and (src[:1] not in ('', ',', ';', '{', '}', '[', ']', '(', ')')):
                    src = src[1:]
                return src.lstrip(), None

            selector = self.cssBuilder.combineSelectors(selector, combiner, selectorB)

        return src.lstrip(), selector

    def _parseSimpleSelector(self, src):
        """simple_selector
        : [ namespace_selector ]? element_name? [ HASH | class | attrib | pseudo ]* S*
        ;
        """
        ctxsrc = src.lstrip()
        nsPrefix, src = self._getMatchResult(self.re_namespace_selector, src)
        name, src = self._getMatchResult(self.re_element_name, src)
        if name:
            pass # already *successfully* assigned
        elif src[:1] in self.SelectorQualifiers:
            name = '*'
        else:
            raise self.ParseError('Selector name or qualifier expected', src, ctxsrc)

        name = self.cssBuilder.resolveNamespacePrefix(nsPrefix, name)
        selector = self.cssBuilder.selector(name)
        while src and src[:1] in self.SelectorQualifiers:
            hash_, src = self._getMatchResult(self.re_hash, src)
            if hash_ is not None:
                selector.addHashId(hash_)
                continue

            class_, src = self._getMatchResult(self.re_class, src)
            if class_ is not None:
                selector.addClass(class_)
                continue

            if src.startswith('['):
                src, selector = self._parseSelectorAttribute(src, selector)
            elif src.startswith(':'):
                src, selector = self._parseSelectorPseudo(src, selector)
            else:
                break

        return src.lstrip(), selector

    def _parseSelectorAttribute(self, src, selector):
        """attrib
        : '[' S* [ namespace_selector ]? IDENT S*
          [ [ '=' | INCLUDES | DASHMATCH ] S* [ IDENT | STRING ] S* ]?
          ']'
        ;
        """
        ctxsrc = src
        if not src.startswith('['):
            raise self.ParseError('Selector Attribute opening \'[\' not found', src, ctxsrc)
        src = src[1:].lstrip()

        nsPrefix, src = self._getMatchResult(self.re_namespace_selector, src)
        attrName, src = self._getIdent(src)

        src = src.lstrip()

        if attrName is None:
            raise self.ParseError('Expected a selector attribute name', src, ctxsrc)
        if nsPrefix is not None:
            attrName = self.cssBuilder.resolveNamespacePrefix(nsPrefix, attrName)

        for op in self.AttributeOperators:
            if src.startswith(op):
                break
        else:
            op = ''
        src = src[len(op):].lstrip()

        if op:
            attrValue, src = self._getIdent(src)
            if attrValue is None:
                attrValue, src = self._getString(src)
                if attrValue is None:
                    raise self.ParseError('Expected a selector attribute value', src, ctxsrc)
        else:
            attrValue = None

        if not src.startswith(']'):
            raise self.ParseError('Selector Attribute closing \']\' not found', src, ctxsrc)
        else:
            src = src[1:]

        if op:
            selector.addAttributeOperation(attrName, op, attrValue)
        else:
            selector.addAttribute(attrName)
        return src, selector

    def _parseSelectorPseudo(self, src, selector):
        """pseudo
        : ':' [ IDENT | function ]
        ;
        """
        ctxsrc = src
        if not src.startswith(':'):
            raise self.ParseError('Selector Pseudo \':\' not found', src, ctxsrc)
        src = re.search('^:{1,2}(.*)', src, re.M | re.S).group(1)

        name, src = self._getIdent(src)
        if not name:
            raise self.ParseError('Selector Pseudo identifier not found', src, ctxsrc)

        if src.startswith('('):
            # function
            src = src[1:].lstrip()
            src, term = self._parseExpression(src, True)
            if not src.startswith(')'):
                raise self.ParseError('Selector Pseudo Function closing \')\' not found', src, ctxsrc)
            src = src[1:]
            selector.addPseudoFunction(name, term)
        else:
            selector.addPseudo(name)

        return src, selector

    #~ declaration and expression parsing ~~~~~~~~~~~~~~~

    def _parseDeclarationGroup(self, src, braces=True):
        ctxsrc = src
        if src.startswith('{'):
            src, braces = src[1:], True
        elif braces:
            raise self.ParseError('Declaration group opening \'{\' not found', src, ctxsrc)

        properties = []
        src = src.lstrip()
        while src[:1] not in ('', ',', '{', '}', '[', ']', '(', ')', '@'): # XXX @?
            src, property = self._parseDeclaration(src)

            # XXX Workaround for styles like "*font: smaller"
            if src.startswith("*"):
                src = "-nothing-" + src[1:]
                continue

            if property is None:
                break
            properties.append(property)
            if src.startswith(';'):
                src = src[1:].lstrip()
            else:
                break

        if braces:
            if not src.startswith('}'):
                raise self.ParseError('Declaration group closing \'}\' not found', src, ctxsrc)
            src = src[1:]

        return src.lstrip(), properties

    def _parseDeclaration(self, src):
        """declaration
        : ident S* ':' S* expr prio?
        | /* empty */
        ;
        """
        # property
        propertyName, src = self._getIdent(src)

        if propertyName is not None:
            src = src.lstrip()
            # S* : S*
            if src[:1] in (':', '='):
                # Note: we are being fairly flexible here... technically, the
                # ":" is *required*, but in the name of flexibility we
                # support a null transition, as well as an "=" transition
                src = src[1:].lstrip()

            src, property = self._parseDeclarationProperty(src, propertyName)
        else:
            property = None

        return src, property

    def _parseDeclarationProperty(self, src, propertyName):
        # expr
        src, expr = self._parseExpression(src)

        # prio?
        important, src = self._getMatchResult(self.re_important, src)
        src = src.lstrip()

        property = self.cssBuilder.property(propertyName, expr, important)
        return src, property

    def _parseExpression(self, src, returnList=False):
        """expr
        : term [ operator term ]*
        ;
        """
        src, term = self._parseExpressionTerm(src)
        operator = None
        while src[:1] not in ('', ';', '{', '}', '[', ']', ')'):
            for operator in self.ExpressionOperators:
                if src.startswith(operator):
                    src = src[len(operator):]
                    break
            else:
                operator = ' '
            src, term2 = self._parseExpressionTerm(src.lstrip())
            if term2 is NotImplemented:
                break
            else:
                term = self.cssBuilder.combineTerms(term, operator, term2)

        if operator is None and returnList:
            term = self.cssBuilder.combineTerms(term, None, None)
            return src, term
        else:
            return src, term

    def _parseExpressionTerm(self, src):
        """term
        : unary_operator? [ NUMBER S* | PERCENTAGE S* | LENGTH S* | EMS S* | EXS S*
          | ANGLE S* | TIME S* | FREQ S* | function ]
        | STRING S* | IDENT S* | URI S* | RGB S* | UNICODERANGE S* | hexcolor
        ;
        """
        ctxsrc = src

        result, src = self._getMatchResult(self.re_num, src)
        if result is not None:
            units, src = self._getMatchResult(self.re_unit, src)
            term = self.cssBuilder.termNumber(result, units)
            return src.lstrip(), term

        result, src = self._getString(src, self.re_uri)
        if result is not None:
            # XXX URL!!!!
            term = self.cssBuilder.termURI(result)
            return src.lstrip(), term

        result, src = self._getString(src)
        if result is not None:
            term = self.cssBuilder.termString(result)
            return src.lstrip(), term

        result, src = self._getMatchResult(self.re_functionterm, src)
        if result is not None:
            src, params = self._parseExpression(src, True)
            if src[0] != ')':
                raise self.ParseError('Terminal function expression expected closing \')\'', src, ctxsrc)
            src = src[1:].lstrip()
            term = self.cssBuilder.termFunction(result, params)
            return src, term

        result, src = self._getMatchResult(self.re_rgbcolor, src)
        if result is not None:
            term = self.cssBuilder.termRGB(result)
            return src.lstrip(), term

        result, src = self._getMatchResult(self.re_unicoderange, src)
        if result is not None:
            term = self.cssBuilder.termUnicodeRange(result)
            return src.lstrip(), term

        nsPrefix, src = self._getMatchResult(self.re_namespace_selector, src)
        result, src = self._getIdent(src)
        if result is not None:
            if nsPrefix is not None:
                result = self.cssBuilder.resolveNamespacePrefix(nsPrefix, result)
            term = self.cssBuilder.termIdent(result)
            return src.lstrip(), term

        return self.cssBuilder.termUnknown(src)

    #~ utility methods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    def _getIdent(self, src, default=None):
        return self._getMatchResult(self.re_ident, src, default)

    def _getString(self, src, rexpression=None, default=None):
        if rexpression is None:
            rexpression = self.re_string
        result = rexpression.match(src)
        if result:
            strres = filter(None, result.groups())
            if strres:
                strres = strres[0]
            else:
                strres = ''
            return strres, src[result.end():]
        else:
            return default, src

    def _getStringOrURI(self, src):
        result, src = self._getString(src, self.re_uri)
        if result is None:
            result, src = self._getString(src)
        return result, src

    def _getMatchResult(self, rexpression, src, default=None, group=1):
        result = rexpression.match(src)
        if result:
            return result.group(group), src[result.end():]
        else:
            return default, src
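
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# A minimal usage sketch (an illustration added here, not part of the original
# module): CSSParser drives a concrete builder, and the builder decides what
# the parse results look like. EchoSelector and EchoBuilder are hypothetical
# names implementing just enough of CSSBuilderAbstract for plain rulesets.
# Note that the parser actually calls cssBuilder.stylesheet() with two
# arguments (elements and imports), so a concrete builder must accept both.

class EchoSelector(object):
    """Toy stand-in for a CSSSelectorAbstract implementation: collects the
    selector source back into a string."""
    def __init__(self, name):
        self.name = name

    def addClass(self, class_):
        self.name += '.' + class_

    def addHashId(self, hash_):
        self.name += '#' + hash_


class EchoBuilder(CSSBuilderAbstract):
    def beginStylesheet(self): pass
    def endStylesheet(self): pass

    def stylesheet(self, elements, imports):
        return elements

    def resolveNamespacePrefix(self, nsPrefix, name):
        return name

    def selector(self, name):
        return EchoSelector(name)

    def combineSelectors(self, selectorA, combiner, selectorB):
        return EchoSelector(selectorA.name + combiner + selectorB.name)

    def ruleset(self, selectors, declarations):
        return ([s.name for s in selectors], declarations)

    def property(self, name, value, important=False):
        return (name, value, important)

    def termIdent(self, value):
        return value

    def termNumber(self, value, units=None):
        return value + (units or '')

    def combineTerms(self, termA, combiner, termB):
        return (termA, combiner, termB)

# Expected usage (commented out so importing this module stays side-effect
# free; CSSParser.parse() also runs the input through cssSpecial.cleanupCSS,
# imported further up in this module):
#   parser = CSSParser(EchoBuilder())
#   print parser.parse('h1, p.intro { color: black; margin: 0 }')
#   -> roughly [(['h1', 'p.intro'],
#                [('color', 'black', None), ('margin', '0', None)])]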
apache-2.0
bilderbuchi/GCode_Z_Splice
splicer_GUI.py
1
3376
import splicer_GUI_FB
from templatesGCodePanel import GCodePanel
from templatesTransitionPanel import TransitionPanel
import wx
import wx.xrc
import logging
import splicer

logger = logging.getLogger(__name__)

# Logic implementation file for the GUI
class MyFrame( splicer_GUI_FB.mainFrameGUI ):

    def __init__( self, parent ):
        # GCode Splicer init code
        splicer_GUI_FB.mainFrameGUI.__init__( self, parent )
        self.FirstGCodePanel.title.SetLabel('G-code file 1')
        self.FirstGCodePanel.z_from.SetValue('0')
        self.FirstGCodePanel.z_from.Enable(False)
        self.FirstTransitionPanel.title.SetLabel('Transition file 1')
        self.SecondGCodePanel.title.SetLabel('G-code file 2')
        self.SecondGCodePanel.z_from.Enable(False)
        self.SecondGCodePanel.z_to.Enable(False)

#        self.GCodePanel2 = wx.Panel( self.m_scrolledWindow, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
#        self.title = wx.StaticText( self.GCodePanel2, wx.ID_ANY, u"G-code file 2", wx.DefaultPosition, wx.DefaultSize, 0 )
#        self.m_scrolledWindow.GetSizer().Add( self.GCodePanel2, 0, wx.ALL|wx.EXPAND, 5 )
#
#        # Construct GUI manually and dynamically from self-defined classes
#        self.panel1 = GCodePanelTemplate(self.m_scrolledWindow)
#        self.panel1.title.SetLabel('templated Gcodefile 2')
#        self.m_scrolledWindow.GetSizer().Add( self.panel1, 0, wx.ALL|wx.EXPAND, 5 )
#        self.m_staticline_column = wx.StaticLine( self.m_scrolledWindow, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_VERTICAL )
#        self.m_scrolledWindow.GetSizer().Add( self.m_staticline_column, 0, wx.EXPAND, 5 )
#        self.panel2 = TransitionPanel(self.m_scrolledWindow)
#        self.panel2.title.SetLabel('templated Transitionfile 2')
#        self.m_scrolledWindow.GetSizer().Add( self.panel2, 0, wx.ALL|wx.EXPAND, 5 )

    # Handlers for mainFrameGUI events.
    def OnAddFile( self, event ):
        # TODO: Implement OnAddFile
        pass

    def OnRemoveFile( self, event ):
        # TODO: Implement OnRemoveFile
        pass

    def OnGenerate( self, event ):
        logger.info('Generating files')
        output = self.resultFilePicker.GetPath()
        logger.info('Output file: ' + output)
        g_files = []
        z_values = []
        t_files = []
        for c in self.m_scrolledWindow.GetSizer().GetChildren():
            # TODO: implement scan over panels
            # TODO: make sure this is independent of order
            widget = c.GetWindow()
            if type(widget) is GCodePanel:
                logger.info('Found GCode panel ' + widget.title.GetLabel())
                path = widget.filePicker.GetPath()
                if path:
                    logger.debug('path: ' + path + '.')
                    g_files.append(path)
                zval = widget.z_to.GetValue()
                if path and zval:
                    logger.info('Found Z value')
                    logger.debug(zval)
                    z_values.append(float(zval))
            elif type(widget) is TransitionPanel:
                logger.info('Found transition panel ' + widget.title.GetLabel())
                path = widget.filePicker.GetPath()
                if path and widget.useTransition_checkbox.IsChecked():
                    logger.debug('path: ' + path + '.')
                    t_files.append(path)
        logger.info(g_files)
        logger.info(z_values)
        logger.info(t_files)
        if splicer.splice_files(output, g_files, z_values, t_files) != 0:
            logger.error('An error occurred during splicing!')
        logger.info('Finished splicing!')

    def OnClose( self, event ):
        logger.info('Closing GUI')
        self.Close()

    def OnResultFileSelected( self, event ):
        # TODO: Implement OnResultFileSelected
        pass
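
# Hypothetical bootstrap sketch (not present in the original file): one way the
# frame could be launched standalone, assuming splicer_GUI_FB.mainFrameGUI is a
# wx.Frame subclass as the imports above suggest.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)  # make the module loggers visible
    app = wx.App(False)    # False: keep stdout/stderr on the console
    frame = MyFrame(None)  # top-level frame, no parent
    frame.Show()
    app.MainLoop()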
agpl-3.0