repo_name (string, 5–100 chars) | path (string, 4–299 chars) | copies (string, 990 classes) | size (string, 4–7 chars) | content (string, 666–1.03M chars) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17–100) | line_max (int64, 7–1k) | alpha_frac (float64, 0.25–0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
mwv/scikit-learn | sklearn/preprocessing/_function_transformer.py | 163 | 2407 | from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
def _identity(X):
"""The identity function.
"""
return X
class FunctionTransformer(BaseEstimator, TransformerMixin):
"""Constructs a transformer from an arbitrary callable.
A FunctionTransformer forwards its X (and optionally y) arguments to a
user-defined function or function object and returns the result of this
function. This is useful for stateless transformations such as taking the
log of frequencies, doing custom scaling, etc.
A FunctionTransformer will not do any checks on its function's output.
Note: If a lambda is used as the function, then the resulting
transformer will not be pickleable.
Parameters
----------
func : callable, optional default=None
The callable to use for the transformation. This will be passed
the same arguments as transform, with args and kwargs forwarded.
If func is None, then func will be the identity function.
validate : bool, optional default=True
Indicate that the input X array should be checked before calling
func. If validate is false, there will be no input validation.
If it is true, then X will be converted to a 2-dimensional NumPy
array or sparse matrix. If this conversion is not possible or X
contains NaN or infinity, an exception is raised.
accept_sparse : boolean, optional
Indicate that func accepts a sparse matrix as input. If validate is
False, this has no effect. Otherwise, if accept_sparse is false,
sparse matrix inputs will cause an exception to be raised.
    pass_y : bool, optional default=False
Indicate that transform should forward the y argument to the
inner callable.
"""
def __init__(self, func=None, validate=True,
accept_sparse=False, pass_y=False):
self.func = func
self.validate = validate
self.accept_sparse = accept_sparse
self.pass_y = pass_y
def fit(self, X, y=None):
if self.validate:
check_array(X, self.accept_sparse)
return self
def transform(self, X, y=None):
if self.validate:
X = check_array(X, self.accept_sparse)
func = self.func if self.func is not None else _identity
return func(X, *((y,) if self.pass_y else ()))
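    # A minimal usage sketch (illustrative addition, not part of the original
    # module). It assumes NumPy is installed and applies the transformer
    # defined above to a small dense array:
    #
    #     import numpy as np
    #     ft = FunctionTransformer(func=np.log1p)
    #     ft.fit_transform(np.array([[0.0, 1.0], [2.0, 3.0]]))
    #     # array([[ 0.        ,  0.69314718],
    #     #        [ 1.09861229,  1.38629436]])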
| bsd-3-clause | -2,043,291,850,381,902,600 | 35.469697 | 77 | 0.676776 | false |
jtyr/ansible-modules-core | utilities/logic/pause.py | 10 | 2331 | # -*- mode: python -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: pause
short_description: Pause playbook execution
description:
- Pauses playbook execution for a set amount of time, or until a prompt is acknowledged. All parameters are optional. The default behavior is to pause with a prompt.
- "You can use C(ctrl+c) if you wish to advance a pause earlier than it is set to expire or if you need to abort a playbook run entirely. To continue early: press C(ctrl+c) and then C(c). To abort a playbook: press C(ctrl+c) and then C(a)."
- "The pause module integrates into async/parallelized playbooks without any special considerations (see also: Rolling Updates). When using pauses with the C(serial) playbook parameter (as in rolling updates) you are only prompted once for the current group of hosts."
version_added: "0.8"
options:
minutes:
description:
- A positive number of minutes to pause for.
required: false
default: null
seconds:
description:
- A positive number of seconds to pause for.
required: false
default: null
prompt:
description:
- Optional text to use for the prompt message.
required: false
default: null
author: "Tim Bielawa (@tbielawa)"
notes:
- Starting in 2.2, if you specify 0 or negative for minutes or seconds, it will wait for 1 second, previously it would wait indefinitely.
'''
EXAMPLES = '''
# Pause for 5 minutes to build app cache.
- pause:
minutes: 5
# Pause until you can verify updates to an application were successful.
- pause:
# A helpful reminder of what to look out for post-update.
- pause:
prompt: "Make sure org.foo.FooOverload exception is not present"
'''
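# An additional usage sketch (assumed, not from the original EXAMPLES block):
# pausing for a fixed number of seconds instead of minutes, using the
# documented C(seconds) option.
#
# - pause:
#     seconds: 30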
| gpl-3.0 | 2,515,413,707,544,757,000 | 38.508475 | 270 | 0.731446 | false |
guerrerocarlos/odoo | addons/subscription/__openerp__.py | 261 | 1885 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Recurring Documents',
'version': '1.0',
'category': 'Tools',
'description': """
Create recurring documents.
===========================
This module allows you to create new documents and add subscriptions to those documents.
e.g. To have an invoice generated automatically periodically:
-------------------------------------------------------------
* Define a document type based on Invoice object
* Define a subscription whose source document is the document defined as
      above. Specify the interval information and the partner to be invoiced.
""",
'author': 'OpenERP SA',
'depends': ['base'],
'data': ['security/subcription_security.xml', 'security/ir.model.access.csv', 'subscription_view.xml'],
'demo': ['subscription_demo.xml',],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 7,003,282,403,103,577,000 | 39.978261 | 107 | 0.609549 | false |
tenaciousjzh/titan-solr-cloud-test | zookeeper-3.3.5/contrib/zkpython/src/python/zk.py | 61 | 2528 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zookeeper, time, threading
f = open("out.log","w")
zookeeper.set_log_stream(f)
connected = False
conn_cv = threading.Condition( )
def my_connection_watcher(handle,type,state,path):
global connected, conn_cv
print "Connected, handle is ", handle
conn_cv.acquire()
connected = True
conn_cv.notifyAll()
conn_cv.release()
conn_cv.acquire()
print "Connecting to localhost:2181 -- "
handle = zookeeper.init("localhost:2181", my_connection_watcher, 10000, 0)
while not connected:
conn_cv.wait()
conn_cv.release()
def my_getc_watch( handle, type, state, path ):
print "Watch fired -- "
print type, state, path
ZOO_OPEN_ACL_UNSAFE = {"perms":0x1f, "scheme":"world", "id" :"anyone"};
try:
zookeeper.create(handle, "/zk-python", "data", [ZOO_OPEN_ACL_UNSAFE], 0)
zookeeper.get_children(handle, "/zk-python", my_getc_watch)
for i in xrange(5):
print "Creating sequence node ", i, " ", zookeeper.create(handle, "/zk-python/sequencenode", "data", [ZOO_OPEN_ACL_UNSAFE], zookeeper.SEQUENCE )
except:
pass
def pp_zk(handle,root, indent = 0):
"""Pretty print a zookeeper tree, starting at root"""
def make_path(child):
if root == "/":
return "/" + child
return root + "/" + child
children = zookeeper.get_children(handle, root, None)
out = ""
for i in xrange(indent):
out += "\t"
out += "|---"+root + " :: " + zookeeper.get(handle, root, None)[0]
print out
for child in children:
pp_zk(handle,make_path(child),indent+1)
print "ZNode tree -- "
pp_zk(handle,"/")
print "Getting ACL / Stat for /zk-python --"
(stat, acl) = zookeeper.get_acl(handle, "/zk-python")
print "Stat:: ", stat
print "Acl:: ", acl
| apache-2.0 | 2,320,307,165,894,712,000 | 32.263158 | 152 | 0.675633 | false |
pidydx/grr | grr/lib/aff4_objects/reports_test.py | 2 | 1849 | #!/usr/bin/env python
"""Reporting tests."""
from grr.lib import aff4
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.aff4_objects import reports
from grr.lib.rdfvalues import client as rdf_client
class ReportsTest(test_lib.AFF4ObjectTest):
"""Test the timeline implementation."""
def testClientListReport(self):
"""Check that we can create and run a ClientList Report."""
# Create some clients.
client_ids = self.SetupClients(10)
with aff4.FACTORY.Open(
client_ids[0], token=self.token, mode="rw") as client:
interfaces = client.Schema.INTERFACES()
interfaces.Append(
addresses=[
rdf_client.NetworkAddress(
human_readable="1.1.1.1", address_type="INET")
],
mac_address="11:11:11:11:11:11",
ifname="eth0")
client.Set(interfaces)
client.Set(client.Schema.HOSTNAME("lawman"))
# Also initialize a broken client with no hostname.
with aff4.FACTORY.Open(
client_ids[1], token=self.token, mode="rw") as client:
client.Set(client.Schema.CLIENT_INFO())
# Create a report for all clients.
report = reports.ClientListReport(token=self.token)
report.Run()
self.assertEqual(len(report.results), 10)
hostnames = [x.get("Host") for x in report.results]
self.assertTrue("lawman" in hostnames)
report.SortResults("Host")
self.assertEqual(len(report.AsDict()), 10)
self.assertEqual(len(report.AsCsv().getvalue().splitlines()), 11)
self.assertEqual(len(report.AsText().getvalue().splitlines()), 10)
self.assertEqual(report.results[-1]["Interfaces"], "1.1.1.1")
self.assertEqual(len(report.broken_clients), 1)
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| apache-2.0 | 5,161,971,852,973,258,000 | 30.87931 | 70 | 0.665765 | false |
ptoraskar/django | tests/template_tests/syntax_tests/test_invalid_string.py | 440 | 2310 | from django.test import SimpleTestCase
from ..utils import setup
class InvalidStringTests(SimpleTestCase):
libraries = {'i18n': 'django.templatetags.i18n'}
@setup({'invalidstr01': '{{ var|default:"Foo" }}'})
def test_invalidstr01(self):
output = self.engine.render_to_string('invalidstr01')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, 'Foo')
@setup({'invalidstr02': '{{ var|default_if_none:"Foo" }}'})
def test_invalidstr02(self):
output = self.engine.render_to_string('invalidstr02')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'invalidstr03': '{% for v in var %}({{ v }}){% endfor %}'})
def test_invalidstr03(self):
output = self.engine.render_to_string('invalidstr03')
self.assertEqual(output, '')
@setup({'invalidstr04': '{% if var %}Yes{% else %}No{% endif %}'})
def test_invalidstr04(self):
output = self.engine.render_to_string('invalidstr04')
self.assertEqual(output, 'No')
@setup({'invalidstr04_2': '{% if var|default:"Foo" %}Yes{% else %}No{% endif %}'})
def test_invalidstr04_2(self):
output = self.engine.render_to_string('invalidstr04_2')
self.assertEqual(output, 'Yes')
@setup({'invalidstr05': '{{ var }}'})
def test_invalidstr05(self):
output = self.engine.render_to_string('invalidstr05')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'invalidstr06': '{{ var.prop }}'})
def test_invalidstr06(self):
output = self.engine.render_to_string('invalidstr06')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'invalidstr07': '{% load i18n %}{% blocktrans %}{{ var }}{% endblocktrans %}'})
def test_invalidstr07(self):
output = self.engine.render_to_string('invalidstr07')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
| bsd-3-clause | -9,180,522,483,129,134,000 | 36.258065 | 91 | 0.604329 | false |
phlax/pootle | pootle/apps/pootle_translationproject/migrations/0006_relink_or_drop_orphan_translationprojects.py | 7 | 1197 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-08 21:27
from __future__ import unicode_literals
from django.db import migrations
def relink_or_drop_orphan_translationprojects(apps, schema_editor):
"""Relink or drop TPs with no project."""
Project = apps.get_model("pootle_project.Project")
TP = apps.get_model("pootle_translationproject.TranslationProject")
for proj_key in set(TP.objects.values_list("project_id", flat=True)):
if not Project.objects.filter(pk=proj_key).exists():
for tp in TP.objects.filter(project_id=proj_key):
proj_code = tp.pootle_path.split("/")[2]
projects = Project.objects.filter(code=proj_code)
if projects.exists():
tp.project = projects.first()
tp.save()
else:
tp.delete()
class Migration(migrations.Migration):
dependencies = [
('pootle_translationproject', '0005_remove_empty_translationprojects'),
('pootle_project', '0014_just_rename_label_for_choice'),
]
operations = [
migrations.RunPython(relink_or_drop_orphan_translationprojects),
]
| gpl-3.0 | -385,315,388,075,579,650 | 33.2 | 79 | 0.621554 | false |
shakilkanji/rmc | data/processor.py | 3 | 23484 | import rmc.shared.constants as c
import rmc.shared.util as rmc_util
import rmc.models as m
import argparse
from datetime import datetime
import dateutil.parser
import glob
import json
import mongoengine as me
import os
import time
import re
import sys
def import_departments():
def clean_opendata_department(department):
return {
'id': department['subject'].lower(),
'name': department['name'],
'faculty_id': department['faculty_id'].lower(),
'url': 'http://ugradcalendar.uwaterloo.ca/courses/{0}'.format(
department['subject'].lower()),
}
file_name = os.path.join(os.path.dirname(__file__),
c.DEPARTMENTS_DATA_DIR, 'opendata2_departments.json')
with open(file_name, 'r') as f:
data = json.load(f)
for department in data:
department = clean_opendata_department(department)
if m.Department.objects.with_id(department['id']):
continue
m.Department(**department).save()
print 'imported departments:', m.Department.objects.count()
def import_courses():
def get_department_name_from_file_path(file_path):
return re.findall(r'([^/]*).json$', file_path)[0].lower()
def build_keywords(department, number, course_title):
department = department.lower()
number = str(number)
course_title = course_title.lower()
course_title = re.sub(r'\s+', ' ', course_title)
# Separate on hypens in title for keywords list
course_title = re.sub(r'-', ' ', course_title)
keywords = [department, number, department + number]
keywords.extend(course_title.split(' '))
return keywords
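    # Illustrative call (hypothetical inputs): the department and number are
    # lower-cased and concatenated, and the title is split into words, e.g.
    #
    #     build_keywords('CS', 241, 'Data Structures')
    #     # -> ['cs', '241', 'cs241', 'data', 'structures']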
def clean_description(des):
return des or "No description"
def clean_opendata_course(dep, course):
number = course['catalog_number'].lower()
return {
'id': '%s%s' % (dep, number),
'department_id': dep,
'number': number,
'name': course['title'],
'description': clean_description(course['description']),
'_keywords': build_keywords(
dep, number, course['title']),
'antireqs': course['antirequisites'],
'coreqs': course['corequisites'],
'prereqs': course['prerequisites'],
}
added = 0
updated = 0
for file_name in glob.glob(os.path.join(os.path.dirname(__file__),
c.OPENDATA2_COURSES_DATA_DIR, '*.json')):
with open(file_name, 'r') as f:
courses = json.load(f)
dep_name = get_department_name_from_file_path(file_name)
if not m.Department.objects.with_id(dep_name):
print 'could not find department %s' % dep_name
continue
# The input data can be a list or dict (with course number as key)
if isinstance(courses, dict):
courses = courses.values()
# For each course, update it if it already exists, else insert it
for course in courses:
if not course:
continue
course = clean_opendata_course(dep_name, course)
old_course = m.Course.objects.with_id(course['id'])
if old_course:
for key, value in course.iteritems():
if key == 'id':
continue
old_course[key] = value
old_course.save()
updated += 1
else:
m.Course(**course).save()
added += 1
# Update courses with terms offered data
with open(os.path.join(os.path.dirname(__file__),
c.TERMS_OFFERED_DATA_DIR, 'terms_offered.txt')) as f:
def map_term(term):
return {
'W': '01',
'S': '05',
'F': '09',
}[term]
terms_offered_by_course = json.load(f)
for course_id, terms_offered in terms_offered_by_course.items():
course = m.Course.objects.with_id(course_id)
if not course:
continue
course.terms_offered = map(map_term, terms_offered)
course.save()
for course in m.Course.objects:
if course.prereqs:
course.prereqs = normalize_reqs_str(course.prereqs)
if course.coreqs:
course.coreqs = normalize_reqs_str(course.coreqs)
if course.antireqs:
course.antireqs = normalize_reqs_str(course.antireqs)
course.save()
print 'OpenData courses, added: %d, updated: %d' % (added, updated)
print 'Total courses:', m.Course.objects.count()
def normalize_reqs_str(str_):
"""Normalize the prereq string of a course
TODO(mack): handle the following special cases:
1) "CS/ECE 121"
"""
# Split on non-alphanumeric characters (includes chars we split on)
old_splits = re.compile('(\W+)').split(str_)
# Newly normalized splits
new_splits = []
# Last department id encountered as we traverse prereq str
last_dep_id = None
# Traverse the splits
for split in old_splits:
new_split = split
if last_dep_id and re.findall(r'^[0-9]{3}[a-z]?$', split.lower()):
# If there's a previous dep id and this matches the number portion
# of a course, check if this is a valid course
# NOTE: we're not validating whether the course exists since
# we should still normalize to make the output to look consistent,
# even when the course does not exist
new_split = last_dep_id.upper() + split
elif (re.findall('^[A-Z]+', split) and
m.Department.objects.with_id(split.lower())):
# We check it's uppercase, so we don't have false positives like
# "Earth" that was part of "Earth Science student"
last_dep_id = split.lower()
# Do not include the department id since it will be included
# with the course we find
new_split = ''
new_splits.append(new_split)
# We're here if this split matches a department id
# Increment idx by 1 more to skip the next non-alphanum character
new_str = ''.join(new_splits)
# While removing department ids, we could have left redundant spaces
# (e.g. "CS 247" => " CS247", so remove them now.
return re.sub('\s+', ' ', new_str).strip()
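# Illustrative call (hypothetical input; assumes a Department with id 'cs'
# exists, since department lookups go through m.Department.objects):
#
#     normalize_reqs_str("CS 136 or CS 146")
#     # -> "CS136 or CS146"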
# TODO(mack): should return (first_name, last_name)
def get_prof_name(prof_name_menlo):
matches = re.findall(r'^(.+?), (.+)$', prof_name_menlo)[0]
return {
'first_name': matches[1],
'last_name': matches[0],
}
def import_professors():
# NOTE: not safe to drop table anymore since users can add their own
# professors now
def clean_professor(professor):
def clean_name(name):
return re.sub(r'\s+', ' ', name.strip())
prof_name = get_prof_name(professor['prof_name'])
return {
'first_name': clean_name(prof_name['first_name']),
'last_name': clean_name(prof_name['last_name']),
}
file_names = glob.glob(os.path.join(os.path.dirname(__file__),
c.REVIEWS_DATA_DIR, '*.txt'))
for file_name in file_names:
with open(file_name, 'r') as f:
data = json.load(f)
professor = clean_professor(data)
# Since user's can now add professors, gotta first check
# that the professor does not aleady exist
if not m.Professor.objects(**professor):
m.Professor(**professor).save()
print 'imported professors:', m.Professor.objects.count()
def import_reviews():
m.MenloCourse.objects._collection.drop()
def clean_review(review):
course = review['class']
if course is None:
return {}
course = course.lower()
matches = re.findall(r'([a-z]+).*?([0-9]{3}[a-z]?)(?:[^0-9]|$)',
course)
# TODO(mack): investigate if we are missing any good courses with
# this regex
if len(matches) != 1 or len(matches[0]) != 2:
return {}
department_id = matches[0][0].lower()
course_number = matches[0][1].lower()
course_id = department_id + course_number
prof_name = get_prof_name(data['prof_name'])
prof_id = m.Professor.get_id_from_name(
prof_name['first_name'], prof_name['last_name'])
clean_review = {
'professor_id': prof_id,
'course_id': course_id,
'course_review': m.CourseReview(),
'professor_review': m.ProfessorReview(),
}
def normalize_rating(menlo_rating):
# normalize 1..5 to Yes/No:
# 1,2 => No, 3 => None, 4,5 => Yes
try:
menlo_rating = int(menlo_rating)
if menlo_rating <= 2:
return 0
elif menlo_rating >= 4:
return 1
else:
return None
except:
return None
# TODO(mack): include 'r_helpful'?
if 'r_clarity' in review:
clean_review['professor_review'].clarity = \
normalize_rating(review['r_clarity'])
if 'r_easy' in review:
clean_review['course_review'].easiness = \
normalize_rating(review['r_easy'])
if 'r_interest' in review:
clean_review['course_review'].interest = \
normalize_rating(review['r_interest'])
clean_review['professor_review'].comment = review['comment']
clean_review['professor_review'].comment_date = datetime.strptime(
review['date'], '%m/%d/%y')
return clean_review
file_names = glob.glob(os.path.join(os.path.dirname(__file__),
c.REVIEWS_DATA_DIR, '*.txt'))
for file_name in file_names:
with open(file_name, 'r') as f:
data = json.load(f)
for review in data['ratings']:
review = clean_review(review)
if (not 'course_id' in review
or not m.Course.objects.with_id(review['course_id'])):
#print 'skipping rating because invalid course_id ' + course_id
continue
try:
m.MenloCourse(**review).save()
except:
print 'failed on review', review
print 'imported reviews:', m.MenloCourse.objects.count()
def group_similar_exam_sections(exam_sections):
"""Groups together exam sections that have the same date, time,
and location.
Args:
exam_sections: A list of sections for an exam as returned by OpenData's
examschedule.json endpoint.
Returns a consolidated list of sections in the same format, where each item
has a unique date/time/location.
"""
def order_sections(sections):
sections_list = sorted(sections.split(', '))
return ', '.join(sections_list)
def is_similar(first, second):
return (first.get('start_time') == second.get('start_time') and
first.get('end_time') == second.get('end_time') and
first.get('date') == second.get('date') and
first.get('location') == second.get('location'))
different_sections = []
for section in exam_sections:
similar_exams = [s for s in different_sections if
is_similar(s, section)]
if similar_exams:
similar_exams[0]['section'] += ', ' + section.get('section')
else:
different_sections.append(section)
for section in different_sections:
section['section'] = order_sections(section.get('section'))
return different_sections
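# Illustrative call (hypothetical data): two sections sharing the same date,
# time and location collapse into one entry with a sorted section list.
#
#     group_similar_exam_sections([
#         {'section': '002', 'date': '2014-04-17', 'start_time': '7:30 PM',
#          'end_time': '10:00 PM', 'location': 'DC 1350'},
#         {'section': '001', 'date': '2014-04-17', 'start_time': '7:30 PM',
#          'end_time': '10:00 PM', 'location': 'DC 1350'},
#     ])
#     # -> [{'section': '001, 002', 'date': '2014-04-17', ...}]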
def import_opendata_exam_schedules():
"""Import exam schedules data from the OpenData API"""
today = datetime.today()
file_name = os.path.join(
os.path.dirname(__file__),
'%s/uw_exams_%s.txt' % (c.EXAMS_DATA_DIR,
today.strftime('%Y_%m_%d')))
processed_exams = []
errors = []
with open(file_name, 'r') as f:
data = json.load(f)
# Data will contain something like this:
#
# [{
# "course": "AFM 131",
# "sections": [
# {
# "date": "2014-04-17",
# "day": "Thursday",
# "end_time": "10:00 PM",
# "location": "DC 1350",
# "notes": "",
# "section": "001",
# "start_time": "7:30 PM"
# },
# {
# "date": "",
# "day": "",
# "end_time": "",
# "location": "",
# "notes": "See blah blah blah",
# "section": "081 Online",
# "start_time": ""
# }
# ]
# }, ...]
#
# TODO(jlfwong): Refactor this to separate concerns of file IO, db
# storage, and data processing so that the data processing step can be
# tested, and this example can be moved into tests.
for exam_data in data:
course_id = m.Course.code_to_id(exam_data.get('course'))
grouped_sections = group_similar_exam_sections(
exam_data.get('sections', []))
for section_data in grouped_sections:
section = section_data.get('section')
day = section_data.get('day')
# Catch these to be more detailed in our errors
if section.endswith('Online'):
errors.append("Skipping online course: %s %s"
% (course_id, section))
continue
if 'Exam removed' in day:
errors.append("Skipping removed course: %s" % (course_id))
continue
if 'See http:' in day:
errors.append("Skipping url for course: %s" % (course_id))
continue
# E.g. 2014-04-17
date = section_data.get('date')
# E.g. 11:30 AM
start_time = section_data.get('start_time')
end_time = section_data.get('end_time')
# E.g. 2014-04-17 7:30 PM
# 2014-04-17 10:00 PM
date_format = "%Y-%m-%d %I:%M %p"
start_date_string = "%s %s" % (date, start_time)
end_date_string = "%s %s" % (date, end_time)
try:
start_date = rmc_util.eastern_to_utc(
datetime.fromtimestamp(
time.mktime(
time.strptime(start_date_string,
date_format))))
end_date = rmc_util.eastern_to_utc(
datetime.fromtimestamp(
time.mktime(
time.strptime(end_date_string, date_format))))
except Exception as exp:
errors.append("Could not get date (%s)\n%s" %
(section_data, exp))
continue
exam = m.Exam(
course_id=course_id,
sections=section,
start_date=start_date,
end_date=end_date,
location=section_data.get('location'),
info_known=bool(start_date and end_date),
)
processed_exams.append(exam)
# Do some sanity checks to make sure OpenData is being reasonable.
# This number is arbitrary and just reminds us to double-check
# TODO(Sandy): This ranges from 775 (Fall & Winter) to 325 (Spring)
season = m.Term.get_season_from_id(m.Term.get_current_term_id())
EXAM_ITEMS_THRESHOLD = 325 if season == 'Spring' else 775
if len(processed_exams) < EXAM_ITEMS_THRESHOLD:
raise ValueError("processor.py: too few exam items %d (< %d)"
% (len(processed_exams), EXAM_ITEMS_THRESHOLD))
# Everything should be fine by here, drop the old exams collection
m.Exam.objects.delete()
for exam in processed_exams:
exam.save()
return errors
def _opendata_to_section_meeting(data, term_year):
"""Converts OpenData class section info to a SectionMeeting instance.
Args:
data: An object from the `classes` field returned by OpenData.
term_year: The year this term is in.
"""
date = data['date']
days = []
if date['weekdays']:
days = re.findall(r'[A-Z][a-z]?',
date['weekdays'].replace('U', 'Su'))
# TODO(david): Actually use the term begin/end dates when we get nulls
date_format = '%m/%d'
start_date = datetime.strptime(date['start_date'], date_format).replace(
year=term_year) if date['start_date'] else None
end_date = datetime.strptime(date['end_date'], date_format).replace(
year=term_year) if date['end_date'] else None
time_format = '%H:%M'
# TODO(david): DRY-up
start_seconds = None
if date['start_time']:
start_time = datetime.strptime(date['start_time'], time_format)
start_seconds = (start_time -
start_time.replace(hour=0, minute=0, second=0)).seconds
end_seconds = None
if date['end_time']:
end_time = datetime.strptime(date['end_time'], time_format)
end_seconds = (end_time -
end_time.replace(hour=0, minute=0, second=0)).seconds
meeting = m.SectionMeeting(
start_seconds=start_seconds,
end_seconds=end_seconds,
days=days,
start_date=start_date,
end_date=end_date,
building=data['location']['building'],
room=data['location']['room'],
is_tba=date['is_tba'],
is_cancelled=date['is_cancelled'],
is_closed=date['is_closed'],
)
if data['instructors']:
last_name, first_name = data['instructors'][0].split(',')
prof_id = m.Professor.get_id_from_name(first_name, last_name)
if not m.Professor.objects.with_id(prof_id):
m.Professor(id=prof_id, first_name=first_name,
last_name=last_name).save()
meeting.prof_id = prof_id
return meeting
def _clean_section(data):
"""Converts OpenData section info to a dict that can be consumed by
Section.
"""
course_id = m.Course.code_to_id(data['subject'] + data['catalog_number'])
term_id = m.Term.get_term_id_from_quest_id(data['term'])
section_type, section_num = data['section'].split(' ')
last_updated = dateutil.parser.parse(data['last_updated'])
year = m.Term.get_year_from_id(term_id)
meetings = map(lambda klass: _opendata_to_section_meeting(klass, year),
data['classes'])
return {
'course_id': course_id,
'term_id': term_id,
'section_type': section_type.upper(),
'section_num': section_num,
'campus': data['campus'],
'enrollment_capacity': data['enrollment_capacity'],
'enrollment_total': data['enrollment_total'],
'waiting_capacity': data['waiting_capacity'],
'waiting_total': data['waiting_total'],
'meetings': meetings,
'class_num': str(data['class_number']),
'units': data['units'],
'note': data['note'],
'last_updated': last_updated,
}
def _clean_scholarship(data):
"""Converts OpenData scholarship data in to a dict that can be used by
Scholarship
"""
return {
'id': str(data['id']),
'title': data['title'],
'description': data['description'],
'citizenship': data['citizenship'],
'programs': data['programs'],
'eligibility': data['application']['eligibility'],
'instructions': data['application']['instructions'],
'enrollment_year': data['application']['enrollment_year'],
'contact': data['contact'],
'link': data['link'],
}
def import_opendata_sections():
num_added = 0
num_updated = 0
filenames = glob.glob(os.path.join(os.path.dirname(__file__),
c.SECTIONS_DATA_DIR, '*.json'))
for filename in filenames:
with open(filename, 'r') as f:
data = json.load(f)
for section_data in data:
section_dict = _clean_section(section_data)
# TODO(david): Is there a more natural way of doing an
# upsert with MongoEngine?
existing_section = m.Section.objects(
course_id=section_dict['course_id'],
term_id=section_dict['term_id'],
section_type=section_dict['section_type'],
section_num=section_dict['section_num'],
).first()
if existing_section:
for key, val in section_dict.iteritems():
existing_section[key] = val
existing_section.save()
num_updated += 1
else:
m.Section(**section_dict).save()
num_added += 1
print 'Added %s sections and updated %s sections' % (
num_added, num_updated)
def import_scholarships():
num_added = 0
num_updated = 0
filenames = glob.glob(os.path.join(os.path.dirname(__file__),
c.SCHOLARSHIPS_DATA_DIR, '*.json'))
for filename in filenames:
with open(filename, 'r') as f:
data = json.load(f).get('data')
for scholarship_data in data:
scholarship_dict = _clean_scholarship(scholarship_data)
existing_scholarship = m.Scholarship.objects(
id=scholarship_dict['id']
).first()
if existing_scholarship:
for key, val in scholarship_dict.iteritems():
if key != 'id':
existing_scholarship[key] = val
existing_scholarship.save()
num_updated += 1
else:
m.Scholarship(**scholarship_dict).save()
num_added += 1
print 'Added %s scholarships and updated %s scholarships' % (
num_added, num_updated)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
supported_modes = ['professors', 'departments', 'courses', 'reviews']
parser.add_argument('mode', help='one of %s' % ','.join(supported_modes))
args = parser.parse_args()
me.connect(c.MONGO_DB_RMC, host=c.MONGO_HOST, port=c.MONGO_PORT)
if args.mode == 'professors':
import_professors()
elif args.mode == 'departments':
import_departments()
elif args.mode == 'courses':
import_courses()
elif args.mode == 'reviews':
import_reviews()
elif args.mode == 'exams':
import_opendata_exam_schedules()
elif args.mode == 'sections':
import_opendata_sections()
elif args.mode == 'scholarships':
import_scholarships()
else:
sys.exit('The mode %s is not supported' % args.mode)
| mit | 1,930,025,305,532,086,500 | 34.908257 | 79 | 0.539303 | false |
bdrung/audacity | lib-src/lv2/lv2/plugins/eg-metro.lv2/waflib/Tools/d.py | 278 | 2076 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import Utils,Task,Errors
from waflib.TaskGen import taskgen_method,feature,extension
from waflib.Tools import d_scan,d_config
from waflib.Tools.ccroot import link_task,stlink_task
class d(Task.Task):
color='GREEN'
run_str='${D} ${DFLAGS} ${DINC_ST:INCPATHS} ${D_SRC_F:SRC} ${D_TGT_F:TGT}'
scan=d_scan.scan
class d_with_header(d):
run_str='${D} ${DFLAGS} ${DINC_ST:INCPATHS} ${D_HDR_F:tgt.outputs[1].bldpath()} ${D_SRC_F:SRC} ${D_TGT_F:tgt.outputs[0].bldpath()}'
class d_header(Task.Task):
color='BLUE'
run_str='${D} ${D_HEADER} ${SRC}'
class dprogram(link_task):
run_str='${D_LINKER} ${LINKFLAGS} ${DLNK_SRC_F}${SRC} ${DLNK_TGT_F:TGT} ${RPATH_ST:RPATH} ${DSTLIB_MARKER} ${DSTLIBPATH_ST:STLIBPATH} ${DSTLIB_ST:STLIB} ${DSHLIB_MARKER} ${DLIBPATH_ST:LIBPATH} ${DSHLIB_ST:LIB}'
inst_to='${BINDIR}'
class dshlib(dprogram):
inst_to='${LIBDIR}'
class dstlib(stlink_task):
pass
@extension('.d','.di','.D')
def d_hook(self,node):
ext=Utils.destos_to_binfmt(self.env.DEST_OS)=='pe'and'obj'or'o'
out='%s.%d.%s'%(node.name,self.idx,ext)
def create_compiled_task(self,name,node):
task=self.create_task(name,node,node.parent.find_or_declare(out))
try:
self.compiled_tasks.append(task)
except AttributeError:
self.compiled_tasks=[task]
return task
if getattr(self,'generate_headers',None):
tsk=create_compiled_task(self,'d_with_header',node)
tsk.outputs.append(node.change_ext(self.env['DHEADER_ext']))
else:
tsk=create_compiled_task(self,'d',node)
return tsk
@taskgen_method
def generate_header(self,filename):
try:
self.header_lst.append([filename,self.install_path])
except AttributeError:
self.header_lst=[[filename,self.install_path]]
@feature('d')
def process_header(self):
for i in getattr(self,'header_lst',[]):
node=self.path.find_resource(i[0])
if not node:
raise Errors.WafError('file %r not found on d obj'%i[0])
self.create_task('d_header',node,node.change_ext('.di'))
| gpl-2.0 | -6,967,568,142,258,597,000 | 37.444444 | 211 | 0.703276 | false |
frankbp/robotframework-selenium2library | test/run_tests.py | 42 | 3301 | #!/usr/bin/env python
import env
import os
import sys
from subprocess import Popen, call
from tempfile import TemporaryFile
from run_unit_tests import run_unit_tests
ROBOT_ARGS = [
'--doc', 'SeleniumSPacceptanceSPtestsSPwithSP%(browser)s',
'--outputdir', '%(outdir)s',
'--variable', 'browser:%(browser)s',
'--escape', 'space:SP',
'--report', 'none',
'--log', 'none',
#'--suite', 'Acceptance.Keywords.Textfields',
'--loglevel', 'DEBUG',
'--pythonpath', '%(pythonpath)s',
]
REBOT_ARGS = [
'--outputdir', '%(outdir)s',
'--name', '%(browser)sSPAcceptanceSPTests',
'--escape', 'space:SP',
'--critical', 'regression',
'--noncritical', 'inprogress',
]
ARG_VALUES = {'outdir': env.RESULTS_DIR, 'pythonpath': env.SRC_DIR}
def acceptance_tests(interpreter, browser, args):
ARG_VALUES['browser'] = browser.replace('*', '')
start_http_server()
runner = {'python': 'pybot', 'jython': 'jybot', 'ipy': 'ipybot'}[interpreter]
if os.sep == '\\':
runner += '.bat'
execute_tests(runner, args)
stop_http_server()
return process_output()
def start_http_server():
server_output = TemporaryFile()
Popen(['python', env.HTTP_SERVER_FILE ,'start'],
stdout=server_output, stderr=server_output)
def execute_tests(runner, args):
if not os.path.exists(env.RESULTS_DIR):
os.mkdir(env.RESULTS_DIR)
command = [runner] + [arg % ARG_VALUES for arg in ROBOT_ARGS] + args + [env.ACCEPTANCE_TEST_DIR]
print ''
print 'Starting test execution with command:\n' + ' '.join(command)
syslog = os.path.join(env.RESULTS_DIR, 'syslog.txt')
call(command, shell=os.sep=='\\', env=dict(os.environ, ROBOT_SYSLOG_FILE=syslog))
def stop_http_server():
call(['python', env.HTTP_SERVER_FILE, 'stop'])
def process_output():
print
if _has_robot_27():
call(['python', os.path.join(env.RESOURCES_DIR, 'statuschecker.py'),
os.path.join(env.RESULTS_DIR, 'output.xml')])
rebot = 'rebot' if os.sep == '/' else 'rebot.bat'
rebot_cmd = [rebot] + [ arg % ARG_VALUES for arg in REBOT_ARGS ] + \
[os.path.join(ARG_VALUES['outdir'], 'output.xml') ]
rc = call(rebot_cmd, env=os.environ)
if rc == 0:
print 'All critical tests passed'
else:
print '%d critical test%s failed' % (rc, 's' if rc != 1 else '')
return rc
def _has_robot_27():
try:
from robot.result import ExecutionResult
except:
return False
return True
def _exit(rc):
sys.exit(rc)
def _help():
print 'Usage: python run_tests.py python|jython browser [options]'
print
print 'See README.txt for details.'
return 255
def _run_unit_tests():
print 'Running unit tests'
failures = run_unit_tests()
if failures != 0:
print '\n%d unit tests failed - not running acceptance tests!' % failures
else:
print 'All unit tests passed'
return failures
if __name__ == '__main__':
if not len(sys.argv) > 2:
_exit(_help())
unit_failures = _run_unit_tests()
if unit_failures:
_exit(unit_failures)
interpreter = sys.argv[1]
browser = sys.argv[2].lower()
args = sys.argv[3:]
if browser != 'unit':
_exit(acceptance_tests(interpreter, browser, args))
| apache-2.0 | -8,557,650,495,162,871,000 | 29.284404 | 100 | 0.610724 | false |
DataMarket/multilingual | multilingual/query.py | 1 | 28615 | """
Django-multilingual: a QuerySet subclass for models with translatable
fields.
This file contains the implementation for QSRF Django.
"""
import datetime
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models.fields import FieldDoesNotExist
from django.db.models.query import QuerySet, Q, ValuesQuerySet
from django.db.models.sql.query import Query
from django.db.models.sql.datastructures import (
EmptyResultSet,
Empty,
MultiJoin)
from django.db.models.sql.constants import *
from django.db.models.sql.where import WhereNode, EverythingNode, AND, OR
try:
# handle internal API changes in Django rev. 9700
from django.db.models.sql.where import Constraint
def constraint_tuple(alias, col, field, lookup_type, value):
return (Constraint(alias, col, field), lookup_type, value)
except ImportError:
# backwards compatibility, for Django versions 1.0 to rev. 9699
def constraint_tuple(alias, col, field, lookup_type, value):
return (alias, col, field, lookup_type, value)
from multilingual.languages import (
get_translation_table_alias,
get_language_id_list,
get_default_language,
get_translated_field_alias,
get_language_id_from_id_or_code)
__ALL__ = ['MultilingualModelQuerySet']
class MultilingualQuery(Query):
def __init__(self, model, connection, where=WhereNode):
self.extra_join = {}
self.include_translation_data = True
extra_select = {}
super(MultilingualQuery, self).__init__(model, connection, where=where)
opts = self.model._meta
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
master_table_name = opts.db_table
translation_opts = opts.translation_model._meta
trans_table_name = translation_opts.db_table
if hasattr(opts, 'translation_model'):
master_table_name = opts.db_table
for language_id in get_language_id_list():
for fname in [f.attname for f in translation_opts.fields]:
table_alias = get_translation_table_alias(trans_table_name,
language_id)
field_alias = get_translated_field_alias(fname,
language_id)
extra_select[field_alias] = qn2(table_alias) + '.' + qn2(fname)
self.add_extra(extra_select, None, None, None, None, None)
self._trans_extra_select_count = len(self.extra_select)
def clone(self, klass=None, **kwargs):
defaults = {
'extra_join': self.extra_join,
'include_translation_data': self.include_translation_data,
}
defaults.update(kwargs)
return super(MultilingualQuery, self).clone(klass=klass, **defaults)
def pre_sql_setup(self):
"""Adds the JOINS and SELECTS for fetching multilingual data.
"""
super(MultilingualQuery, self).pre_sql_setup()
if not self.include_translation_data:
return
opts = self.model._meta
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
if hasattr(opts, 'translation_model'):
master_table_name = opts.db_table
translation_opts = opts.translation_model._meta
trans_table_name = translation_opts.db_table
for language_id in get_language_id_list():
table_alias = get_translation_table_alias(trans_table_name,
language_id)
trans_join = ('LEFT JOIN %s AS %s ON ((%s.master_id = %s.%s) AND (%s.language_id = %s))'
% (qn2(translation_opts.db_table),
qn2(table_alias),
qn2(table_alias),
qn(master_table_name),
qn2(self.model._meta.pk.column),
qn2(table_alias),
language_id))
self.extra_join[table_alias] = trans_join
def get_from_clause(self):
"""Add the JOINS for related multilingual fields filtering.
"""
result = super(MultilingualQuery, self).get_from_clause()
if not self.include_translation_data:
return result
from_ = result[0]
for join in self.extra_join.values():
from_.append(join)
return (from_, result[1])
def add_filter(self, filter_expr, connector=AND, negate=False, trim=False,
can_reuse=None, process_extras=True):
"""Copied from add_filter to generate WHERES for translation fields.
"""
arg, value = filter_expr
parts = arg.split(LOOKUP_SEP)
if not parts:
raise FieldError("Cannot parse keyword query %r" % arg)
# Work out the lookup type and remove it from 'parts', if necessary.
if len(parts) == 1 or parts[-1] not in self.query_terms:
lookup_type = 'exact'
else:
lookup_type = parts.pop()
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value.
if value is None:
if lookup_type != 'exact':
raise ValueError("Cannot use None as a query value")
lookup_type = 'isnull'
value = True
elif (value == '' and lookup_type == 'exact' and
connection.features.interprets_empty_strings_as_nulls):
lookup_type = 'isnull'
value = True
elif callable(value):
value = value()
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = trim or not negate
try:
field, target, opts, join_list, last, extra_filters = self.setup_joins(
parts, opts, alias, True, allow_many, can_reuse=can_reuse,
negate=negate, process_extras=process_extras)
except MultiJoin, e:
self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
can_reuse)
return
#NOTE: here comes Django Multilingual
if hasattr(opts, 'translation_model'):
field_name = parts[-1]
if field_name == 'pk':
field_name = opts.pk.name
translation_opts = opts.translation_model._meta
if field_name in translation_opts.translated_fields.keys():
field, model, direct, m2m = opts.get_field_by_name(field_name)
if model == opts.translation_model:
language_id = translation_opts.translated_fields[field_name][1]
if language_id is None:
language_id = get_default_language()
master_table_name = opts.db_table
trans_table_alias = get_translation_table_alias(
model._meta.db_table, language_id)
new_table = (master_table_name + "__" + trans_table_alias)
self.where.add(constraint_tuple(new_table, field.column, field, lookup_type, value), connector)
return
final = len(join_list)
penultimate = last.pop()
if penultimate == final:
penultimate = last.pop()
if trim and len(join_list) > 1:
extra = join_list[penultimate:]
join_list = join_list[:penultimate]
final = penultimate
penultimate = last.pop()
col = self.alias_map[extra[0]][LHS_JOIN_COL]
for alias in extra:
self.unref_alias(alias)
else:
col = target.column
alias = join_list[-1]
while final > 1:
# An optimization: if the final join is against the same column as
# we are comparing against, we can go back one step in the join
# chain and compare against the lhs of the join instead (and then
# repeat the optimization). The result, potentially, involves less
# table joins.
join = self.alias_map[alias]
if col != join[RHS_JOIN_COL]:
break
self.unref_alias(alias)
alias = join[LHS_ALIAS]
col = join[LHS_JOIN_COL]
join_list = join_list[:-1]
final -= 1
if final == penultimate:
penultimate = last.pop()
if (lookup_type == 'isnull' and value is True and not negate and
final > 1):
# If the comparison is against NULL, we need to use a left outer
# join when connecting to the previous model. We make that
# adjustment here. We don't do this unless needed as it's less
# efficient at the database level.
self.promote_alias(join_list[penultimate])
if connector == OR:
# Some joins may need to be promoted when adding a new filter to a
# disjunction. We walk the list of new joins and where it diverges
# from any previous joins (ref count is 1 in the table list), we
# make the new additions (and any existing ones not used in the new
# join list) an outer join.
join_it = iter(join_list)
table_it = iter(self.tables)
join_it.next(), table_it.next()
table_promote = False
join_promote = False
for join in join_it:
table = table_it.next()
if join == table and self.alias_refcount[join] > 1:
continue
join_promote = self.promote_alias(join)
if table != join:
table_promote = self.promote_alias(table)
break
self.promote_alias_chain(join_it, join_promote)
self.promote_alias_chain(table_it, table_promote)
self.where.add(constraint_tuple(alias, col, field, lookup_type, value), connector)
if negate:
self.promote_alias_chain(join_list)
if lookup_type != 'isnull':
if final > 1:
for alias in join_list:
if self.alias_map[alias][JOIN_TYPE] == self.LOUTER:
j_col = self.alias_map[alias][RHS_JOIN_COL]
entry = self.where_class()
entry.add(constraint_tuple(alias, j_col, None, 'isnull', True), AND)
entry.negate()
self.where.add(entry, AND)
break
elif not (lookup_type == 'in' and not value) and field.null:
# Leaky abstraction artifact: We have to specifically
# exclude the "foo__in=[]" case from this handling, because
# it's short-circuited in the Where class.
entry = self.where_class()
entry.add(constraint_tuple(alias, col, None, 'isnull', True), AND)
entry.negate()
self.where.add(entry, AND)
if can_reuse is not None:
can_reuse.update(join_list)
if process_extras:
for filter in extra_filters:
self.add_filter(filter, negate=negate, can_reuse=can_reuse,
process_extras=False)
def _setup_joins_with_translation(self, names, opts, alias,
dupe_multis, allow_many=True,
allow_explicit_fk=False, can_reuse=None,
negate=False, process_extras=True):
"""
This is based on a full copy of Query.setup_joins because
currently I see no way to handle it differently.
TO DO: there might actually be a way, by splitting a single
multi-name setup_joins call into separate calls. Check it.
-- [email protected]
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are joining to), 'alias' is the alias for the
table we are joining to. If dupe_multis is True, any many-to-many or
many-to-one joins will always create a new alias (necessary for
disjunctive filters).
Returns the final field involved in the join, the target database
column (used for any 'where' constraint), the final 'opts' value and the
list of tables joined.
"""
joins = [alias]
last = [0]
dupe_set = set()
exclusions = set()
extra_filters = []
for pos, name in enumerate(names):
try:
exclusions.add(int_alias)
except NameError:
pass
exclusions.add(alias)
last.append(len(joins))
if name == 'pk':
name = opts.pk.name
try:
field, model, direct, m2m = opts.get_field_by_name(name)
except FieldDoesNotExist:
for f in opts.fields:
if allow_explicit_fk and name == f.attname:
# XXX: A hack to allow foo_id to work in values() for
# backwards compatibility purposes. If we dropped that
# feature, this could be removed.
field, model, direct, m2m = opts.get_field_by_name(f.name)
break
else:
names = opts.get_all_field_names()
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
if not allow_many and (m2m or not direct):
for alias in joins:
self.unref_alias(alias)
raise MultiJoin(pos + 1)
#NOTE: Start Django Multilingual specific code
if hasattr(opts, 'translation_model'):
translation_opts = opts.translation_model._meta
if model == opts.translation_model:
language_id = translation_opts.translated_fields[name][1]
if language_id is None:
language_id = get_default_language()
#TODO: check alias
master_table_name = opts.db_table
trans_table_alias = get_translation_table_alias(
model._meta.db_table, language_id)
new_table = (master_table_name + "__" + trans_table_alias)
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
trans_join = ('LEFT JOIN %s AS %s ON ((%s.master_id = %s.%s) AND (%s.language_id = %s))'
% (qn2(model._meta.db_table),
qn2(new_table),
qn2(new_table),
qn(master_table_name),
qn2(model._meta.pk.column),
qn2(new_table),
language_id))
self.extra_join[new_table] = trans_join
target = field
continue
#NOTE: End Django Multilingual specific code
elif model:
# The field lives on a base class of the current model.
for int_model in opts.get_base_chain(model):
lhs_col = opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
exclusions.update(self.dupe_avoidance.get(
(id(opts), lhs_col), ()))
dupe_set.add((opts, lhs_col))
opts = int_model._meta
alias = self.join((alias, opts.db_table, lhs_col,
opts.pk.column), exclusions=exclusions)
joins.append(alias)
exclusions.add(alias)
for (dupe_opts, dupe_col) in dupe_set:
self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
cached_data = opts._join_cache.get(name)
orig_opts = opts
dupe_col = direct and field.column or field.field.column
dedupe = dupe_col in opts.duplicate_targets
if dupe_set or dedupe:
if dedupe:
dupe_set.add((opts, dupe_col))
exclusions.update(self.dupe_avoidance.get((id(opts), dupe_col),
()))
if process_extras and hasattr(field, 'extra_filters'):
extra_filters.extend(field.extra_filters(names, pos, negate))
if direct:
if m2m:
# Many-to-many field defined on the current model.
if cached_data:
(table1, from_col1, to_col1, table2, from_col2,
to_col2, opts, target) = cached_data
else:
table1 = field.m2m_db_table()
from_col1 = opts.pk.column
to_col1 = field.m2m_column_name()
opts = field.rel.to._meta
table2 = opts.db_table
from_col2 = field.m2m_reverse_name()
to_col2 = opts.pk.column
target = opts.pk
orig_opts._join_cache[name] = (table1, from_col1,
to_col1, table2, from_col2, to_col2, opts,
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
if int_alias == table2 and from_col2 == to_col2:
joins.append(int_alias)
alias = int_alias
else:
alias = self.join(
(int_alias, table2, from_col2, to_col2),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.extend([int_alias, alias])
elif field.rel:
# One-to-one or many-to-one field
if cached_data:
(table, from_col, to_col, opts, target) = cached_data
else:
opts = field.rel.to._meta
target = field.rel.get_related_field()
table = opts.db_table
from_col = field.column
to_col = target.column
orig_opts._join_cache[name] = (table, from_col, to_col,
opts, target)
alias = self.join((alias, table, from_col, to_col),
exclusions=exclusions, nullable=field.null)
joins.append(alias)
else:
# Non-relation fields.
target = field
break
else:
orig_field = field
field = field.field
if m2m:
# Many-to-many field defined on the target model.
if cached_data:
(table1, from_col1, to_col1, table2, from_col2,
to_col2, opts, target) = cached_data
else:
table1 = field.m2m_db_table()
from_col1 = opts.pk.column
to_col1 = field.m2m_reverse_name()
opts = orig_field.opts
table2 = opts.db_table
from_col2 = field.m2m_column_name()
to_col2 = opts.pk.column
target = opts.pk
orig_opts._join_cache[name] = (table1, from_col1,
to_col1, table2, from_col2, to_col2, opts,
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
alias = self.join((int_alias, table2, from_col2, to_col2),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.extend([int_alias, alias])
else:
# One-to-many field (ForeignKey defined on the target model)
if cached_data:
(table, from_col, to_col, opts, target) = cached_data
else:
local_field = opts.get_field_by_name(
field.rel.field_name)[0]
opts = orig_field.opts
table = opts.db_table
from_col = local_field.column
to_col = field.column
target = opts.pk
orig_opts._join_cache[name] = (table, from_col, to_col,
opts, target)
alias = self.join((alias, table, from_col, to_col),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.append(alias)
for (dupe_opts, dupe_col) in dupe_set:
try:
self.update_dupe_avoidance(dupe_opts, dupe_col, int_alias)
except NameError:
self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
if pos != len(names) - 1:
raise FieldError("Join on field %r not permitted." % name)
return field, target, opts, joins, last, extra_filters
def setup_joins(self, names, opts, alias, dupe_multis, allow_many=True,
allow_explicit_fk=False, can_reuse=None, negate=False,
process_extras=True):
if not self.include_translation_data:
return super(MultilingualQuery, self).setup_joins(names, opts, alias,
dupe_multis, allow_many,
allow_explicit_fk,
can_reuse, negate,
process_extras)
else:
return self._setup_joins_with_translation(names, opts, alias, dupe_multis,
allow_many, allow_explicit_fk,
can_reuse, negate, process_extras)
def get_count(self):
# optimize for the common special case: count without any
# filters
if ((not (self.select or self.where))
and self.include_translation_data):
obj = self.clone(extra_select = {},
extra_join = {},
include_translation_data = False)
return obj.get_count()
else:
return super(MultilingualQuery, self).get_count()
class MultilingualModelQuerySet(QuerySet):
"""
A specialized QuerySet that knows how to handle translatable
fields in ordering and filtering methods.
"""
def __init__(self, model=None, query=None):
query = query or MultilingualQuery(model, connection)
super(MultilingualModelQuerySet, self).__init__(model, query)
def for_language(self, language_id_or_code):
"""
Set the default language for all objects returned with this
query.
"""
clone = self._clone()
clone._default_language = get_language_id_from_id_or_code(language_id_or_code)
return clone
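    # Illustrative usage (hypothetical translatable model ``Article`` whose
    # default manager returns a MultilingualModelQuerySet):
    #
    #     articles = Article.objects.all().for_language('pl')
    #     # translated attribute access on the returned objects defaults to 'pl'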
def iterator(self):
"""
Add the default language information to all returned objects.
"""
default_language = getattr(self, '_default_language', None)
for obj in super(MultilingualModelQuerySet, self).iterator():
obj._default_language = default_language
yield obj
def _clone(self, klass=None, **kwargs):
"""
Override _clone to preserve additional information needed by
MultilingualModelQuerySet.
"""
clone = super(MultilingualModelQuerySet, self)._clone(klass, **kwargs)
clone._default_language = getattr(self, '_default_language', None)
return clone
def order_by(self, *field_names):
if hasattr(self.model._meta, 'translation_model'):
trans_opts = self.model._meta.translation_model._meta
new_field_names = []
for field_name in field_names:
prefix = ''
if field_name[0] == '-':
prefix = '-'
field_name = field_name[1:]
field_and_lang = trans_opts.translated_fields.get(field_name)
if field_and_lang:
field, language_id = field_and_lang
if language_id is None:
language_id = getattr(self, '_default_language', None)
real_name = get_translated_field_alias(field.attname,
language_id)
new_field_names.append(prefix + real_name)
else:
new_field_names.append(prefix + field_name)
return super(MultilingualModelQuerySet, self).extra(order_by=new_field_names)
else:
return super(MultilingualModelQuerySet, self).order_by(*field_names)
def values(self, *fields):
if hasattr(self.model._meta, 'translation_model'):
extra_select = {}
trans_opts = self.model._meta.translation_model._meta
trans_table_name = trans_opts.db_table
qn2 = self.query.connection.ops.quote_name
for field_name in fields:
field_and_lang = trans_opts.translated_fields.get(field_name)
if field_and_lang:
field, language_id = field_and_lang
if language_id is None:
language_id = getattr(self, '_default_language', None)
table_alias = get_translation_table_alias(trans_table_name,
language_id)
extra_select[field_name] = qn2(table_alias) + '.' + qn2(field.attname)
# this maps columns to required field_names
result = self.extra(select = extra_select)
# and it returns MultilingualModelQuerySet instance, so we have to super it
return super(MultilingualModelQuerySet, result).values(*fields)
else:
return super(MultilingualModelQuerySet, self).values(*fields)
def values_list(self, *fields, **kwargs):
if hasattr(self.model._meta, 'translation_model'):
extra_select = {}
trans_opts = self.model._meta.translation_model._meta
trans_table_name = trans_opts.db_table
qn2 = self.query.connection.ops.quote_name
for field_name in fields:
field_and_lang = trans_opts.translated_fields.get(field_name)
if field_and_lang:
field, language_id = field_and_lang
if language_id is None:
language_id = getattr(self, '_default_language', None)
table_alias = get_translation_table_alias(trans_table_name,
language_id)
extra_select[field_name] = qn2(table_alias) + '.' + qn2(field.attname)
# this maps columns to required field_names
result = self.extra(select = extra_select)
            # and it returns a MultilingualModelQuerySet instance, so we have to super it
return super(MultilingualModelQuerySet, result).values_list(*fields, **kwargs)
else:
return super(MultilingualModelQuerySet, self).values_list(*fields, **kwargs)
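# Minimal usage sketch (illustrative only; `Article` and its translated
# `title` field are hypothetical, and it assumes the model's default manager
# returns this queryset class):
#
#   qs = Article.objects.all().for_language('en')
#   qs.order_by('title')         # orders on the English translation
#   qs.values('id', 'title')     # selects the translated column via extra()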
| mit | 3,234,908,400,310,867,000 | 44.492846 | 115 | 0.516792 | false |
scifiswapnil/Project-LoCatr | lib/python2.7/site-packages/django/db/models/base.py | 16 | 75879 | from __future__ import unicode_literals
import copy
import inspect
import warnings
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core import checks
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned,
ObjectDoesNotExist, ValidationError,
)
from django.db import (
DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connection,
connections, router, transaction,
)
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import CASCADE, Collector
from django.db.models.fields.related import (
ForeignObjectRel, OneToOneField, lazy_related_operation, resolve_relation,
)
from django.db.models.manager import Manager
from django.db.models.options import Options
from django.db.models.query import Q
from django.db.models.signals import (
class_prepared, post_init, post_save, pre_init, pre_save,
)
from django.db.models.utils import make_model_tuple
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import (
force_str, force_text, python_2_unicode_compatible,
)
from django.utils.functional import curry
from django.utils.six.moves import zip
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext_lazy as _
from django.utils.version import get_version
@python_2_unicode_compatible
class Deferred(object):
def __repr__(self):
return str('<Deferred field>')
def __str__(self):
return str('<Deferred field>')
DEFERRED = Deferred()
def subclass_exception(name, parents, module, attached_to=None):
"""
Create exception subclass. Used by ModelBase below.
If 'attached_to' is supplied, the exception will be created in a way that
allows it to be pickled, assuming the returned exception class will be added
as an attribute to the 'attached_to' class.
"""
class_dict = {'__module__': module}
if attached_to is not None:
def __reduce__(self):
# Exceptions are special - they've got state that isn't
# in self.__dict__. We assume it is all in self.args.
return (unpickle_inner_exception, (attached_to, name), self.args)
def __setstate__(self, args):
self.args = args
class_dict['__reduce__'] = __reduce__
class_dict['__setstate__'] = __setstate__
return type(name, parents, class_dict)
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_attrs = {'__module__': module}
classcell = attrs.pop('__classcell__', None)
if classcell is not None:
new_attrs['__classcell__'] = classcell
new_class = super_new(cls, name, bases, new_attrs)
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
app_label = None
# Look for an application configuration to attach the model to.
app_config = apps.get_containing_app_config(module)
if getattr(meta, 'app_label', None) is None:
if app_config is None:
if not abstract:
raise RuntimeError(
"Model class %s.%s doesn't declare an explicit "
"app_label and isn't in an application in "
"INSTALLED_APPS." % (module, name)
)
else:
app_label = app_config.label
new_class.add_to_class('_meta', Options(meta, app_label))
if not abstract:
new_class.add_to_class(
'DoesNotExist',
subclass_exception(
str('DoesNotExist'),
tuple(
x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (ObjectDoesNotExist,),
module,
attached_to=new_class))
new_class.add_to_class(
'MultipleObjectsReturned',
subclass_exception(
str('MultipleObjectsReturned'),
tuple(
x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (MultipleObjectsReturned,),
module,
attached_to=new_class))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = chain(
new_class._meta.local_fields,
new_class._meta.local_many_to_many,
new_class._meta.private_fields
)
field_names = {f.name for f in new_fields}
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError(
"Abstract base class containing model fields not "
"permitted for proxy model '%s'." % name
)
else:
continue
if base is None:
base = parent
elif parent._meta.concrete_model is not base._meta.concrete_model:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Collect the parent links for multi-table inheritance.
parent_links = {}
for base in reversed([new_class] + parents):
# Conceptually equivalent to `if base is Model`.
if not hasattr(base, '_meta'):
continue
# Skip concrete parent classes.
if base != new_class and not base._meta.abstract:
continue
# Locate OneToOneField instances.
for field in base._meta.local_fields:
if isinstance(field, OneToOneField):
related = resolve_relation(new_class, field.remote_field.model)
parent_links[make_model_tuple(related)] = field
# Track fields inherited from base models.
inherited_attributes = set()
# Do the appropriate setup for any model parents.
for base in new_class.mro():
if base not in parents or not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
inherited_attributes |= set(base.__dict__.keys())
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
if not base._meta.abstract:
# Check for clashes between locally declared fields and those
# on the base classes.
for field in parent_fields:
if field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes with field of '
'the same name from base class %r.' % (
field.name,
name,
base.__name__,
)
)
else:
inherited_attributes.add(field.name)
# Concrete classes...
base = base._meta.concrete_model
base_key = make_model_tuple(base)
if base_key in parent_links:
field = parent_links[base_key]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.model_name
field = OneToOneField(
base,
on_delete=CASCADE,
name=attr_name,
auto_created=True,
parent_link=True,
)
if attr_name in field_names:
raise FieldError(
"Auto-generated field '%s' in class %r for "
"parent_link to base class %r clashes with "
"declared field of the same name." % (
attr_name,
name,
base.__name__,
)
)
# Only add the ptr field if it's not already present;
# e.g. migrations will already have it specified
if not hasattr(new_class, attr_name):
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
base_parents = base._meta.parents.copy()
# Add fields from abstract base class if it wasn't overridden.
for field in parent_fields:
if (field.name not in field_names and
field.name not in new_class.__dict__ and
field.name not in inherited_attributes):
new_field = copy.deepcopy(field)
new_class.add_to_class(field.name, new_field)
# Replace parent links defined on this base by the new
# field. It will be appropriately resolved if required.
if field.one_to_one:
for parent, parent_link in base_parents.items():
if field == parent_link:
base_parents[parent] = new_field
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base_parents)
# Inherit private fields (like GenericForeignKey) from the parent
# class
for field in base._meta.private_fields:
if field.name in field_names:
if not base._meta.abstract:
raise FieldError(
'Local field %r in class %r clashes with field of '
'the same name from base class %r.' % (
field.name,
name,
base.__name__,
)
)
else:
new_class.add_to_class(field.name, copy.deepcopy(field))
if base_meta and base_meta.abstract and not abstract:
new_class._meta.indexes = [copy.deepcopy(idx) for idx in new_class._meta.indexes]
# Set the name of _meta.indexes. This can't be done in
# Options.contribute_to_class() because fields haven't been added
# to the model at that point.
for index in new_class._meta.indexes:
if not index.name:
index.set_name_with_model(new_class)
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
return new_class
def add_to_class(cls, name, value):
# We should call the contribute_to_class method only if it's bound
if not inspect.isclass(value) and hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
# Defer creating accessors on the foreign class until it has been
# created and registered. If remote_field is None, we're ordering
# with respect to a GenericForeignKey and don't know what the
# foreign class is - we'll add those accessors later in
# contribute_to_class().
if opts.order_with_respect_to.remote_field:
wrt = opts.order_with_respect_to
remote = wrt.remote_field.model
lazy_related_operation(make_foreign_order_accessors, cls, remote)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields))
get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower)
if get_absolute_url_override:
setattr(cls, 'get_absolute_url', get_absolute_url_override)
if not opts.managers or cls._requires_legacy_default_manager():
if any(f.name == 'objects' for f in opts.fields):
raise ValueError(
"Model %s must specify a custom Manager, because it has a "
"field named 'objects'." % cls.__name__
)
manager = Manager()
manager.auto_created = True
cls.add_to_class('objects', manager)
class_prepared.send(sender=cls)
def _requires_legacy_default_manager(cls): # RemovedInDjango20Warning
opts = cls._meta
if opts.manager_inheritance_from_future:
return False
future_default_manager = opts.default_manager
# Step 1: Locate a manager that would have been promoted
# to default manager with the legacy system.
for manager in opts.managers:
originating_model = manager._originating_model
if (cls is originating_model or cls._meta.proxy or
originating_model._meta.abstract):
if manager is not cls._default_manager and not opts.default_manager_name:
warnings.warn(
"Managers from concrete parents will soon qualify as default "
"managers if they appear before any other managers in the "
"MRO. As a result, '{legacy_default_manager}' declared on "
"'{legacy_default_manager_model}' will no longer be the "
"default manager for '{model}' in favor of "
"'{future_default_manager}' declared on "
"'{future_default_manager_model}'. "
"You can redeclare '{legacy_default_manager}' on '{cls}' "
"to keep things the way they are or you can switch to the new "
"behavior right away by setting "
"`Meta.manager_inheritance_from_future` to `True`.".format(
cls=cls.__name__,
model=opts.label,
legacy_default_manager=manager.name,
legacy_default_manager_model=manager._originating_model._meta.label,
future_default_manager=future_default_manager.name,
future_default_manager_model=future_default_manager._originating_model._meta.label,
),
RemovedInDjango20Warning, 2
)
opts.default_manager_name = manager.name
opts._expire_cache()
break
# Step 2: Since there are managers but none of them qualified as
# default managers under the legacy system (meaning that there are
# managers from concrete parents that would be promoted under the
# new system), we need to create a new Manager instance for the
# 'objects' attribute as a deprecation shim.
else:
# If the "future" default manager was auto created there is no
# point warning the user since it's basically the same manager.
if not future_default_manager.auto_created:
warnings.warn(
"Managers from concrete parents will soon qualify as "
"default managers. As a result, the 'objects' manager "
"won't be created (or recreated) automatically "
"anymore on '{model}' and '{future_default_manager}' "
"declared on '{future_default_manager_model}' will be "
"promoted to default manager. You can declare "
"explicitly `objects = models.Manager()` on '{cls}' "
"to keep things the way they are or you can switch "
"to the new behavior right away by setting "
"`Meta.manager_inheritance_from_future` to `True`.".format(
cls=cls.__name__,
model=opts.label,
future_default_manager=future_default_manager.name,
future_default_manager_model=future_default_manager._originating_model._meta.label,
),
RemovedInDjango20Warning, 2
)
return True
@property
def _base_manager(cls):
return cls._meta.base_manager
@property
def _default_manager(cls):
return cls._meta.default_manager
class ModelState(object):
"""
A class for storing instance state
"""
def __init__(self, db=None):
self.db = db
# If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
# Necessary for correct validation of new instances of objects with explicit (non-auto) PKs.
# This impacts validation only; it has no effect on the actual save.
self.adding = True
class Model(six.with_metaclass(ModelBase)):
def __init__(self, *args, **kwargs):
# Alias some things as locals to avoid repeat global lookups
cls = self.__class__
opts = self._meta
_setattr = setattr
_DEFERRED = DEFERRED
pre_init.send(sender=cls, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
# There is a rather weird disparity here; if kwargs, it's set, then args
        # overrides it. It should be one or the other; don't duplicate the work.
        # The reason for the kwargs check is that the standard iterator passes in by
# args, and instantiation for iteration is 33% faster.
if len(args) > len(opts.concrete_fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
if not kwargs:
fields_iter = iter(opts.concrete_fields)
            # The ordering of the zip calls matters - zip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in zip(args, fields_iter):
if val is _DEFERRED:
continue
_setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
fields_iter = iter(opts.fields)
for val, field in zip(args, fields_iter):
if val is _DEFERRED:
continue
_setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# Virtual field
if field.attname not in kwargs and field.column is None:
continue
if kwargs:
if isinstance(field.remote_field, ForeignObjectRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
if rel_obj is not _DEFERRED:
_setattr(self, field.name, rel_obj)
else:
if val is not _DEFERRED:
_setattr(self, field.attname, val)
if kwargs:
property_names = opts._property_names
for prop in tuple(kwargs):
try:
# Any remaining kwargs must correspond to properties or
# virtual fields.
if prop in property_names or opts.get_field(prop):
if kwargs[prop] is not _DEFERRED:
_setattr(self, prop, kwargs[prop])
del kwargs[prop]
except (AttributeError, FieldDoesNotExist):
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
super(Model, self).__init__()
post_init.send(sender=cls, instance=self)
@classmethod
def from_db(cls, db, field_names, values):
if len(values) != len(cls._meta.concrete_fields):
values = list(values)
values.reverse()
values = [values.pop() if f.attname in field_names else DEFERRED for f in cls._meta.concrete_fields]
new = cls(*values)
new._state.adding = False
new._state.db = db
return new
def __repr__(self):
try:
u = six.text_type(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return force_str('<%s: %s>' % (self.__class__.__name__, u))
def __str__(self):
if six.PY2 and hasattr(self, '__unicode__'):
return force_text(self).encode('utf-8')
return str('%s object' % self.__class__.__name__)
def __eq__(self, other):
if not isinstance(other, Model):
return False
if self._meta.concrete_model != other._meta.concrete_model:
return False
my_pk = self._get_pk_val()
if my_pk is None:
return self is other
return my_pk == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if self._get_pk_val() is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self._get_pk_val())
def __reduce__(self):
data = self.__dict__
data[DJANGO_VERSION_PICKLE_KEY] = get_version()
class_id = self._meta.app_label, self._meta.object_name
return model_unpickle, (class_id,), data
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = (
"Pickled model instance's Django version %s does not match "
"the current version %s." % (pickled_version, current_version)
)
else:
msg = "Pickled model instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def get_deferred_fields(self):
"""
Returns a set containing names of deferred fields for this instance.
"""
return {
f.attname for f in self._meta.concrete_fields
if f.attname not in self.__dict__
}
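    # Illustrative example (assumes a hypothetical Article model with a
    # deferred `body` field):
    #
    #   obj = Article.objects.only('headline').get(pk=1)
    #   obj.get_deferred_fields()   # -> set of unloaded attnames, e.g. {'body'}
    #   obj.body                    # attribute access triggers a deferred load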
def refresh_from_db(self, using=None, fields=None):
"""
Reloads field values from the database.
By default, the reloading happens from the database this instance was
loaded from, or by the read router if this instance wasn't loaded from
any database. The using parameter will override the default.
Fields can be used to specify which fields to reload. The fields
should be an iterable of field attnames. If fields is None, then
all non-deferred fields are reloaded.
When accessing deferred fields of an instance, the deferred loading
of the field will call this method.
"""
if fields is not None:
if len(fields) == 0:
return
if any(LOOKUP_SEP in f for f in fields):
raise ValueError(
'Found "%s" in fields argument. Relations and transforms '
'are not allowed in fields.' % LOOKUP_SEP)
db = using if using is not None else self._state.db
db_instance_qs = self.__class__._default_manager.using(db).filter(pk=self.pk)
# Use provided fields, if not set then reload all non-deferred fields.
deferred_fields = self.get_deferred_fields()
if fields is not None:
fields = list(fields)
db_instance_qs = db_instance_qs.only(*fields)
elif deferred_fields:
fields = [f.attname for f in self._meta.concrete_fields
if f.attname not in deferred_fields]
db_instance_qs = db_instance_qs.only(*fields)
db_instance = db_instance_qs.get()
non_loaded_fields = db_instance.get_deferred_fields()
for field in self._meta.concrete_fields:
if field.attname in non_loaded_fields:
# This field wasn't refreshed - skip ahead.
continue
setattr(self, field.attname, getattr(db_instance, field.attname))
# Throw away stale foreign key references.
if field.is_relation and field.get_cache_name() in self.__dict__:
rel_instance = getattr(self, field.get_cache_name())
local_val = getattr(db_instance, field.attname)
related_val = None if rel_instance is None else getattr(rel_instance, field.target_field.attname)
if local_val != related_val or (local_val is None and related_val is None):
del self.__dict__[field.get_cache_name()]
self._state.db = db_instance._state.db
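    # Typical calls (illustrative only; the 'replica' alias and 'status'
    # field are hypothetical):
    #
    #   obj.refresh_from_db()                    # reload all non-deferred fields
    #   obj.refresh_from_db(fields=['status'])   # reload a single attname
    #   obj.refresh_from_db(using='replica')     # read from another database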
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
# Ensure that a model instance without a PK hasn't been assigned to
# a ForeignKey or OneToOneField on this model. If the field is
# nullable, allowing the save() would result in silent data loss.
for field in self._meta.concrete_fields:
if field.is_relation:
# If the related field isn't cached, then an instance hasn't
# been assigned and there's no need to worry about this check.
try:
getattr(self, field.get_cache_name())
except AttributeError:
continue
obj = getattr(self, field.name, None)
# A pk may have been assigned manually to a model instance not
# saved to the database (or auto-generated in a case like
# UUIDField), but we allow the save to proceed and rely on the
# database to raise an IntegrityError if applicable. If
# constraints aren't supported by the database, there's the
# unavoidable risk of data corruption.
if obj and obj.pk is None:
# Remove the object from a related instance cache.
if not field.remote_field.multiple:
delattr(obj, field.remote_field.get_cache_name())
raise ValueError(
"save() prohibited to prevent data loss due to "
"unsaved related object '%s'." % field.name
)
using = using or router.db_for_write(self.__class__, instance=self)
if force_insert and (force_update or update_fields):
raise ValueError("Cannot force both insert and updating in model saving.")
deferred_fields = self.get_deferred_fields()
if update_fields is not None:
# If update_fields is empty, skip the save. We do also check for
# no-op saves later on for inheritance cases. This bailout is
# still needed for skipping signal sending.
if len(update_fields) == 0:
return
update_fields = frozenset(update_fields)
field_names = set()
for field in self._meta.fields:
if not field.primary_key:
field_names.add(field.name)
if field.name != field.attname:
field_names.add(field.attname)
non_model_fields = update_fields.difference(field_names)
if non_model_fields:
raise ValueError("The following fields do not exist in this "
"model or are m2m fields: %s"
% ', '.join(non_model_fields))
# If saving to the same database, and this model is deferred, then
# automatically do a "update_fields" save on the loaded fields.
elif not force_insert and deferred_fields and using == self._state.db:
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key and not hasattr(field, 'through'):
field_names.add(field.attname)
loaded_fields = field_names.difference(deferred_fields)
if loaded_fields:
update_fields = frozenset(loaded_fields)
self.save_base(using=using, force_insert=force_insert,
force_update=force_update, update_fields=update_fields)
save.alters_data = True
def save_base(self, raw=False, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Handles the parts of saving which should be done only once per save,
yet need to be done in raw saves, too. This includes some sanity
checks and signal sending.
The 'raw' argument is telling save_base not to save any parent
models and not to do any changes to the values before save. This
is used by fixture loading.
"""
using = using or router.db_for_write(self.__class__, instance=self)
assert not (force_insert and (force_update or update_fields))
assert update_fields is None or len(update_fields) > 0
cls = origin = self.__class__
# Skip proxies, but keep the origin as the proxy model.
if cls._meta.proxy:
cls = cls._meta.concrete_model
meta = cls._meta
if not meta.auto_created:
pre_save.send(
sender=origin, instance=self, raw=raw, using=using,
update_fields=update_fields,
)
with transaction.atomic(using=using, savepoint=False):
if not raw:
self._save_parents(cls, using, update_fields)
updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if not meta.auto_created:
post_save.send(
sender=origin, instance=self, created=(not updated),
update_fields=update_fields, raw=raw, using=using,
)
save_base.alters_data = True
def _save_parents(self, cls, using, update_fields):
"""
Saves all the parents of cls using values from self.
"""
meta = cls._meta
for parent, field in meta.parents.items():
# Make sure the link fields are synced between parent and self.
if (field and getattr(self, parent._meta.pk.attname) is None and
getattr(self, field.attname) is not None):
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
self._save_parents(cls=parent, using=using, update_fields=update_fields)
self._save_table(cls=parent, using=using, update_fields=update_fields)
# Set the parent's PK value to self.
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
# Since we didn't have an instance of the parent handy set
# attname directly, bypassing the descriptor. Invalidate
# the related object cache, in case it's been accidentally
# populated. A fresh instance will be re-built from the
# database if necessary.
cache_name = field.get_cache_name()
if hasattr(self, cache_name):
delattr(self, cache_name)
def _save_table(self, raw=False, cls=None, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Does the heavy-lifting involved in saving. Updates or inserts the data
for a single table.
"""
meta = cls._meta
non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
if update_fields:
non_pks = [f for f in non_pks
if f.name in update_fields or f.attname in update_fields]
pk_val = self._get_pk_val(meta)
if pk_val is None:
pk_val = meta.pk.get_pk_value_on_save(self)
setattr(self, meta.pk.attname, pk_val)
pk_set = pk_val is not None
if not pk_set and (force_update or update_fields):
raise ValueError("Cannot force an update in save() with no primary key.")
updated = False
# If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
if pk_set and not force_insert:
base_qs = cls._base_manager.using(using)
values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)))
for f in non_pks]
forced_update = update_fields or force_update
updated = self._do_update(base_qs, using, pk_val, values, update_fields,
forced_update)
if force_update and not updated:
raise DatabaseError("Forced update did not affect any rows.")
if update_fields and not updated:
raise DatabaseError("Save with update_fields did not affect any rows.")
if not updated:
if meta.order_with_respect_to:
# If this is a model with an order_with_respect_to
# autopopulate the _order field
field = meta.order_with_respect_to
filter_args = field.get_filter_kwargs_for_object(self)
order_value = cls._base_manager.using(using).filter(**filter_args).count()
self._order = order_value
fields = meta.local_concrete_fields
if not pk_set:
fields = [f for f in fields if f is not meta.auto_field]
update_pk = meta.auto_field and not pk_set
result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
if update_pk:
setattr(self, meta.pk.attname, result)
return updated
def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
"""
This method will try to update the model. If the model was updated (in
the sense that an update query was done and a matching row was found
from the DB) the method will return True.
"""
filtered = base_qs.filter(pk=pk_val)
if not values:
# We can end up here when saving a model in inheritance chain where
# update_fields doesn't target any field in current model. In that
# case we just say the update succeeded. Another case ending up here
# is a model with just PK - in that case check that the PK still
# exists.
return update_fields is not None or filtered.exists()
if self._meta.select_on_save and not forced_update:
if filtered.exists():
# It may happen that the object is deleted from the DB right after
# this check, causing the subsequent UPDATE to return zero matching
# rows. The same result can occur in some rare cases when the
# database returns zero despite the UPDATE being executed
# successfully (a row is matched and updated). In order to
# distinguish these two cases, the object's existence in the
# database is again checked for if the UPDATE query returns 0.
return filtered._update(values) > 0 or filtered.exists()
else:
return False
return filtered._update(values) > 0
def _do_insert(self, manager, using, fields, update_pk, raw):
"""
Do an INSERT. If update_pk is defined then this method should return
the new pk for the model.
"""
return manager._insert([self], fields=fields, return_id=update_pk,
using=using, raw=raw)
def delete(self, using=None, keep_parents=False):
using = using or router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, (
"%s object can't be deleted because its %s attribute is set to None." %
(self._meta.object_name, self._meta.pk.attname)
)
collector = Collector(using=using)
collector.collect([self], keep_parents=keep_parents)
return collector.delete()
delete.alters_data = True
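    # Illustrative usage (not part of the original source; `Article` is a
    # hypothetical model). delete() collects related objects via Collector,
    # issues the deletes, and returns (total_rows_deleted, {label: count}):
    #
    #   total, per_model = Article.objects.get(pk=1).delete()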
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_text(dict(field.flatchoices).get(value, value), strings_only=True)
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = 'gt' if is_next else 'lt'
order = '' if is_next else '-'
param = force_text(getattr(self, field.attname))
q = Q(**{'%s__%s' % (field.name, op): param})
q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by(
'%s%s' % (order, field.name), '%spk' % order
)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = 'gt' if is_next else 'lt'
order = '_order' if is_next else '-_order'
order_field = self._meta.order_with_respect_to
filter_args = order_field.get_filter_kwargs_for_object(self)
obj = self.__class__._default_manager.filter(**filter_args).filter(**{
'_order__%s' % op: self.__class__._default_manager.values('_order').filter(**{
self._meta.pk.name: self.pk
})
}).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, field):
if self.pk is None:
raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)
return getattr(self, field.remote_field.get_related_field().attname)
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Gather a list of checks to perform. Since validate_unique could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check.
Fields that did not validate should also be excluded, but they need
to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
for parent_class in self._meta.get_parent_list():
if parent_class._meta.unique_together:
unique_togethers.append((parent_class, parent_class._meta.unique_together))
for model_class, unique_together in unique_togethers:
for check in unique_together:
for name in check:
# If this is an excluded field, don't add this check.
if name in exclude:
break
else:
unique_checks.append((model_class, tuple(check)))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.get_parent_list():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, 'date', name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, 'year', name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, 'month', name, f.unique_for_month))
return unique_checks, date_checks
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
# Try to look up an existing object with the same values as this
# object's values for all the unique field.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
# TODO: Handle multiple backends with different feature flags.
if (lookup_value is None or
(lookup_value == '' and connection.features.interprets_empty_strings_as_nulls)):
# no value, skip the lookup
continue
if f.primary_key and not self._state.adding:
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
# Note that we need to use the pk as defined by model_class, not
# self.pk. These can be different fields because model inheritance
# allows single model to have effectively multiple primary keys.
# Refs #17615.
model_class_pk = self._get_pk_val(model_class._meta)
if not self._state.adding and model_class_pk is not None:
qs = qs.exclude(pk=model_class_pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
            # there's a ticket to add a date lookup; we can remove this special
            # case if that makes its way in
date = getattr(self, unique_for)
if date is None:
continue
if lookup_type == 'date':
lookup_kwargs['%s__day' % unique_for] = date.day
lookup_kwargs['%s__month' % unique_for] = date.month
lookup_kwargs['%s__year' % unique_for] = date.year
else:
lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field_name, unique_for):
opts = self._meta
field = opts.get_field(field_name)
return ValidationError(
message=field.error_messages['unique_for_date'],
code='unique_for_date',
params={
'model': self,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'lookup_type': lookup_type,
'field': field_name,
'field_label': six.text_type(capfirst(field.verbose_name)),
'date_field': unique_for,
'date_field_label': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
}
)
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
params = {
'model': self,
'model_class': model_class,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'unique_check': unique_check,
}
# A unique field
if len(unique_check) == 1:
field = opts.get_field(unique_check[0])
params['field_label'] = six.text_type(capfirst(field.verbose_name))
return ValidationError(
message=field.error_messages['unique'],
code='unique',
params=params,
)
# unique_together
else:
field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
params['field_labels'] = six.text_type(get_text_list(field_labels, _('and')))
return ValidationError(
message=_("%(model_name)s with this %(field_labels)s already exists."),
code='unique_together',
params=params,
)
def full_clean(self, exclude=None, validate_unique=True):
"""
Calls clean_fields, clean, and validate_unique, on the model,
and raises a ``ValidationError`` for any errors that occurred.
"""
errors = {}
if exclude is None:
exclude = []
else:
exclude = list(exclude)
try:
self.clean_fields(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError as e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
if validate_unique:
for name in errors.keys():
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.append(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
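    # full_clean() is not called automatically by save(); a typical pattern
    # (illustrative only, `Article` is a hypothetical model) is:
    #
    #   article = Article(headline='', pub_date=None)
    #   try:
    #       article.full_clean()
    #   except ValidationError as e:
    #       errors = e.message_dict   # field name -> list of messages
    #   else:
    #       article.save()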
def clean_fields(self, exclude=None):
"""
Cleans all fields and raises a ValidationError containing a dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in f.empty_values:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError as e:
errors[f.name] = e.error_list
if errors:
raise ValidationError(errors)
@classmethod
def check(cls, **kwargs):
errors = []
errors.extend(cls._check_swappable())
errors.extend(cls._check_model())
errors.extend(cls._check_managers(**kwargs))
if not cls._meta.swapped:
errors.extend(cls._check_fields(**kwargs))
errors.extend(cls._check_m2m_through_same_relationship())
errors.extend(cls._check_long_column_names())
clash_errors = (
cls._check_id_field() +
cls._check_field_name_clashes() +
cls._check_model_name_db_lookup_clashes()
)
errors.extend(clash_errors)
# If there are field name clashes, hide consequent column name
# clashes.
if not clash_errors:
errors.extend(cls._check_column_name_clashes())
errors.extend(cls._check_index_together())
errors.extend(cls._check_unique_together())
errors.extend(cls._check_ordering())
return errors
@classmethod
def _check_swappable(cls):
""" Check if the swapped model exists. """
errors = []
if cls._meta.swapped:
try:
apps.get_model(cls._meta.swapped)
except ValueError:
errors.append(
checks.Error(
"'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable,
id='models.E001',
)
)
except LookupError:
app_label, model_name = cls._meta.swapped.split('.')
errors.append(
checks.Error(
"'%s' references '%s.%s', which has not been "
"installed, or is abstract." % (
cls._meta.swappable, app_label, model_name
),
id='models.E002',
)
)
return errors
@classmethod
def _check_model(cls):
errors = []
if cls._meta.proxy:
if cls._meta.local_fields or cls._meta.local_many_to_many:
errors.append(
checks.Error(
"Proxy model '%s' contains model fields." % cls.__name__,
id='models.E017',
)
)
return errors
@classmethod
def _check_managers(cls, **kwargs):
""" Perform all manager checks. """
errors = []
for manager in cls._meta.managers:
errors.extend(manager.check(**kwargs))
return errors
@classmethod
def _check_fields(cls, **kwargs):
""" Perform all field checks. """
errors = []
for field in cls._meta.local_fields:
errors.extend(field.check(**kwargs))
for field in cls._meta.local_many_to_many:
errors.extend(field.check(from_model=cls, **kwargs))
return errors
@classmethod
def _check_m2m_through_same_relationship(cls):
""" Check if no relationship model is used by more than one m2m field.
"""
errors = []
seen_intermediary_signatures = []
fields = cls._meta.local_many_to_many
# Skip when the target model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))
# Skip when the relationship model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))
for f in fields:
signature = (f.remote_field.model, cls, f.remote_field.through)
if signature in seen_intermediary_signatures:
errors.append(
checks.Error(
"The model has two many-to-many relations through "
"the intermediate model '%s'." % f.remote_field.through._meta.label,
obj=cls,
id='models.E003',
)
)
else:
seen_intermediary_signatures.append(signature)
return errors
@classmethod
def _check_id_field(cls):
""" Check if `id` field is a primary key. """
fields = list(f for f in cls._meta.local_fields if f.name == 'id' and f != cls._meta.pk)
# fields is empty or consists of the invalid "id" field
if fields and not fields[0].primary_key and cls._meta.pk.name == 'id':
return [
checks.Error(
"'id' can only be used as a field name if the field also "
"sets 'primary_key=True'.",
obj=cls,
id='models.E004',
)
]
else:
return []
@classmethod
def _check_field_name_clashes(cls):
""" Ref #17673. """
errors = []
used_fields = {} # name or attname -> field
# Check that multi-inheritance doesn't cause field name shadowing.
for parent in cls._meta.get_parent_list():
for f in parent._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
if clash:
errors.append(
checks.Error(
"The field '%s' from parent model "
"'%s' clashes with the field '%s' "
"from parent model '%s'." % (
clash.name, clash.model._meta,
f.name, f.model._meta
),
obj=cls,
id='models.E005',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
# Check that fields defined in the model don't clash with fields from
# parents, including auto-generated fields like multi-table inheritance
# child accessors.
for parent in cls._meta.get_parent_list():
for f in parent._meta.get_fields():
if f not in used_fields:
used_fields[f.name] = f
for f in cls._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
            # Note that we may detect a clash between a user-defined non-unique
            # field "id" and the automatically added unique field "id", both
            # defined on the same model. This special case is considered in
# _check_id_field and here we ignore it.
id_conflict = f.name == "id" and clash and clash.name == "id" and clash.model == cls
if clash and not id_conflict:
errors.append(
checks.Error(
"The field '%s' clashes with the field '%s' "
"from model '%s'." % (
f.name, clash.name, clash.model._meta
),
obj=f,
id='models.E006',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
return errors
@classmethod
def _check_column_name_clashes(cls):
# Store a list of column names which have already been used by other fields.
used_column_names = []
errors = []
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Ensure the column name is not already in use.
if column_name and column_name in used_column_names:
errors.append(
checks.Error(
"Field '%s' has column name '%s' that is used by "
"another field." % (f.name, column_name),
hint="Specify a 'db_column' for the field.",
obj=cls,
id='models.E007'
)
)
else:
used_column_names.append(column_name)
return errors
@classmethod
def _check_model_name_db_lookup_clashes(cls):
errors = []
model_name = cls.__name__
if model_name.startswith('_') or model_name.endswith('_'):
errors.append(
checks.Error(
"The model name '%s' cannot start or end with an underscore "
"as it collides with the query lookup syntax." % model_name,
obj=cls,
id='models.E023'
)
)
elif LOOKUP_SEP in model_name:
errors.append(
checks.Error(
"The model name '%s' cannot contain double underscores as "
"it collides with the query lookup syntax." % model_name,
obj=cls,
id='models.E024'
)
)
return errors
@classmethod
def _check_index_together(cls):
""" Check the value of "index_together" option. """
if not isinstance(cls._meta.index_together, (tuple, list)):
return [
checks.Error(
"'index_together' must be a list or tuple.",
obj=cls,
id='models.E008',
)
]
elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together):
return [
checks.Error(
"All 'index_together' elements must be lists or tuples.",
obj=cls,
id='models.E009',
)
]
else:
errors = []
for fields in cls._meta.index_together:
errors.extend(cls._check_local_fields(fields, "index_together"))
return errors
@classmethod
def _check_unique_together(cls):
""" Check the value of "unique_together" option. """
if not isinstance(cls._meta.unique_together, (tuple, list)):
return [
checks.Error(
"'unique_together' must be a list or tuple.",
obj=cls,
id='models.E010',
)
]
elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together):
return [
checks.Error(
"All 'unique_together' elements must be lists or tuples.",
obj=cls,
id='models.E011',
)
]
else:
errors = []
for fields in cls._meta.unique_together:
errors.extend(cls._check_local_fields(fields, "unique_together"))
return errors
@classmethod
def _check_local_fields(cls, fields, option):
from django.db import models
# In order to avoid hitting the relation tree prematurely, we use our
# own fields_map instead of using get_field()
forward_fields_map = {
field.name: field for field in cls._meta._get_fields(reverse=False)
}
errors = []
for field_name in fields:
try:
field = forward_fields_map[field_name]
except KeyError:
errors.append(
checks.Error(
"'%s' refers to the non-existent field '%s'." % (
option, field_name,
),
obj=cls,
id='models.E012',
)
)
else:
if isinstance(field.remote_field, models.ManyToManyRel):
errors.append(
checks.Error(
"'%s' refers to a ManyToManyField '%s', but "
"ManyToManyFields are not permitted in '%s'." % (
option, field_name, option,
),
obj=cls,
id='models.E013',
)
)
elif field not in cls._meta.local_fields:
errors.append(
checks.Error(
"'%s' refers to field '%s' which is not local to model '%s'."
% (option, field_name, cls._meta.object_name),
hint="This issue may be caused by multi-table inheritance.",
obj=cls,
id='models.E016',
)
)
return errors
@classmethod
def _check_ordering(cls):
""" Check "ordering" option -- is it a list of strings and do all fields
exist? """
if cls._meta._ordering_clash:
return [
checks.Error(
"'ordering' and 'order_with_respect_to' cannot be used together.",
obj=cls,
id='models.E021',
),
]
if cls._meta.order_with_respect_to or not cls._meta.ordering:
return []
if not isinstance(cls._meta.ordering, (list, tuple)):
return [
checks.Error(
"'ordering' must be a tuple or list (even if you want to order by only one field).",
obj=cls,
id='models.E014',
)
]
errors = []
fields = cls._meta.ordering
# Skip '?' fields.
fields = (f for f in fields if f != '?')
# Convert "-field" to "field".
fields = ((f[1:] if f.startswith('-') else f) for f in fields)
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
fields = (f for f in fields if LOOKUP_SEP not in f)
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
fields = {f for f in fields if f != 'pk'}
# Check for invalid or non-existent fields in ordering.
invalid_fields = []
# Any field name that is not present in field_names does not exist.
# Also, ordering by m2m fields is not allowed.
opts = cls._meta
valid_fields = set(chain.from_iterable(
(f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),)
for f in chain(opts.fields, opts.related_objects)
))
invalid_fields.extend(fields - valid_fields)
for invalid_field in invalid_fields:
errors.append(
checks.Error(
"'ordering' refers to the non-existent field '%s'." % invalid_field,
obj=cls,
id='models.E015',
)
)
return errors
@classmethod
def _check_long_column_names(cls):
"""
Check that any auto-generated column names are shorter than the limits
for each database in which the model will be created.
"""
errors = []
allowed_len = None
db_alias = None
# Find the minimum max allowed length among all specified db_aliases.
for db in settings.DATABASES.keys():
# skip databases where the model won't be created
if not router.allow_migrate_model(db, cls):
continue
connection = connections[db]
max_name_length = connection.ops.max_name_length()
if max_name_length is None or connection.features.truncates_names:
continue
else:
if allowed_len is None:
allowed_len = max_name_length
db_alias = db
elif max_name_length < allowed_len:
allowed_len = max_name_length
db_alias = db
if allowed_len is None:
return errors
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Check if auto-generated name for the field is too long
# for the database.
if f.db_column is None and column_name is not None and len(column_name) > allowed_len:
errors.append(
checks.Error(
'Autogenerated column name too long for field "%s". '
'Maximum length is "%s" for database "%s".'
% (column_name, allowed_len, db_alias),
hint="Set the column name manually using 'db_column'.",
obj=cls,
id='models.E018',
)
)
for f in cls._meta.local_many_to_many:
# Skip nonexistent models.
if isinstance(f.remote_field.through, six.string_types):
continue
# Check if auto-generated name for the M2M field is too long
# for the database.
for m2m in f.remote_field.through._meta.local_fields:
_, rel_name = m2m.get_attname_column()
if m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len:
errors.append(
checks.Error(
'Autogenerated column name too long for M2M field '
'"%s". Maximum length is "%s" for database "%s".'
% (rel_name, allowed_len, db_alias),
hint=(
"Use 'through' to create a separate model for "
"M2M and then set column_name using 'db_column'."
),
obj=cls,
id='models.E019',
)
)
return errors
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
if using is None:
using = DEFAULT_DB_ALIAS
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
with transaction.atomic(using=using, savepoint=False):
for i, j in enumerate(id_list):
ordered_obj.objects.filter(pk=j, **filter_args).update(_order=i)
def method_get_order(ordered_obj, self):
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
pk_name = ordered_obj._meta.pk.name
return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True)
def make_foreign_order_accessors(model, related_model):
setattr(
related_model,
'get_%s_order' % model.__name__.lower(),
curry(method_get_order, model)
)
setattr(
related_model,
'set_%s_order' % model.__name__.lower(),
curry(method_set_order, model)
)
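# Illustrative note (model names are hypothetical): for an Answer model
# declared with `order_with_respect_to = 'question'`, the accessors created
# above let you do
#
#   question.get_answer_order()          # -> primary keys in _order
#   question.set_answer_order([3, 1, 2])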
########
# MISC #
########
def model_unpickle(model_id):
"""
Used to unpickle Model subclasses with deferred fields.
"""
if isinstance(model_id, tuple):
model = apps.get_model(*model_id)
else:
# Backwards compat - the model was cached directly in earlier versions.
model = model_id
return model.__new__(model)
model_unpickle.__safe_for_unpickle__ = True
def unpickle_inner_exception(klass, exception_name):
# Get the exception class from the class it is attached to:
exception = getattr(klass, exception_name)
return exception.__new__(exception)
| mit | 8,267,178,087,904,420,000 | 40.737624 | 114 | 0.539873 | false |
SgfPythonDevs/tchristell-raspi | scripts/Gmail.py | 2 | 4325 | #!/usr/bin/env python
# Gmail.py
# Checks for new mail using IMAPClient and a Gmail account
# Uses a callback to react to a push button and send a text message
from imapclient import IMAPClient
import time
import RPi.GPIO as GPIO
# Flag to enable debugging statements
DEBUG = True
# Used for IMAP mail retrieval
HOSTNAME = 'imap.gmail.com'
USERNAME = '[email protected]'
PASSWORD = 'password'
MAILBOX = 'Inbox'
#Loop timer for mail check
MAIL_CHECK_FREQUENCY = 60
# SMTPLIB uses this info for sending text
global EMAIL_USER
EMAIL_USER = "tlcruns"
global EMAIL_PASSWORD
EMAIL_PASSWORD = "password"
global FROM_EMAIL_ADDRESS
FROM_EMAIL_ADDRESS = "[email protected]"
global EMAIL_TO_ADDRESS
EMAIL_TO_ADDRESS = "[email protected]"
global CELL_TO_ADDRESS
CELL_TO_ADDRESS = "[email protected]"
# Flag to set number of emails
FIRST_TIME = True
# Only define one button to trigger text message
buttonOne = 17 #Connected to 3.3 volts
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
# Only using GREEN_LED to make things simple. RED left in for
# example
GREEN_LED = 4
RED_LED = 23
# set both LED pins to output
GPIO.setup(RED_LED, GPIO.OUT)
GPIO.setup(GREEN_LED, GPIO.OUT)
# When we push buttonOne it connects +3.3 volts to input pin 17
# GPIO.PUD_DOWN "pulls" the pin low (ground) so it can detect
# the "high" that the button sends when pushed
GPIO.setup(buttonOne, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
# Create callback function for button one
def text_Function(Channel):
send_email("Test", " Button One Pressed", CELL_TO_ADDRESS)
# Add callback function to the GPIO.RISING event on buttonOne (bouncetime=300 debounces the switch)
GPIO.add_event_detect(buttonOne, GPIO.RISING, callback=text_Function, bouncetime=300)
# ----------------------------------------------------------------------------
# send_email()
# this uses the smtplib library to generate emails, usig vtext to send text
# messages for this demo
# ----------------------------------------------------------------------------
def send_email(sub, text, to):
import smtplib
user = EMAIL_USER
pwd = EMAIL_PASSWORD
FROM = FROM_EMAIL_ADDRESS
TO = [to]
SUBJECT = sub
TEXT = text
# Prepare actual message
    message = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (
        FROM, ", ".join(TO), SUBJECT, TEXT)
try:
server = smtplib.SMTP("smtp.gmail.com:587")
server.ehlo()
server.starttls()
server.login(user, pwd)
server.sendmail(FROM, TO, message)
server.close()
print "successfully sent the mail to: {}".format(to)
except:
        print("Can't send email to: {}".format(to))
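# Illustrative usage (addresses are placeholders): the "to" argument can be a
# normal mailbox or a carrier SMS gateway such as Verizon's vtext.com, e.g.:
#   send_email("Alarm", "Motion detected in the garage", "[email protected]")
#   send_email("Status", "Pi rebooted", EMAIL_TO_ADDRESS)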
# ----------------------------------------------------------------------------
# loop()
# loop logs into gmail account usint IMAPClient and checks for the number of
# unread email messages. If the count is greater than last time it lights the
# LED on pin 17 for 60 seconds, which is the loop timer
# ----------------------------------------------------------------------------
def loop():
global FIRST_TIME
global NEWMAIL_OFFSET
server = IMAPClient(HOSTNAME, use_uid=True, ssl=True)
server.login(USERNAME, PASSWORD)
if DEBUG:
        print('Logging in as ' + USERNAME)
select_info = server.select_folder(MAILBOX)
print('%d messages in INBOX' % select_info['EXISTS'])
folder_status = server.folder_status(MAILBOX, 'UNSEEN')
newmails = int(folder_status['UNSEEN'])
if FIRST_TIME:
FIRST_TIME = False
NEWMAIL_OFFSET = newmails
print('first time and newmail_offset is ', NEWMAIL_OFFSET)
if newmails > NEWMAIL_OFFSET:
print('newmails is ', newmails, ' and newmailoffset is ', NEWMAIL_OFFSET)
NEWMAIL_OFFSET = newmails
GPIO.output(GREEN_LED, True)
GPIO.output(RED_LED, False)
if DEBUG:
print "You have", newmails, "New emails"
else:
print('in else and newmail_offset is ', NEWMAIL_OFFSET)
GPIO.output(GREEN_LED, False)
GPIO.output(RED_LED, True)
server.logout()
time.sleep(MAIL_CHECK_FREQUENCY)
if __name__ == '__main__':
try:
print 'Press Ctrl-C to quit.'
while True:
loop()
finally:
GPIO.cleanup()
| mit | 9,021,506,824,321,068,000 | 27.642384 | 85 | 0.615954 | false |
skarphed/skarphed | admin/src/skarphedadmin/gui/YesNoPage.py | 1 | 2349 | #!/usr/bin/python
#-*- coding: utf-8 -*-
###########################################################
# © 2011 Daniel 'grindhold' Brendle and Team
#
# This file is part of Skarphed.
#
# Skarphed is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# Skarphed is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Skarphed.
# If not, see http://www.gnu.org/licenses/.
###########################################################
import pygtk
pygtk.require("2.0")
import gtk
from skarphedadmin.glue.lng import _
class YesNoPage(gtk.Frame):
def __init__(self, par, message, callback):
gtk.Frame.__init__(self, _("Yes/No"))
self.par = par
self.hbox = gtk.HBox()
self.vbox = gtk.VBox()
self.dummy = gtk.Label("")
self.label = gtk.Label(message)
self.yes = gtk.Button(stock=gtk.STOCK_YES);
self.no = gtk.Button(stock=gtk.STOCK_NO)
self.hbox.pack_start(self.yes)
self.hbox.pack_start(self.no)
self.vbox.pack_start(self.label,False)
self.vbox.pack_start(self.hbox,False)
self.vbox.pack_start(self.dummy,True)
self.vbox.set_spacing(30)
self.alignment = gtk.Alignment(0.5,0.5,0.5,0.5)
self.alignment.add(self.vbox)
self.add(self.alignment)
self.callback = callback
self.yes.connect('clicked', self.yes_callback)
self.no.connect('clicked', self.no_callback)
self.getApplication().getMainWindow().openDialogPane(self)
def no_callback(self, button, data=None):
self.getApplication().getMainWindow().closeDialogPane()
def yes_callback(self, button, data=None):
if self.callback:
self.callback()
self.getApplication().getMainWindow().closeDialogPane()
def getPar(self):
return self.par
def getApplication(self):
return self.par.getApplication()
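# Illustrative usage (assumes "page" is a parent object exposing getApplication()):
#   YesNoPage(page, _("Really delete this module?"), on_confirm_callback)
# The dialog pane opens itself on construction and closes on either button.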
| agpl-3.0 | 8,220,114,302,601,139,000 | 32.542857 | 70 | 0.631175 | false |
mcflugen/dakota-experiments | dakota_utils/models/tests/test_hydrotrend.py | 2 | 4020 | #!/usr/bin/env python
#
# Tests for dakota_utils.models.hydrotrend.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper ([email protected])
from nose.tools import *
import os
import tempfile
import shutil
from dakota_utils.models.hydrotrend import HydroTrend
def setup_module():
print('HydroTrend tests:')
os.environ['_test_hydrotrend_dir'] = tempfile.mkdtemp()
os.chdir(os.environ['_test_hydrotrend_dir'])
global h
h = HydroTrend()
def teardown_module():
shutil.rmtree(os.environ['_test_hydrotrend_dir'])
def test_HydroTrend_no_arguments():
'''
Tests whether no arguments creates input and output directories.
'''
assert_true(os.path.exists(h.input_dir))
assert_true(os.path.exists(h.output_dir))
def test_HydroTrend_set_input_dir():
'''
Tests setting the input directory on init.
'''
os.chdir(os.environ['_test_hydrotrend_dir'])
input_dir = '__hydro_in'
h = HydroTrend(input_dir)
assert_equal(h.input_dir, input_dir)
def test_HydroTrend_get_input_dir():
'''
Tests getting the input directory.
'''
input_dir = 'HYDRO_IN' # the default
assert_equal(os.path.basename(h.input_dir), input_dir)
def test_HydroTrend_set_output_dir():
'''
Tests setting the output directory on init.
'''
os.chdir(os.environ['_test_hydrotrend_dir'])
output_dir = '__hydro_out'
h = HydroTrend(None, output_dir)
assert_equal(h.output_dir, output_dir)
def test_HydroTrend_get_output_dir():
'''
Tests getting the output directory.
'''
output_dir = 'HYDRO_OUTPUT' # the default
assert_equal(os.path.basename(h.output_dir), output_dir)
def test_HydroTrend_get_input_file():
'''
Tests getting the input file name.
'''
input_file = 'HYDRO.IN' # the default
assert_equal(h.input_file, input_file)
def test_HydroTrend_set_input_file():
'''
Tests setting the input file name.
'''
input_file = '__hydro.in'
h.input_file = input_file
assert_equal(h.input_file, input_file)
def test_HydroTrend_get_input_template():
'''
Tests getting the input template name.
'''
input_template = 'HYDRO.IN.template' # the default
assert_equal(h.input_template, input_template)
def test_HydroTrend_set_input_template():
'''
Tests setting the input template name.
'''
input_template = '__hydro.in.template'
h.input_template = input_template
assert_equal(h.input_template, input_template)
def test_HydroTrend_get_hypsometry_file():
'''
Tests getting the hypsometry file name.
'''
hypsometry_file = 'HYDRO0.HYPS' # the default
assert_equal(h.hypsometry_file, hypsometry_file)
def test_HydroTrend_set_hypsometry_file():
'''
Tests setting the hypsometry file name.
'''
hypsometry_file = '__hydro0.hyps'
h.hypsometry_file = hypsometry_file
assert_equal(h.hypsometry_file, hypsometry_file)
def test_HydroTrend_get_output_files():
'''
Tests getting the tuple of output file names.
'''
output_files = ('HYDROASCII.QS') # the default
assert_equal(h.output_files, output_files)
def test_HydroTrend_set_output_files():
'''
Tests setting the tuple of output file names.
'''
output_files = ('foo', 'bar', 'baz')
h.output_files = output_files
assert_equal(h.output_files, output_files)
def test_get_response_statistic():
'''
Tests getting the current response_statistic.
'''
rstat = 'mean' # the default
assert_equal(h.response_statistic, rstat)
def test_set_response_statistic():
'''
Tests setting the response_statistic.
'''
rstat = 'sum'
h.response_statistic = rstat
assert_equal(h.response_statistic, rstat)
@raises(TypeError)
def test_load_zero_arguments():
'''
Tests load() when no argument is passed.
'''
r = h.load()
def test_load_does_not_exist():
'''
Tests load() when a nonexistent output file is defined.
'''
r = h.load('vfnqeubnuen.f')
assert_is_none(r)
| mit | 7,346,356,174,288,590,000 | 24.935484 | 68 | 0.656965 | false |
jakevdp/megaman | megaman/embedding/tests/test_embeddings.py | 4 | 1798 | """General tests for embeddings"""
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
from itertools import product
import numpy as np
from numpy.testing import assert_raises, assert_allclose
from megaman.embedding import (Isomap, LocallyLinearEmbedding,
LTSA, SpectralEmbedding)
from megaman.geometry.geometry import Geometry
EMBEDDINGS = [Isomap, LocallyLinearEmbedding, LTSA, SpectralEmbedding]
# # TODO: make estimator_checks pass!
# def test_estimator_checks():
# from sklearn.utils.estimator_checks import check_estimator
# for Embedding in EMBEDDINGS:
# yield check_estimator, Embedding
def test_embeddings_fit_vs_transform():
rand = np.random.RandomState(42)
X = rand.rand(100, 5)
geom = Geometry(adjacency_kwds = {'radius':1.0},
affinity_kwds = {'radius':1.0})
def check_embedding(Embedding, n_components):
model = Embedding(n_components=n_components,
geom=geom, random_state=rand)
embedding = model.fit_transform(X)
assert model.embedding_.shape == (X.shape[0], n_components)
assert_allclose(embedding, model.embedding_)
for Embedding in EMBEDDINGS:
for n_components in [1, 2, 3]:
yield check_embedding, Embedding, n_components
def test_embeddings_bad_arguments():
rand = np.random.RandomState(32)
X = rand.rand(100, 3)
def check_bad_args(Embedding):
# no radius set
embedding = Embedding()
assert_raises(ValueError, embedding.fit, X)
# unrecognized geometry
embedding = Embedding(radius=2, geom='blah')
assert_raises(ValueError, embedding.fit, X)
for Embedding in EMBEDDINGS:
yield check_bad_args, Embedding
| bsd-2-clause | -698,770,212,139,749,600 | 31.690909 | 77 | 0.669077 | false |
m0ppers/arangodb | 3rdParty/boost/1.61.0/tools/build/src/build_system.py | 11 | 33823 | # Status: mostly ported. Missing is --out-xml support, 'configure' integration
# and some FIXME.
# Base revision: 64351
# Copyright 2003, 2005 Dave Abrahams
# Copyright 2006 Rene Rivera
# Copyright 2003, 2004, 2005, 2006, 2007 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from b2.build.engine import Engine
from b2.manager import Manager
from b2.util.path import glob
from b2.build import feature, property_set
import b2.build.virtual_target
from b2.build.targets import ProjectTarget
from b2.util.sequence import unique
import b2.build.build_request
from b2.build.errors import ExceptionWithUserContext
import b2.tools.common
from b2.build.toolset import using
import b2.build.project as project
import b2.build.virtual_target as virtual_target
import b2.build.build_request as build_request
import b2.util.regex
from b2.manager import get_manager
from b2.util import cached
from b2.util import option
import bjam
import os
import sys
import re
################################################################################
#
# Module global data.
#
################################################################################
# Flag indicating we should display additional debugging information related to
# locating and loading Boost Build configuration files.
debug_config = False
# The cleaning is tricky. Say, if user says 'bjam --clean foo' where 'foo' is a
# directory, then we want to clean targets which are in 'foo' as well as those
# in any children Jamfiles under foo but not in any unrelated Jamfiles. To
# achieve this we collect a list of projects under which cleaning is allowed.
project_targets = []
# Virtual targets obtained when building main targets references on the command
# line. When running 'bjam --clean main_target' we want to clean only files
# belonging to that main target so we need to record which targets are produced
# for it.
results_of_main_targets = []
# Was an XML dump requested?
out_xml = False
# Default toolset & version to be used in case no other toolset has been used
# explicitly by either the loaded configuration files, the loaded project build
# scripts or an explicit toolset request on the command line. If not specified,
# an arbitrary default will be used based on the current host OS. This value,
# while not strictly necessary, has been added to allow testing Boost-Build's
# default toolset usage functionality.
default_toolset = None
default_toolset_version = None
################################################################################
#
# Public rules.
#
################################################################################
# Returns the property set with the free features from the currently processed
# build request.
#
def command_line_free_features():
return command_line_free_features
# Sets the default toolset & version to be used in case no other toolset has
# been used explicitly by either the loaded configuration files, the loaded
# project build scripts or an explicit toolset request on the command line. For
# more detailed information see the comment related to used global variables.
#
def set_default_toolset(toolset, version=None):
    global default_toolset, default_toolset_version
    default_toolset = toolset
    default_toolset_version = version
pre_build_hook = []
def add_pre_build_hook(callable):
pre_build_hook.append(callable)
post_build_hook = None
def set_post_build_hook(callable):
    global post_build_hook
    post_build_hook = callable
################################################################################
#
# Local rules.
#
################################################################################
# Returns actual Jam targets to be used for executing a clean request.
#
def actual_clean_targets(targets):
# Construct a list of projects explicitly detected as targets on this build
# system run. These are the projects under which cleaning is allowed.
for t in targets:
if isinstance(t, b2.build.targets.ProjectTarget):
project_targets.append(t.project_module())
# Construct a list of targets explicitly detected on this build system run
# as a result of building main targets.
targets_to_clean = set()
for t in results_of_main_targets:
# Do not include roots or sources.
targets_to_clean.update(virtual_target.traverse(t))
to_clean = []
for t in get_manager().virtual_targets().all_targets():
# Remove only derived targets.
if t.action():
p = t.project()
if t in targets_to_clean or should_clean_project(p.project_module()):
to_clean.append(t)
return [t.actualize() for t in to_clean]
_target_id_split = re.compile("(.*)//(.*)")
# Given a target id, try to find and return the corresponding target. This is
# only invoked when there is no Jamfile in ".". This code somewhat duplicates
# code in project-target.find but we can not reuse that code without a
# project-targets instance.
#
def find_target(target_id):
projects = get_manager().projects()
m = _target_id_split.match(target_id)
if m:
pm = projects.find(m.group(1), ".")
else:
pm = projects.find(target_id, ".")
if pm:
result = projects.target(pm)
if m:
result = result.find(m.group(2))
return result
def initialize_config_module(module_name, location=None):
get_manager().projects().initialize(module_name, location)
# Helper rule used to load configuration files. Loads the first configuration
# file with the given 'filename' at 'path' into module with name 'module-name'.
# Not finding the requested file may or may not be treated as an error depending
# on the must-find parameter. Returns a normalized path to the loaded
# configuration file or nothing if no file was loaded.
#
def load_config(module_name, filename, paths, must_find=False):
if debug_config:
        print "notice: Searching '%s' for '%s' configuration file '%s'." \
% (paths, module_name, filename)
where = None
for path in paths:
t = os.path.join(path, filename)
if os.path.exists(t):
where = t
break
if where:
where = os.path.realpath(where)
if debug_config:
print "notice: Loading '%s' configuration file '%s' from '%s'." \
% (module_name, filename, where)
# Set source location so that path-constant in config files
# with relative paths work. This is of most importance
# for project-config.jam, but may be used in other
# config files as well.
attributes = get_manager().projects().attributes(module_name) ;
attributes.set('source-location', os.path.dirname(where), True)
get_manager().projects().load_standalone(module_name, where)
else:
        msg = "Configuration file '%s' not found in '%s'." % (filename, paths)
if must_find:
get_manager().errors()(msg)
elif debug_config:
print msg
return where
# Loads all the configuration files used by Boost Build in the following order:
#
# -- test-config --
# Loaded only if specified on the command-line using the --test-config
# command-line parameter. It is ok for this file not to exist even if
# specified. If this configuration file is loaded, regular site and user
# configuration files will not be. If a relative path is specified, file is
# searched for in the current folder.
#
# -- site-config --
# Always named site-config.jam. Will only be found if located on the system
# root path (Windows), /etc (non-Windows), user's home folder or the Boost
# Build path, in that order. Not loaded in case the test-config configuration
# file is loaded or the --ignore-site-config command-line option is specified.
#
# -- user-config --
# Named user-config.jam by default or may be named explicitly using the
# --user-config command-line option or the BOOST_BUILD_USER_CONFIG environment
# variable. If named explicitly the file is looked for from the current working
# directory and if the default one is used then it is searched for in the
# user's home directory and the Boost Build path, in that order. Not loaded in
# case either the test-config configuration file is loaded or an empty file
# name is explicitly specified. If the file name has been given explicitly then
# the file must exist.
#
# Test configurations have been added primarily for use by Boost Build's
# internal unit testing system but may be used freely in other places as well.
#
def load_configuration_files():
# Flag indicating that site configuration should not be loaded.
ignore_site_config = "--ignore-site-config" in sys.argv
initialize_config_module("test-config")
test_config = None
for a in sys.argv:
m = re.match("--test-config=(.*)$", a)
if m:
test_config = b2.util.unquote(m.group(1))
break
if test_config:
where = load_config("test-config", os.path.basename(test_config), [os.path.dirname(test_config)])
if where:
if debug_config:
print "notice: Regular site and user configuration files will"
print "notice: be ignored due to the test configuration being loaded."
user_path = [os.path.expanduser("~")] + bjam.variable("BOOST_BUILD_PATH")
site_path = ["/etc"] + user_path
if os.name in ["nt"]:
site_path = [os.getenv("SystemRoot")] + user_path
if debug_config and not test_config and ignore_site_config:
print "notice: Site configuration files will be ignored due to the"
print "notice: --ignore-site-config command-line option."
initialize_config_module("site-config")
if not test_config and not ignore_site_config:
load_config('site-config', 'site-config.jam', site_path)
initialize_config_module('user-config')
if not test_config:
# Here, user_config has value of None if nothing is explicitly
# specified, and value of '' if user explicitly does not want
# to load any user config.
user_config = None
for a in sys.argv:
m = re.match("--user-config=(.*)$", a)
if m:
user_config = m.group(1)
break
if user_config is None:
user_config = os.getenv("BOOST_BUILD_USER_CONFIG")
# Special handling for the case when the OS does not strip the quotes
# around the file name, as is the case when using Cygwin bash.
user_config = b2.util.unquote(user_config)
explicitly_requested = user_config
if user_config is None:
user_config = "user-config.jam"
if user_config:
if explicitly_requested:
user_config = os.path.abspath(user_config)
if debug_config:
print "notice: Loading explicitly specified user configuration file:"
print " " + user_config
load_config('user-config', os.path.basename(user_config), [os.path.dirname(user_config)], True)
else:
load_config('user-config', os.path.basename(user_config), user_path)
else:
if debug_config:
print "notice: User configuration file loading explicitly disabled."
# We look for project-config.jam from "." upward. I am not sure this is
# 100% right decision, we might as well check for it only alongside the
# Jamroot file. However:
# - We need to load project-config.jam before Jamroot
# - We probably need to load project-config.jam even if there is no Jamroot
# - e.g. to implement automake-style out-of-tree builds.
if os.path.exists("project-config.jam"):
file = ["project-config.jam"]
else:
file = b2.util.path.glob_in_parents(".", ["project-config.jam"])
if file:
initialize_config_module('project-config', os.path.dirname(file[0]))
load_config('project-config', "project-config.jam", [os.path.dirname(file[0])], True)
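# Illustrative example (not part of this module): a minimal user-config.jam,
# picked up from the user's home directory or BOOST_BUILD_PATH, might contain:
#
#   using gcc : 5 : /usr/bin/g++-5 ;
#   using python : 2.7 ;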
# Autoconfigure toolsets based on any instances of --toolset=xx,yy,...zz or
# toolset=xx,yy,...zz in the command line. May return additional properties to
# be processed as if they had been specified by the user.
#
def process_explicit_toolset_requests():
extra_properties = []
option_toolsets = [e for option in b2.util.regex.transform(sys.argv, "^--toolset=(.*)$")
for e in option.split(',')]
feature_toolsets = [e for option in b2.util.regex.transform(sys.argv, "^toolset=(.*)$")
for e in option.split(',')]
for t in option_toolsets + feature_toolsets:
# Parse toolset-version/properties.
(toolset_version, toolset, version) = re.match("(([^-/]+)-?([^/]+)?)/?.*", t).groups()
if debug_config:
print "notice: [cmdline-cfg] Detected command-line request for '%s': toolset= %s version=%s" \
% (toolset_version, toolset, version)
# If the toolset is not known, configure it now.
known = False
if toolset in feature.values("toolset"):
known = True
if known and version and not feature.is_subvalue("toolset", toolset, "version", version):
known = False
# TODO: we should do 'using $(toolset)' in case no version has been
# specified and there are no versions defined for the given toolset to
# allow the toolset to configure its default version. For this we need
# to know how to detect whether a given toolset has any versions
# defined. An alternative would be to do this whenever version is not
# specified but that would require that toolsets correctly handle the
# case when their default version is configured multiple times which
# should be checked for all existing toolsets first.
if not known:
if debug_config:
print "notice: [cmdline-cfg] toolset '%s' not previously configured; attempting to auto-configure now" % toolset_version
if version is not None:
using(toolset, version)
else:
using(toolset)
else:
if debug_config:
print "notice: [cmdline-cfg] toolset '%s' already configured" % toolset_version
# Make sure we get an appropriate property into the build request in
# case toolset has been specified using the "--toolset=..." command-line
# option form.
if not t in sys.argv and not t in feature_toolsets:
if debug_config:
                print "notice: [cmdline-cfg] adding toolset=%s to the build request." % t
            extra_properties.append("toolset=%s" % t)
return extra_properties
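# For example (illustrative), running "b2 toolset=gcc-4.9,clang" or
# "b2 --toolset=msvc-14.0" splits each element on ',' above and auto-configures
# any toolset-version that has not been configured by a config file already.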
# Returns 'true' if the given 'project' is equal to or is a (possibly indirect)
# child to any of the projects requested to be cleaned in this build system run.
# Returns 'false' otherwise. Expects the .project-targets list to have already
# been constructed.
#
@cached
def should_clean_project(project):
if project in project_targets:
return True
else:
parent = get_manager().projects().attribute(project, "parent-module")
if parent and parent != "user-config":
return should_clean_project(parent)
else:
return False
################################################################################
#
# main()
# ------
#
################################################################################
def main():
sys.argv = bjam.variable("ARGV")
# FIXME: document this option.
if "--profiling" in sys.argv:
import cProfile
r = cProfile.runctx('main_real()', globals(), locals(), "stones.prof")
import pstats
stats = pstats.Stats("stones.prof")
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_callers(20)
return r
else:
try:
return main_real()
except ExceptionWithUserContext, e:
e.report()
def main_real():
global debug_config, out_xml
debug_config = "--debug-configuration" in sys.argv
out_xml = any(re.match("^--out-xml=(.*)$", a) for a in sys.argv)
engine = Engine()
global_build_dir = option.get("build-dir")
manager = Manager(engine, global_build_dir)
import b2.build.configure as configure
if "--version" in sys.argv:
from b2.build import version
version.report()
return
# This module defines types and generator and what not,
# and depends on manager's existence
import b2.tools.builtin
b2.tools.common.init(manager)
load_configuration_files()
# Load explicitly specified toolset modules.
extra_properties = process_explicit_toolset_requests()
# Load the actual project build script modules. We always load the project
# in the current folder so 'use-project' directives have any chance of
# being seen. Otherwise, we would not be able to refer to subprojects using
# target ids.
current_project = None
projects = get_manager().projects()
if projects.find(".", "."):
current_project = projects.target(projects.load("."))
# Load the default toolset module if no other has already been specified.
if not feature.values("toolset"):
dt = default_toolset
dtv = None
if default_toolset:
dtv = default_toolset_version
else:
dt = "gcc"
if os.name == 'nt':
dt = "msvc"
# FIXME:
#else if [ os.name ] = MACOSX
#{
# default-toolset = darwin ;
#}
print "warning: No toolsets are configured."
print "warning: Configuring default toolset '%s'." % dt
print "warning: If the default is wrong, your build may not work correctly."
print "warning: Use the \"toolset=xxxxx\" option to override our guess."
print "warning: For more configuration options, please consult"
print "warning: http://boost.org/boost-build2/doc/html/bbv2/advanced/configuration.html"
using(dt, dtv)
# Parse command line for targets and properties. Note that this requires
# that all project files already be loaded.
(target_ids, properties) = build_request.from_command_line(sys.argv[1:] + extra_properties)
# Check that we actually found something to build.
if not current_project and not target_ids:
get_manager().errors()("no Jamfile in current directory found, and no target references specified.")
# FIXME:
# EXIT
# Flags indicating that this build system run has been started in order to
# clean existing instead of create new targets. Note that these are not the
# final flag values as they may get changed later on due to some special
# targets being specified on the command line.
clean = "--clean" in sys.argv
cleanall = "--clean-all" in sys.argv
# List of explicitly requested files to build. Any target references read
# from the command line parameter not recognized as one of the targets
# defined in the loaded Jamfiles will be interpreted as an explicitly
# requested file to build. If any such files are explicitly requested then
# only those files and the targets they depend on will be built and they
# will be searched for among targets that would have been built had there
# been no explicitly requested files.
explicitly_requested_files = []
# List of Boost Build meta-targets, virtual-targets and actual Jam targets
# constructed in this build system run.
targets = []
virtual_targets = []
actual_targets = []
explicitly_requested_files = []
# Process each target specified on the command-line and convert it into
# internal Boost Build target objects. Detect special clean target. If no
# main Boost Build targets were explictly requested use the current project
# as the target.
for id in target_ids:
if id == "clean":
clean = 1
else:
t = None
if current_project:
t = current_project.find(id, no_error=1)
else:
t = find_target(id)
if not t:
print "notice: could not find main target '%s'" % id
                print "notice: assuming it is the name of a file to create"
explicitly_requested_files.append(id)
else:
targets.append(t)
if not targets:
targets = [projects.target(projects.module_name("."))]
# FIXME: put this BACK.
## if [ option.get dump-generators : : true ]
## {
## generators.dump ;
## }
# We wish to put config.log in the build directory corresponding
# to Jamroot, so that the location does not differ depending on
# directory where we do build. The amount of indirection necessary
# here is scary.
first_project = targets[0].project()
first_project_root_location = first_project.get('project-root')
first_project_root_module = manager.projects().load(first_project_root_location)
first_project_root = manager.projects().target(first_project_root_module)
first_build_build_dir = first_project_root.build_dir()
configure.set_log_file(os.path.join(first_build_build_dir, "config.log"))
virtual_targets = []
global results_of_main_targets
# Expand properties specified on the command line into multiple property
# sets consisting of all legal property combinations. Each expanded property
# set will be used for a single build run. E.g. if multiple toolsets are
# specified then requested targets will be built with each of them.
# The expansion is being performed as late as possible so that the feature
# validation is performed after all necessary modules (including project targets
# on the command line) have been loaded.
if properties:
expanded = []
for p in properties:
expanded.extend(build_request.convert_command_line_element(p))
expanded = build_request.expand_no_defaults(expanded)
else:
expanded = [property_set.empty()]
# Now that we have a set of targets to build and a set of property sets to
# build the targets with, we can start the main build process by using each
# property set to generate virtual targets from all of our listed targets
# and any of their dependants.
for p in expanded:
manager.set_command_line_free_features(property_set.create(p.free()))
for t in targets:
try:
g = t.generate(p)
if not isinstance(t, ProjectTarget):
results_of_main_targets.extend(g.targets())
virtual_targets.extend(g.targets())
except ExceptionWithUserContext, e:
e.report()
except Exception:
raise
# Convert collected virtual targets into actual raw Jam targets.
for t in virtual_targets:
actual_targets.append(t.actualize())
# FIXME: restore
## # If XML data output has been requested prepare additional rules and targets
## # so we can hook into Jam to collect build data while its building and have
## # it trigger the final XML report generation after all the planned targets
## # have been built.
## if $(.out-xml)
## {
## # Get a qualified virtual target name.
## rule full-target-name ( target )
## {
## local name = [ $(target).name ] ;
## local project = [ $(target).project ] ;
## local project-path = [ $(project).get location ] ;
## return $(project-path)//$(name) ;
## }
## # Generate an XML file containing build statistics for each constituent.
## #
## rule out-xml ( xml-file : constituents * )
## {
## # Prepare valid XML header and footer with some basic info.
## local nl = "
## " ;
## local jam = [ version.jam ] ;
## local os = [ modules.peek : OS OSPLAT JAMUNAME ] "" ;
## local timestamp = [ modules.peek : JAMDATE ] ;
## local cwd = [ PWD ] ;
## local command = $(.sys.argv) ;
## local bb-version = [ version.boost-build ] ;
## .header on $(xml-file) =
## "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
## "$(nl)<build format=\"1.0\" version=\"$(bb-version)\">"
## "$(nl) <jam version=\"$(jam:J=.)\" />"
## "$(nl) <os name=\"$(os[1])\" platform=\"$(os[2])\"><![CDATA[$(os[3-]:J= )]]></os>"
## "$(nl) <timestamp><![CDATA[$(timestamp)]]></timestamp>"
## "$(nl) <directory><![CDATA[$(cwd)]]></directory>"
## "$(nl) <command><![CDATA[\"$(command:J=\" \")\"]]></command>"
## ;
## .footer on $(xml-file) =
## "$(nl)</build>" ;
## # Generate the target dependency graph.
## .contents on $(xml-file) +=
## "$(nl) <targets>" ;
## for local t in [ virtual-target.all-targets ]
## {
## local action = [ $(t).action ] ;
## if $(action)
## # If a target has no action, it has no dependencies.
## {
## local name = [ full-target-name $(t) ] ;
## local sources = [ $(action).sources ] ;
## local dependencies ;
## for local s in $(sources)
## {
## dependencies += [ full-target-name $(s) ] ;
## }
## local path = [ $(t).path ] ;
## local jam-target = [ $(t).actual-name ] ;
## .contents on $(xml-file) +=
## "$(nl) <target>"
## "$(nl) <name><![CDATA[$(name)]]></name>"
## "$(nl) <dependencies>"
## "$(nl) <dependency><![CDATA[$(dependencies)]]></dependency>"
## "$(nl) </dependencies>"
## "$(nl) <path><![CDATA[$(path)]]></path>"
## "$(nl) <jam-target><![CDATA[$(jam-target)]]></jam-target>"
## "$(nl) </target>"
## ;
## }
## }
## .contents on $(xml-file) +=
## "$(nl) </targets>" ;
## # Build $(xml-file) after $(constituents). Do so even if a
## # constituent action fails and regenerate the xml on every bjam run.
## INCLUDES $(xml-file) : $(constituents) ;
## ALWAYS $(xml-file) ;
## __ACTION_RULE__ on $(xml-file) = build-system.out-xml.generate-action ;
## out-xml.generate $(xml-file) ;
## }
## # The actual build actions are here; if we did this work in the actions
## # clause we would have to form a valid command line containing the
## # result of @(...) below (the name of the XML file).
## #
## rule out-xml.generate-action ( args * : xml-file
## : command status start end user system : output ? )
## {
## local contents =
## [ on $(xml-file) return $(.header) $(.contents) $(.footer) ] ;
## local f = @($(xml-file):E=$(contents)) ;
## }
## # Nothing to do here; the *real* actions happen in
## # out-xml.generate-action.
## actions quietly out-xml.generate { }
## # Define the out-xml file target, which depends on all the targets so
## # that it runs the collection after the targets have run.
## out-xml $(.out-xml) : $(actual-targets) ;
## # Set up a global __ACTION_RULE__ that records all the available
## # statistics about each actual target in a variable "on" the --out-xml
## # target.
## #
## rule out-xml.collect ( xml-file : target : command status start end user
## system : output ? )
## {
## local nl = "
## " ;
## # Open the action with some basic info.
## .contents on $(xml-file) +=
## "$(nl) <action status=\"$(status)\" start=\"$(start)\" end=\"$(end)\" user=\"$(user)\" system=\"$(system)\">" ;
## # If we have an action object we can print out more detailed info.
## local action = [ on $(target) return $(.action) ] ;
## if $(action)
## {
## local action-name = [ $(action).action-name ] ;
## local action-sources = [ $(action).sources ] ;
## local action-props = [ $(action).properties ] ;
## # The qualified name of the action which we created the target.
## .contents on $(xml-file) +=
## "$(nl) <name><![CDATA[$(action-name)]]></name>" ;
## # The sources that made up the target.
## .contents on $(xml-file) +=
## "$(nl) <sources>" ;
## for local source in $(action-sources)
## {
## local source-actual = [ $(source).actual-name ] ;
## .contents on $(xml-file) +=
## "$(nl) <source><![CDATA[$(source-actual)]]></source>" ;
## }
## .contents on $(xml-file) +=
## "$(nl) </sources>" ;
## # The properties that define the conditions under which the
## # target was built.
## .contents on $(xml-file) +=
## "$(nl) <properties>" ;
## for local prop in [ $(action-props).raw ]
## {
## local prop-name = [ MATCH ^<(.*)>$ : $(prop:G) ] ;
## .contents on $(xml-file) +=
## "$(nl) <property name=\"$(prop-name)\"><![CDATA[$(prop:G=)]]></property>" ;
## }
## .contents on $(xml-file) +=
## "$(nl) </properties>" ;
## }
## local locate = [ on $(target) return $(LOCATE) ] ;
## locate ?= "" ;
## .contents on $(xml-file) +=
## "$(nl) <jam-target><![CDATA[$(target)]]></jam-target>"
## "$(nl) <path><![CDATA[$(target:G=:R=$(locate))]]></path>"
## "$(nl) <command><![CDATA[$(command)]]></command>"
## "$(nl) <output><![CDATA[$(output)]]></output>" ;
## .contents on $(xml-file) +=
## "$(nl) </action>" ;
## }
## # When no __ACTION_RULE__ is set "on" a target, the search falls back to
## # the global module.
## module
## {
## __ACTION_RULE__ = build-system.out-xml.collect
## [ modules.peek build-system : .out-xml ] ;
## }
## IMPORT
## build-system :
## out-xml.collect
## out-xml.generate-action
## : :
## build-system.out-xml.collect
## build-system.out-xml.generate-action
## ;
## }
j = option.get("jobs")
if j:
bjam.call("set-variable", 'PARALLELISM', j)
k = option.get("keep-going", "true", "true")
if k in ["on", "yes", "true"]:
bjam.call("set-variable", "KEEP_GOING", "1")
elif k in ["off", "no", "false"]:
bjam.call("set-variable", "KEEP_GOING", "0")
else:
print "error: Invalid value for the --keep-going option"
sys.exit()
# The 'all' pseudo target is not strictly needed expect in the case when we
# use it below but people often assume they always have this target
# available and do not declare it themselves before use which may cause
# build failures with an error message about not being able to build the
# 'all' target.
bjam.call("NOTFILE", "all")
# And now that all the actual raw Jam targets and all the dependencies
# between them have been prepared all that is left is to tell Jam to update
# those targets.
if explicitly_requested_files:
# Note that this case can not be joined with the regular one when only
# exact Boost Build targets are requested as here we do not build those
# requested targets but only use them to construct the dependency tree
# needed to build the explicitly requested files.
# FIXME: add $(.out-xml)
bjam.call("UPDATE", ["<e>%s" % x for x in explicitly_requested_files])
elif cleanall:
bjam.call("UPDATE", "clean-all")
elif clean:
manager.engine().set_update_action("common.Clean", "clean",
actual_clean_targets(targets))
bjam.call("UPDATE", "clean")
else:
# FIXME:
#configure.print-configure-checks-summary ;
if pre_build_hook:
for h in pre_build_hook:
h()
bjam.call("DEPENDS", "all", actual_targets)
ok = bjam.call("UPDATE_NOW", "all") # FIXME: add out-xml
if post_build_hook:
post_build_hook(ok)
# Prevent automatic update of the 'all' target, now that
# we have explicitly updated what we wanted.
bjam.call("UPDATE")
if manager.errors().count() == 0:
return ["ok"]
else:
return []
| apache-2.0 | 4,794,403,222,127,328,000 | 38.011534 | 136 | 0.584011 | false |
kc4271/batch_downloader | requests/packages/chardet/chardistribution.py | 2755 | 9226 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._mCharToFreqOrder = None
self._mTableSize = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
self._mTotalChars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._mFreqChars = 0
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
# we only care about 2-bytes character in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
def got_enough_data(self):
# It is not necessary to receive all data to draw conclusion.
# For charset detection, certain amount of data is enough
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
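    # Worked example (illustrative): the GB2312 byte pair 0xB0 0xA1 yields
    # order 94 * (0xB0 - 0xB0) + (0xA1 - 0xA1) = 0, the first frequency slot,
    # while 0xB0 0xA2 maps to order 1; anything outside the ranges returns -1.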
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
# second byte range: 0x40 -- 0x7e, 0x81 -- oxfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(aBuf[0])
if char >= 0xA0:
return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
else:
return -1
| mit | -8,079,003,940,217,450,000 | 38.939394 | 80 | 0.627683 | false |
cryptickp/heat | heat/engine/resources/openstack/glance/glance_image.py | 4 | 4527 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class GlanceImage(resource.Resource):
'''
    A resource for managing an image in Glance.
'''
support_status = support.SupportStatus(version='2014.2')
PROPERTIES = (
NAME, IMAGE_ID, IS_PUBLIC, MIN_DISK, MIN_RAM, PROTECTED,
DISK_FORMAT, CONTAINER_FORMAT, LOCATION
) = (
'name', 'id', 'is_public', 'min_disk', 'min_ram', 'protected',
'disk_format', 'container_format', 'location'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name for the image. The name of an image is not '
'unique to a Image Service node.')
),
IMAGE_ID: properties.Schema(
properties.Schema.STRING,
_('The image ID. Glance will generate a UUID if not specified.')
),
IS_PUBLIC: properties.Schema(
properties.Schema.BOOLEAN,
_('Scope of image accessibility. Public or private. '
'Default value is False means private.'),
default=False,
),
MIN_DISK: properties.Schema(
properties.Schema.INTEGER,
_('Amount of disk space (in GB) required to boot image. '
'Default value is 0 if not specified '
'and means no limit on the disk size.'),
constraints=[
constraints.Range(min=0),
]
),
MIN_RAM: properties.Schema(
properties.Schema.INTEGER,
_('Amount of ram (in MB) required to boot image. Default value '
'is 0 if not specified and means no limit on the ram size.'),
constraints=[
constraints.Range(min=0),
]
),
PROTECTED: properties.Schema(
properties.Schema.BOOLEAN,
_('Whether the image can be deleted. If the value is True, '
'the image is protected and cannot be deleted.')
),
DISK_FORMAT: properties.Schema(
properties.Schema.STRING,
_('Disk format of image.'),
required=True,
constraints=[
constraints.AllowedValues(['ami', 'ari', 'aki',
'vhd', 'vmdk', 'raw',
'qcow2', 'vdi', 'iso'])
]
),
CONTAINER_FORMAT: properties.Schema(
properties.Schema.STRING,
_('Container format of image.'),
required=True,
constraints=[
constraints.AllowedValues(['ami', 'ari', 'aki',
'bare', 'ova', 'ovf'])
]
),
LOCATION: properties.Schema(
properties.Schema.STRING,
_('URL where the data for this image already resides. For '
'example, if the image data is stored in swift, you could '
'specify "swift://example.com/container/obj".'),
required=True,
),
}
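    # Illustrative HOT template snippet (not from this module; URL is a
    # placeholder) exercising the required properties declared above:
    #
    #   my_image:
    #     type: OS::Glance::Image
    #     properties:
    #       name: cirros
    #       disk_format: qcow2
    #       container_format: bare
    #       location: http://example.com/cirros.qcow2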
default_client_name = 'glance'
entity = 'images'
def handle_create(self):
args = dict((k, v) for k, v in self.properties.items()
if v is not None)
image_id = self.client().images.create(**args).id
self.resource_id_set(image_id)
return image_id
def check_create_complete(self, image_id):
image = self.client().images.get(image_id)
return image.status == 'active'
def _show_resource(self):
if self.glance().version == 1.0:
return super(GlanceImage, self)._show_resource()
else:
image = self.glance().images.get(self.resource_id)
return dict(image)
def resource_mapping():
return {
'OS::Glance::Image': GlanceImage
}
| apache-2.0 | -143,608,365,873,558,080 | 34.367188 | 78 | 0.559973 | false |
mogoweb/webkit_for_android5.1 | webkit/Tools/Scripts/webkitpy/common/system/stack_utils.py | 215 | 2734 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Simple routines for logging, obtaining thread stack information."""
import sys
import traceback
def log_thread_state(logger, name, thread_id, msg=''):
"""Log information about the given thread state."""
stack = _find_thread_stack(thread_id)
assert(stack is not None)
logger("")
logger("%s (tid %d) %s" % (name, thread_id, msg))
_log_stack(logger, stack)
logger("")
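# Illustrative usage (names are placeholders): dump the calling thread's stack
# through the standard logging module:
#   import logging, threading
#   log_thread_state(logging.getLogger(__name__).info, "main thread",
#                    threading.current_thread().ident)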
def _find_thread_stack(thread_id):
"""Returns a stack object that can be used to dump a stack trace for
the given thread id (or None if the id is not found)."""
for tid, stack in sys._current_frames().items():
if tid == thread_id:
return stack
return None
def _log_stack(logger, stack):
"""Log a stack trace to the logger callback."""
for filename, lineno, name, line in traceback.extract_stack(stack):
logger('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
logger(' %s' % line.strip())
def log_traceback(logger, tb):
stack = traceback.extract_tb(tb)
for frame_str in traceback.format_list(stack):
for line in frame_str.split('\n'):
if line:
logger(" %s" % line)
| apache-2.0 | 4,170,367,676,445,243,400 | 39.80597 | 72 | 0.707754 | false |
arbrandes/edx-platform | openedx/core/djangoapps/content_libraries/tests/test_content_libraries.py | 3 | 41306 | """
Tests for Blockstore-based Content Libraries
"""
from uuid import UUID
from unittest.mock import patch
import ddt
from django.conf import settings
from django.contrib.auth.models import Group
from django.test.client import Client
from django.test.utils import override_settings
from organizations.models import Organization
from rest_framework.test import APITestCase
from openedx.core.djangoapps.content_libraries.libraries_index import LibraryBlockIndexer, ContentLibraryIndexer
from openedx.core.djangoapps.content_libraries.tests.base import (
ContentLibrariesRestApiTest,
elasticsearch_test,
URL_BLOCK_METADATA_URL,
URL_BLOCK_RENDER_VIEW,
URL_BLOCK_GET_HANDLER_URL,
URL_BLOCK_XBLOCK_HANDLER,
)
from openedx.core.djangoapps.content_libraries.constants import VIDEO, COMPLEX, PROBLEM, CC_4_BY, ALL_RIGHTS_RESERVED
from common.djangoapps.student.tests.factories import UserFactory
@ddt.ddt
@elasticsearch_test
class ContentLibrariesTest(ContentLibrariesRestApiTest):
"""
General tests for Blockstore-based Content Libraries
These tests use the REST API, which in turn relies on the Python API.
Some tests may use the python API directly if necessary to provide
coverage of any code paths not accessible via the REST API.
In general, these tests should
(1) Use public APIs only - don't directly create data using other methods,
which results in a less realistic test and ties the test suite too
closely to specific implementation details.
(Exception: users can be provisioned using a user factory)
(2) Assert that fields are present in responses, but don't assert that the
entire response has some specific shape. That way, things like adding
new fields to an API response, which are backwards compatible, won't
break any tests, but backwards-incompatible API changes will.
WARNING: every test should have a unique library slug, because even though
the django/mysql database gets reset for each test case, the lookup between
library slug and bundle UUID does not because it's assumed to be immutable
and cached forever.
"""
def setUp(self):
super().setUp()
if settings.ENABLE_ELASTICSEARCH_FOR_TESTS:
ContentLibraryIndexer.remove_all_items()
LibraryBlockIndexer.remove_all_items()
def test_library_crud(self):
"""
Test Create, Read, Update, and Delete of a Content Library
"""
# Create:
lib = self._create_library(
slug="lib-crud", title="A Test Library", description="Just Testing", license_type=CC_4_BY,
)
expected_data = {
"id": "lib:CL-TEST:lib-crud",
"org": "CL-TEST",
"slug": "lib-crud",
"title": "A Test Library",
"description": "Just Testing",
"version": 0,
"type": COMPLEX,
"license": CC_4_BY,
"has_unpublished_changes": False,
"has_unpublished_deletes": False,
}
self.assertDictContainsEntries(lib, expected_data)
# Check that bundle_uuid looks like a valid UUID
UUID(lib["bundle_uuid"]) # will raise an exception if not valid
# Read:
lib2 = self._get_library(lib["id"])
self.assertDictContainsEntries(lib2, expected_data)
# Update:
lib3 = self._update_library(lib["id"], title="New Title")
expected_data["title"] = "New Title"
self.assertDictContainsEntries(lib3, expected_data)
# Delete:
self._delete_library(lib["id"])
# And confirm it is deleted:
self._get_library(lib["id"], expect_response=404)
self._delete_library(lib["id"], expect_response=404)
@ddt.data(VIDEO, PROBLEM, COMPLEX)
def test_library_alternative_type(self, target_type):
"""
Create a library with a specific type
"""
lib = self._create_library(
slug="some-slug", title="Video Library", description="Test Video Library", library_type=target_type,
)
expected_data = {
"id": "lib:CL-TEST:some-slug",
"org": "CL-TEST",
"slug": "some-slug",
"title": "Video Library",
"type": target_type,
"description": "Test Video Library",
"version": 0,
"has_unpublished_changes": False,
"has_unpublished_deletes": False,
"license": ALL_RIGHTS_RESERVED,
}
self.assertDictContainsEntries(lib, expected_data)
# Need to use a different slug each time here. Seems to be a race condition on test cleanup that will break things
# otherwise.
@ddt.data(
('to-video-fail', COMPLEX, VIDEO, (("problem", "problemA"),), 400),
('to-video-empty', COMPLEX, VIDEO, tuple(), 200),
('to-problem', COMPLEX, PROBLEM, (("problem", "problemB"),), 200),
('to-problem-fail', COMPLEX, PROBLEM, (("video", "videoA"),), 400),
('to-problem-empty', COMPLEX, PROBLEM, tuple(), 200),
('to-complex-from-video', VIDEO, COMPLEX, (("video", "videoB"),), 200),
('to-complex-from-problem', PROBLEM, COMPLEX, (("problem", "problemC"),), 200),
('to-complex-from-problem-empty', PROBLEM, COMPLEX, tuple(), 200),
('to-problem-from-video-empty', PROBLEM, VIDEO, tuple(), 200),
)
@ddt.unpack
def test_library_update_type_conversion(self, slug, start_type, target_type, xblock_specs, expect_response):
"""
Test conversion of one library type to another. Restricts options based on type/block matching.
"""
lib = self._create_library(
slug=slug, title="A Test Library", description="Just Testing", library_type=start_type,
)
assert lib['type'] == start_type
for block_type, block_slug in xblock_specs:
self._add_block_to_library(lib['id'], block_type, block_slug)
self._commit_library_changes(lib['id'])
result = self._update_library(lib['id'], type=target_type, expect_response=expect_response)
if expect_response == 200:
assert result['type'] == target_type
assert 'type' in result
else:
lib = self._get_library(lib['id'])
assert lib['type'] == start_type
def test_no_convert_on_unpublished(self):
"""
Verify that you can't change a library's type, even if it would normally be valid,
when there are unpublished changes. This is so that a reversion of blocks won't cause an inconsistency.
"""
lib = self._create_library(
slug='resolute', title="A complex library", description="Unconvertable", library_type=COMPLEX,
)
self._add_block_to_library(lib['id'], "video", 'vid-block')
result = self._update_library(lib['id'], type=VIDEO, expect_response=400)
assert 'type' in result
def test_no_convert_on_pending_deletes(self):
"""
Verify that you can't change a library's type, even if it would normally be valid,
when there are unpublished changes. This is so that a reversion of blocks won't cause an inconsistency.
"""
lib = self._create_library(
slug='still-alive', title="A complex library", description="Unconvertable", library_type=COMPLEX,
)
block = self._add_block_to_library(lib['id'], "video", 'vid-block')
self._commit_library_changes(lib['id'])
self._delete_library_block(block['id'])
result = self._update_library(lib['id'], type=VIDEO, expect_response=400)
assert 'type' in result
def test_library_validation(self):
"""
You can't create a library with the same slug as an existing library,
or an invalid slug.
"""
self._create_library(slug="some-slug", title="Existing Library")
self._create_library(slug="some-slug", title="Duplicate Library", expect_response=400)
self._create_library(slug="Invalid Slug!", title="Library with Bad Slug", expect_response=400)
@ddt.data(True, False)
@patch("openedx.core.djangoapps.content_libraries.views.LibraryApiPagination.page_size", new=2)
def test_list_library(self, is_indexing_enabled):
"""
Test the /libraries API and its pagination
"""
with override_settings(FEATURES={**settings.FEATURES, 'ENABLE_CONTENT_LIBRARY_INDEX': is_indexing_enabled}):
lib1 = self._create_library(slug="some-slug-1", title="Existing Library")
lib2 = self._create_library(slug="some-slug-2", title="Existing Library")
if not is_indexing_enabled:
lib1['num_blocks'] = lib2['num_blocks'] = None
lib1['last_published'] = lib2['last_published'] = None
lib1['has_unpublished_changes'] = lib2['has_unpublished_changes'] = None
lib1['has_unpublished_deletes'] = lib2['has_unpublished_deletes'] = None
result = self._list_libraries()
assert len(result) == 2
assert lib1 in result
assert lib2 in result
result = self._list_libraries({'pagination': 'true'})
assert len(result['results']) == 2
assert result['next'] is None
# Create another library which causes number of libraries to exceed the page size
self._create_library(slug="some-slug-3", title="Existing Library")
# Verify that if `pagination` param isn't sent, API still honors the max page size.
# This is for maintaining compatibility with older non pagination-aware clients.
result = self._list_libraries()
assert len(result) == 2
# Pagination enabled:
# Verify total elements and valid 'next' in page 1
result = self._list_libraries({'pagination': 'true'})
assert len(result['results']) == 2
assert 'page=2' in result['next']
assert 'pagination=true' in result['next']
# Verify total elements and null 'next' in page 2
result = self._list_libraries({'pagination': 'true', 'page': '2'})
assert len(result['results']) == 1
assert result['next'] is None
@ddt.data(True, False)
def test_library_filters(self, is_indexing_enabled):
"""
Test the filters in the list libraries API
"""
suffix = str(is_indexing_enabled)
with override_settings(FEATURES={**settings.FEATURES, 'ENABLE_CONTENT_LIBRARY_INDEX': is_indexing_enabled}):
self._create_library(
slug=f"test-lib-filter-{suffix}-1", title="Fob", description=f"Bar-{suffix}", library_type=VIDEO,
)
self._create_library(
slug=f"test-lib-filter-{suffix}-2", title=f"Library-Title-{suffix}-2", description=f"Bar-{suffix}-2",
)
self._create_library(
slug=f"l3{suffix}", title=f"Library-Title-{suffix}-3", description="Description", library_type=VIDEO,
)
Organization.objects.get_or_create(
short_name=f"org-test-{suffix}",
defaults={"name": "Content Libraries Tachyon Exploration & Survey Team"},
)
self._create_library(
slug=f"l4-{suffix}", title=f"Library-Title-{suffix}-4",
description="Library-Description", org=f'org-test-{suffix}',
library_type=VIDEO,
)
self._create_library(
slug="l5", title=f"Library-Title-{suffix}-5", description="Library-Description",
org=f'org-test-{suffix}',
)
assert len(self._list_libraries()) == 5
assert len(self._list_libraries({'org': f'org-test-{suffix}'})) == 2
assert len(self._list_libraries({'text_search': f'test-lib-filter-{suffix}'})) == 2
assert len(self._list_libraries({'text_search': f'test-lib-filter-{suffix}', 'type': VIDEO})) == 1
assert len(self._list_libraries({'text_search': f'library-title-{suffix}'})) == 4
assert len(self._list_libraries({'text_search': f'library-title-{suffix}', 'type': VIDEO})) == 2
assert len(self._list_libraries({'text_search': f'bar-{suffix}'})) == 2
assert len(self._list_libraries({'text_search': f'org-test-{suffix}'})) == 2
assert len(self._list_libraries({'org': f'org-test-{suffix}',
'text_search': f'library-title-{suffix}-4'})) == 1
assert len(self._list_libraries({'type': VIDEO})) == 3
# General Content Library XBlock tests:
def test_library_blocks(self):
"""
Test the happy path of creating and working with XBlocks in a content
library.
"""
lib = self._create_library(slug="testlib1", title="A Test Library", description="Testing XBlocks")
lib_id = lib["id"]
assert lib['has_unpublished_changes'] is False
# A library starts out empty:
assert self._get_library_blocks(lib_id) == []
# Add a 'problem' XBlock to the library:
block_data = self._add_block_to_library(lib_id, "problem", "problem1")
self.assertDictContainsEntries(block_data, {
"id": "lb:CL-TEST:testlib1:problem:problem1",
"display_name": "Blank Advanced Problem",
"block_type": "problem",
"has_unpublished_changes": True,
})
block_id = block_data["id"]
# Confirm that the result contains a definition key, but don't check its value,
# which for the purposes of these tests is an implementation detail.
assert 'def_key' in block_data
# now the library should contain one block and have unpublished changes:
assert self._get_library_blocks(lib_id) == [block_data]
assert self._get_library(lib_id)['has_unpublished_changes'] is True
# Publish the changes:
self._commit_library_changes(lib_id)
assert self._get_library(lib_id)['has_unpublished_changes'] is False
# And now the block information should also show that block has no unpublished changes:
block_data["has_unpublished_changes"] = False
self.assertDictContainsEntries(self._get_library_block(block_id), block_data)
assert self._get_library_blocks(lib_id) == [block_data]
# Now update the block's OLX:
orig_olx = self._get_library_block_olx(block_id)
assert '<problem' in orig_olx
new_olx = """
<problem display_name="New Multi Choice Question" max_attempts="5">
<multiplechoiceresponse>
<p>This is a normal capa problem with unicode 🔥. It has "maximum attempts" set to **5**.</p>
<label>Blockstore is designed to store.</label>
<choicegroup type="MultipleChoice">
<choice correct="false">XBlock metadata only</choice>
<choice correct="true">XBlock data/metadata and associated static asset files</choice>
<choice correct="false">Static asset files for XBlocks and courseware</choice>
<choice correct="false">XModule metadata only</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""".strip()
self._set_library_block_olx(block_id, new_olx)
# now reading it back, we should get that exact OLX (no change to whitespace etc.):
assert self._get_library_block_olx(block_id) == new_olx
# And the display name and "unpublished changes" status of the block should be updated:
self.assertDictContainsEntries(self._get_library_block(block_id), {
"display_name": "New Multi Choice Question",
"has_unpublished_changes": True,
})
# Now view the XBlock's student_view (including draft changes):
fragment = self._render_block_view(block_id, "student_view")
assert 'resources' in fragment
assert 'Blockstore is designed to store.' in fragment['content']
# Also call a handler to make sure that's working:
handler_url = self._get_block_handler_url(block_id, "xmodule_handler") + "problem_get"
problem_get_response = self.client.get(handler_url)
assert problem_get_response.status_code == 200
assert 'You have used 0 of 5 attempts' in problem_get_response.content.decode('utf-8')
# Now delete the block:
assert self._get_library(lib_id)['has_unpublished_deletes'] is False
self._delete_library_block(block_id)
# Confirm it's deleted:
self._render_block_view(block_id, "student_view", expect_response=404)
self._get_library_block(block_id, expect_response=404)
assert self._get_library(lib_id)['has_unpublished_deletes'] is True
# Now revert all the changes back until the last publish:
self._revert_library_changes(lib_id)
assert self._get_library(lib_id)['has_unpublished_deletes'] is False
assert self._get_library_block_olx(block_id) == orig_olx
# fin
@ddt.data(True, False)
@patch("openedx.core.djangoapps.content_libraries.views.LibraryApiPagination.page_size", new=2)
def test_list_library_blocks(self, is_indexing_enabled):
"""
Test the /libraries/{lib_key_str}/blocks API and its pagination
"""
with override_settings(FEATURES={**settings.FEATURES, 'ENABLE_CONTENT_LIBRARY_INDEX': is_indexing_enabled}):
lib = self._create_library(slug="list_blocks-slug" + str(is_indexing_enabled), title="Library 1")
block1 = self._add_block_to_library(lib["id"], "problem", "problem1")
block2 = self._add_block_to_library(lib["id"], "unit", "unit1")
self._add_block_to_library(lib["id"], "problem", "problem2", parent_block=block2["id"])
result = self._get_library_blocks(lib["id"])
assert len(result) == 2
assert block1 in result
result = self._get_library_blocks(lib["id"], {'pagination': 'true'})
assert len(result['results']) == 2
assert result['next'] is None
self._add_block_to_library(lib["id"], "problem", "problem3")
# Test pagination
result = self._get_library_blocks(lib["id"])
assert len(result) == 3
result = self._get_library_blocks(lib["id"], {'pagination': 'true'})
assert len(result['results']) == 2
assert 'page=2' in result['next']
assert 'pagination=true' in result['next']
result = self._get_library_blocks(lib["id"], {'pagination': 'true', 'page': '2'})
assert len(result['results']) == 1
assert result['next'] is None
@ddt.data(True, False)
def test_library_blocks_filters(self, is_indexing_enabled):
"""
Test the filters in the list libraries API
"""
with override_settings(FEATURES={**settings.FEATURES, 'ENABLE_CONTENT_LIBRARY_INDEX': is_indexing_enabled}):
lib = self._create_library(slug="test-lib-blocks" + str(is_indexing_enabled), title="Title")
block1 = self._add_block_to_library(lib["id"], "problem", "foo-bar")
self._add_block_to_library(lib["id"], "video", "vid-baz")
self._add_block_to_library(lib["id"], "html", "html-baz")
self._add_block_to_library(lib["id"], "problem", "foo-baz")
self._add_block_to_library(lib["id"], "problem", "bar-baz")
self._set_library_block_olx(block1["id"], "<problem display_name=\"DisplayName\"></problem>")
assert len(self._get_library_blocks(lib['id'])) == 5
assert len(self._get_library_blocks(lib['id'], {'text_search': 'Foo'})) == 2
assert len(self._get_library_blocks(lib['id'], {'text_search': 'Display'})) == 1
assert len(self._get_library_blocks(lib['id'], {'text_search': 'Video'})) == 1
assert len(self._get_library_blocks(lib['id'], {'text_search': 'Foo', 'block_type': 'video'})) == 0
assert len(self._get_library_blocks(lib['id'], {'text_search': 'Baz', 'block_type': 'video'})) == 1
assert len(self._get_library_blocks(lib['id'], {'text_search': 'Baz', 'block_type': ['video', 'html']})) ==\
2
assert len(self._get_library_blocks(lib['id'], {'block_type': 'video'})) == 1
assert len(self._get_library_blocks(lib['id'], {'block_type': 'problem'})) == 3
assert len(self._get_library_blocks(lib['id'], {'block_type': 'squirrel'})) == 0
@ddt.data(
('video-problem', VIDEO, 'problem', 400),
('video-video', VIDEO, 'video', 200),
('problem-problem', PROBLEM, 'problem', 200),
('problem-video', PROBLEM, 'video', 400),
('complex-video', COMPLEX, 'video', 200),
('complex-problem', COMPLEX, 'problem', 200),
)
@ddt.unpack
def test_library_blocks_type_constrained(self, slug, library_type, block_type, expect_response):
"""
Test that type-constrained libraries enforce their constraint when adding an XBlock.
"""
lib = self._create_library(
slug=slug, title="A Test Library", description="Testing XBlocks", library_type=library_type,
)
lib_id = lib["id"]
# Add a 'problem' XBlock to the library:
self._add_block_to_library(lib_id, block_type, 'test-block', expect_response=expect_response)
def test_library_blocks_with_hierarchy(self):
"""
Test library blocks with children
"""
lib = self._create_library(slug="hierarchy_test_lib", title="A Test Library")
lib_id = lib["id"]
# Add a 'unit' XBlock to the library:
unit_block = self._add_block_to_library(lib_id, "unit", "unit1")
# Add an HTML child block:
child1 = self._add_block_to_library(lib_id, "html", "html1", parent_block=unit_block["id"])
self._set_library_block_olx(child1["id"], "<html>Hello world</html>")
# Add a problem child block:
child2 = self._add_block_to_library(lib_id, "problem", "problem1", parent_block=unit_block["id"])
self._set_library_block_olx(child2["id"], """
<problem><multiplechoiceresponse>
<p>What is an even number?</p>
<choicegroup type="MultipleChoice">
<choice correct="false">3</choice>
<choice correct="true">2</choice>
</choicegroup>
</multiplechoiceresponse></problem>
""")
# Check the resulting OLX of the unit:
assert self._get_library_block_olx(unit_block['id']) ==\
'<unit xblock-family="xblock.v1">\n <xblock-include definition="html/html1"/>\n' \
' <xblock-include definition="problem/problem1"/>\n</unit>\n'
# The unit can see and render its children:
fragment = self._render_block_view(unit_block["id"], "student_view")
assert 'Hello world' in fragment['content']
assert 'What is an even number?' in fragment['content']
# We cannot add a duplicate ID to the library, either at the top level or as a child:
self._add_block_to_library(lib_id, "problem", "problem1", expect_response=400)
self._add_block_to_library(lib_id, "problem", "problem1", parent_block=unit_block["id"], expect_response=400)
# Test that permissions are enforced for content libraries
def test_library_permissions(self): # pylint: disable=too-many-statements
"""
Test that permissions are enforced for content libraries, and that
permissions can be read and manipulated using the REST API (which in
turn tests the python API).
This is a single giant test case, because that optimizes for the fastest
test run time, even though it can make debugging failures harder.
"""
# Create a few users to use for all of these tests:
admin = UserFactory.create(username="Admin", email="[email protected]")
author = UserFactory.create(username="Author", email="[email protected]")
reader = UserFactory.create(username="Reader", email="[email protected]")
group = Group.objects.create(name="group1")
author_group_member = UserFactory.create(username="GroupMember", email="[email protected]")
author_group_member.groups.add(group)
random_user = UserFactory.create(username="Random", email="[email protected]")
never_added = UserFactory.create(username="Never", email="[email protected]")
# Library CRUD #########################################################
# Create a library, owned by "Admin"
with self.as_user(admin):
lib = self._create_library(slug="permtest", title="Permission Test Library", description="Testing")
lib_id = lib["id"]
# By default, "public learning" and public read access are disallowed.
assert lib['allow_public_learning'] is False
assert lib['allow_public_read'] is False
# By default, the creator of a new library is the only admin
data = self._get_library_team(lib_id)
assert len(data) == 1
self.assertDictContainsEntries(data[0], {
"username": admin.username, "group_name": None, "access_level": "admin",
})
# Add the other users to the content library:
self._set_user_access_level(lib_id, author.username, access_level="author")
# Delete it, add it again.
self._remove_user_access(lib_id, author.username)
self._set_user_access_level(lib_id, author.username, access_level="author")
# Add one of them via the email-based creation endpoint.
self._add_user_by_email(lib_id, reader.email, access_level="read")
self._set_group_access_level(lib_id, group.name, access_level="author")
team_response = self._get_library_team(lib_id)
assert len(team_response) == 4
# We'll use this one later.
reader_grant = {"username": reader.username, "group_name": None, "access_level": "read"}
# The response should also always be sorted in a specific order (by username and group name):
expected_response = [
{"username": None, "group_name": "group1", "access_level": "author"},
{"username": admin.username, "group_name": None, "access_level": "admin"},
{"username": author.username, "group_name": None, "access_level": "author"},
reader_grant,
]
for entry, expected in zip(team_response, expected_response):
self.assertDictContainsEntries(entry, expected)
# A random user cannot get the library nor its team:
with self.as_user(random_user):
self._get_library(lib_id, expect_response=403)
self._get_library_team(lib_id, expect_response=403)
self._add_user_by_email(lib_id, never_added.email, access_level="read", expect_response=403)
# But every authorized user can:
for user in [admin, author, author_group_member]:
with self.as_user(user):
self._get_library(lib_id)
data = self._get_library_team(lib_id)
assert data == team_response
data = self._get_user_access_level(lib_id, reader.username)
assert data == {**reader_grant, 'username': 'Reader', 'email': '[email protected]'}
# A user with only read permission can get data about the library but not the team:
with self.as_user(reader):
self._get_library(lib_id)
self._get_library_team(lib_id, expect_response=403)
self._get_user_access_level(lib_id, author.username, expect_response=403)
self._add_user_by_email(lib_id, never_added.email, access_level="read", expect_response=403)
# Users without admin access cannot delete the library nor change its team:
for user in [author, reader, author_group_member, random_user]:
with self.as_user(user):
self._delete_library(lib_id, expect_response=403)
self._set_user_access_level(lib_id, author.username, access_level="admin", expect_response=403)
self._set_user_access_level(lib_id, admin.username, access_level=None, expect_response=403)
self._set_user_access_level(lib_id, random_user.username, access_level="read", expect_response=403)
self._remove_user_access(lib_id, admin.username, expect_response=403)
self._add_user_by_email(lib_id, never_added.email, access_level="read", expect_response=403)
# Users with author access (or higher) can edit the library's properties:
with self.as_user(author):
self._update_library(lib_id, description="Revised description")
with self.as_user(author_group_member):
self._update_library(lib_id, title="New Library Title")
# But other users cannot:
with self.as_user(reader):
self._update_library(lib_id, description="Prohibited description", expect_response=403)
with self.as_user(random_user):
self._update_library(lib_id, title="I can't set this title", expect_response=403)
# Verify the permitted changes were made:
with self.as_user(admin):
data = self._get_library(lib_id)
assert data['description'] == 'Revised description'
assert data['title'] == 'New Library Title'
# Library XBlock editing ###############################################
# users with read permission or less cannot add blocks:
for user in [reader, random_user]:
with self.as_user(user):
self._add_block_to_library(lib_id, "problem", "problem1", expect_response=403)
# But authors and admins can:
with self.as_user(admin):
self._add_block_to_library(lib_id, "problem", "problem1")
with self.as_user(author):
self._add_block_to_library(lib_id, "problem", "problem2")
with self.as_user(author_group_member):
block3_data = self._add_block_to_library(lib_id, "problem", "problem3")
block3_key = block3_data["id"]
# At this point, the library contains 3 draft problem XBlocks.
# A random user cannot read OLX nor assets (this library has allow_public_read False):
with self.as_user(random_user):
self._get_library_block_olx(block3_key, expect_response=403)
self._get_library_block_assets(block3_key, expect_response=403)
self._get_library_block_asset(block3_key, file_name="whatever.png", expect_response=403)
# But if we grant allow_public_read, then they can:
with self.as_user(admin):
self._update_library(lib_id, allow_public_read=True)
self._set_library_block_asset(block3_key, "whatever.png", b"data")
with self.as_user(random_user):
self._get_library_block_olx(block3_key)
self._get_library_block_assets(block3_key)
self._get_library_block_asset(block3_key, file_name="whatever.png")
        # Users without authoring permission cannot edit or delete XBlocks, even though allow_public_read is enabled:
for user in [reader, random_user]:
with self.as_user(user):
self._set_library_block_olx(block3_key, "<problem/>", expect_response=403)
self._set_library_block_asset(block3_key, "test.txt", b"data", expect_response=403)
self._delete_library_block(block3_key, expect_response=403)
self._commit_library_changes(lib_id, expect_response=403)
self._revert_library_changes(lib_id, expect_response=403)
# But users with author permission can:
with self.as_user(author_group_member):
olx = self._get_library_block_olx(block3_key)
self._set_library_block_olx(block3_key, olx)
self._get_library_block_assets(block3_key)
self._set_library_block_asset(block3_key, "test.txt", b"data")
self._get_library_block_asset(block3_key, file_name="test.txt")
self._delete_library_block(block3_key)
self._commit_library_changes(lib_id)
self._revert_library_changes(lib_id) # This is a no-op after the commit, but should still have 200 response
def test_no_lockout(self):
"""
Test that administrators cannot be removed if they are the only administrator granted access.
"""
admin = UserFactory.create(username="Admin", email="[email protected]")
successor = UserFactory.create(username="Successor", email="[email protected]")
with self.as_user(admin):
lib = self._create_library(slug="permtest", title="Permission Test Library", description="Testing")
# Fail to downgrade permissions.
self._remove_user_access(lib_key=lib['id'], username=admin.username, expect_response=400)
# Promote another user.
self._set_user_access_level(
lib_key=lib['id'], username=successor.username, access_level="admin",
)
self._remove_user_access(lib_key=lib['id'], username=admin.username)
def test_library_blocks_with_links(self):
"""
Test that libraries can link to XBlocks in other content libraries
"""
# Create a problem bank:
bank_lib = self._create_library(slug="problem_bank", title="Problem Bank")
bank_lib_id = bank_lib["id"]
# Add problem1 to the problem bank:
p1 = self._add_block_to_library(bank_lib_id, "problem", "problem1")
self._set_library_block_olx(p1["id"], """
<problem><multiplechoiceresponse>
<p>What is an even number?</p>
<choicegroup type="MultipleChoice">
<choice correct="false">3</choice>
<choice correct="true">2</choice>
</choicegroup>
</multiplechoiceresponse></problem>
""")
# Commit the changes, creating version 1:
self._commit_library_changes(bank_lib_id)
# Now update problem 1 and create a new problem 2:
self._set_library_block_olx(p1["id"], """
<problem><multiplechoiceresponse>
<p>What is an odd number?</p>
<choicegroup type="MultipleChoice">
<choice correct="true">3</choice>
<choice correct="false">2</choice>
</choicegroup>
</multiplechoiceresponse></problem>
""")
p2 = self._add_block_to_library(bank_lib_id, "problem", "problem2")
self._set_library_block_olx(p2["id"], """
<problem><multiplechoiceresponse>
<p>What holds this XBlock?</p>
<choicegroup type="MultipleChoice">
<choice correct="false">A course</choice>
<choice correct="true">A problem bank</choice>
</choicegroup>
</multiplechoiceresponse></problem>
""")
# Commit the changes, creating version 2:
self._commit_library_changes(bank_lib_id)
# At this point, bank_lib contains two problems and has two versions.
        # In version 1, problem1 is "What is an even number", and in version 2 it's "What is an odd number".
# Problem2 exists only in version 2 and asks "What holds this XBlock?"
lib = self._create_library(slug="links_test_lib", title="Link Test Library")
lib_id = lib["id"]
# Link to the problem bank:
self._link_to_library(lib_id, "problem_bank", bank_lib_id)
self._link_to_library(lib_id, "problem_bank_v1", bank_lib_id, version=1)
# Add a 'unit' XBlock to the library:
unit_block = self._add_block_to_library(lib_id, "unit", "unit1")
self._set_library_block_olx(unit_block["id"], """
<unit>
<!-- version 2 link to "What is an odd number?" -->
<xblock-include source="problem_bank" definition="problem/problem1"/>
<!-- version 1 link to "What is an even number?" -->
<xblock-include source="problem_bank_v1" definition="problem/problem1" usage="p1v1" />
<!-- link to "What holds this XBlock?" -->
<xblock-include source="problem_bank" definition="problem/problem2"/>
</unit>
""")
# The unit can see and render its children:
fragment = self._render_block_view(unit_block["id"], "student_view")
assert 'What is an odd number?' in fragment['content']
assert 'What is an even number?' in fragment['content']
assert 'What holds this XBlock?' in fragment['content']
# Also check the API for retrieving links:
links_created = self._get_library_links(lib_id)
links_created.sort(key=lambda link: link["id"])
assert len(links_created) == 2
assert links_created[0]['id'] == 'problem_bank'
assert links_created[0]['bundle_uuid'] == bank_lib['bundle_uuid']
assert links_created[0]['version'] == 2
assert links_created[0]['latest_version'] == 2
assert links_created[0]['opaque_key'] == bank_lib_id
assert links_created[1]['id'] == 'problem_bank_v1'
assert links_created[1]['bundle_uuid'] == bank_lib['bundle_uuid']
assert links_created[1]['version'] == 1
assert links_created[1]['latest_version'] == 2
assert links_created[1]['opaque_key'] == bank_lib_id
def test_library_blocks_limit(self):
"""
        Test that libraries don't allow more than the specified number of blocks
"""
with self.settings(MAX_BLOCKS_PER_CONTENT_LIBRARY=1):
lib = self._create_library(slug="test_lib_limits", title="Limits Test Library", description="Testing XBlocks limits in a library") # lint-amnesty, pylint: disable=line-too-long
lib_id = lib["id"]
block_data = self._add_block_to_library(lib_id, "unit", "unit1")
# Second block should throw error
self._add_block_to_library(lib_id, "problem", "problem1", expect_response=400)
# Also check that limit applies to child blocks too
self._add_block_to_library(lib_id, "html", "html1", parent_block=block_data['id'], expect_response=400)
@ddt.data(
('complex-types', COMPLEX, False),
('video-types', VIDEO, True),
('problem-types', PROBLEM, True),
)
@ddt.unpack
def test_block_types(self, slug, library_type, constrained):
"""
Test that the permitted block types listing for a library change based on type.
"""
lib = self._create_library(slug=slug, title='Test Block Types', library_type=library_type)
types = self._get_library_block_types(lib['id'])
if constrained:
assert len(types) == 1
assert types[0]['block_type'] == library_type
else:
assert len(types) > 1
@ddt.ddt
class ContentLibraryXBlockValidationTest(APITestCase):
"""Tests only focused on service validation, no Blockstore needed."""
@ddt.data(
(URL_BLOCK_METADATA_URL, dict(block_key='totally_invalid_key')),
(URL_BLOCK_RENDER_VIEW, dict(block_key='totally_invalid_key', view_name='random')),
(URL_BLOCK_GET_HANDLER_URL, dict(block_key='totally_invalid_key', handler_name='random')),
)
@ddt.unpack
def test_invalid_key(self, endpoint, endpoint_parameters):
"""Test all xblock related endpoints, when the key is invalid, return 404."""
response = self.client.get(
endpoint.format(**endpoint_parameters),
)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.json(), {'detail': "Invalid XBlock key"})
def test_xblock_handler_invalid_key(self):
"""This endpoint is tested separately from the previous ones as it's not a DRF endpoint."""
client = Client()
response = client.get(URL_BLOCK_XBLOCK_HANDLER.format(**dict(
block_key='totally_invalid_key',
handler_name='random',
user_id='random',
secure_token='random',
)))
self.assertEqual(response.status_code, 404)
def test_not_found_fails_correctly(self):
"""Test fails with 404 when xblock key is valid but not found."""
valid_not_found_key = 'lb:valid:key:video:1'
response = self.client.get(URL_BLOCK_METADATA_URL.format(block_key=valid_not_found_key))
self.assertEqual(response.status_code, 404)
self.assertEqual(response.json(), {
'detail': f"XBlock {valid_not_found_key} does not exist, or you don't have permission to view it.",
})
| agpl-3.0 | 5,251,416,424,670,562,000 | 49.616422 | 189 | 0.607075 | false |
litlpoet/rl-library | system/common/libs/mwclient/simplejson/decoder.py | 4 | 9113 | """
Implementation of JSONDecoder
"""
import re
from simplejson.scanner import Scanner, pattern
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
import struct
import sys
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
lineno, colno = linecol(doc, pos)
if end is None:
return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
'true': True,
'false': False,
'null': None,
}
def JSONConstant(match, context, c=_CONSTANTS):
return c[match.group(0)], None
pattern('(-?Infinity|NaN|true|false|null)')(JSONConstant)
def JSONNumber(match, context):
match = JSONNumber.regex.match(match.string, *match.span())
integer, frac, exp = match.groups()
if frac or exp:
res = float(integer + (frac or '') + (exp or ''))
else:
res = int(integer)
return res, None
pattern(r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?')(JSONNumber)
STRINGCHUNK = re.compile(r'(.*?)(["\\])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def scanstring(s, end, encoding=None, _b=BACKSLASH, _m=STRINGCHUNK.match):
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
end = chunk.end()
content, terminator = chunk.groups()
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
if terminator == '"':
break
try:
esc = s[end]
except IndexError:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
if esc != 'u':
try:
m = _b[esc]
except KeyError:
raise ValueError(
errmsg("Invalid \\escape: %r" % (esc,), s, end))
end += 1
else:
esc = s[end + 1:end + 5]
try:
m = unichr(int(esc, 16))
if len(esc) != 4 or not esc.isalnum():
raise ValueError
except ValueError:
raise ValueError(errmsg("Invalid \\uXXXX escape", s, end))
end += 5
_append(m)
return u''.join(chunks), end
def JSONString(match, context):
encoding = getattr(context, 'encoding', None)
return scanstring(match.string, match.end(), encoding)
pattern(r'"')(JSONString)
WHITESPACE = re.compile(r'\s*', FLAGS)
def JSONObject(match, context, _w=WHITESPACE.match):
pairs = {}
s = match.string
end = _w(s, match.end()).end()
nextchar = s[end:end + 1]
# trivial empty object
if nextchar == '}':
return pairs, end + 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end))
end += 1
encoding = getattr(context, 'encoding', None)
iterscan = JSONScanner.iterscan
while True:
key, end = scanstring(s, end, encoding)
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end))
end = _w(s, end + 1).end()
try:
value, end = iterscan(s, idx=end, context=context).next()
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
pairs[key] = value
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == '}':
break
if nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end - 1))
object_hook = getattr(context, 'object_hook', None)
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
pattern(r'{')(JSONObject)
def JSONArray(match, context, _w=WHITESPACE.match):
values = []
s = match.string
end = _w(s, match.end()).end()
# look-ahead for trivial empty array
nextchar = s[end:end + 1]
if nextchar == ']':
return values, end + 1
iterscan = JSONScanner.iterscan
while True:
try:
value, end = iterscan(s, idx=end, context=context).next()
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
values.append(value)
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
if nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end))
end = _w(s, end).end()
return values, end
pattern(r'\[')(JSONArray)
ANYTHING = [
JSONObject,
JSONArray,
JSONString,
JSONConstant,
JSONNumber,
]
JSONScanner = Scanner(ANYTHING)
class JSONDecoder(object):
"""
Simple JSON <http://json.org> decoder
Performs the following translations in decoding:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
_scanner = Scanner(ANYTHING)
__all__ = ['__init__', 'decode', 'raw_decode']
def __init__(self, encoding=None, object_hook=None):
"""
``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
        Note that currently only encodings that are a superset of ASCII work;
strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result
of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
"""
self.encoding = encoding
self.object_hook = object_hook
def decode(self, s, _w=WHITESPACE.match):
"""
Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
def raw_decode(self, s, **kw):
"""
Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
kw.setdefault('context', self)
try:
obj, end = self._scanner.iterscan(s, **kw).next()
except StopIteration:
raise ValueError("No JSON object could be decoded")
return obj, end
__all__ = ['JSONDecoder']
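# Hedged usage sketch (added for illustration; not part of the original
# simplejson source). It exercises only the decode()/object_hook behaviour
# documented above and assumes Python 2, like the rest of this module.
def _decoder_usage_example():  # pragma: no cover - illustrative only
    decoder = JSONDecoder()
    assert decoder.decode('{"answer": 42}') == {u'answer': 42}
    # object_hook receives every decoded JSON object (dict); its return value
    # is used in place of the dict, e.g. for JSON-RPC class hinting.
    hinted = JSONDecoder(object_hook=lambda d: sorted(d.keys()))
    assert hinted.decode('{"b": 1, "a": 2}') == [u'a', u'b']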
| apache-2.0 | -4,882,981,806,272,009,000 | 31.380952 | 77 | 0.501701 | false |
pgmillon/ansible | lib/ansible/modules/cloud/amazon/aws_waf_rule.py | 16 | 13360 | #!/usr/bin/python
# Copyright (c) 2017 Will Thames
# Copyright (c) 2015 Mike Mochan
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: aws_waf_rule
short_description: create and delete WAF Rules
description:
- Read the AWS documentation for WAF
U(https://aws.amazon.com/documentation/waf/)
version_added: "2.5"
author:
- Mike Mochan (@mmochan)
- Will Thames (@willthames)
extends_documentation_fragment:
- aws
- ec2
options:
name:
description: Name of the Web Application Firewall rule
required: yes
metric_name:
description:
- A friendly name or description for the metrics for the rule
- The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace.
- You can't change metric_name after you create the rule
- Defaults to the same as name with disallowed characters removed
state:
description: whether the rule should be present or absent
choices:
- present
- absent
default: present
conditions:
description: >
list of conditions used in the rule. Each condition should
contain I(type): which is one of [C(byte), C(geo), C(ip), C(size), C(sql) or C(xss)]
I(negated): whether the condition should be negated, and C(condition),
the name of the existing condition. M(aws_waf_condition) can be used to
create new conditions
purge_conditions:
description:
- Whether or not to remove conditions that are not passed when updating `conditions`.
default: false
type: bool
waf_regional:
description: Whether to use waf_regional module. Defaults to false
default: false
required: no
type: bool
version_added: "2.9"
'''
EXAMPLES = '''
- name: create WAF rule
aws_waf_rule:
name: my_waf_rule
conditions:
- name: my_regex_condition
type: regex
negated: no
- name: my_geo_condition
type: geo
negated: no
- name: my_byte_condition
type: byte
negated: yes
- name: remove WAF rule
aws_waf_rule:
name: "my_waf_rule"
state: absent
'''
RETURN = '''
rule:
description: WAF rule contents
returned: always
type: complex
contains:
metric_name:
description: Metric name for the rule
returned: always
type: str
sample: ansibletest1234rule
name:
description: Friendly name for the rule
returned: always
type: str
sample: ansible-test-1234_rule
predicates:
description: List of conditions used in the rule
returned: always
type: complex
contains:
data_id:
description: ID of the condition
returned: always
type: str
sample: 8251acdb-526c-42a8-92bc-d3d13e584166
negated:
description: Whether the sense of the condition is negated
returned: always
type: bool
sample: false
type:
description: type of the condition
returned: always
type: str
sample: ByteMatch
rule_id:
description: ID of the WAF rule
returned: always
type: str
sample: 15de0cbc-9204-4e1f-90e6-69b2f415c261
'''
import re
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.aws.waf import run_func_with_change_token_backoff, list_rules_with_backoff, list_regional_rules_with_backoff, MATCH_LOOKUP
from ansible.module_utils.aws.waf import get_web_acl_with_backoff, list_web_acls_with_backoff, list_regional_web_acls_with_backoff
def get_rule_by_name(client, module, name):
rules = [d['RuleId'] for d in list_rules(client, module) if d['Name'] == name]
if rules:
return rules[0]
def get_rule(client, module, rule_id):
try:
return client.get_rule(RuleId=rule_id)['Rule']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not get WAF rule')
def list_rules(client, module):
if client.__class__.__name__ == 'WAF':
try:
return list_rules_with_backoff(client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not list WAF rules')
elif client.__class__.__name__ == 'WAFRegional':
try:
return list_regional_rules_with_backoff(client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not list WAF Regional rules')
def list_regional_rules(client, module):
try:
return list_regional_rules_with_backoff(client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not list WAF rules')
def find_and_update_rule(client, module, rule_id):
rule = get_rule(client, module, rule_id)
rule_id = rule['RuleId']
existing_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP)
desired_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP)
all_conditions = dict()
for condition_type in MATCH_LOOKUP:
method = 'list_' + MATCH_LOOKUP[condition_type]['method'] + 's'
all_conditions[condition_type] = dict()
try:
paginator = client.get_paginator(method)
func = paginator.paginate().build_full_result
except (KeyError, botocore.exceptions.OperationNotPageableError):
# list_geo_match_sets and list_regex_match_sets do not have a paginator
# and throw different exceptions
func = getattr(client, method)
try:
pred_results = func()[MATCH_LOOKUP[condition_type]['conditionset'] + 's']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not list %s conditions' % condition_type)
for pred in pred_results:
pred['DataId'] = pred[MATCH_LOOKUP[condition_type]['conditionset'] + 'Id']
all_conditions[condition_type][pred['Name']] = camel_dict_to_snake_dict(pred)
all_conditions[condition_type][pred['DataId']] = camel_dict_to_snake_dict(pred)
for condition in module.params['conditions']:
desired_conditions[condition['type']][condition['name']] = condition
reverse_condition_types = dict((v['type'], k) for (k, v) in MATCH_LOOKUP.items())
for condition in rule['Predicates']:
existing_conditions[reverse_condition_types[condition['Type']]][condition['DataId']] = camel_dict_to_snake_dict(condition)
insertions = list()
deletions = list()
for condition_type in desired_conditions:
for (condition_name, condition) in desired_conditions[condition_type].items():
if condition_name not in all_conditions[condition_type]:
module.fail_json(msg="Condition %s of type %s does not exist" % (condition_name, condition_type))
condition['data_id'] = all_conditions[condition_type][condition_name]['data_id']
if condition['data_id'] not in existing_conditions[condition_type]:
insertions.append(format_for_insertion(condition))
if module.params['purge_conditions']:
for condition_type in existing_conditions:
deletions.extend([format_for_deletion(condition) for condition in existing_conditions[condition_type].values()
if not all_conditions[condition_type][condition['data_id']]['name'] in desired_conditions[condition_type]])
changed = bool(insertions or deletions)
update = {
'RuleId': rule_id,
'Updates': insertions + deletions
}
if changed:
try:
run_func_with_change_token_backoff(client, module, update, client.update_rule, wait=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not update rule conditions')
return changed, get_rule(client, module, rule_id)
def format_for_insertion(condition):
return dict(Action='INSERT',
Predicate=dict(Negated=condition['negated'],
Type=MATCH_LOOKUP[condition['type']]['type'],
DataId=condition['data_id']))
def format_for_deletion(condition):
return dict(Action='DELETE',
Predicate=dict(Negated=condition['negated'],
Type=condition['type'],
DataId=condition['data_id']))
def remove_rule_conditions(client, module, rule_id):
conditions = get_rule(client, module, rule_id)['Predicates']
updates = [format_for_deletion(camel_dict_to_snake_dict(condition)) for condition in conditions]
try:
run_func_with_change_token_backoff(client, module, {'RuleId': rule_id, 'Updates': updates}, client.update_rule)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not remove rule conditions')
def ensure_rule_present(client, module):
name = module.params['name']
rule_id = get_rule_by_name(client, module, name)
params = dict()
if rule_id:
return find_and_update_rule(client, module, rule_id)
else:
params['Name'] = module.params['name']
metric_name = module.params['metric_name']
if not metric_name:
metric_name = re.sub(r'[^a-zA-Z0-9]', '', module.params['name'])
params['MetricName'] = metric_name
try:
new_rule = run_func_with_change_token_backoff(client, module, params, client.create_rule)['Rule']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not create rule')
return find_and_update_rule(client, module, new_rule['RuleId'])
def find_rule_in_web_acls(client, module, rule_id):
web_acls_in_use = []
try:
if client.__class__.__name__ == 'WAF':
all_web_acls = list_web_acls_with_backoff(client)
elif client.__class__.__name__ == 'WAFRegional':
all_web_acls = list_regional_web_acls_with_backoff(client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not list Web ACLs')
for web_acl in all_web_acls:
try:
web_acl_details = get_web_acl_with_backoff(client, web_acl['WebACLId'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not get Web ACL details')
if rule_id in [rule['RuleId'] for rule in web_acl_details['Rules']]:
web_acls_in_use.append(web_acl_details['Name'])
return web_acls_in_use
def ensure_rule_absent(client, module):
rule_id = get_rule_by_name(client, module, module.params['name'])
in_use_web_acls = find_rule_in_web_acls(client, module, rule_id)
if in_use_web_acls:
web_acl_names = ', '.join(in_use_web_acls)
module.fail_json(msg="Rule %s is in use by Web ACL(s) %s" %
(module.params['name'], web_acl_names))
if rule_id:
remove_rule_conditions(client, module, rule_id)
try:
return True, run_func_with_change_token_backoff(client, module, {'RuleId': rule_id}, client.delete_rule, wait=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Could not delete rule')
return False, {}
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
metric_name=dict(),
state=dict(default='present', choices=['present', 'absent']),
conditions=dict(type='list'),
purge_conditions=dict(type='bool', default=False),
waf_regional=dict(type='bool', default=False),
),
)
module = AnsibleAWSModule(argument_spec=argument_spec)
state = module.params.get('state')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
client = boto3_conn(module, conn_type='client', resource=resource, region=region, endpoint=ec2_url, **aws_connect_kwargs)
if state == 'present':
(changed, results) = ensure_rule_present(client, module)
else:
(changed, results) = ensure_rule_absent(client, module)
module.exit_json(changed=changed, rule=camel_dict_to_snake_dict(results))
if __name__ == '__main__':
main()
| gpl-3.0 | -3,794,529,515,301,552,000 | 38.178886 | 148 | 0.641317 | false |
hanicker/odoo | addons/crm/sales_team.py | 321 | 5053 | # -*- coding: utf-8 -*-
import calendar
from datetime import date
from dateutil import relativedelta
import json
from openerp import tools
from openerp.osv import fields, osv
class crm_case_section(osv.Model):
_inherit = 'crm.case.section'
_inherits = {'mail.alias': 'alias_id'}
def _get_opportunities_data(self, cr, uid, ids, field_name, arg, context=None):
""" Get opportunities-related data for salesteam kanban view
            monthly_open_leads: number of open leads during the last months
            monthly_planned_revenue: planned revenue of opportunities during the last months
"""
obj = self.pool.get('crm.lead')
res = dict.fromkeys(ids, False)
month_begin = date.today().replace(day=1)
date_begin = month_begin - relativedelta.relativedelta(months=self._period_number - 1)
date_end = month_begin.replace(day=calendar.monthrange(month_begin.year, month_begin.month)[1])
lead_pre_domain = [('create_date', '>=', date_begin.strftime(tools.DEFAULT_SERVER_DATE_FORMAT)),
('create_date', '<=', date_end.strftime(tools.DEFAULT_SERVER_DATE_FORMAT)),
('type', '=', 'lead')]
opp_pre_domain = [('date_deadline', '>=', date_begin.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)),
('date_deadline', '<=', date_end.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)),
('type', '=', 'opportunity')]
for id in ids:
res[id] = dict()
lead_domain = lead_pre_domain + [('section_id', '=', id)]
opp_domain = opp_pre_domain + [('section_id', '=', id)]
res[id]['monthly_open_leads'] = json.dumps(self.__get_bar_values(cr, uid, obj, lead_domain, ['create_date'], 'create_date_count', 'create_date', context=context))
res[id]['monthly_planned_revenue'] = json.dumps(self.__get_bar_values(cr, uid, obj, opp_domain, ['planned_revenue', 'date_deadline'], 'planned_revenue', 'date_deadline', context=context))
return res
_columns = {
'resource_calendar_id': fields.many2one('resource.calendar', "Working Time", help="Used to compute open days"),
'stage_ids': fields.many2many('crm.case.stage', 'section_stage_rel', 'section_id', 'stage_id', 'Stages'),
'use_leads': fields.boolean('Leads',
help="The first contact you get with a potential customer is a lead you qualify before converting it into a real business opportunity. Check this box to manage leads in this sales team."),
'use_opportunities': fields.boolean('Opportunities', help="Check this box to manage opportunities in this sales team."),
'monthly_open_leads': fields.function(_get_opportunities_data,
type="char", readonly=True, multi='_get_opportunities_data',
string='Open Leads per Month'),
'monthly_planned_revenue': fields.function(_get_opportunities_data,
type="char", readonly=True, multi='_get_opportunities_data',
string='Planned Revenue per Month'),
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True, help="The email address associated with this team. New emails received will automatically create new leads assigned to the team."),
}
def _auto_init(self, cr, context=None):
"""Installation hook to create aliases for all lead and avoid constraint errors."""
return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(crm_case_section, self)._auto_init,
'crm.lead', self._columns['alias_id'], 'name', alias_prefix='Lead+', alias_defaults={}, context=context)
def _get_stage_common(self, cr, uid, context):
ids = self.pool.get('crm.case.stage').search(cr, uid, [('case_default', '=', 1)], context=context)
return ids
_defaults = {
'stage_ids': _get_stage_common,
'use_leads': True,
'use_opportunities': True,
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
create_context = dict(context, alias_model_name='crm.lead', alias_parent_model_name=self._name)
section_id = super(crm_case_section, self).create(cr, uid, vals, context=create_context)
section = self.browse(cr, uid, section_id, context=context)
self.pool.get('mail.alias').write(cr, uid, [section.alias_id.id], {'alias_parent_thread_id': section_id, 'alias_defaults': {'section_id': section_id, 'type': 'lead'}}, context=context)
return section_id
def unlink(self, cr, uid, ids, context=None):
# Cascade-delete mail aliases as well, as they should not exist without the sales team.
mail_alias = self.pool.get('mail.alias')
alias_ids = [team.alias_id.id for team in self.browse(cr, uid, ids, context=context) if team.alias_id]
res = super(crm_case_section, self).unlink(cr, uid, ids, context=context)
mail_alias.unlink(cr, uid, alias_ids, context=context)
return res
| agpl-3.0 | -4,784,910,810,301,341,000 | 58.447059 | 226 | 0.639422 | false |
Hubert51/AutoGrading | learning/web_Haotian/venv/Lib/site-packages/werkzeug/wsgi.py | 17 | 49347 | # -*- coding: utf-8 -*-
"""
werkzeug.wsgi
~~~~~~~~~~~~~
This module implements WSGI related helpers.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import io
try:
import httplib
except ImportError:
from http import client as httplib
import mimetypes
import os
import posixpath
import re
import socket
from datetime import datetime
from functools import partial, update_wrapper
from itertools import chain
from time import mktime, time
from zlib import adler32
from werkzeug._compat import BytesIO, PY2, implements_iterator, iteritems, \
make_literal_wrapper, string_types, text_type, to_bytes, to_unicode, \
try_coerce_native, wsgi_get_bytes
from werkzeug._internal import _empty_stream, _encode_idna
from werkzeug.filesystem import get_filesystem_encoding
from werkzeug.http import http_date, is_resource_modified, \
is_hop_by_hop_header
from werkzeug.urls import uri_to_iri, url_join, url_parse, url_quote
from werkzeug.datastructures import EnvironHeaders
def responder(f):
"""Marks a function as responder. Decorate a function with it and it
will automatically call the return value as WSGI application.
Example::
@responder
def application(environ, start_response):
return Response('Hello World!')
"""
return update_wrapper(lambda *a: f(*a)(*a[-2:]), f)
def get_current_url(environ, root_only=False, strip_querystring=False,
host_only=False, trusted_hosts=None):
"""A handy helper function that recreates the full URL as IRI for the
current request or parts of it. Here's an example:
>>> from werkzeug.test import create_environ
>>> env = create_environ("/?param=foo", "http://localhost/script")
>>> get_current_url(env)
'http://localhost/script/?param=foo'
>>> get_current_url(env, root_only=True)
'http://localhost/script/'
>>> get_current_url(env, host_only=True)
'http://localhost/'
>>> get_current_url(env, strip_querystring=True)
'http://localhost/script/'
    Optionally, it verifies that the host is in a list of trusted hosts.
If the host is not in there it will raise a
:exc:`~werkzeug.exceptions.SecurityError`.
Note that the string returned might contain unicode characters as the
    representation is an IRI, not a URI. If you need an ASCII-only
representation you can use the :func:`~werkzeug.urls.iri_to_uri`
function:
>>> from werkzeug.urls import iri_to_uri
>>> iri_to_uri(get_current_url(env))
'http://localhost/script/?param=foo'
:param environ: the WSGI environment to get the current URL from.
:param root_only: set `True` if you only want the root URL.
:param strip_querystring: set to `True` if you don't want the querystring.
:param host_only: set to `True` if the host URL should be returned.
:param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
for more information.
"""
tmp = [environ['wsgi.url_scheme'], '://', get_host(environ, trusted_hosts)]
cat = tmp.append
if host_only:
return uri_to_iri(''.join(tmp) + '/')
cat(url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))).rstrip('/'))
cat('/')
if not root_only:
cat(url_quote(wsgi_get_bytes(environ.get('PATH_INFO', '')).lstrip(b'/')))
if not strip_querystring:
qs = get_query_string(environ)
if qs:
cat('?' + qs)
return uri_to_iri(''.join(tmp))
def host_is_trusted(hostname, trusted_list):
"""Checks if a host is trusted against a list. This also takes care
of port normalization.
.. versionadded:: 0.9
:param hostname: the hostname to check
:param trusted_list: a list of hostnames to check against. If a
hostname starts with a dot it will match against
all subdomains as well.
"""
if not hostname:
return False
if isinstance(trusted_list, string_types):
trusted_list = [trusted_list]
def _normalize(hostname):
if ':' in hostname:
hostname = hostname.rsplit(':', 1)[0]
return _encode_idna(hostname)
try:
hostname = _normalize(hostname)
except UnicodeError:
return False
for ref in trusted_list:
if ref.startswith('.'):
ref = ref[1:]
suffix_match = True
else:
suffix_match = False
try:
ref = _normalize(ref)
except UnicodeError:
return False
if ref == hostname:
return True
if suffix_match and hostname.endswith(b'.' + ref):
return True
return False
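# Illustrative sketch of the matching rules above (hostnames are hypothetical):
# an entry starting with a dot also matches subdomains, and any port in the
# checked value is ignored.
def _host_is_trusted_example():
    assert host_is_trusted('example.com:8080', ['.example.com'])
    assert host_is_trusted('foo.example.com', ['.example.com'])
    assert not host_is_trusted('evil.com', ['.example.com'])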
def get_host(environ, trusted_hosts=None):
"""Return the real host for the given WSGI environment. This first checks
the `X-Forwarded-Host` header, then the normal `Host` header, and finally
the `SERVER_NAME` environment variable (using the first one it finds).
Optionally it verifies that the host is in a list of trusted hosts.
If the host is not in there it will raise a
:exc:`~werkzeug.exceptions.SecurityError`.
:param environ: the WSGI environment to get the host of.
:param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
for more information.
"""
if 'HTTP_X_FORWARDED_HOST' in environ:
rv = environ['HTTP_X_FORWARDED_HOST'].split(',', 1)[0].strip()
elif 'HTTP_HOST' in environ:
rv = environ['HTTP_HOST']
else:
rv = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
in (('https', '443'), ('http', '80')):
rv += ':' + environ['SERVER_PORT']
if trusted_hosts is not None:
if not host_is_trusted(rv, trusted_hosts):
from werkzeug.exceptions import SecurityError
raise SecurityError('Host "%s" is not trusted' % rv)
return rv
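# Minimal sketch of get_host() resolution order with a hand-built environ
# (all values are hypothetical): X-Forwarded-Host wins, then Host, then
# SERVER_NAME plus a non-default port.
def _get_host_example():
    environ = {
        'wsgi.url_scheme': 'http',
        'SERVER_NAME': 'fallback.local',
        'SERVER_PORT': '8080',
        'HTTP_HOST': 'example.com:8080',
    }
    # HTTP_HOST is present, so it is returned as-is.
    return get_host(environ)  # -> 'example.com:8080'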
def get_content_length(environ):
"""Returns the content length from the WSGI environment as
integer. If it's not available or chunked transfer encoding is used,
``None`` is returned.
.. versionadded:: 0.9
:param environ: the WSGI environ to fetch the content length from.
"""
if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
return None
content_length = environ.get('CONTENT_LENGTH')
if content_length is not None:
try:
return max(0, int(content_length))
except (ValueError, TypeError):
pass
def get_input_stream(environ, safe_fallback=True):
"""Returns the input stream from the WSGI environment and wraps it
in the most sensible way possible. The stream returned is not the
raw WSGI stream in most cases but one that is safe to read from
without taking into account the content length.
If content length is not set, the stream will be empty for safety reasons.
If the WSGI server supports chunked or infinite streams, it should set
the ``wsgi.input_terminated`` value in the WSGI environ to indicate that.
.. versionadded:: 0.9
:param environ: the WSGI environ to fetch the stream from.
:param safe_fallback: use an empty stream as a safe fallback when the
content length is not set. Disabling this allows infinite streams,
which can be a denial-of-service risk.
"""
stream = environ['wsgi.input']
content_length = get_content_length(environ)
# A wsgi extension that tells us if the input is terminated. In
# that case we return the stream unchanged as we know we can safely
# read it until the end.
if environ.get('wsgi.input_terminated'):
return stream
# If the request doesn't specify a content length, returning the stream is
# potentially dangerous because it could be infinite, malicious or not. If
# safe_fallback is true, return an empty stream instead for safety.
if content_length is None:
return safe_fallback and _empty_stream or stream
# Otherwise limit the stream to the content length
return LimitedStream(stream, content_length)
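# Sketch: with a declared content length the returned stream is a
# LimitedStream, so reads cannot run past the request body (the environ
# below is hypothetical).
def _get_input_stream_example():
    environ = {
        'wsgi.input': BytesIO(b'hello world'),
        'CONTENT_LENGTH': '5',
    }
    stream = get_input_stream(environ)
    return stream.read()  # -> b'hello'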
def get_query_string(environ):
"""Returns the `QUERY_STRING` from the WSGI environment. This also takes
care about the WSGI decoding dance on Python 3 environments as a
native string. The string returned will be restricted to ASCII
characters.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the query string from.
"""
qs = wsgi_get_bytes(environ.get('QUERY_STRING', ''))
# QUERY_STRING really should be ascii safe but some browsers
# will send us some unicode stuff (I am looking at you IE).
# In that case we want to urllib quote it badly.
return try_coerce_native(url_quote(qs, safe=':&%=+$!*\'(),'))
def get_path_info(environ, charset='utf-8', errors='replace'):
"""Returns the `PATH_INFO` from the WSGI environment and properly
decodes it. This also takes care about the WSGI decoding dance
    on Python 3 environments. If the `charset` is set to `None` a
bytestring is returned.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the path from.
:param charset: the charset for the path info, or `None` if no
decoding should be performed.
:param errors: the decoding error handling.
"""
path = wsgi_get_bytes(environ.get('PATH_INFO', ''))
return to_unicode(path, charset, errors, allow_none_charset=True)
def get_script_name(environ, charset='utf-8', errors='replace'):
"""Returns the `SCRIPT_NAME` from the WSGI environment and properly
decodes it. This also takes care about the WSGI decoding dance
    on Python 3 environments. If the `charset` is set to `None` a
bytestring is returned.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the path from.
:param charset: the charset for the path, or `None` if no
decoding should be performed.
:param errors: the decoding error handling.
"""
path = wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))
return to_unicode(path, charset, errors, allow_none_charset=True)
def pop_path_info(environ, charset='utf-8', errors='replace'):
"""Removes and returns the next segment of `PATH_INFO`, pushing it onto
`SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`.
If the `charset` is set to `None` a bytestring is returned.
    If there are empty segments (``'/foo//bar'``) these are ignored but
properly pushed to the `SCRIPT_NAME`:
>>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
>>> pop_path_info(env)
'a'
>>> env['SCRIPT_NAME']
'/foo/a'
>>> pop_path_info(env)
'b'
>>> env['SCRIPT_NAME']
'/foo/a/b'
.. versionadded:: 0.5
.. versionchanged:: 0.9
The path is now decoded and a charset and encoding
parameter can be provided.
:param environ: the WSGI environment that is modified.
"""
path = environ.get('PATH_INFO')
if not path:
return None
script_name = environ.get('SCRIPT_NAME', '')
# shift multiple leading slashes over
old_path = path
path = path.lstrip('/')
if path != old_path:
script_name += '/' * (len(old_path) - len(path))
if '/' not in path:
environ['PATH_INFO'] = ''
environ['SCRIPT_NAME'] = script_name + path
rv = wsgi_get_bytes(path)
else:
segment, path = path.split('/', 1)
environ['PATH_INFO'] = '/' + path
environ['SCRIPT_NAME'] = script_name + segment
rv = wsgi_get_bytes(segment)
return to_unicode(rv, charset, errors, allow_none_charset=True)
def peek_path_info(environ, charset='utf-8', errors='replace'):
"""Returns the next segment on the `PATH_INFO` or `None` if there
is none. Works like :func:`pop_path_info` without modifying the
environment:
>>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
>>> peek_path_info(env)
'a'
>>> peek_path_info(env)
'a'
If the `charset` is set to `None` a bytestring is returned.
.. versionadded:: 0.5
.. versionchanged:: 0.9
The path is now decoded and a charset and encoding
parameter can be provided.
:param environ: the WSGI environment that is checked.
"""
segments = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
if segments:
return to_unicode(wsgi_get_bytes(segments[0]),
charset, errors, allow_none_charset=True)
def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8',
errors='replace', collapse_http_schemes=True):
"""Extracts the path info from the given URL (or WSGI environment) and
path. The path info returned is a unicode string, not a bytestring
suitable for a WSGI environment. The URLs might also be IRIs.
If the path info could not be determined, `None` is returned.
Some examples:
>>> extract_path_info('http://example.com/app', '/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello',
... collapse_http_schemes=False) is None
True
Instead of providing a base URL you can also pass a WSGI environment.
.. versionadded:: 0.6
:param environ_or_baseurl: a WSGI environment dict, a base URL or
base IRI. This is the root of the
application.
:param path_or_url: an absolute path from the server root, a
relative path (in which case it's the path info)
or a full URL. Also accepts IRIs and unicode
parameters.
:param charset: the charset for byte data in URLs
:param errors: the error handling on decode
:param collapse_http_schemes: if set to `False` the algorithm does
not assume that http and https on the
same server point to the same
resource.
"""
def _normalize_netloc(scheme, netloc):
parts = netloc.split(u'@', 1)[-1].split(u':', 1)
if len(parts) == 2:
netloc, port = parts
if (scheme == u'http' and port == u'80') or \
(scheme == u'https' and port == u'443'):
port = None
else:
netloc = parts[0]
port = None
if port is not None:
netloc += u':' + port
return netloc
# make sure whatever we are working on is a IRI and parse it
path = uri_to_iri(path_or_url, charset, errors)
if isinstance(environ_or_baseurl, dict):
environ_or_baseurl = get_current_url(environ_or_baseurl,
root_only=True)
base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
cur_scheme, cur_netloc, cur_path, = \
url_parse(url_join(base_iri, path))[:3]
# normalize the network location
base_netloc = _normalize_netloc(base_scheme, base_netloc)
cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)
# is that IRI even on a known HTTP scheme?
if collapse_http_schemes:
for scheme in base_scheme, cur_scheme:
if scheme not in (u'http', u'https'):
return None
else:
if not (base_scheme in (u'http', u'https') and
base_scheme == cur_scheme):
return None
# are the netlocs compatible?
if base_netloc != cur_netloc:
return None
# are we below the application path?
base_path = base_path.rstrip(u'/')
if not cur_path.startswith(base_path):
return None
return u'/' + cur_path[len(base_path):].lstrip(u'/')
class ProxyMiddleware(object):
"""This middleware routes some requests to the provided WSGI app and
proxies some requests to an external server. This is not something that
can generally be done on the WSGI layer and some HTTP requests will not
tunnel through correctly (for instance websocket requests cannot be
proxied through WSGI). As a result this is only really useful for some
basic requests that can be forwarded.
Example configuration::
app = ProxyMiddleware(app, {
'/static/': {
'target': 'http://127.0.0.1:5001/',
}
})
For each host options can be specified. The following options are
supported:
``target``:
the target URL to dispatch to
``remove_prefix``:
if set to `True` the prefix is chopped off the URL before
dispatching it to the server.
``host``:
When set to ``'<auto>'`` which is the default the host header is
automatically rewritten to the URL of the target. If set to `None`
then the host header is unmodified from the client request. Any
other value overwrites the host header with that value.
``headers``:
An optional dictionary of headers that should be sent with the
request to the target host.
``ssl_context``:
In case this is an HTTPS target host then an SSL context can be
provided here (:class:`ssl.SSLContext`). This can be used for instance
to disable SSL verification.
In this case everything below ``'/static/'`` is proxied to the server on
port 5001. The host header is automatically rewritten and so are request
URLs (eg: the leading `/static/` prefix here gets chopped off).
.. versionadded:: 0.14
"""
def __init__(self, app, targets, chunk_size=2 << 13, timeout=10):
def _set_defaults(opts):
opts.setdefault('remove_prefix', False)
opts.setdefault('host', '<auto>')
opts.setdefault('headers', {})
opts.setdefault('ssl_context', None)
return opts
self.app = app
self.targets = dict(('/%s/' % k.strip('/'), _set_defaults(v))
for k, v in iteritems(targets))
self.chunk_size = chunk_size
self.timeout = timeout
def proxy_to(self, opts, path, prefix):
target = url_parse(opts['target'])
def application(environ, start_response):
headers = list(EnvironHeaders(environ).items())
headers[:] = [(k, v) for k, v in headers
if not is_hop_by_hop_header(k) and
k.lower() not in ('content-length', 'host')]
headers.append(('Connection', 'close'))
if opts['host'] == '<auto>':
headers.append(('Host', target.ascii_host))
elif opts['host'] is None:
headers.append(('Host', environ['HTTP_HOST']))
else:
headers.append(('Host', opts['host']))
headers.extend(opts['headers'].items())
remote_path = path
if opts['remove_prefix']:
remote_path = '%s/%s' % (
target.path.rstrip('/'),
remote_path[len(prefix):].lstrip('/')
)
content_length = environ.get('CONTENT_LENGTH')
chunked = False
if content_length not in ('', None):
headers.append(('Content-Length', content_length))
elif content_length is not None:
headers.append(('Transfer-Encoding', 'chunked'))
chunked = True
try:
if target.scheme == 'http':
con = httplib.HTTPConnection(
target.ascii_host, target.port or 80,
timeout=self.timeout)
elif target.scheme == 'https':
con = httplib.HTTPSConnection(
target.ascii_host, target.port or 443,
timeout=self.timeout,
context=opts['ssl_context'])
con.connect()
con.putrequest(environ['REQUEST_METHOD'], url_quote(remote_path),
skip_host=True)
for k, v in headers:
if k.lower() == 'connection':
v = 'close'
con.putheader(k, v)
con.endheaders()
stream = get_input_stream(environ)
while 1:
data = stream.read(self.chunk_size)
if not data:
break
if chunked:
con.send(b'%x\r\n%s\r\n' % (len(data), data))
else:
con.send(data)
resp = con.getresponse()
except socket.error:
from werkzeug.exceptions import BadGateway
return BadGateway()(environ, start_response)
start_response('%d %s' % (resp.status, resp.reason),
[(k.title(), v) for k, v in resp.getheaders()
if not is_hop_by_hop_header(k)])
def read():
while 1:
try:
data = resp.read(self.chunk_size)
except socket.error:
break
if not data:
break
yield data
return read()
return application
def __call__(self, environ, start_response):
path = environ['PATH_INFO']
app = self.app
for prefix, opts in iteritems(self.targets):
if path.startswith(prefix):
app = self.proxy_to(opts, path, prefix)
break
return app(environ, start_response)
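# Sketch of wiring the proxy in front of an application; the prefix and
# target URL below are hypothetical.
def _proxy_middleware_example(app):
    return ProxyMiddleware(app, {
        '/api/': {
            'target': 'http://127.0.0.1:5001/',
            'remove_prefix': True,
        },
    })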
class SharedDataMiddleware(object):
"""A WSGI middleware that provides static content for development
environments or simple server setups. Usage is quite simple::
import os
from werkzeug.wsgi import SharedDataMiddleware
app = SharedDataMiddleware(app, {
'/shared': os.path.join(os.path.dirname(__file__), 'shared')
})
The contents of the folder ``./shared`` will now be available on
``http://example.com/shared/``. This is pretty useful during development
because a standalone media server is not required. One can also mount
files on the root folder and still continue to use the application because
the shared data middleware forwards all unhandled requests to the
application, even if the requests are below one of the shared folders.
If `pkg_resources` is available you can also tell the middleware to serve
files from package data::
app = SharedDataMiddleware(app, {
'/shared': ('myapplication', 'shared_files')
})
This will then serve the ``shared_files`` folder in the `myapplication`
Python package.
The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
rules for files that are not accessible from the web. If `cache` is set to
`False` no caching headers are sent.
    Currently the middleware does not support non-ASCII filenames. If the
    encoding on the file system happens to be the encoding of the URI it may
    work but this could also be by accident. We strongly suggest using
    ASCII-only file names for static files.
The middleware will guess the mimetype using the Python `mimetype`
module. If it's unable to figure out the charset it will fall back
to `fallback_mimetype`.
.. versionchanged:: 0.5
The cache timeout is configurable now.
.. versionadded:: 0.6
The `fallback_mimetype` parameter was added.
:param app: the application to wrap. If you don't want to wrap an
application you can pass it :exc:`NotFound`.
:param exports: a list or dict of exported files and folders.
:param disallow: a list of :func:`~fnmatch.fnmatch` rules.
:param fallback_mimetype: the fallback mimetype for unknown files.
:param cache: enable or disable caching headers.
:param cache_timeout: the cache timeout in seconds for the headers.
"""
def __init__(self, app, exports, disallow=None, cache=True,
cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'):
self.app = app
self.exports = []
self.cache = cache
self.cache_timeout = cache_timeout
if hasattr(exports, 'items'):
exports = iteritems(exports)
for key, value in exports:
if isinstance(value, tuple):
loader = self.get_package_loader(*value)
elif isinstance(value, string_types):
if os.path.isfile(value):
loader = self.get_file_loader(value)
else:
loader = self.get_directory_loader(value)
else:
raise TypeError('unknown def %r' % value)
self.exports.append((key, loader))
if disallow is not None:
from fnmatch import fnmatch
self.is_allowed = lambda x: not fnmatch(x, disallow)
self.fallback_mimetype = fallback_mimetype
def is_allowed(self, filename):
"""Subclasses can override this method to disallow the access to
certain files. However by providing `disallow` in the constructor
this method is overwritten.
"""
return True
def _opener(self, filename):
return lambda: (
open(filename, 'rb'),
datetime.utcfromtimestamp(os.path.getmtime(filename)),
int(os.path.getsize(filename))
)
def get_file_loader(self, filename):
return lambda x: (os.path.basename(filename), self._opener(filename))
def get_package_loader(self, package, package_path):
from pkg_resources import DefaultProvider, ResourceManager, \
get_provider
loadtime = datetime.utcnow()
provider = get_provider(package)
manager = ResourceManager()
filesystem_bound = isinstance(provider, DefaultProvider)
def loader(path):
if path is None:
return None, None
path = posixpath.join(package_path, path)
if not provider.has_resource(path):
return None, None
basename = posixpath.basename(path)
if filesystem_bound:
return basename, self._opener(
provider.get_resource_filename(manager, path))
s = provider.get_resource_string(manager, path)
return basename, lambda: (
BytesIO(s),
loadtime,
len(s)
)
return loader
def get_directory_loader(self, directory):
def loader(path):
if path is not None:
path = os.path.join(directory, path)
else:
path = directory
if os.path.isfile(path):
return os.path.basename(path), self._opener(path)
return None, None
return loader
def generate_etag(self, mtime, file_size, real_filename):
if not isinstance(real_filename, bytes):
real_filename = real_filename.encode(get_filesystem_encoding())
return 'wzsdm-%d-%s-%s' % (
mktime(mtime.timetuple()),
file_size,
adler32(real_filename) & 0xffffffff
)
def __call__(self, environ, start_response):
cleaned_path = get_path_info(environ)
if PY2:
cleaned_path = cleaned_path.encode(get_filesystem_encoding())
# sanitize the path for non unix systems
cleaned_path = cleaned_path.strip('/')
for sep in os.sep, os.altsep:
if sep and sep != '/':
cleaned_path = cleaned_path.replace(sep, '/')
path = '/' + '/'.join(x for x in cleaned_path.split('/')
if x and x != '..')
file_loader = None
for search_path, loader in self.exports:
if search_path == path:
real_filename, file_loader = loader(None)
if file_loader is not None:
break
if not search_path.endswith('/'):
search_path += '/'
if path.startswith(search_path):
real_filename, file_loader = loader(path[len(search_path):])
if file_loader is not None:
break
if file_loader is None or not self.is_allowed(real_filename):
return self.app(environ, start_response)
guessed_type = mimetypes.guess_type(real_filename)
mime_type = guessed_type[0] or self.fallback_mimetype
f, mtime, file_size = file_loader()
headers = [('Date', http_date())]
if self.cache:
timeout = self.cache_timeout
etag = self.generate_etag(mtime, file_size, real_filename)
headers += [
('Etag', '"%s"' % etag),
('Cache-Control', 'max-age=%d, public' % timeout)
]
if not is_resource_modified(environ, etag, last_modified=mtime):
f.close()
start_response('304 Not Modified', headers)
return []
headers.append(('Expires', http_date(time() + timeout)))
else:
headers.append(('Cache-Control', 'public'))
headers.extend((
('Content-Type', mime_type),
('Content-Length', str(file_size)),
('Last-Modified', http_date(mtime))
))
start_response('200 OK', headers)
return wrap_file(environ, f)
class DispatcherMiddleware(object):
"""Allows one to mount middlewares or applications in a WSGI application.
This is useful if you want to combine multiple WSGI applications::
app = DispatcherMiddleware(app, {
'/app2': app2,
'/app3': app3
})
"""
def __init__(self, app, mounts=None):
self.app = app
self.mounts = mounts or {}
def __call__(self, environ, start_response):
script = environ.get('PATH_INFO', '')
path_info = ''
while '/' in script:
if script in self.mounts:
app = self.mounts[script]
break
script, last_item = script.rsplit('/', 1)
path_info = '/%s%s' % (last_item, path_info)
else:
app = self.mounts.get(script, self.app)
original_script_name = environ.get('SCRIPT_NAME', '')
environ['SCRIPT_NAME'] = original_script_name + script
environ['PATH_INFO'] = path_info
return app(environ, start_response)
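# Sketch (application names are hypothetical): a request for '/backend/users'
# is handled by backend_app with SCRIPT_NAME extended by '/backend' and
# PATH_INFO set to '/users'; everything else falls through to frontend_app.
def _dispatcher_middleware_example(frontend_app, backend_app):
    return DispatcherMiddleware(frontend_app, {'/backend': backend_app})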
@implements_iterator
class ClosingIterator(object):
"""The WSGI specification requires that all middlewares and gateways
respect the `close` callback of an iterator. Because it is useful to add
another close action to a returned iterator and adding a custom iterator
is a boring task this class can be used for that::
return ClosingIterator(app(environ, start_response), [cleanup_session,
cleanup_locals])
If there is just one close function it can be passed instead of the list.
A closing iterator is not needed if the application uses response objects
and finishes the processing if the response is started::
try:
return response(environ, start_response)
finally:
cleanup_session()
cleanup_locals()
"""
def __init__(self, iterable, callbacks=None):
iterator = iter(iterable)
self._next = partial(next, iterator)
if callbacks is None:
callbacks = []
elif callable(callbacks):
callbacks = [callbacks]
else:
callbacks = list(callbacks)
iterable_close = getattr(iterator, 'close', None)
if iterable_close:
callbacks.insert(0, iterable_close)
self._callbacks = callbacks
def __iter__(self):
return self
def __next__(self):
return self._next()
def close(self):
for callback in self._callbacks:
callback()
def wrap_file(environ, file, buffer_size=8192):
"""Wraps a file. This uses the WSGI server's file wrapper if available
or otherwise the generic :class:`FileWrapper`.
.. versionadded:: 0.5
If the file wrapper from the WSGI server is used it's important to not
iterate over it from inside the application but to pass it through
unchanged. If you want to pass out a file wrapper inside a response
object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.
More information about file wrappers are available in :pep:`333`.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
return environ.get('wsgi.file_wrapper', FileWrapper)(file, buffer_size)
@implements_iterator
class FileWrapper(object):
"""This class can be used to convert a :class:`file`-like object into
an iterable. It yields `buffer_size` blocks until the file is fully
read.
You should not use this class directly but rather use the
:func:`wrap_file` function that uses the WSGI server's file wrapper
support if it's available.
.. versionadded:: 0.5
If you're using this object together with a :class:`BaseResponse` you have
to use the `direct_passthrough` mode.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
def __init__(self, file, buffer_size=8192):
self.file = file
self.buffer_size = buffer_size
def close(self):
if hasattr(self.file, 'close'):
self.file.close()
def seekable(self):
if hasattr(self.file, 'seekable'):
return self.file.seekable()
if hasattr(self.file, 'seek'):
return True
return False
def seek(self, *args):
if hasattr(self.file, 'seek'):
self.file.seek(*args)
def tell(self):
if hasattr(self.file, 'tell'):
return self.file.tell()
return None
def __iter__(self):
return self
def __next__(self):
data = self.file.read(self.buffer_size)
if data:
return data
raise StopIteration()
@implements_iterator
class _RangeWrapper(object):
# private for now, but should we make it public in the future ?
"""This class can be used to convert an iterable object into
an iterable that will only yield a piece of the underlying content.
It yields blocks until the underlying stream range is fully read.
The yielded blocks will have a size that can't exceed the original
iterator defined block size, but that can be smaller.
If you're using this object together with a :class:`BaseResponse` you have
to use the `direct_passthrough` mode.
:param iterable: an iterable object with a :meth:`__next__` method.
:param start_byte: byte from which read will start.
:param byte_range: how many bytes to read.
"""
def __init__(self, iterable, start_byte=0, byte_range=None):
self.iterable = iter(iterable)
self.byte_range = byte_range
self.start_byte = start_byte
self.end_byte = None
if byte_range is not None:
self.end_byte = self.start_byte + self.byte_range
self.read_length = 0
self.seekable = hasattr(iterable, 'seekable') and iterable.seekable()
self.end_reached = False
def __iter__(self):
return self
def _next_chunk(self):
try:
chunk = next(self.iterable)
self.read_length += len(chunk)
return chunk
except StopIteration:
self.end_reached = True
raise
def _first_iteration(self):
chunk = None
if self.seekable:
self.iterable.seek(self.start_byte)
self.read_length = self.iterable.tell()
contextual_read_length = self.read_length
else:
while self.read_length <= self.start_byte:
chunk = self._next_chunk()
if chunk is not None:
chunk = chunk[self.start_byte - self.read_length:]
contextual_read_length = self.start_byte
return chunk, contextual_read_length
def _next(self):
if self.end_reached:
raise StopIteration()
chunk = None
contextual_read_length = self.read_length
if self.read_length == 0:
chunk, contextual_read_length = self._first_iteration()
if chunk is None:
chunk = self._next_chunk()
if self.end_byte is not None and self.read_length >= self.end_byte:
self.end_reached = True
return chunk[:self.end_byte - contextual_read_length]
return chunk
def __next__(self):
chunk = self._next()
if chunk:
return chunk
self.end_reached = True
raise StopIteration()
def close(self):
if hasattr(self.iterable, 'close'):
self.iterable.close()
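# Sketch: slicing an iterable body to a byte range without materialising it
# (_RangeWrapper is private; the payload below is hypothetical).
def _range_wrapper_example():
    body = [b'hello ', b'world']
    ranged = _RangeWrapper(body, start_byte=6, byte_range=5)
    return b''.join(ranged)  # -> b'world'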
def _make_chunk_iter(stream, limit, buffer_size):
"""Helper for the line and chunk iter functions."""
if isinstance(stream, (bytes, bytearray, text_type)):
raise TypeError('Passed a string or byte object instead of '
'true iterator or stream.')
if not hasattr(stream, 'read'):
for item in stream:
if item:
yield item
return
if not isinstance(stream, LimitedStream) and limit is not None:
stream = LimitedStream(stream, limit)
_read = stream.read
while 1:
item = _read(buffer_size)
if not item:
break
yield item
def make_line_iter(stream, limit=None, buffer_size=10 * 1024,
cap_at_buffer=False):
"""Safely iterates line-based over an input stream. If the input stream
is not a :class:`LimitedStream` the `limit` parameter is mandatory.
    This uses the stream's :meth:`~file.read` method internally as opposed
to the :meth:`~file.readline` method that is unsafe and can only be used
in violation of the WSGI specification. The same problem applies to the
`__iter__` function of the input stream which calls :meth:`~file.readline`
without arguments.
If you need line-by-line processing it's strongly recommended to iterate
over the input stream using this helper function.
.. versionchanged:: 0.8
This function now ensures that the limit was reached.
.. versionadded:: 0.9
added support for iterators as input stream.
.. versionadded:: 0.11.10
added support for the `cap_at_buffer` parameter.
    :param stream: the stream or iterable to iterate over.
:param limit: the limit in bytes for the stream. (Usually
content length. Not necessary if the `stream`
                  is a :class:`LimitedStream`.)
:param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set chunks are split if they are longer
                          than the buffer size. Internally this is implemented
                          so that the buffer size might be exceeded by a factor
                          of two, however.
"""
_iter = _make_chunk_iter(stream, limit, buffer_size)
first_item = next(_iter, '')
if not first_item:
return
s = make_literal_wrapper(first_item)
empty = s('')
cr = s('\r')
lf = s('\n')
crlf = s('\r\n')
_iter = chain((first_item,), _iter)
def _iter_basic_lines():
_join = empty.join
buffer = []
while 1:
new_data = next(_iter, '')
if not new_data:
break
new_buf = []
buf_size = 0
for item in chain(buffer, new_data.splitlines(True)):
new_buf.append(item)
buf_size += len(item)
if item and item[-1:] in crlf:
yield _join(new_buf)
new_buf = []
elif cap_at_buffer and buf_size >= buffer_size:
rv = _join(new_buf)
while len(rv) >= buffer_size:
yield rv[:buffer_size]
rv = rv[buffer_size:]
new_buf = [rv]
buffer = new_buf
if buffer:
yield _join(buffer)
# This hackery is necessary to merge 'foo\r' and '\n' into one item
# of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
previous = empty
for item in _iter_basic_lines():
if item == lf and previous[-1:] == cr:
previous += item
item = empty
if previous:
yield previous
previous = item
if previous:
yield previous
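# Sketch: line-based iteration over a body of known length without calling
# readline() on the raw stream (the payload is hypothetical).
def _make_line_iter_example():
    stream = BytesIO(b'first\r\nsecond\nthird')
    return list(make_line_iter(stream, limit=19))
    # -> [b'first\r\n', b'second\n', b'third']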
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024,
cap_at_buffer=False):
"""Works like :func:`make_line_iter` but accepts a separator
which divides chunks. If you want newline based processing
you should use :func:`make_line_iter` instead as it
supports arbitrary newline markers.
.. versionadded:: 0.8
.. versionadded:: 0.9
added support for iterators as input stream.
.. versionadded:: 0.11.10
added support for the `cap_at_buffer` parameter.
    :param stream: the stream or iterable to iterate over.
:param separator: the separator that divides chunks.
:param limit: the limit in bytes for the stream. (Usually
content length. Not necessary if the `stream`
is otherwise already limited).
:param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set chunks are split if they are longer
                          than the buffer size. Internally this is implemented
                          so that the buffer size might be exceeded by a factor
                          of two, however.
"""
_iter = _make_chunk_iter(stream, limit, buffer_size)
first_item = next(_iter, '')
if not first_item:
return
_iter = chain((first_item,), _iter)
if isinstance(first_item, text_type):
separator = to_unicode(separator)
_split = re.compile(r'(%s)' % re.escape(separator)).split
_join = u''.join
else:
separator = to_bytes(separator)
_split = re.compile(b'(' + re.escape(separator) + b')').split
_join = b''.join
buffer = []
while 1:
new_data = next(_iter, '')
if not new_data:
break
chunks = _split(new_data)
new_buf = []
buf_size = 0
for item in chain(buffer, chunks):
if item == separator:
yield _join(new_buf)
new_buf = []
buf_size = 0
else:
buf_size += len(item)
new_buf.append(item)
if cap_at_buffer and buf_size >= buffer_size:
rv = _join(new_buf)
while len(rv) >= buffer_size:
yield rv[:buffer_size]
rv = rv[buffer_size:]
new_buf = [rv]
buf_size = len(rv)
buffer = new_buf
if buffer:
yield _join(buffer)
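# Sketch: splitting a stream on an arbitrary separator instead of newlines
# (the payload and separator are hypothetical).
def _make_chunk_iter_example():
    stream = BytesIO(b'a|bb|ccc')
    return list(make_chunk_iter(stream, separator=b'|', limit=8))
    # -> [b'a', b'bb', b'ccc']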
@implements_iterator
class LimitedStream(io.IOBase):
"""Wraps a stream so that it doesn't read more than n bytes. If the
stream is exhausted and the caller tries to get more bytes from it
:func:`on_exhausted` is called which by default returns an empty
string. The return value of that function is forwarded
to the reader function. So if it returns an empty string
:meth:`read` will return an empty string as well.
The limit however must never be higher than what the stream can
output. Otherwise :meth:`readlines` will try to read past the
limit.
.. admonition:: Note on WSGI compliance
calls to :meth:`readline` and :meth:`readlines` are not
WSGI compliant because it passes a size argument to the
readline methods. Unfortunately the WSGI PEP is not safely
implementable without a size argument to :meth:`readline`
because there is no EOF marker in the stream. As a result
of that the use of :meth:`readline` is discouraged.
For the same reason iterating over the :class:`LimitedStream`
is not portable. It internally calls :meth:`readline`.
We strongly suggest using :meth:`read` only or using the
:func:`make_line_iter` which safely iterates line-based
over a WSGI input stream.
:param stream: the stream to wrap.
:param limit: the limit for the stream, must not be longer than
                  what the stream can provide if the stream does not
end with `EOF` (like `wsgi.input`)
"""
def __init__(self, stream, limit):
self._read = stream.read
self._readline = stream.readline
self._pos = 0
self.limit = limit
def __iter__(self):
return self
@property
def is_exhausted(self):
"""If the stream is exhausted this attribute is `True`."""
return self._pos >= self.limit
def on_exhausted(self):
"""This is called when the stream tries to read past the limit.
The return value of this function is returned from the reading
function.
"""
# Read null bytes from the stream so that we get the
# correct end of stream marker.
return self._read(0)
def on_disconnect(self):
"""What should happen if a disconnect is detected? The return
value of this function is returned from read functions in case
the client went away. By default a
:exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
"""
from werkzeug.exceptions import ClientDisconnected
raise ClientDisconnected()
def exhaust(self, chunk_size=1024 * 64):
"""Exhaust the stream. This consumes all the data left until the
limit is reached.
:param chunk_size: the size for a chunk. It will read the chunk
until the stream is exhausted and throw away
the results.
"""
to_read = self.limit - self._pos
chunk = chunk_size
while to_read > 0:
chunk = min(to_read, chunk)
self.read(chunk)
to_read -= chunk
def read(self, size=None):
"""Read `size` bytes or if size is not provided everything is read.
:param size: the number of bytes read.
"""
if self._pos >= self.limit:
return self.on_exhausted()
if size is None or size == -1: # -1 is for consistence with file
size = self.limit
to_read = min(self.limit - self._pos, size)
try:
read = self._read(to_read)
except (IOError, ValueError):
return self.on_disconnect()
if to_read and len(read) != to_read:
return self.on_disconnect()
self._pos += len(read)
return read
def readline(self, size=None):
"""Reads one line from the stream."""
if self._pos >= self.limit:
return self.on_exhausted()
if size is None:
size = self.limit - self._pos
else:
size = min(size, self.limit - self._pos)
try:
line = self._readline(size)
except (ValueError, IOError):
return self.on_disconnect()
if size and not line:
return self.on_disconnect()
self._pos += len(line)
return line
def readlines(self, size=None):
"""Reads a file into a list of strings. It calls :meth:`readline`
until the file is read to the end. It does support the optional
        `size` argument if the underlying stream supports it for
`readline`.
"""
last_pos = self._pos
result = []
if size is not None:
end = min(self.limit, last_pos + size)
else:
end = self.limit
while 1:
if size is not None:
size -= last_pos - self._pos
if self._pos >= end:
break
result.append(self.readline(size))
if size is not None:
last_pos = self._pos
return result
def tell(self):
"""Returns the position of the stream.
.. versionadded:: 0.9
"""
return self._pos
def __next__(self):
line = self.readline()
if not line:
raise StopIteration()
return line
def readable(self):
return True
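# Sketch: a LimitedStream never reads past its limit even if the wrapped
# stream holds more data (the payload is hypothetical).
def _limited_stream_example():
    stream = LimitedStream(BytesIO(b'123456789'), limit=4)
    first = stream.read(2)   # b'12'
    rest = stream.read()     # b'34', capped at the remaining limit
    assert stream.is_exhausted
    return first + rest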
| mit | -6,111,313,200,067,640,000 | 35.178152 | 81 | 0.591262 | false |
CVML/pycortex | cortex/dataset/braindata.py | 2 | 14173 | import hashlib
import numpy as np
import h5py
from ..database import db
class BrainData(object):
def __init__(self, data, subject, **kwargs):
if isinstance(data, str):
import nibabel
nib = nibabel.load(data)
data = nib.get_data().T
self._data = data
try:
basestring
except NameError:
subject = subject if isinstance(subject, str) else subject.decode('utf-8')
self.subject = subject
super(BrainData, self).__init__(**kwargs)
@property
def data(self):
if isinstance(self._data, h5py.Dataset):
return self._data.value
return self._data
@data.setter
def data(self, data):
self._data = data
@property
def name(self):
'''Name of this BrainData, according to its hash'''
return "__%s"%_hash(self.data)[:16]
def exp(self):
"""Copy of this object with data exponentiated.
"""
return self.copy(np.exp(self.data))
def uniques(self, collapse=False):
yield self
def __hash__(self):
return hash(_hash(self.data))
def _write_hdf(self, h5, name=None):
if name is None:
name = self.name
dgrp = h5.require_group("/data")
if name in dgrp and "__%s"%_hash(dgrp[name].value)[:16] == name:
#don't need to update anything, since it's the same data
return h5.get("/data/%s"%name)
node = _hdf_write(h5, self.data, name=name)
node.attrs['subject'] = self.subject
return node
def to_json(self, simple=False):
sdict = super(BrainData, self).to_json(simple=simple)
if simple:
sdict.update(dict(name=self.name,
subject=self.subject,
min=float(np.nan_to_num(self.data).min()),
max=float(np.nan_to_num(self.data).max()),
))
return sdict
@classmethod
def add_numpy_methods(cls):
"""Adds numpy operator methods (+, -, etc.) to this class to allow
simple manipulation of the data, e.g. with VolumeData v:
v + 1 # Returns new VolumeData with 1 added to data
v ** 2 # Returns new VolumeData with data squared
"""
# Binary operations
npops = ["__add__", "__sub__", "__mul__", "__div__", "__pow__",
"__neg__", "__abs__"]
def make_opfun(op): # function nesting creates closure containing op
def opfun(self, *args):
return self.copy(getattr(self.data, op)(*args))
return opfun
for op in npops:
opfun = make_opfun(op)
opfun.__name__ = op
setattr(cls, opfun.__name__, opfun)
BrainData.add_numpy_methods()
class VolumeData(BrainData):
def __init__(self, data, subject, xfmname, mask=None, **kwargs):
"""Three possible variables: volume, movie, vertex. Enumerated with size:
volume movie: (t, z, y, x)
volume image: (z, y, x)
linear movie: (t, v)
linear image: (v,)
"""
if self.__class__ == VolumeData:
raise TypeError('Cannot directly instantiate VolumeData objects')
super(VolumeData, self).__init__(data, subject, **kwargs)
try:
basestring
except NameError:
xfmname = xfmname if isinstance(xfmname, str) else xfmname.decode('utf-8')
self.xfmname = xfmname
self._check_size(mask)
self.masked = _masker(self)
def to_json(self, simple=False):
if simple:
sdict = super(VolumeData, self).to_json(simple=simple)
sdict["shape"] = self.shape
return sdict
xfm = db.get_xfm(self.subject, self.xfmname, 'coord').xfm
sdict = dict(xfm=[list(np.array(xfm).ravel())], data=[self.name])
sdict.update(super(VolumeData, self).to_json())
return sdict
@classmethod
def empty(cls, subject, xfmname, **kwargs):
xfm = db.get_xfm(subject, xfmname)
shape = xfm.shape
return cls(np.zeros(shape), subject, xfmname, **kwargs)
@classmethod
def random(cls, subject, xfmname, **kwargs):
xfm = db.get_xfm(subject, xfmname)
shape = xfm.shape
return cls(np.random.randn(*shape), subject, xfmname, **kwargs)
def _check_size(self, mask):
if self.data.ndim not in (1, 2, 3, 4):
raise ValueError("Invalid data shape")
self.linear = self.data.ndim in (1, 2)
self.movie = self.data.ndim in (2, 4)
if self.linear:
#Guess the mask
if mask is None:
nvox = self.data.shape[-1]
self._mask, self.mask = _find_mask(nvox, self.subject, self.xfmname)
elif isinstance(mask, str):
self.mask = db.get_mask(self.subject, self.xfmname, mask)
self._mask = mask
elif isinstance(mask, np.ndarray):
self.mask = mask > 0
self._mask = mask > 0
self.shape = self.mask.shape
else:
self._mask = None
shape = self.data.shape
if self.movie:
shape = shape[1:]
xfm = db.get_xfm(self.subject, self.xfmname)
if xfm.shape != shape:
raise ValueError("Volumetric data (shape %s) is not the same shape as reference for transform (shape %s)" % (str(shape), str(xfm.shape)))
self.shape = shape
def map(self, projection="nearest"):
"""Convert this VolumeData into a VertexData using the given sampler
"""
from .. import utils
mapper = utils.get_mapper(self.subject, self.xfmname, projection)
data = mapper(self)
return data
def __repr__(self):
maskstr = "volumetric"
if self.linear:
name = self._mask
if isinstance(self._mask, np.ndarray):
name = "custom"
maskstr = "%s masked"%name
if self.movie:
maskstr += " movie"
maskstr = maskstr[0].upper()+maskstr[1:]
return "<%s data for (%s, %s)>"%(maskstr, self.subject, self.xfmname)
def copy(self, data):
return super(VolumeData, self).copy(data, self.subject, self.xfmname, mask=self._mask)
@property
def volume(self):
"""Standardizes the VolumeData, ensuring that masked data are unmasked"""
from .. import volume
if self.linear:
data = volume.unmask(self.mask, self.data[:])
else:
data = self.data[:]
if not self.movie:
data = data[np.newaxis]
return data
def save(self, filename, name=None):
"""Save the dataset into an hdf file with the provided name
"""
import os
if isinstance(filename, str):
fname, ext = os.path.splitext(filename)
if ext in (".hdf", ".h5",".hf5"):
h5 = h5py.File(filename, "a")
self._write_hdf(h5, name=name)
h5.close()
else:
raise TypeError('Unknown file type')
elif isinstance(filename, h5py.Group):
self._write_hdf(filename, name=name)
def _write_hdf(self, h5, name=None):
node = super(VolumeData, self)._write_hdf(h5, name=name)
#write the mask into the file, as necessary
if self._mask is not None:
mask = self._mask
if isinstance(self._mask, np.ndarray):
mgrp = "/subjects/{subj}/transforms/{xfm}/masks/"
mgrp = mgrp.format(subj=self.subject, xfm=self.xfmname)
mname = "__%s" % _hash(self._mask)[:8]
_hdf_write(h5, self._mask, name=mname, group=mgrp)
mask = mname
node.attrs['mask'] = mask
return node
def save_nii(self, filename):
"""Save as a nifti file at the given filename. Nifti headers are
copied from the reference nifti file.
"""
xfm = db.get_xfm(self.subject, self.xfmname)
affine = xfm.reference.get_affine()
import nibabel
new_nii = nibabel.Nifti1Image(self.volume.T, affine)
nibabel.save(new_nii, filename)
class VertexData(BrainData):
def __init__(self, data, subject, **kwargs):
"""Represents `data` at each vertex on a `subject`s cortex.
`data` shape possibilities:
reg linear movie: (t, v)
reg linear image: (v,)
None: creates zero-filled VertexData
where t is the number of time points, c is colors (i.e. RGB), and v is the
number of vertices (either in both hemispheres or one hemisphere).
"""
if self.__class__ == VertexData:
raise TypeError('Cannot directly instantiate VertexData objects')
super(VertexData, self).__init__(data, subject, **kwargs)
try:
left, right = db.get_surf(self.subject, "wm")
except IOError:
left, right = db.get_surf(self.subject, "fiducial")
self.llen = len(left[0])
self.rlen = len(right[0])
self._set_data(data)
@classmethod
def empty(cls, subject, **kwargs):
try:
left, right = db.get_surf(subject, "wm")
except IOError:
left, right = db.get_surf(subject, "fiducial")
nverts = len(left[0]) + len(right[0])
return cls(np.zeros((nverts,)), subject, **kwargs)
@classmethod
def random(cls, subject, **kwargs):
try:
left, right = db.get_surf(subject, "wm")
except IOError:
left, right = db.get_surf(subject, "fiducial")
nverts = len(left[0]) + len(right[0])
return cls(np.random.randn(nverts), subject, **kwargs)
def _set_data(self, data):
"""Stores data for this VertexData. Also sets flags if `data` appears to
be in 'movie' or 'raw' format. See __init__ for `data` shape possibilities.
"""
if data is None:
data = np.zeros((self.llen + self.rlen,))
self._data = data
self.movie = self.data.ndim > 1
self.nverts = self.data.shape[-1]
if self.llen == self.nverts:
# Just data for left hemisphere
self.hem = "left"
rshape = list(self.data.shape)
rshape[1 if self.movie else 0] = self.rlen
self._data = np.hstack([self.data, np.zeros(rshape, dtype=self.data.dtype)])
elif self.rlen == self.nverts:
# Just data for right hemisphere
self.hem = "right"
lshape = list(self.data.shape)
lshape[1 if self.movie else 0] = self.llen
self._data = np.hstack([np.zeros(lshape, dtype=self.data.dtype), self.data])
elif self.llen + self.rlen == self.nverts:
# Data for both hemispheres
self.hem = "both"
else:
raise ValueError('Invalid number of vertices for subject (given %d, should be %d for left hem, %d for right hem, or %d for both)' % (self.nverts, self.llen, self.rlen, self.llen+self.rlen))
def copy(self, data):
return super(VertexData, self).copy(data, self.subject)
def volume(self, xfmname, projection='nearest', **kwargs):
import warnings
warnings.warn('Inverse mapping cannot be accurate')
from .. import utils
mapper = utils.get_mapper(self.subject, xfmname, projection)
return mapper.backwards(self, **kwargs)
def __repr__(self):
maskstr = ""
if self.movie:
maskstr = "movie "
return "<Vertex %sdata for %s>"%(maskstr, self.subject)
def __getitem__(self, idx):
if not self.movie:
raise TypeError("Cannot index non-movie data")
#return VertexData(self.data[idx], self.subject, **self.attrs)
return self.copy(self.data[idx])
def to_json(self, simple=False):
if simple:
sdict = dict(split=self.llen, frames=self.vertices.shape[0])
sdict.update(super(VertexData, self).to_json(simple=simple))
return sdict
sdict = dict(data=[self.name])
sdict.update(super(VertexData, self).to_json())
return sdict
@property
def vertices(self):
verts = self.data
if not self.movie:
verts = verts[np.newaxis]
return verts
@property
def left(self):
if self.movie:
return self.data[:,:self.llen]
else:
return self.data[:self.llen]
@property
def right(self):
if self.movie:
return self.data[:,self.llen:]
else:
return self.data[self.llen:]
def _find_mask(nvox, subject, xfmname):
import os
import re
import glob
import nibabel
files = db.get_paths(subject)['masks'].format(xfmname=xfmname, type="*")
for fname in glob.glob(files):
nib = nibabel.load(fname)
mask = nib.get_data().T != 0
if nvox == np.sum(mask):
fname = os.path.split(fname)[1]
name = re.compile(r'mask_([\w]+).nii.gz').search(fname)
return name.group(1), mask
raise ValueError('Cannot find a valid mask')
class _masker(object):
def __init__(self, dv):
self.dv = dv
self.data = None
if dv.linear:
self.data = dv.data
def __getitem__(self, masktype):
try:
mask = db.get_mask(self.dv.subject, self.dv.xfmname, masktype)
return self.dv.copy(self.dv.volume[:,mask].squeeze())
        except Exception:
            # Fall back to the unmasked volume path; return the copy so the
            # caller always gets a value back.
            return self.dv.copy(self.dv.volume[:, mask].squeeze())
def _hash(array):
'''A simple numpy hash function'''
return hashlib.sha1(array.tostring()).hexdigest()
def _hdf_write(h5, data, name="data", group="/data"):
try:
node = h5.require_dataset("%s/%s"%(group, name), data.shape, data.dtype, exact=True)
except TypeError:
del h5[group][name]
        node = h5.create_dataset("%s/%s"%(group, name), data.shape, data.dtype)
node[:] = data
return node
| bsd-2-clause | -6,015,160,943,890,405,000 | 33.400485 | 201 | 0.558103 | false |
lifeisstillgood/kashflo | setup.py | 1 | 1202 | #!/usr/bin/env python
#! -*- coding: utf-8 -*-
###
# Copyright (c) Paul Brian 2013
# This software is subject to
# the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
###
"""
setup for HomeSpendWatch
"""
from setuptools import setup, find_packages
import os, glob
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
def get_version():
""" return a version number, or error string.
We are assuming a file version.txt always exists. By convention
populate that file with output of git describe
"""
try:
v = open("version.txt").read().strip()
except:
v = "UNABLE_TO_FIND_RELEASE_VERSION_FILE"
return v
setup(
name='homespendwatch',
version=get_version(),
packages=find_packages(),
author='See AUTHORS.txt',
author_email='[email protected]',
long_description=README,
license='LICENSE.txt',
description="Simple Home Accounts spending tracker "\
"to work with any bank",
entry_points = """\
[console_scripts]
homespendwatch-run = homespendwatch.run:main
""",
)
| agpl-3.0 | 2,499,044,890,228,494,300 | 20.464286 | 67 | 0.647255 | false |
projectcalico/calico-nova | nova/api/openstack/compute/plugins/v3/image_size.py | 24 | 2270 | # Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
ALIAS = "image-size"
authorize = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS)
class ImageSizeController(wsgi.Controller):
def _extend_image(self, image, image_cache):
key = "OS-EXT-IMG-SIZE:size"
image[key] = image_cache['size']
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ["nova.context"]
if authorize(context):
image_resp = resp_obj.obj['image']
# image guaranteed to be in the cache due to the core API adding
# it in its 'show' method
image_cached = req.get_db_item('images', image_resp['id'])
self._extend_image(image_resp, image_cached)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
images_resp = list(resp_obj.obj['images'])
# images guaranteed to be in the cache due to the core API adding
# it in its 'detail' method
for image in images_resp:
image_cached = req.get_db_item('images', image['id'])
self._extend_image(image, image_cached)
class ImageSize(extensions.V3APIExtensionBase):
"""Adds image size to image listings."""
name = "ImageSize"
alias = ALIAS
version = 1
def get_controller_extensions(self):
controller = ImageSizeController()
extension = extensions.ControllerExtension(self, 'images', controller)
return [extension]
def get_resources(self):
return []
| apache-2.0 | 4,726,079,810,243,462,000 | 33.923077 | 78 | 0.652423 | false |
OpenWinCon/OpenWinNet | web-gui/myvenv/lib/python3.4/site-packages/django/core/exceptions.py | 486 | 5276 | """
Global Django exception and warning classes.
"""
from django.utils import six
from django.utils.encoding import force_text
class FieldDoesNotExist(Exception):
"""The requested model field does not exist"""
pass
class DjangoRuntimeWarning(RuntimeWarning):
pass
class AppRegistryNotReady(Exception):
"""The django.apps registry is not populated yet"""
pass
class ObjectDoesNotExist(Exception):
"""The requested object does not exist"""
silent_variable_failure = True
class MultipleObjectsReturned(Exception):
"""The query returned multiple objects when only one was expected."""
pass
class SuspiciousOperation(Exception):
"""The user did something suspicious"""
class SuspiciousMultipartForm(SuspiciousOperation):
"""Suspect MIME request in multipart form data"""
pass
class SuspiciousFileOperation(SuspiciousOperation):
"""A Suspicious filesystem operation was attempted"""
pass
class DisallowedHost(SuspiciousOperation):
"""HTTP_HOST header contains invalid value"""
pass
class DisallowedRedirect(SuspiciousOperation):
"""Redirect to scheme not in allowed list"""
pass
class PermissionDenied(Exception):
"""The user did not have permission to do that"""
pass
class ViewDoesNotExist(Exception):
"""The requested view does not exist"""
pass
class MiddlewareNotUsed(Exception):
"""This middleware is not used in this server configuration"""
pass
class ImproperlyConfigured(Exception):
"""Django is somehow improperly configured"""
pass
class FieldError(Exception):
"""Some kind of problem with a model field."""
pass
NON_FIELD_ERRORS = '__all__'
class ValidationError(Exception):
"""An error while validating data."""
def __init__(self, message, code=None, params=None):
"""
The `message` argument can be a single error, a list of errors, or a
dictionary that maps field names to lists of errors. What we define as
an "error" can be either a simple string or an instance of
ValidationError with its message attribute set, and what we define as
list or dictionary can be an actual `list` or `dict` or an instance
of ValidationError with its `error_list` or `error_dict` attribute set.
"""
# PY2 can't pickle naive exception: http://bugs.python.org/issue1692335.
super(ValidationError, self).__init__(message, code, params)
if isinstance(message, ValidationError):
if hasattr(message, 'error_dict'):
message = message.error_dict
# PY2 has a `message` property which is always there so we can't
# duck-type on it. It was introduced in Python 2.5 and already
# deprecated in Python 2.6.
elif not hasattr(message, 'message' if six.PY3 else 'code'):
message = message.error_list
else:
message, code, params = message.message, message.code, message.params
if isinstance(message, dict):
self.error_dict = {}
for field, messages in message.items():
if not isinstance(messages, ValidationError):
messages = ValidationError(messages)
self.error_dict[field] = messages.error_list
elif isinstance(message, list):
self.error_list = []
for message in message:
# Normalize plain strings to instances of ValidationError.
if not isinstance(message, ValidationError):
message = ValidationError(message)
if hasattr(message, 'error_dict'):
self.error_list.extend(sum(message.error_dict.values(), []))
else:
self.error_list.extend(message.error_list)
else:
self.message = message
self.code = code
self.params = params
self.error_list = [self]
@property
def message_dict(self):
# Trigger an AttributeError if this ValidationError
# doesn't have an error_dict.
getattr(self, 'error_dict')
return dict(self)
@property
def messages(self):
if hasattr(self, 'error_dict'):
return sum(dict(self).values(), [])
return list(self)
def update_error_dict(self, error_dict):
if hasattr(self, 'error_dict'):
for field, error_list in self.error_dict.items():
error_dict.setdefault(field, []).extend(error_list)
else:
error_dict.setdefault(NON_FIELD_ERRORS, []).extend(self.error_list)
return error_dict
def __iter__(self):
if hasattr(self, 'error_dict'):
for field, errors in self.error_dict.items():
yield field, list(ValidationError(errors))
else:
for error in self.error_list:
message = error.message
if error.params:
message %= error.params
yield force_text(message)
def __str__(self):
if hasattr(self, 'error_dict'):
return repr(dict(self))
return repr(list(self))
def __repr__(self):
return 'ValidationError(%s)' % self
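# Sketch of the three accepted message shapes and how they are normalised
# (field name and message text are hypothetical).
def _validation_error_example():
    plain = ValidationError('Enter a valid value.', code='invalid')
    listed = ValidationError([plain, 'Another problem.'])
    keyed = ValidationError({'email': listed})
    # message_dict flattens everything to plain strings per field:
    # {'email': ['Enter a valid value.', 'Another problem.']}
    return keyed.message_dict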
| apache-2.0 | -5,969,876,282,694,283,000 | 29.49711 | 85 | 0.623768 | false |
abhattad4/Digi-Menu | digimenu2/django/contrib/admin/sites.py | 77 | 22052 | from functools import update_wrapper
from django.apps import apps
from django.conf import settings
from django.contrib.admin import ModelAdmin, actions
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db.models.base import ModelBase
from django.http import Http404, HttpResponseRedirect
from django.template.engine import Engine
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
system_check_errors = []
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class AdminSite(object):
"""
An AdminSite object encapsulates an instance of the Django admin application, ready
to be hooked in to your URLconf. Models are registered with the AdminSite using the
register() method, and the get_urls() method can then be used to access Django view
functions that present a full admin interface for the collection of registered
models.
"""
# Text to put at the end of each page's <title>.
site_title = ugettext_lazy('Django site admin')
# Text to put in each page's <h1>.
site_header = ugettext_lazy('Django administration')
# Text to put at the top of the admin index page.
index_title = ugettext_lazy('Site administration')
# URL for the "View site" link at the top of each admin page.
site_url = '/'
login_form = None
index_template = None
app_index_template = None
login_template = None
logout_template = None
password_change_template = None
password_change_done_template = None
def __init__(self, name='admin'):
self._registry = {} # model_class class -> admin_class instance
self.name = name
self._actions = {'delete_selected': actions.delete_selected}
self._global_actions = self._actions.copy()
def register(self, model_or_iterable, admin_class=None, **options):
"""
Registers the given model(s) with the given admin class.
The model(s) should be Model classes, not instances.
If an admin class isn't given, it will use ModelAdmin (the default
admin options). If keyword arguments are given -- e.g., list_display --
they'll be applied as options to the admin class.
If a model is already registered, this will raise AlreadyRegistered.
If a model is abstract, this will raise ImproperlyConfigured.
"""
if not admin_class:
admin_class = ModelAdmin
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model._meta.abstract:
raise ImproperlyConfigured('The model %s is abstract, so it '
'cannot be registered with admin.' % model.__name__)
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
# Ignore the registration if the model has been
# swapped out.
if not model._meta.swapped:
# If we got **options then dynamically construct a subclass of
# admin_class with those **options.
if options:
# For reasons I don't quite understand, without a __module__
# the created class appears to "live" in the wrong place,
# which causes issues later on.
options['__module__'] = __name__
admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
if admin_class is not ModelAdmin and settings.DEBUG:
system_check_errors.extend(admin_class.check(model))
# Instantiate the admin class to save in the registry
self._registry[model] = admin_class(model, self)
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
def is_registered(self, model):
"""
Check if a model class is registered with this `AdminSite`.
"""
return model in self._registry
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
Explicitly get a registered global action whether it's enabled or
not. Raises KeyError for invalid names.
"""
return self._global_actions[name]
@property
def actions(self):
"""
Get all the enabled actions as an iterable of (name, func).
"""
return six.iteritems(self._actions)
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
def check_dependencies(self):
"""
Check that all things needed to run the admin have been correctly installed.
The default implementation checks that admin and contenttypes apps are
installed, as well as the auth context processor.
"""
if not apps.is_installed('django.contrib.admin'):
raise ImproperlyConfigured(
"Put 'django.contrib.admin' in your INSTALLED_APPS "
"setting in order to use the admin application.")
if not apps.is_installed('django.contrib.contenttypes'):
raise ImproperlyConfigured(
"Put 'django.contrib.contenttypes' in your INSTALLED_APPS "
"setting in order to use the admin application.")
try:
default_template_engine = Engine.get_default()
except Exception:
# Skip this non-critical check:
# 1. if the user has a non-trivial TEMPLATES setting and Django
# can't find a default template engine
# 2. if anything goes wrong while loading template engines, in
# order to avoid raising an exception from a confusing location
# Catching ImproperlyConfigured suffices for 1. but 2. requires
# catching all exceptions.
pass
else:
if ('django.contrib.auth.context_processors.auth'
not in default_template_engine.context_processors):
raise ImproperlyConfigured(
"Enable 'django.contrib.auth.context_processors.auth' "
"in your TEMPLATES setting in order to use the admin "
"application.")
def admin_view(self, view, cacheable=False):
"""
Decorator to create an admin view attached to this ``AdminSite``. This
wraps the view and provides permission checking by calling
``self.has_permission``.
You'll want to use this from within ``AdminSite.get_urls()``:
class MyAdminSite(AdminSite):
def get_urls(self):
from django.conf.urls import url
urls = super(MyAdminSite, self).get_urls()
urls += [
url(r'^my_view/$', self.admin_view(some_view))
]
return urls
By default, admin_views are marked non-cacheable using the
``never_cache`` decorator. If the view can be safely cached, set
cacheable=True.
"""
def inner(request, *args, **kwargs):
if not self.has_permission(request):
if request.path == reverse('admin:logout', current_app=self.name):
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
# Inner import to prevent django.contrib.admin (app) from
# importing django.contrib.auth.models.User (unrelated model).
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
request.get_full_path(),
reverse('admin:login', current_app=self.name)
)
return view(request, *args, **kwargs)
if not cacheable:
inner = never_cache(inner)
# We add csrf_protect here so this function can be used as a utility
# function for any view, without having to repeat 'csrf_protect'.
if not getattr(view, 'csrf_exempt', False):
inner = csrf_protect(inner)
return update_wrapper(inner, view)
def get_urls(self):
from django.conf.urls import url, include
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level,
# and django.contrib.contenttypes.views imports ContentType.
from django.contrib.contenttypes import views as contenttype_views
if settings.DEBUG:
self.check_dependencies()
def wrap(view, cacheable=False):
def wrapper(*args, **kwargs):
return self.admin_view(view, cacheable)(*args, **kwargs)
return update_wrapper(wrapper, view)
# Admin-site-wide views.
urlpatterns = [
url(r'^$', wrap(self.index), name='index'),
url(r'^login/$', self.login, name='login'),
url(r'^logout/$', wrap(self.logout), name='logout'),
url(r'^password_change/$', wrap(self.password_change, cacheable=True), name='password_change'),
url(r'^password_change/done/$', wrap(self.password_change_done, cacheable=True),
name='password_change_done'),
url(r'^jsi18n/$', wrap(self.i18n_javascript, cacheable=True), name='jsi18n'),
url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$', wrap(contenttype_views.shortcut),
name='view_on_site'),
]
# Add in each model's views, and create a list of valid URLS for the
# app_index
valid_app_labels = []
for model, model_admin in six.iteritems(self._registry):
urlpatterns += [
url(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name), include(model_admin.urls)),
]
if model._meta.app_label not in valid_app_labels:
valid_app_labels.append(model._meta.app_label)
# If there were ModelAdmins registered, we should have a list of app
# labels for which we need to allow access to the app_index view,
if valid_app_labels:
regex = r'^(?P<app_label>' + '|'.join(valid_app_labels) + ')/$'
urlpatterns += [
url(regex, wrap(self.app_index), name='app_list'),
]
return urlpatterns
@property
def urls(self):
return self.get_urls(), 'admin', self.name
def each_context(self, request):
"""
Returns a dictionary of variables to put in the template context for
*every* page in the admin site.
"""
return {
'site_title': self.site_title,
'site_header': self.site_header,
'site_url': self.site_url,
'has_permission': self.has_permission(request),
}
def password_change(self, request, extra_context=None):
"""
Handles the "change password" task -- both form display and validation.
"""
from django.contrib.admin.forms import AdminPasswordChangeForm
from django.contrib.auth.views import password_change
url = reverse('admin:password_change_done', current_app=self.name)
defaults = {
'current_app': self.name,
'password_change_form': AdminPasswordChangeForm,
'post_change_redirect': url,
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.password_change_template is not None:
defaults['template_name'] = self.password_change_template
return password_change(request, **defaults)
def password_change_done(self, request, extra_context=None):
"""
Displays the "success" page after a password change.
"""
from django.contrib.auth.views import password_change_done
defaults = {
'current_app': self.name,
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.password_change_done_template is not None:
defaults['template_name'] = self.password_change_done_template
return password_change_done(request, **defaults)
def i18n_javascript(self, request):
"""
Displays the i18n JavaScript that the Django admin requires.
This takes into account the USE_I18N setting. If it's set to False, the
generated JavaScript will be leaner and faster.
"""
if settings.USE_I18N:
from django.views.i18n import javascript_catalog
else:
from django.views.i18n import null_javascript_catalog as javascript_catalog
return javascript_catalog(request, packages=['django.conf', 'django.contrib.admin'])
@never_cache
def logout(self, request, extra_context=None):
"""
Logs out the user for the given HttpRequest.
This should *not* assume the user is already logged in.
"""
from django.contrib.auth.views import logout
defaults = {
'current_app': self.name,
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.logout_template is not None:
defaults['template_name'] = self.logout_template
return logout(request, **defaults)
@never_cache
def login(self, request, extra_context=None):
"""
Displays the login form for the given HttpRequest.
"""
if request.method == 'GET' and self.has_permission(request):
# Already logged-in, redirect to admin index
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
from django.contrib.auth.views import login
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level,
# and django.contrib.admin.forms eventually imports User.
from django.contrib.admin.forms import AdminAuthenticationForm
context = dict(self.each_context(request),
title=_('Log in'),
app_path=request.get_full_path(),
)
if (REDIRECT_FIELD_NAME not in request.GET and
REDIRECT_FIELD_NAME not in request.POST):
context[REDIRECT_FIELD_NAME] = request.get_full_path()
context.update(extra_context or {})
defaults = {
'extra_context': context,
'current_app': self.name,
'authentication_form': self.login_form or AdminAuthenticationForm,
'template_name': self.login_template or 'admin/login.html',
}
return login(request, **defaults)
@never_cache
def index(self, request, extra_context=None):
"""
Displays the main admin index page, which lists all of the installed
apps that have been registered in this site.
"""
app_dict = {}
for model, model_admin in self._registry.items():
app_label = model._meta.app_label
has_module_perms = model_admin.has_module_permission(request)
if has_module_perms:
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'perms': perms,
}
if perms.get('change', False):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
except NoReverseMatch:
pass
if perms.get('add', False):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
except NoReverseMatch:
pass
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
app_dict[app_label] = {
'name': apps.get_app_config(app_label).verbose_name,
'app_label': app_label,
'app_url': reverse(
'admin:app_list',
kwargs={'app_label': app_label},
current_app=self.name,
),
'has_module_perms': has_module_perms,
'models': [model_dict],
}
# Sort the apps alphabetically.
app_list = list(six.itervalues(app_dict))
app_list.sort(key=lambda x: x['name'].lower())
# Sort the models alphabetically within each app.
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
context = dict(
self.each_context(request),
title=self.index_title,
app_list=app_list,
)
context.update(extra_context or {})
request.current_app = self.name
return TemplateResponse(request, self.index_template or
'admin/index.html', context)
def app_index(self, request, app_label, extra_context=None):
app_name = apps.get_app_config(app_label).verbose_name
app_dict = {}
for model, model_admin in self._registry.items():
if app_label == model._meta.app_label:
has_module_perms = model_admin.has_module_permission(request)
if not has_module_perms:
raise PermissionDenied
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'perms': perms,
}
if perms.get('change'):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
except NoReverseMatch:
pass
if perms.get('add'):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
except NoReverseMatch:
pass
if app_dict:
                        app_dict['models'].append(model_dict)
else:
# First time around, now that we know there's
# something to display, add in the necessary meta
# information.
app_dict = {
'name': app_name,
'app_label': app_label,
'app_url': '',
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if not app_dict:
raise Http404('The requested admin page does not exist.')
# Sort the models alphabetically within each app.
app_dict['models'].sort(key=lambda x: x['name'])
context = dict(self.each_context(request),
title=_('%(app)s administration') % {'app': app_name},
app_list=[app_dict],
app_label=app_label,
)
context.update(extra_context or {})
request.current_app = self.name
return TemplateResponse(request, self.app_index_template or [
'admin/%s/app_index.html' % app_label,
'admin/app_index.html'
], context)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
site = AdminSite()
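# --- Editor's illustrative sketch (not part of the original file). `register`
# accepts either a ModelAdmin subclass or keyword options that are turned into
# one on the fly; `BlogPost` below is a hypothetical model, not something this
# module provides.
#
#     from django.contrib import admin
#     from myapp.models import BlogPost
#
#     admin.site.register(BlogPost, list_display=('title', 'created'))
#
# The keyword path builds type("BlogPostAdmin", (ModelAdmin,), options), which
# is why options can also be combined with a custom admin_class.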
| bsd-3-clause | -1,840,328,331,321,345,500 | 40.844402 | 117 | 0.573508 | false |
kissbac/upm | examples/python/vcap.py | 2 | 2281 | #!/usr/bin/python
# Author: Jon Trulson <[email protected]>
# Copyright (c) 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time, sys, signal, atexit
import pyupm_vcap as sensorObj
## Exit handlers ##
# This function stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This function lets you run code on exit
def exitHandler():
print "Exiting..."
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
defaultDev = "/dev/video0"
# if an argument was specified, use it as the device instead
if (len(sys.argv) > 1):
defaultDev = sys.argv[1]
print "Using device", defaultDev
print "Initializing..."
# Instantiate an VCAP instance, using the specified video device
sensor = sensorObj.VCAP(defaultDev)
# enable some debug/verbose output
sensor.setDebug(True);
# This is just a hint. The kernel can change this to a lower
# resolution that the hardware supports. Use getWidth() and
# getHeight() methods to see what the kernel actually chose if you
# care.
sensor.setResolution(1920, 1080);
# capture an image
sensor.captureImage();
# convert and save it as a jpeg
sensor.saveImage("video-img1.jpg");
| mit | 683,654,417,358,213,100 | 33.044776 | 78 | 0.762823 | false |
SevInf/IEDriver | py/selenium/webdriver/phantomjs/service.py | 14 | 3533 | #!/usr/bin/python
#
# Copyright 2012 Software Freedom Conservancy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
import signal
import subprocess
import time
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common import utils
class Service(object):
"""
Object that manages the starting and stopping of PhantomJS / Ghostdriver
"""
def __init__(self, executable_path, port=0, service_args=None, log_path=None):
"""
Creates a new instance of the Service
:Args:
- executable_path : Path to PhantomJS binary
- port : Port the service is running on
- service_args : A List of other command line options to pass to PhantomJS
- log_path: Path for PhantomJS service to log to
"""
self.port = port
self.path = executable_path
self.service_args= service_args
if self.port == 0:
self.port = utils.free_port()
if self.service_args is None:
self.service_args = []
else:
self.service_args=service_args[:]
self.service_args.insert(0, self.path)
self.service_args.append("--webdriver=%d" % self.port)
if not log_path:
log_path = "ghostdriver.log"
self._log = open(log_path, 'w')
def __del__(self):
# subprocess.Popen doesn't send signal on __del__;
# we have to try to stop the launched process.
self.stop()
def start(self):
"""
Starts PhantomJS with GhostDriver.
:Exceptions:
- WebDriverException : Raised either when it can't start the service
or when it can't connect to the service
"""
try:
self.process = subprocess.Popen(self.service_args, stdin=subprocess.PIPE,
close_fds=platform.system() != 'Windows',
stdout=self._log, stderr=self._log)
except Exception as e:
raise WebDriverException("Unable to start phantomjs with ghostdriver.", e)
count = 0
while not utils.is_connectable(self.port):
count += 1
time.sleep(1)
if count == 30:
raise WebDriverException("Can not connect to GhostDriver")
@property
def service_url(self):
"""
Gets the url of the GhostDriver Service
"""
return "http://localhost:%d/wd/hub" % self.port
def stop(self):
"""
Cleans up the process
"""
if self._log:
self._log.close()
self._log = None
        # If it's dead, don't worry
if self.process is None:
return
#Tell the Server to properly die in case
try:
if self.process:
self.process.send_signal(signal.SIGTERM)
self.process.wait()
except OSError:
# kill may not be available under windows environment
pass
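# --- Editor's illustrative sketch (not part of the original file). The binary
# path below is an assumption; point it at a real PhantomJS executable.
#
#     service = Service('/usr/local/bin/phantomjs')
#     service.start()
#     print(service.service_url)   # e.g. http://localhost:51737/wd/hub
#     service.stop()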
| apache-2.0 | 6,322,043,488,904,468,000 | 32.018692 | 86 | 0.597226 | false |
privacyidea/privacyidea | tests/test_lib_usercache.py | 1 | 26733 | # coding: utf-8
"""
This test file tests the lib.usercache
The lib.usercache.py only depends on the database model
"""
from contextlib import contextmanager
from mock import patch
from privacyidea.lib.error import UserError
from tests import ldap3mock
from tests.test_mock_ldap3 import LDAPDirectory
from .base import MyTestCase
from privacyidea.lib.resolver import (save_resolver, delete_resolver, get_resolver_object)
from privacyidea.lib.realm import (set_realm, delete_realm)
from privacyidea.lib.user import (User, get_username, create_user)
from privacyidea.lib.usercache import (get_cache_time,
cache_username, delete_user_cache,
EXPIRATION_SECONDS, retrieve_latest_entry, is_cache_enabled)
from privacyidea.lib.config import set_privacyidea_config
from datetime import timedelta
from datetime import datetime
from privacyidea.models import UserCache
class UserCacheTestCase(MyTestCase):
"""
Test the user on the database level
"""
PWFILE = "tests/testdata/passwd"
resolvername1 = "resolver1"
realm1 = "realm1"
username = "root"
uid = "0"
sql_realm = "sqlrealm"
sql_resolver = "SQL1"
sql_parameters = {'Driver': 'sqlite',
'Server': '/tests/testdata/',
'Database': "testusercache.sqlite",
'Table': 'users',
'Encoding': 'utf8',
'Map': '{ "username": "username", \
"userid" : "id", \
"email" : "email", \
"surname" : "name", \
"givenname" : "givenname", \
"password" : "password", \
"phone": "phone", \
"mobile": "mobile"}',
'resolver': sql_resolver,
'type': 'sqlresolver',
}
def _create_realm(self):
rid = save_resolver({"resolver": self.resolvername1,
"type": "passwdresolver",
"fileName": self.PWFILE,
"type.fileName": "string",
"desc.fileName": "The name of the file"})
self.assertTrue(rid > 0, rid)
added, failed = set_realm(realm=self.realm1, resolvers=[self.resolvername1])
self.assertTrue(len(added) > 0, added)
self.assertEqual(len(failed), 0)
def _delete_realm(self):
delete_realm(self.realm1)
delete_resolver(self.resolvername1)
def test_00_set_config(self):
# Save wrong data in EXPIRATION_SECONDS
set_privacyidea_config(EXPIRATION_SECONDS, "wrong")
exp_delta = get_cache_time()
self.assertEqual(exp_delta, timedelta(seconds=0))
self.assertFalse(is_cache_enabled())
# Save empty data in EXPIRATION_SECONDS
set_privacyidea_config(EXPIRATION_SECONDS, "")
exp_delta = get_cache_time()
self.assertEqual(exp_delta, timedelta(seconds=0))
self.assertFalse(is_cache_enabled())
# Save real data in EXPIRATION_SECONDS
set_privacyidea_config(EXPIRATION_SECONDS, 600)
exp_delta = get_cache_time()
self.assertEqual(exp_delta, timedelta(seconds=600))
self.assertTrue(is_cache_enabled())
def test_01_get_username_from_cache(self):
# If a username is already contained in the cache, the function
# lib.user.get_username will return the cache value
username = "cached_user"
resolver = "resolver1"
uid = "1"
expiration_delta = get_cache_time()
r = UserCache(username, username, resolver, uid, datetime.now()).save()
u_name = get_username(uid, resolver)
self.assertEqual(u_name, username)
# A non-existing user is not in the cache and returns and empty username
u_name = get_username(uid, "resolver_does_not_exist")
self.assertEqual(u_name, "")
def test_02_get_resolvers(self):
# enable user cache
set_privacyidea_config(EXPIRATION_SECONDS, 600)
# create realm
self._create_realm()
# delete user_cache
r = delete_user_cache()
self.assertTrue(r >= 0)
# The username is not in the cache. It is fetched from the resolver
# At the same time the cache is filled.
user = User(self.username, self.realm1)
self.assertEqual(user.login, self.username)
# The user ID is fetched from the resolver
self.assertEqual(user.uid, self.uid)
# Now, the cache should have exactly one entry
entry = UserCache.query.one()
self.assertEqual(entry.user_id, self.uid)
self.assertEqual(entry.username, self.username)
self.assertEqual(entry.resolver, self.resolvername1)
ts = entry.timestamp
# delete the resolver, which also purges the cache
self._delete_realm()
# manually re-add the entry from above
UserCache(self.username, self.username, self.resolvername1,
self.uid, ts).save()
# the username is fetched from the cache
u_name = get_username(self.uid, self.resolvername1)
self.assertEqual(u_name, self.username)
# delete the cache
r = delete_user_cache()
# try to fetch the username. It is not in the cache and the
# resolver does not exist anymore.
u_name = get_username(self.uid, self.resolvername1)
self.assertEqual(u_name, "")
def test_03_get_identifiers(self):
# create realm
self._create_realm()
# delete user_cache
r = delete_user_cache()
self.assertTrue(r >= 0)
# The username is not in the cache. It is fetched from the resolver
# At the same time the cache is filled. Implicitly we test the
# _get_resolvers!
user = User(self.username, self.realm1, self.resolvername1)
uids = user.get_user_identifiers()
self.assertEqual(user.login, self.username)
self.assertEqual(user.uid, self.uid)
# Now, the cache should have exactly one entry
entry = UserCache.query.one()
self.assertEqual(entry.user_id, self.uid)
self.assertEqual(entry.username, self.username)
self.assertEqual(entry.resolver, self.resolvername1)
ts = entry.timestamp
# delete the resolver, which also purges the cache
self._delete_realm()
# manually re-add the entry from above
UserCache(self.username, self.username, self.resolvername1,
self.uid, ts).save()
# the username is fetched from the cache
u_name = get_username(self.uid, self.resolvername1)
self.assertEqual(u_name, self.username)
# The `User` class also fetches the UID from the cache
user2 = User(self.username, self.realm1, self.resolvername1)
self.assertEqual(user2.uid, self.uid)
# delete the cache
r = delete_user_cache()
# try to fetch the username. It is not in the cache and the
# resolver does not exist anymore.
u_name = get_username(self.uid, self.resolvername1)
self.assertEqual(u_name, "")
# similar case for the `User` class
# The `User` class also tries to fetch the UID from the cache
with self.assertRaises(UserError):
user3 = User(self.username, self.realm1, self.resolvername1)
def test_04_delete_cache(self):
now = datetime.now()
UserCache("hans1", "hans1", "resolver1", "uid1", now).save()
UserCache("hans2", "hans1", "resolver2", "uid2", now).save()
r = UserCache.query.filter(UserCache.username == "hans1").first()
self.assertTrue(r)
r = UserCache.query.filter(UserCache.username == "hans2").first()
self.assertTrue(r)
# delete hans1
delete_user_cache(username="hans1")
r = UserCache.query.filter(UserCache.username == "hans1").first()
self.assertFalse(r)
r = UserCache.query.filter(UserCache.username == "hans2").first()
self.assertTrue(r)
# delete resolver2
delete_user_cache(resolver="resolver2")
r = UserCache.query.filter(UserCache.username == "hans1").first()
self.assertFalse(r)
r = UserCache.query.filter(UserCache.username == "hans2").first()
self.assertFalse(r)
def test_05_multiple_entries(self):
# two consistent entries
now = datetime.now()
UserCache("hans1", "hans1", "resolver1", "uid1", now - timedelta(seconds=60)).save()
UserCache("hans1", "hans1", "resolver1", "uid1", now).save()
r = UserCache.query.filter(UserCache.username == "hans1", UserCache.resolver == "resolver1")
self.assertEqual(r.count(), 2)
u_name = get_username("uid1", "resolver1")
self.assertEqual(u_name, "hans1")
r = delete_user_cache()
# two inconsistent entries: most recent entry (ordered by datetime) wins
UserCache("hans2", "hans2", "resolver1", "uid1", now).save()
UserCache("hans1", "hans1", "resolver1", "uid1", now - timedelta(seconds=60)).save()
r = UserCache.query.filter(UserCache.user_id == "uid1", UserCache.resolver == "resolver1")
self.assertEqual(r.count(), 2)
u_name = get_username("uid1", "resolver1")
self.assertEqual(u_name, "hans2")
# Clean up the cache
r = delete_user_cache()
def test_06_implicit_cache_population(self):
self._create_realm()
# testing `get_username`
self.assertEqual(UserCache.query.count(), 0)
# the cache is empty, so the username is read from the resolver
u_name = get_username(self.uid, self.resolvername1)
self.assertEqual(self.username, u_name)
# it should be part of the cache now
r = UserCache.query.filter(UserCache.user_id == self.uid, UserCache.resolver == self.resolvername1).one()
self.assertEqual(self.username, r.username)
# Apart from that, the cache should be empty.
self.assertEqual(UserCache.query.count(), 1)
r = delete_user_cache()
# testing `User()`, but this time we add an already-expired entry to the cache
self.assertEqual(UserCache.query.count(), 0)
UserCache(self.username, self.username,
self.resolvername1, 'fake_uid', datetime.now() - timedelta(weeks=50)).save()
# cache contains an expired entry, uid is read from the resolver (we can verify
# that the cache entry is indeed not queried as it contains 'fake_uid' instead of the correct uid)
user = User(self.username, self.realm1, self.resolvername1)
self.assertEqual(user.uid, self.uid)
# a new entry should have been added to the cache now
r = retrieve_latest_entry((UserCache.username == self.username) & (UserCache.resolver == self.resolvername1))
self.assertEqual(self.uid, r.user_id)
# But the expired entry is also still in the cache
self.assertEqual(UserCache.query.count(), 2)
r = delete_user_cache()
self._delete_realm()
def _populate_cache(self):
self.assertEqual(UserCache.query.count(), 0)
# initially populate the cache with three entries
timestamp = datetime.now()
UserCache("hans1", "hans1", self.resolvername1, "uid1", timestamp).save()
UserCache("hans2", "hans2", self.resolvername1, "uid2", timestamp - timedelta(weeks=50)).save()
UserCache("hans3", "hans3", "resolver2", "uid2", timestamp).save()
self.assertEqual(UserCache.query.count(), 3)
def test_07_invalidate_save_resolver(self):
self._create_realm()
self._populate_cache()
# call save_resolver on resolver1, which should invalidate all entries of "resolver1"
# (even the expired 'hans2' one)
save_resolver({"resolver": self.resolvername1,
"type": "passwdresolver",
"fileName": self.PWFILE,
"type.fileName": "string",
"desc.fileName": "Some change"
})
self.assertEqual(UserCache.query.count(), 1)
# Only hans3 in resolver2 should still be in the cache
# We can use get_username to ensure it is fetched from the cache
# because resolver2 does not actually exist
u_name = get_username("uid2", "resolver2")
self.assertEqual("hans3", u_name)
delete_user_cache()
self._delete_realm()
def test_08_invalidate_delete_resolver(self):
self._create_realm()
self._populate_cache()
# call delete_resolver on resolver1, which should invalidate all of its entries
self._delete_realm()
self.assertEqual(UserCache.query.count(), 1)
# Only hans3 in resolver2 should still be in the cache
u_name = get_username("uid2", "resolver2")
self.assertEqual("hans3", u_name)
delete_user_cache()
def _create_sql_realm(self):
rid = save_resolver(self.sql_parameters)
self.assertTrue(rid > 0, rid)
(added, failed) = set_realm(self.sql_realm, [self.sql_resolver])
self.assertEqual(len(failed), 0)
self.assertEqual(len(added), 1)
def _delete_sql_realm(self):
delete_realm(self.sql_realm)
delete_resolver(self.sql_resolver)
def test_09_invalidate_edit_user(self):
# Validate that editing users actually invalidates the cache. For that, we first need an editable resolver
self._create_sql_realm()
# The cache is initially empty
self.assertEqual(UserCache.query.count(), 0)
# The following adds an entry to the cache
user = User(login="wordpressuser", realm=self.sql_realm)
self.assertEqual(UserCache.query.count(), 1)
uinfo = user.info
self.assertEqual(uinfo.get("givenname", ""), "")
user.update_user_info({"givenname": "wordy"})
uinfo = user.info
self.assertEqual(uinfo.get("givenname"), "wordy")
# This should have removed the entry from the cache
self.assertEqual(UserCache.query.count(), 0)
# But now it gets added again
user2 = User(login="wordpressuser", realm=self.sql_realm)
self.assertEqual(UserCache.query.count(), 1)
# Change it back for the other tests
user.update_user_info({"givenname": ""})
uinfo = user.info
self.assertEqual(uinfo.get("givenname", ""), "")
self.assertEqual(UserCache.query.count(), 0)
self._delete_sql_realm()
def test_10_invalidate_delete_user(self):
# Validate that deleting users actually invalidates the cache. For that, we first need an editable resolver
self._create_sql_realm()
# The cache is initially empty
self.assertEqual(UserCache.query.count(), 0)
# The following adds an entry to the cache
user = User(login="wordpressuser", realm=self.sql_realm)
self.assertEqual(UserCache.query.count(), 1)
uinfo = user.info
user.delete()
# This should have removed the entry from the cache
self.assertEqual(UserCache.query.count(), 0)
# We add the user again for the other tests
create_user(self.sql_resolver, uinfo)
self.assertEqual(UserCache.query.count(), 0)
self._delete_sql_realm()
@contextmanager
def _patch_datetime_now(self, target, delta=timedelta(days=1)):
with patch(target) as mock_datetime:
mock_datetime.now.side_effect = lambda: datetime.now() + delta
mock_datetime.side_effect = lambda *args, **kw: datetime(*args, **kw)
yield mock_datetime
def test_11_cache_expiration(self):
# delete user_cache
r = delete_user_cache()
self.assertTrue(r >= 0)
# populate the cache with artificial, somewhat "old", but still relevant data
timestamp = datetime.now() - timedelta(seconds=300)
UserCache("hans1", "hans1", "resolver1", "uid1", timestamp).save()
UserCache("hans2", "hans2", "resolver1", "uid2", timestamp).save()
# check that the cache is indeed queried
self.assertEqual(get_username("uid1", "resolver1"), "hans1")
self.assertEqual(User("hans2", "realm1", "resolver1").uid, "uid2")
# check that the (non-existent) resolver is queried
# for entries not contained in the cache
self.assertEqual(get_username("uid3", "resolver1"), "")
# TODO: Interestingly, if we mock `datetime` here to increase the time by one
# day, this test works, but a subsequent test (test_ui_certificate) will fail
# with weird error messages. So we do not use the datetime mock for now.
#with self._patch_datetime_now('privacyidea.lib.usercache.datetime.datetime') as mock_datetime:
with patch('privacyidea.lib.usercache.get_cache_time') as mock_get_cache_time:
# Instead, we just decrease the cache time from 600 to 60 seconds,
# which causes the entries above to be considered expired
mock_get_cache_time.return_value = timedelta(seconds=60)
# check that the cached entries are not queried anymore
self.assertEqual(UserCache.query.count(), 2)
self.assertEqual(get_username("uid1", "resolver1"), "")
with self.assertRaises(UserError):
User("hans2", "realm1", "resolver1")
self.assertEqual(get_username("uid3", "resolver1"), "")
# We add another, "current" entry
UserCache("hans4", "hans4", "resolver1", "uid4", datetime.now()).save()
self.assertEqual(UserCache.query.count(), 3)
# we now remove old entries, only the newest remains
delete_user_cache(expired=True)
self.assertEqual(UserCache.query.count(), 1)
self.assertEqual(UserCache.query.one().user_id, "uid4")
# clean up
delete_user_cache()
def test_12_multiple_resolvers(self):
# one realm, two SQL resolvers
parameters_a = self.sql_parameters.copy()
# first resolver only contains users with phone numbers
parameters_a['Where'] = 'phone LIKE %'
parameters_a['resolver'] = 'reso_a'
rid_a = save_resolver(parameters_a)
self.assertTrue(rid_a > 0, rid_a)
# second resolver contains all users
parameters_b = self.sql_parameters.copy()
parameters_b['resolver'] = 'reso_b'
rid_b = save_resolver(parameters_b)
self.assertTrue(rid_b > 0, rid_b)
# First ask reso_a, then reso_b
(added, failed) = set_realm(self.sql_realm, ['reso_a', 'reso_b'], {
'reso_a': 1,
'reso_b': 2
})
self.assertEqual(len(failed), 0)
self.assertEqual(len(added), 2)
# Now, query the user and populate the cache
self.assertEqual(UserCache.query.count(), 0)
user1 = User('wordpressuser', self.sql_realm)
self.assertEqual(user1.uid, '6')
# Assert it was found in reso_b (as it does not have a phone number)!
self.assertEqual(user1.resolver, 'reso_b')
self.assertEqual(UserCache.query.filter(UserCache.username == 'wordpressuser',
UserCache.user_id == 6).one().resolver,
'reso_b')
# Add a phone number. We do not use the User API to do that to simulate that the change is performed
# out of privacyIDEA's control. Using `update_user_info` would invalidate the cache, which would be unrealistic.
info = user1.info
new_info = info.copy()
new_info['phone'] = '123456'
get_resolver_object('reso_a').update_user(user1.uid, new_info)
# Ensure that the user's association with reso_b is still cached.
self.assertEqual(UserCache.query.filter(UserCache.username == 'wordpressuser',
UserCache.user_id == 6).one().resolver,
'reso_b')
# Now, it should be located in reso_a!
user2 = User('wordpressuser', self.sql_realm)
self.assertEqual(user2.uid, '6')
self.assertEqual(user2.resolver, 'reso_a')
# ... but the cache still contains entries for both!
resolver_query = UserCache.query.filter(UserCache.username == 'wordpressuser',
UserCache.user_id == 6).order_by(UserCache.timestamp.desc())
cached_resolvers = [entry.resolver for entry in resolver_query.all()]
self.assertEqual(cached_resolvers, ['reso_a', 'reso_b'])
# Remove the phone number.
get_resolver_object('reso_a').update_user(user1.uid, {'phone': None})
delete_realm(self.sql_realm)
delete_resolver('reso_a')
delete_resolver('reso_b')
def test_13_cache_username(self):
self.counter = 0
def get_username(uid, resolver):
self.counter += 1
return "user1"
r = cache_username(get_username, "uid1", "reso1")
self.assertEqual(r, "user1")
self.assertEqual(self.counter, 1)
# The second call does not increase the counter, since the result is fetched from the cache
r = cache_username(get_username, "uid1", "reso1")
self.assertEqual(r, "user1")
self.assertEqual(self.counter, 1)
def test_99_unset_config(self):
# Test early exit!
# Assert that the function `retrieve_latest_entry` is called if the cache is enabled
with patch('privacyidea.lib.usercache.retrieve_latest_entry') as mock_retrieve:
mock_retrieve.return_value = None
get_username('some-userid', 'resolver1')
self.assertEqual(mock_retrieve.call_count, 1)
set_privacyidea_config(EXPIRATION_SECONDS, 0)
self.assertFalse(is_cache_enabled())
# Assert that the function `retrieve_latest_entry` is not called anymore
with patch('privacyidea.lib.usercache.retrieve_latest_entry') as mock_retrieve:
mock_retrieve.return_value = None
get_username('some-userid', 'resolver1')
self.assertEqual(mock_retrieve.call_count, 0)
class TestUserCacheMultipleLoginAttributes(MyTestCase):
ldap_realm = "ldaprealm"
ldap_resolver = "ldap1"
ldap_parameters = {'LDAPURI': 'ldap://localhost',
'LDAPBASE': 'o=test',
'BINDDN': 'cn=manager,ou=example,o=test',
'BINDPW': 'ldaptest',
'LOGINNAMEATTRIBUTE': 'cn, email',
'LDAPSEARCHFILTER': '(cn=*)',
'USERINFO': '{"phone" : "telephoneNumber", '
'"mobile" : "mobile"'
', "email" : "email", '
'"surname" : "sn", '
'"givenname" : "givenName" }',
'UIDTYPE': 'DN',
'CACHE_TIMEOUT': 0,
'resolver': ldap_resolver,
'type': 'ldapresolver',
}
def _create_ldap_realm(self):
rid = save_resolver(self.ldap_parameters)
self.assertTrue(rid > 0, rid)
(added, failed) = set_realm(self.ldap_realm, [self.ldap_resolver])
self.assertEqual(len(failed), 0)
self.assertEqual(len(added), 1)
def _delete_ldap_realm(self):
delete_realm(self.ldap_realm)
delete_resolver(self.ldap_resolver)
@classmethod
def setUpClass(cls):
MyTestCase.setUpClass()
set_privacyidea_config(EXPIRATION_SECONDS, 600)
@classmethod
def tearDownClass(cls):
set_privacyidea_config(EXPIRATION_SECONDS, 0)
MyTestCase.tearDownClass()
@ldap3mock.activate
def test_01_secondary_login_attribute(self):
ldap3mock.setLDAPDirectory(LDAPDirectory)
self._create_ldap_realm()
# Populate the user cache, check its contents
user1 = User('alice', self.ldap_realm)
self.assertEqual(user1.resolver, self.ldap_resolver)
self.assertEqual(user1.uid, "cn=alice,ou=example,o=test")
self.assertEqual(user1.login, "alice")
self.assertEqual(user1.used_login, "alice")
entry = UserCache.query.one()
self.assertEqual(entry.user_id, user1.uid)
self.assertEqual(entry.used_login, "alice")
self.assertEqual(entry.username, "alice")
self.assertEqual(entry.resolver, self.ldap_resolver)
# query again, user cache does not change
user2 = User('alice', self.ldap_realm)
self.assertEqual(user2.resolver, self.ldap_resolver)
self.assertEqual(user2.uid, "cn=alice,ou=example,o=test")
self.assertEqual(user2.login, "alice")
self.assertEqual(user2.used_login, "alice")
self.assertEqual(UserCache.query.count(), 1)
# use secondary login attribute, usercache has a new entry with secondary login attribute
user3 = User('[email protected]', self.ldap_realm)
self.assertEqual(user3.resolver, self.ldap_resolver)
self.assertEqual(user3.uid, "cn=alice,ou=example,o=test")
self.assertEqual(user3.login, "alice")
self.assertEqual(user3.used_login, "[email protected]")
entries = UserCache.query.filter_by(user_id="cn=alice,ou=example,o=test").order_by(UserCache.id).all()
self.assertEqual(len(entries), 2)
entry = entries[-1]
self.assertEqual(entry.user_id, user1.uid)
self.assertEqual(entry.used_login, "[email protected]")
self.assertEqual(entry.username, "alice")
self.assertEqual(entry.resolver, self.ldap_resolver)
# use secondary login attribute again, login name is fetched correctly
user4 = User('[email protected]', self.ldap_realm)
self.assertEqual(user4.resolver, self.ldap_resolver)
self.assertEqual(user4.uid, "cn=alice,ou=example,o=test")
self.assertEqual(user4.login, "alice")
self.assertEqual(user4.used_login, "[email protected]")
# still only two entries in the cache
entries = UserCache.query.filter_by(user_id="cn=alice,ou=example,o=test").order_by(UserCache.id).all()
self.assertEqual(len(entries), 2)
# get the primary login name
login_name = get_username("cn=alice,ou=example,o=test", self.ldap_resolver)
self.assertEqual(login_name, "alice")
# still only two entries in the cache
entries = UserCache.query.filter_by(user_id="cn=alice,ou=example,o=test").order_by(UserCache.id).all()
self.assertEqual(len(entries), 2)
self._delete_ldap_realm()
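# --- Editor's illustrative sketch (not part of the original file). Outside of
# the test harness, enabling the cache and resolving a user looks roughly like
# this (the realm and login names are assumptions from the fixtures above):
#
#     set_privacyidea_config(EXPIRATION_SECONDS, 600)   # turn the cache on
#     user = User("root", "realm1")                     # resolver hit, cache filled
#     get_username(user.uid, user.resolver)             # now answered from the cache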
| agpl-3.0 | -1,888,625,452,792,974,600 | 43.186777 | 120 | 0.615307 | false |
eino-makitalo/odoo | addons/hr_payroll_account/wizard/__init__.py | 433 | 1116 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll_payslips_by_employees
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,908,746,169,873,332,000 | 43.64 | 80 | 0.619176 | false |
Just-D/chromium-1 | tools/telemetry/third_party/gsutilz/third_party/protorpc/gen_protorpc.py | 44 | 9886 | #!/usr/bin/env python
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Command line tool for generating ProtoRPC definitions from descriptors."""
import errno
import logging
import optparse
import os
import sys
from protorpc import descriptor
from protorpc import generate_python
from protorpc import protobuf
from protorpc import registry
from protorpc import transport
from protorpc import util
EXCLUDED_PACKAGES = frozenset(['protorpc.registry',
'protorpc.messages',
'protorpc.descriptor',
'protorpc.message_types',
])
commands = {}
def usage():
"""Print usage help and exit with an error code."""
parser.print_help()
sys.exit(2)
def fatal_error(message):
"""Print fatal error messages exit with an error code.
Args:
message: Message to print to stderr before exit.
"""
sys.stderr.write(message)
sys.exit(1)
def open_input_file(filename):
"""Open file for reading.
Args:
filename: Name of input file to open or None to open stdin.
Returns:
Opened file if string provided, stdin if filename is None.
"""
# TODO(rafek): Detect missing or invalid files, generating user friendly
# error messages.
if filename is None:
return sys.stdin
else:
try:
return open(filename, 'rb')
except IOError, err:
fatal_error(str(err))
@util.positional(1)
def generate_file_descriptor(dest_dir, file_descriptor, force_overwrite):
"""Generate a single file descriptor to destination directory.
Will generate a single Python file from a file descriptor under dest_dir.
The sub-directory where the file is generated is determined by the package
name of descriptor.
Descriptors without package names will not be generated.
Descriptors that are part of the ProtoRPC distribution will not be generated.
Args:
dest_dir: Directory under which to generate files.
file_descriptor: FileDescriptor instance to generate source code from.
force_overwrite: If True, existing files will be overwritten.
"""
package = file_descriptor.package
if not package:
# TODO(rafek): Option to cause an error on this condition.
logging.warn('Will not generate descriptor without package name')
return
if package in EXCLUDED_PACKAGES:
logging.warn('Will not generate main ProtoRPC class %s' % package)
return
package_path = package.split('.')
directory = package_path[:-1]
package_file_name = package_path[-1]
directory_name = os.path.join(dest_dir, *directory)
output_file_name = os.path.join(directory_name,
'%s.py' % (package_file_name,))
try:
os.makedirs(directory_name)
except OSError, err:
if err.errno != errno.EEXIST:
raise
if not force_overwrite and os.path.exists(output_file_name):
logging.warn('Not overwriting %s with package %s',
output_file_name, package)
return
output_file = open(output_file_name, 'w')
logging.info('Writing package %s to %s',
file_descriptor.package, output_file_name)
generate_python.format_python_file(file_descriptor, output_file)
@util.positional(1)
def command(name, required=(), optional=()):
"""Decorator used for declaring commands used on command line.
Each command of this tool can have any number of sequential required
parameters and optional parameters. The required and optional parameters
will be displayed in the command usage. Arguments passed in to the command
are checked to ensure they have at least the required parameters and not
  too many parameters beyond the optional ones. When there are too few
  or too many parameters, the usage message is generated and the program exits
with an error code.
Functions decorated thus are added to commands by their name.
Resulting decorated functions will have required and optional attributes
  assigned to them so that they appear in the usage message.
Args:
name: Name of command that will follow the program name on the command line.
required: List of required parameter names as displayed in the usage
message.
optional: List of optional parameter names as displayed in the usage
message.
"""
def check_params_decorator(function):
def check_params_wrapper(options, *args):
if not (len(required) <= len(args) <= len(required) + len(optional)):
sys.stderr.write("Incorrect usage for command '%s'\n\n" % name)
usage()
function(options, *args)
check_params_wrapper.required = required
check_params_wrapper.optional = optional
commands[name] = check_params_wrapper
return check_params_wrapper
return check_params_decorator
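# --- Editor's illustrative sketch (not part of the original file). A
# hypothetical command with one required and one optional parameter would be
# declared the same way as the built-in commands below:
#
#     @command('echo', required=['message'], optional=['repeat-count'])
#     def echo_command(options, message, repeat_count='1'):
#       sys.stdout.write(message * int(repeat_count))
#
# The name and parameter lists feed directly into the usage string assembled
# by make_opt_parser().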
@command('file', optional=['input-filename', 'output-filename'])
def file_command(options, input_filename=None, output_filename=None):
"""Generate a single descriptor file to Python.
Args:
options: Parsed command line options.
input_filename: File to read protobuf FileDescriptor from. If None
will read from stdin.
output_filename: File to write Python source code to. If None will
generate to stdout.
"""
with open_input_file(input_filename) as input_file:
descriptor_content = input_file.read()
if output_filename:
output_file = open(output_filename, 'w')
else:
output_file = sys.stdout
file_descriptor = protobuf.decode_message(descriptor.FileDescriptor,
descriptor_content)
generate_python.format_python_file(file_descriptor, output_file)
@command('fileset', optional=['filename'])
def fileset_command(options, input_filename=None):
"""Generate source directory structure from FileSet.
Args:
options: Parsed command line options.
input_filename: File to read protobuf FileSet from. If None will read from
stdin.
"""
with open_input_file(input_filename) as input_file:
descriptor_content = input_file.read()
dest_dir = os.path.expanduser(options.dest_dir)
if not os.path.isdir(dest_dir) and os.path.exists(dest_dir):
fatal_error("Destination '%s' is not a directory" % dest_dir)
file_set = protobuf.decode_message(descriptor.FileSet,
descriptor_content)
for file_descriptor in file_set.files:
generate_file_descriptor(dest_dir, file_descriptor=file_descriptor,
force_overwrite=options.force)
@command('registry',
required=['host'],
optional=['service-name', 'registry-path'])
def registry_command(options,
host,
service_name=None,
registry_path='/protorpc'):
"""Generate source directory structure from remote registry service.
Args:
options: Parsed command line options.
host: Web service host where registry service is located. May include
port.
service_name: Name of specific service to read. Will generate only Python
files that service is dependent on. If None, will generate source code
for all services known by the registry.
registry_path: Path to find registry if not the default 'protorpc'.
"""
dest_dir = os.path.expanduser(options.dest_dir)
url = 'http://%s%s' % (host, registry_path)
reg = registry.RegistryService.Stub(transport.HttpTransport(url))
if service_name is None:
service_names = [service.name for service in reg.services().services]
else:
service_names = [service_name]
file_set = reg.get_file_set(names=service_names).file_set
for file_descriptor in file_set.files:
generate_file_descriptor(dest_dir, file_descriptor=file_descriptor,
force_overwrite=options.force)
def make_opt_parser():
"""Create options parser with automatically generated command help.
Will iterate over all functions in commands and generate an appropriate
usage message for them with all their required and optional parameters.
"""
command_descriptions = []
for name in sorted(commands.iterkeys()):
command = commands[name]
params = ' '.join(['<%s>' % param for param in command.required] +
['[<%s>]' % param for param in command.optional])
command_descriptions.append('%%prog [options] %s %s' % (name, params))
command_usage = 'usage: %s\n' % '\n '.join(command_descriptions)
parser = optparse.OptionParser(usage=command_usage)
parser.add_option('-d', '--dest_dir',
dest='dest_dir',
default=os.getcwd(),
help='Write generated files to DIR',
metavar='DIR')
parser.add_option('-f', '--force',
action='store_true',
dest='force',
default=False,
help='Force overwrite of existing files')
return parser
parser = make_opt_parser()
def main():
# TODO(rafek): Customize verbosity.
logging.basicConfig(level=logging.INFO)
options, positional = parser.parse_args()
if not positional:
usage()
command_name = positional[0]
command = commands.get(command_name)
if not command:
sys.stderr.write("Unknown command '%s'\n\n" % command_name)
usage()
parameters = positional[1:]
command(options, *parameters)
if __name__ == '__main__':
main()
| bsd-3-clause | -8,349,768,606,893,111,000 | 31.953333 | 80 | 0.680761 | false |
gardner/urllib3 | test/test_filepost.py | 27 | 4190 | import unittest
from urllib3.filepost import encode_multipart_formdata, iter_fields
from urllib3.fields import RequestField
from urllib3.packages.six import b, u
BOUNDARY = '!! test boundary !!'
class TestIterfields(unittest.TestCase):
def test_dict(self):
for fieldname, value in iter_fields(dict(a='b')):
self.assertEqual((fieldname, value), ('a', 'b'))
self.assertEqual(
list(sorted(iter_fields(dict(a='b', c='d')))),
[('a', 'b'), ('c', 'd')])
def test_tuple_list(self):
for fieldname, value in iter_fields([('a', 'b')]):
self.assertEqual((fieldname, value), ('a', 'b'))
self.assertEqual(
list(iter_fields([('a', 'b'), ('c', 'd')])),
[('a', 'b'), ('c', 'd')])
class TestMultipartEncoding(unittest.TestCase):
def test_input_datastructures(self):
fieldsets = [
dict(k='v', k2='v2'),
[('k', 'v'), ('k2', 'v2')],
]
for fields in fieldsets:
encoded, _ = encode_multipart_formdata(fields, boundary=BOUNDARY)
self.assertEqual(encoded.count(b(BOUNDARY)), 3)
def test_field_encoding(self):
fieldsets = [
[('k', 'v'), ('k2', 'v2')],
[('k', b'v'), (u('k2'), b'v2')],
[('k', b'v'), (u('k2'), 'v2')],
]
for fields in fieldsets:
encoded, content_type = encode_multipart_formdata(fields, boundary=BOUNDARY)
self.assertEqual(encoded,
b'--' + b(BOUNDARY) + b'\r\n'
b'Content-Disposition: form-data; name="k"\r\n'
b'\r\n'
b'v\r\n'
b'--' + b(BOUNDARY) + b'\r\n'
b'Content-Disposition: form-data; name="k2"\r\n'
b'\r\n'
b'v2\r\n'
b'--' + b(BOUNDARY) + b'--\r\n'
, fields)
self.assertEqual(content_type,
'multipart/form-data; boundary=' + str(BOUNDARY))
def test_filename(self):
fields = [('k', ('somename', b'v'))]
encoded, content_type = encode_multipart_formdata(fields, boundary=BOUNDARY)
self.assertEqual(encoded,
b'--' + b(BOUNDARY) + b'\r\n'
b'Content-Disposition: form-data; name="k"; filename="somename"\r\n'
b'Content-Type: application/octet-stream\r\n'
b'\r\n'
b'v\r\n'
b'--' + b(BOUNDARY) + b'--\r\n'
)
self.assertEqual(content_type,
'multipart/form-data; boundary=' + str(BOUNDARY))
def test_textplain(self):
fields = [('k', ('somefile.txt', b'v'))]
encoded, content_type = encode_multipart_formdata(fields, boundary=BOUNDARY)
self.assertEqual(encoded,
b'--' + b(BOUNDARY) + b'\r\n'
b'Content-Disposition: form-data; name="k"; filename="somefile.txt"\r\n'
b'Content-Type: text/plain\r\n'
b'\r\n'
b'v\r\n'
b'--' + b(BOUNDARY) + b'--\r\n'
)
self.assertEqual(content_type,
'multipart/form-data; boundary=' + str(BOUNDARY))
def test_explicit(self):
fields = [('k', ('somefile.txt', b'v', 'image/jpeg'))]
encoded, content_type = encode_multipart_formdata(fields, boundary=BOUNDARY)
self.assertEqual(encoded,
b'--' + b(BOUNDARY) + b'\r\n'
b'Content-Disposition: form-data; name="k"; filename="somefile.txt"\r\n'
b'Content-Type: image/jpeg\r\n'
b'\r\n'
b'v\r\n'
b'--' + b(BOUNDARY) + b'--\r\n'
)
self.assertEqual(content_type,
'multipart/form-data; boundary=' + str(BOUNDARY))
def test_request_fields(self):
fields = [RequestField('k', b'v', filename='somefile.txt', headers={'Content-Type': 'image/jpeg'})]
encoded, content_type = encode_multipart_formdata(fields, boundary=BOUNDARY)
self.assertEqual(encoded,
b'--' + b(BOUNDARY) + b'\r\n'
b'Content-Type: image/jpeg\r\n'
b'\r\n'
b'v\r\n'
b'--' + b(BOUNDARY) + b'--\r\n'
)
| mit | 4,462,061,469,082,139,000 | 30.503759 | 105 | 0.508592 | false |
GladeRom/android_external_chromium_org | third_party/jinja2/nodes.py | 623 | 28875 | # -*- coding: utf-8 -*-
"""
jinja2.nodes
~~~~~~~~~~~~
This module implements additional nodes derived from the ast base node.
It also provides some node tree helper functions like `in_lineno` and
`get_nodes` used by the parser and translator in order to normalize
python and jinja nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import operator
from collections import deque
from jinja2.utils import Markup
from jinja2._compat import next, izip, with_metaclass, text_type, \
method_type, function_type
#: the types we support for context functions
_context_function_types = (function_type, method_type)
_binop_to_func = {
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod,
'+': operator.add,
'-': operator.sub
}
_uaop_to_func = {
'not': operator.not_,
'+': operator.pos,
'-': operator.neg
}
_cmpop_to_func = {
'eq': operator.eq,
'ne': operator.ne,
'gt': operator.gt,
'gteq': operator.ge,
'lt': operator.lt,
'lteq': operator.le,
'in': lambda a, b: a in b,
'notin': lambda a, b: a not in b
}
class Impossible(Exception):
"""Raised if the node could not perform a requested action."""
class NodeType(type):
"""A metaclass for nodes that handles the field and attribute
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
def __new__(cls, name, bases, d):
for attr in 'fields', 'attributes':
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
assert len(bases) == 1, 'multiple inheritance not allowed'
assert len(storage) == len(set(storage)), 'layout conflict'
d[attr] = tuple(storage)
d.setdefault('abstract', False)
return type.__new__(cls, name, bases, d)
class EvalContext(object):
"""Holds evaluation time information. Custom attributes can be attached
to it in extensions.
"""
def __init__(self, environment, template_name=None):
self.environment = environment
if callable(environment.autoescape):
self.autoescape = environment.autoescape(template_name)
else:
self.autoescape = environment.autoescape
self.volatile = False
def save(self):
return self.__dict__.copy()
def revert(self, old):
self.__dict__.clear()
self.__dict__.update(old)
def get_eval_context(node, ctx):
if ctx is None:
if node.environment is None:
raise RuntimeError('if no eval context is passed, the '
'node must have an attached '
'environment.')
return EvalContext(node.environment)
return ctx
class Node(with_metaclass(NodeType, object)):
"""Baseclass for all Jinja2 nodes. There are a number of nodes available
of different types. There are four major types:
- :class:`Stmt`: statements
- :class:`Expr`: expressions
- :class:`Helper`: helper nodes
- :class:`Template`: the outermost wrapper node
All nodes have fields and attributes. Fields may be other nodes, lists,
or arbitrary values. Fields are passed to the constructor as regular
positional arguments, attributes as keyword arguments. Each node has
two attributes: `lineno` (the line number of the node) and `environment`.
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
"""
fields = ()
attributes = ('lineno', 'environment')
abstract = True
def __init__(self, *fields, **attributes):
if self.abstract:
            raise TypeError('abstract nodes are not instantiable')
if fields:
if len(fields) != len(self.fields):
if not self.fields:
raise TypeError('%r takes 0 arguments' %
self.__class__.__name__)
raise TypeError('%r takes 0 or %d argument%s' % (
self.__class__.__name__,
len(self.fields),
len(self.fields) != 1 and 's' or ''
))
for name, arg in izip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
raise TypeError('unknown attribute %r' %
next(iter(attributes)))
def iter_fields(self, exclude=None, only=None):
"""This method iterates over all fields that are defined and yields
``(key, value)`` tuples. Per default all fields are returned, but
it's possible to limit that to some fields by providing the `only`
parameter or to exclude some using the `exclude` parameter. Both
should be sets or tuples of field names.
"""
for name in self.fields:
if (exclude is only is None) or \
(exclude is not None and name not in exclude) or \
(only is not None and name in only):
try:
yield name, getattr(self, name)
except AttributeError:
pass
def iter_child_nodes(self, exclude=None, only=None):
"""Iterates over all direct child nodes of the node. This iterates
        over all fields and yields the values if they are nodes. If the value
of a field is a list all the nodes in that list are returned.
"""
for field, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
yield n
elif isinstance(item, Node):
yield item
def find(self, node_type):
"""Find the first node of a given type. If no such node exists the
return value is `None`.
"""
for result in self.find_all(node_type):
return result
def find_all(self, node_type):
"""Find all the nodes of a given type. If the type is a tuple,
the check is performed for any of the tuple items.
"""
for child in self.iter_child_nodes():
if isinstance(child, node_type):
yield child
for result in child.find_all(node_type):
yield result
def set_ctx(self, ctx):
"""Reset the context of a node and all child nodes. Per default the
        parser will generate nodes that have a 'load' context, as it's the
most common one. This method is used in the parser to set assignment
targets and other nodes to a store context.
"""
todo = deque([self])
while todo:
node = todo.popleft()
if 'ctx' in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
return self
def set_lineno(self, lineno, override=False):
"""Set the line numbers of the node and children."""
todo = deque([self])
while todo:
node = todo.popleft()
if 'lineno' in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
return self
def set_environment(self, environment):
"""Set the environment for all nodes."""
todo = deque([self])
while todo:
node = todo.popleft()
node.environment = environment
todo.extend(node.iter_child_nodes())
return self
def __eq__(self, other):
return type(self) is type(other) and \
tuple(self.iter_fields()) == tuple(other.iter_fields())
def __ne__(self, other):
return not self.__eq__(other)
# Restore Python 2 hashing behavior on Python 3
__hash__ = object.__hash__
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
arg in self.fields)
)
class Stmt(Node):
"""Base node for all statements."""
abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
abstract = True
class Template(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
fields = ('body',)
class Output(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
fields = ('nodes',)
class Extends(Stmt):
"""Represents an extends statement."""
fields = ('template',)
class For(Stmt):
"""The for loop. `target` is the target for the iteration (usually a
:class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
of nodes that are used as loop-body, and `else_` a list of nodes for the
`else` block. If no else node exists it has to be an empty list.
For filtered nodes an expression can be stored as `test`, otherwise `None`.
"""
fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
fields = ('test', 'body', 'else_')
class Macro(Stmt):
"""A macro definition. `name` is the name of the macro, `args` a list of
arguments and `defaults` a list of defaults if there are any. `body` is
a list of nodes for the macro body.
"""
fields = ('name', 'args', 'defaults', 'body')
class CallBlock(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro as `caller` argument this node holds.
"""
fields = ('call', 'args', 'defaults', 'body')
class FilterBlock(Stmt):
"""Node for filter sections."""
fields = ('body', 'filter')
class Block(Stmt):
"""A node that represents a block."""
fields = ('name', 'body', 'scoped')
class Include(Stmt):
"""A node that represents the include tag."""
fields = ('template', 'with_context', 'ignore_missing')
class Import(Stmt):
"""A node that represents the import tag."""
fields = ('template', 'target', 'with_context')
class FromImport(Stmt):
"""A node that represents the from import tag. It's important to not
pass unsafe names to the name attribute. The compiler translates the
attribute lookups directly into getattr calls and does *not* use the
subscript callback of the interface. As exported variables may not
start with double underscores (which the parser asserts) this is not a
problem for regular Jinja code, but if this node is used in an extension
extra care must be taken.
The list of names may contain tuples if aliases are wanted.
"""
fields = ('template', 'names', 'with_context')
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
fields = ('node',)
class Assign(Stmt):
"""Assigns an expression to a target."""
fields = ('target', 'node')
class Expr(Node):
"""Baseclass for all expressions."""
abstract = True
def as_const(self, eval_ctx=None):
"""Return the value of the expression as constant or raise
:exc:`Impossible` if this was not possible.
An :class:`EvalContext` can be provided, if none is given
a default context is created which requires the nodes to have
an attached environment.
.. versionchanged:: 2.4
the `eval_ctx` parameter was added.
"""
raise Impossible()
def can_assign(self):
"""Check if it's possible to assign something to this node."""
return False
class BinExpr(Expr):
"""Baseclass for all binary expressions."""
fields = ('left', 'right')
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_binops:
raise Impossible()
f = _binop_to_func[self.operator]
try:
return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
except Exception:
raise Impossible()
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
fields = ('node',)
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_unops:
raise Impossible()
f = _uaop_to_func[self.operator]
try:
return f(self.node.as_const(eval_ctx))
except Exception:
raise Impossible()
class Name(Expr):
"""Looks up a name or stores a value in a name.
The `ctx` of the node can be one of the following values:
- `store`: store a value in the name
- `load`: load that name
- `param`: like `store` but if the name was defined as function parameter.
"""
fields = ('name', 'ctx')
def can_assign(self):
return self.name not in ('true', 'false', 'none',
'True', 'False', 'None')
class Literal(Expr):
"""Baseclass for literals."""
abstract = True
class Const(Literal):
"""All constant values. The parser will return this node for simple
constants such as ``42`` or ``"foo"`` but it can be used to store more
complex values such as lists too. Only constants with a safe
    representation (objects where ``eval(repr(x)) == x`` is true) are supported.
"""
fields = ('value',)
def as_const(self, eval_ctx=None):
return self.value
@classmethod
def from_untrusted(cls, value, lineno=None, environment=None):
"""Return a const object if the value is representable as
constant value in the generated code, otherwise it will raise
an `Impossible` exception.
"""
from .compiler import has_safe_repr
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment)
class TemplateData(Literal):
"""A constant template string."""
fields = ('data',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
if eval_ctx.autoescape:
return Markup(self.data)
return self.data
class Tuple(Literal):
"""For loop unpacking and some other things like multiple arguments
for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
is used for loading the names or storing.
"""
fields = ('items', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return tuple(x.as_const(eval_ctx) for x in self.items)
def can_assign(self):
for item in self.items:
if not item.can_assign():
return False
return True
class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return [x.as_const(eval_ctx) for x in self.items]
class Dict(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return dict(x.as_const(eval_ctx) for x in self.items)
class Pair(Helper):
"""A key, value pair for dicts."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
fields = ('test', 'expr1', 'expr2')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.test.as_const(eval_ctx):
return self.expr1.as_const(eval_ctx)
# if we evaluate to an undefined object, we better do that at runtime
if self.expr2 is None:
raise Impossible()
return self.expr2.as_const(eval_ctx)
class Filter(Expr):
"""This node applies a filter on an expression. `name` is the name of
the filter, the rest of the fields are the same as for :class:`Call`.
If the `node` of a filter is `None` the contents of the last buffer are
filtered. Buffers are created by macros and filter blocks.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile or self.node is None:
raise Impossible()
# we have to be careful here because we call filter_ below.
# if this variable would be called filter, 2to3 would wrap the
        # call in a list because it is assuming we are talking about the
# builtin filter function here which no longer returns a list in
# python 3. because of that, do not rename filter_ to filter!
filter_ = self.environment.filters.get(self.name)
if filter_ is None or getattr(filter_, 'contextfilter', False):
raise Impossible()
obj = self.node.as_const(eval_ctx)
args = [x.as_const(eval_ctx) for x in self.args]
if getattr(filter_, 'evalcontextfilter', False):
args.insert(0, eval_ctx)
elif getattr(filter_, 'environmentfilter', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return filter_(obj, *args, **kwargs)
except Exception:
raise Impossible()
class Test(Expr):
"""Applies a test on an expression. `name` is the name of the test, the
rest of the fields are the same as for :class:`Call`.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Call(Expr):
"""Calls an expression. `args` is a list of arguments, `kwargs` a list
of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
and `dyn_kwargs` has to be either `None` or a node that is used as
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
obj = self.node.as_const(eval_ctx)
# don't evaluate context functions
args = [x.as_const(eval_ctx) for x in self.args]
if isinstance(obj, _context_function_types):
if getattr(obj, 'contextfunction', False):
raise Impossible()
elif getattr(obj, 'evalcontextfunction', False):
args.insert(0, eval_ctx)
elif getattr(obj, 'environmentfunction', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return obj(*args, **kwargs)
except Exception:
raise Impossible()
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
fields = ('node', 'arg', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.ctx != 'load':
raise Impossible()
try:
return self.environment.getitem(self.node.as_const(eval_ctx),
self.arg.as_const(eval_ctx))
except Exception:
raise Impossible()
def can_assign(self):
return False
class Getattr(Expr):
"""Get an attribute or item from an expression that is a ascii-only
bytestring and prefer the attribute.
"""
fields = ('node', 'attr', 'ctx')
def as_const(self, eval_ctx=None):
if self.ctx != 'load':
raise Impossible()
try:
eval_ctx = get_eval_context(self, eval_ctx)
return self.environment.getattr(self.node.as_const(eval_ctx),
self.attr)
except Exception:
raise Impossible()
def can_assign(self):
return False
class Slice(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
fields = ('start', 'stop', 'step')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
def const(obj):
if obj is None:
return None
return obj.as_const(eval_ctx)
return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
"""Concatenates the list of expressions provided after converting them to
unicode.
"""
fields = ('nodes',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\s.
"""
fields = ('expr', 'ops')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
value = new_value
except Exception:
raise Impossible()
return result
class Operand(Helper):
"""Holds an operator and an expression."""
fields = ('op', 'expr')
if __debug__:
Operand.__doc__ += '\nThe following operators are available: ' + \
', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
set(_uaop_to_func) | set(_cmpop_to_func)))
class Mul(BinExpr):
"""Multiplies the left with the right node."""
operator = '*'
class Div(BinExpr):
"""Divides the left by the right node."""
operator = '/'
class FloorDiv(BinExpr):
"""Divides the left by the right node and truncates conver the
result into an integer by truncating.
"""
operator = '//'
class Add(BinExpr):
"""Add the left to the right node."""
operator = '+'
class Sub(BinExpr):
"""Substract the right from the left node."""
operator = '-'
class Mod(BinExpr):
"""Left modulo right."""
operator = '%'
class Pow(BinExpr):
"""Left to the power of right."""
operator = '**'
class And(BinExpr):
"""Short circuited AND."""
operator = 'and'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
class Or(BinExpr):
"""Short circuited OR."""
operator = 'or'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
class Not(UnaryExpr):
"""Negate the expression."""
operator = 'not'
class Neg(UnaryExpr):
"""Make the expression negative."""
operator = '-'
class Pos(UnaryExpr):
"""Make the expression positive (noop for most expressions)"""
operator = '+'
# Helpers for extensions
class EnvironmentAttribute(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
fields = ('name',)
class ExtensionAttribute(Expr):
"""Returns the attribute of an extension bound to the environment.
The identifier is the identifier of the :class:`Extension`.
This node is usually constructed by calling the
:meth:`~jinja2.ext.Extension.attr` method on an extension.
"""
fields = ('identifier', 'name')
class ImportedName(Expr):
"""If created with an import name the import name is returned on node
access. For example ``ImportedName('cgi.escape')`` returns the `escape`
function from the cgi module on evaluation. Imports are optimized by the
compiler so there is no need to assign them to local variables.
"""
fields = ('importname',)
class InternalName(Expr):
"""An internal name in the compiler. You cannot create these nodes
yourself but the parser provides a
:meth:`~jinja2.parser.Parser.free_identifier` method that creates
a new identifier for you. This identifier is not available from the
    template and is not treated specially by the compiler.
"""
fields = ('name',)
def __init__(self):
raise TypeError('Can\'t create internal names. Use the '
'`free_identifier` method on a parser.')
class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return Markup(self.expr.as_const(eval_ctx))
class MarkSafeIfAutoescape(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`) but
only if autoescaping is active.
.. versionadded:: 2.5
"""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
expr = self.expr.as_const(eval_ctx)
if eval_ctx.autoescape:
return Markup(expr)
return expr
class ContextReference(Expr):
"""Returns the current template context. It can be used like a
:class:`Name` node, with a ``'load'`` ctx and will return the
current :class:`~jinja2.runtime.Context` object.
Here an example that assigns the current template name to a
variable named `foo`::
Assign(Name('foo', ctx='store'),
Getattr(ContextReference(), 'name'))
"""
class Continue(Stmt):
"""Continue a loop."""
class Break(Stmt):
"""Break a loop."""
class Scope(Stmt):
"""An artificial scope."""
fields = ('body',)
class EvalContextModifier(Stmt):
"""Modifies the eval context. For each option that should be modified,
a :class:`Keyword` has to be added to the :attr:`options` list.
Example to change the `autoescape` setting::
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
fields = ('options',)
class ScopedEvalContextModifier(EvalContextModifier):
"""Modifies the eval context and reverts it later. Works exactly like
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
fields = ('body',)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
raise TypeError('can\'t create custom node types')
NodeType.__new__ = staticmethod(_failing_new); del _failing_new
| bsd-3-clause | 1,310,450,007,049,595,000 | 30.591904 | 81 | 0.600381 | false |
stefan-caraiman/python-lab | python/solutii/micu_matei/sync/sync.py | 7 | 6480 | #!/usr/bin/env python
"""
Synchronize two directories.
"""
from __future__ import print_function
import os
import argparse
import shutil
from functii_auxiliare import get_hash
from functii_auxiliare import get_last_edit
from functii_auxiliare import write_sync_file
from functii_auxiliare import read_sync_file
from functii_auxiliare import make_dirs
from functii_auxiliare import copy_r
from functii_auxiliare import get_same_file
# standard keys in dict
FULL_PATH = "fullPath"
MD5 = "md5"
BASE_DIR = "baseDir"
LAST_EDIT = "lastEdit"
IS_FILE = "isFile"
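# Each entry produced by parse_directory() maps a relative path to a dict
# using the keys above, e.g. (illustrative values only):
#   {'sub/file.txt': {'fullPath': '/base/sub/file.txt', 'md5': '...',
#                     'baseDir': '/base', 'lastEdit': 1490000000.0,
#                     'isFile': True}}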
def parse_directory(base_dir, path, prefix):
""" returneaza un dict in care cheile sunt pathuri relative la
<path>-ul primit ca parametru, fiecare valoare este alt dict
cu mai multi parametri, cum ar fi :
- full_path
- md5 ( daca este fisier )
- base_dir( directorul in care sincronizam )
- last_modified_date ( ultima data cand a fost modifica
daca e fisier )
- is_file ( True / False )
"""
if os.path.exists(path) and os.path.isdir(path):
info = {}
for item in os.listdir(path):
full_path_info = os.path.join(path, item)
relative_path_info = os.path.join(prefix, item)
if os.path.isfile(full_path_info):
info[relative_path_info] = {
FULL_PATH: full_path_info,
MD5: get_hash(full_path_info),
BASE_DIR: base_dir,
LAST_EDIT: get_last_edit(full_path_info),
IS_FILE: True
}
elif os.path.isdir(full_path_info):
info[relative_path_info] = {
FULL_PATH: full_path_info,
MD5: get_hash(full_path_info),
BASE_DIR: base_dir,
LAST_EDIT: get_last_edit(full_path_info),
IS_FILE: False
}
info_sub = parse_directory(base_dir,
full_path_info, relative_path_info)
info.update(info_sub)
return info
else:
return {}
def sync_new_files(info_a, path_a, info_b, path_b):
""" Sincronizeaza fisierele noi din fisierul A in fisierul B,
folosind informatiile fin info_a si info_b ."""
for item in info_a:
detalii_item = info_a[item]
full_path_item_in_a = os.path.join(path_a, item)
full_path_item_in_b = os.path.join(path_b, item)
if item not in info_b:
if detalii_item[IS_FILE]:
copy_r(full_path_item_in_a, full_path_item_in_b)
else:
make_dirs(full_path_item_in_b)
def sync_deleted_files(info_a, info_b, path_b):
""" Sincronizam fisierele deletate din fisierul A in fisierul B,
folosind informatiile din info_a, info_b si fisierele de
sincronizare daca acestea exista
Eliminam fisierele din B care au fost eliminate in A, daca acestea
existau deja in B inainte """
sync_b = read_sync_file(path_b)
if not sync_b:
return
for item in info_b:
if (item not in info_a) and (item in sync_b):
detalii_item = info_b[item]
if detalii_item[IS_FILE]:
os.remove(detalii_item[FULL_PATH])
else:
shutil.rmtree(detalii_item[FULL_PATH])
def sync_moved_files(info_a, info_b, path_b):
""" Verifica daca un fisier a fost mutat """
for item in info_a:
if info_a[item][IS_FILE]:
if item not in info_b:
old_file = get_same_file(info_a[item], info_b)
if old_file:
old_file = os.path.join(path_b, old_file)
new_path = os.path.join(path_b, item)
shutil.move(old_file, new_path)
def sync_modified_files(info_a, info_b):
""" syncronizam fisierele modificate din A in B"""
for item in info_a:
if item in info_b:
file_a = info_a[item]
file_b = info_b[item]
if file_a[MD5] != file_b[MD5]:
if file_a[LAST_EDIT] > file_b[LAST_EDIT]:
os.remove(file_b[FULL_PATH])
shutil.copy(file_a[FULL_PATH], file_b[FULL_PATH])
else:
os.remove(file_a[FULL_PATH])
shutil.copy(file_b[FULL_PATH], file_a[FULL_PATH])
def parse():
""" parseaza argumentele primite ca parametri """
args = argparse.ArgumentParser()
    args.add_argument('firstDir', type=str, help="The first directory")
    args.add_argument('secondDir', type=str, help="The second directory")
args = args.parse_args()
return args
def sync(path_a, path_b):
""" syncronizeaza fisierele din cele doua directoare """
# modified files
info_a = parse_directory(path_a, path_a, "")
info_b = parse_directory(path_b, path_b, "")
sync_modified_files(info_a, info_b)
# sync_modified_files(info_b, info_a)
# moved files
info_a = parse_directory(path_a, path_a, "")
info_b = parse_directory(path_b, path_b, "")
sync_moved_files(info_a, info_b, path_b)
sync_moved_files(info_b, info_a, path_a)
# delete files
info_a = parse_directory(path_a, path_a, "")
info_b = parse_directory(path_b, path_b, "")
sync_deleted_files(info_a, info_b, path_b)
sync_deleted_files(info_b, info_a, path_a)
# new files
info_a = parse_directory(path_a, path_a, "")
info_b = parse_directory(path_b, path_b, "")
sync_new_files(info_a, path_a, info_b, path_b)
sync_new_files(info_b, path_b, info_a, path_a)
    # rewrite the sync state files with the latest values
info_a = parse_directory(path_a, path_a, "")
info_b = parse_directory(path_b, path_b, "")
write_sync_file(path_a, info_a)
write_sync_file(path_b, info_b)
def check_path(path):
""" verifica daca path exista si este valida"""
if not os.path.exists(path):
print("EROARE: ", path, " trebuie sa existe")
return False
elif not os.path.isdir(path):
print("EROARE: ", path, " trebuie sa existe")
return False
return True
def main():
""" syncronizeaza pentru todeauna """
args = parse()
path_a = os.path.abspath(args.firstDir)
path_b = os.path.abspath(args.secondDir)
check_path(path_a)
check_path(path_b)
while True:
sync(path_a, path_b)
if __name__ == "__main__":
main()
| mit | -5,219,827,586,258,051,000 | 31.562814 | 78 | 0.577932 | false |
dpac-vlsi/SynchroTrace | tests/quick/se/20.eio-short/test.py | 19 | 1766 | # Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
root.system.cpu.workload = EioProcess(file = binpath('anagram',
'anagram-vshort.eio.gz'))
root.system.cpu.max_insts_any_thread = 500000
| bsd-3-clause | 4,928,746,334,550,370,000 | 55.967742 | 78 | 0.768403 | false |
SymbiFlow/edalize | edalize/xsim.py | 1 | 5680 | # Copyright edalize contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
import os
import logging
from collections import OrderedDict
from edalize.edatool import Edatool
logger = logging.getLogger(__name__)
class Xsim(Edatool):
argtypes = ['plusarg', 'vlogdefine', 'vlogparam', 'generic']
MAKEFILE_TEMPLATE="""#Auto generated by Edalize
include config.mk
all: xsim.dir/$(TARGET)/xsimk
xsim.dir/$(TARGET)/xsimk:
xelab $(TOPLEVEL) -prj $(TARGET).prj -snapshot $(TARGET) $(VLOG_DEFINES) $(VLOG_INCLUDES) $(GEN_PARAMS) $(XELAB_OPTIONS)
run: xsim.dir/$(TARGET)/xsimk
xsim -R $(XSIM_OPTIONS) $(TARGET) $(EXTRA_OPTIONS)
run-gui: xsim.dir/$(TARGET)/xsimk
xsim --gui $(XSIM_OPTIONS) $(TARGET) $(EXTRA_OPTIONS)
"""
CONFIG_MK_TEMPLATE = """#Auto generated by Edalize
TARGET = {target}
TOPLEVEL = {toplevel}
VLOG_DEFINES = {vlog_defines}
VLOG_INCLUDES = {vlog_includes}
GEN_PARAMS = {gen_params}
XELAB_OPTIONS = {xelab_options}
XSIM_OPTIONS = {xsim_options}
"""
@classmethod
def get_doc(cls, api_ver):
if api_ver == 0:
return {'description' : "XSim simulator from the Xilinx Vivado suite",
'members' : [
{'name' : 'compilation_mode',
'type' : 'String',
'desc' : 'Common or separate compilation, sep - for separate compilation, common - for common compilation'}],
'lists' : [
{'name' : 'xelab_options',
'type' : 'String',
'desc' : 'Additional options for compilation with xelab'},
{'name' : 'xsim_options',
'type' : 'String',
'desc' : 'Additional run options for XSim'},
]}
def configure_main(self):
self._write_config_files()
#Check if any VPI modules are present and display warning
if len(self.vpi_modules) > 0:
modules = [m['name'] for m in self.vpi_modules]
logger.error('VPI modules not supported by Xsim: %s' % ', '.join(modules))
def _write_config_files(self):
mfc = self.tool_options.get('compilation_mode') == 'common'
with open(os.path.join(self.work_root, self.name+'.prj'),'w') as f:
mfcu = []
(src_files, self.incdirs) = self._get_fileset_files()
for src_file in src_files:
cmd = ""
if src_file.file_type.startswith("verilogSource"):
cmd = 'verilog'
elif src_file.file_type == 'vhdlSource-2008':
cmd = 'vhdl2008'
elif src_file.file_type.startswith("vhdlSource"):
cmd = 'vhdl'
elif src_file.file_type.startswith("systemVerilogSource"):
if mfc:
mfcu.append(src_file.name)
else:
cmd = 'sv'
elif src_file.file_type in ["user"]:
pass
else:
_s = "{} has unknown file type '{}'"
logger.warning(_s.format(src_file.name, src_file.file_type))
if cmd:
if src_file.logical_name:
lib = src_file.logical_name
else:
lib = 'work'
f.write('{} {} {}\n'.format(cmd, lib, src_file.name))
if mfc:
f.write('sv work ' + ' '.join(mfcu))
with open(os.path.join(self.work_root, 'config.mk'), 'w') as f:
vlog_defines = ' '.join(['--define {}={}'.format(k,self._param_value_str(v)) for k,v, in self.vlogdefine.items()])
vlog_includes = ' '.join(['-i '+k for k in self.incdirs])
# Both parameters and generics use the same --generic_top argument
# so warn if there are overlapping values
common_vals = set(self.vlogparam).intersection(set(self.generic))
if common_vals != set():
_s = "Common values for vlogparam and generic: {}"
logger.warning(_s.format(common_vals))
gen_param = OrderedDict(self.vlogparam)
gen_param.update(self.generic)
gen_param_args = " ".join(
[
"--generic_top {}={}".format(k, self._param_value_str(v))
for k, v in gen_param.items()
]
)
xelab_options = ' '.join(self.tool_options.get('xelab_options', []))
xsim_options = ' '.join(self.tool_options.get('xsim_options' , []))
f.write(self.CONFIG_MK_TEMPLATE.format(target=self.name,
toplevel=self.toplevel,
vlog_defines = vlog_defines,
vlog_includes = vlog_includes,
gen_params = gen_param_args,
xelab_options = xelab_options,
xsim_options = xsim_options))
with open(os.path.join(self.work_root, 'Makefile'), 'w') as f:
f.write(self.MAKEFILE_TEMPLATE)
def run_main(self):
args = ['run']
# Plusargs
if self.plusarg:
_s = '--testplusarg {}={}'
args.append('EXTRA_OPTIONS='+' '.join([_s.format(k, v) for k,v in self.plusarg.items()]))
self._run_tool('make', args)
| bsd-2-clause | -5,065,238,013,934,852,000 | 39.283688 | 134 | 0.501232 | false |
shrids/kubernetes | build/json-extractor.py | 413 | 2111 | #!/usr/bin/env python
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a very simple utility that reads a JSON document from stdin, parses it
# and returns the specified value. The value is described using a simple dot
# notation. If any errors are encountered along the way, an error is output and
# a failure value is returned.
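# Example (hypothetical input, shown for illustration):
#   echo '{"a": {"b": [1, 2]}}' | json-extractor.py a.b.1
# prints 2 and exits with status 0.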
from __future__ import print_function
import json
import sys
def PrintError(*err):
print(*err, file=sys.stderr)
def main():
try:
obj = json.load(sys.stdin)
except Exception, e:
PrintError("Error loading JSON: {0}".format(str(e)))
if len(sys.argv) == 1:
# if we don't have a query string, return success
return 0
elif len(sys.argv) > 2:
PrintError("Usage: {0} <json query>".format(sys.args[0]))
return 1
query_list = sys.argv[1].split('.')
for q in query_list:
if isinstance(obj, dict):
if q not in obj:
PrintError("Couldn't find '{0}' in dict".format(q))
return 1
obj = obj[q]
elif isinstance(obj, list):
try:
index = int(q)
except:
PrintError("Can't use '{0}' to index into array".format(q))
return 1
if index >= len(obj):
PrintError("Index ({0}) is greater than length of list ({1})".format(q, len(obj)))
return 1
obj = obj[index]
else:
PrintError("Trying to query non-queryable object: {0}".format(q))
return 1
if isinstance(obj, str):
print(obj)
else:
print(json.dumps(obj, indent=2))
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | 1,339,634,785,440,145,400 | 29.157143 | 90 | 0.662719 | false |
piagarwal11/GDriveLinuxClient | src/watchdog-0.8.2/src/watchdog/tricks/__init__.py | 11 | 5177 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import signal
import subprocess
import time
from watchdog.utils import echo, has_attribute
from watchdog.events import PatternMatchingEventHandler
class Trick(PatternMatchingEventHandler):
"""Your tricks should subclass this class."""
@classmethod
def generate_yaml(cls):
context = dict(module_name=cls.__module__,
klass_name=cls.__name__)
template_yaml = """- %(module_name)s.%(klass_name)s:
args:
- argument1
- argument2
kwargs:
patterns:
- "*.py"
- "*.js"
ignore_patterns:
- "version.py"
ignore_directories: false
"""
return template_yaml % context
class LoggerTrick(Trick):
"""A simple trick that does only logs events."""
def on_any_event(self, event):
pass
@echo.echo
def on_modified(self, event):
pass
@echo.echo
def on_deleted(self, event):
pass
@echo.echo
def on_created(self, event):
pass
@echo.echo
def on_moved(self, event):
pass
class ShellCommandTrick(Trick):
"""Executes shell commands in response to matched events."""
def __init__(self, shell_command=None, patterns=None, ignore_patterns=None,
ignore_directories=False, wait_for_process=False,
drop_during_process=False):
super(ShellCommandTrick, self).__init__(patterns, ignore_patterns,
ignore_directories)
self.shell_command = shell_command
self.wait_for_process = wait_for_process
self.drop_during_process = drop_during_process
self.process = None
def on_any_event(self, event):
from string import Template
if self.drop_during_process and self.process and self.process.poll() is None:
return
if event.is_directory:
object_type = 'directory'
else:
object_type = 'file'
context = {
'watch_src_path': event.src_path,
'watch_dest_path': '',
'watch_event_type': event.event_type,
'watch_object': object_type,
}
if self.shell_command is None:
if has_attribute(event, 'dest_path'):
context.update({'dest_path': event.dest_path})
command = 'echo "${watch_event_type} ${watch_object} from ${watch_src_path} to ${watch_dest_path}"'
else:
command = 'echo "${watch_event_type} ${watch_object} ${watch_src_path}"'
else:
if has_attribute(event, 'dest_path'):
context.update({'watch_dest_path': event.dest_path})
command = self.shell_command
command = Template(command).safe_substitute(**context)
self.process = subprocess.Popen(command, shell=True)
if self.wait_for_process:
self.process.wait()
class AutoRestartTrick(Trick):
"""Starts a long-running subprocess and restarts it on matched events.
The command parameter is a list of command arguments, such as
['bin/myserver', '-c', 'etc/myconfig.ini'].
Call start() after creating the Trick. Call stop() when stopping
the process.
"""
def __init__(self, command, patterns=None, ignore_patterns=None,
ignore_directories=False, stop_signal=signal.SIGINT,
kill_after=10):
super(AutoRestartTrick, self).__init__(
patterns, ignore_patterns, ignore_directories)
self.command = ['setsid'] + command
self.stop_signal = stop_signal
self.kill_after = kill_after
self.process = None
def start(self):
self.process = subprocess.Popen(self.command)
def stop(self):
if self.process is None:
return
try:
os.killpg(os.getpgid(self.process.pid), self.stop_signal)
except OSError:
# Process is already gone
pass
else:
kill_time = time.time() + self.kill_after
while time.time() < kill_time:
if self.process.poll() is not None:
break
time.sleep(0.25)
else:
try:
os.killpg(os.getpgid(self.process.pid), 9)
except OSError:
# Process is already gone
pass
self.process = None
@echo.echo
def on_any_event(self, event):
self.stop()
self.start()
| mit | -2,479,692,642,133,744,000 | 28.752874 | 115 | 0.594939 | false |
dogukantufekci/workplace_saas | workplace_saas/_apps/companies/models.py | 2 | 2644 | from django.db import models
from countries.models import Country
from people.models import Person
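# NOTE: the models below also reference PhoneNumber and CustomPerson, which the
# original file never imports; the imports here are assumptions about where
# those models live and may need adjusting.
from people.models import CustomPerson
from phone_numbers.models import PhoneNumber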
# Models -----------------------------------------
class Company(models.Model):
SOLE_TRADER = 1
CORPORATION = 2
TYPE_CHOICES = (
(SOLE_TRADER, 'Sole Trader'),
(CORPORATION, 'Corporation'),
)
name = models.CharField(
max_length=100,
)
type = models.PositiveSmallIntegerField(
choices=TYPE_CHOICES,
)
class SoleTrader(models.Model):
person = models.OneToOneField(
Person,
related_name='sole_trader',
)
class Corporation(models.Model):
registered_in = models.ForeignKey(
Country,
related_name='companies',
)
registration_number = models.CharField(
max_length=40,
)
class Meta:
unique_together = (('registered_in', 'registration_number'),)
class CompanyEmail(models.Model):
company = models.ForeignKey(
Company,
related_name='company_emails',
)
email = models.EmailField(
max_length=254,
)
class Meta:
unique_together = (('company', 'email',),)
class CompanyPhoneNumber(models.Model):
company = models.ForeignKey(
Company,
related_name='company_phone_numbers',
)
phone_number = models.ForeignKey(
PhoneNumber,
related_name='company_phone_numbers',
)
# Custom Models -----------------------------------------
class CustomCompany(models.Model):
name = models.CharField(
max_length=100,
)
type = models.PositiveSmallIntegerField(
choices=Company.TYPE_CHOICES,
)
class CustomSoleTrader(models.Model):
person = models.OneToOneField(
CustomPerson,
related_name='custom_sole_trader',
)
class CustomCorporation(models.Model):
registered_in = models.ForeignKey(
Country,
related_name='custom_corporations',
)
registration_number = models.CharField(
max_length=40,
)
class Meta:
unique_together = (('registered_in', 'registration_number'),)
class CustomCompanyEmail(models.Model):
custom_company = models.ForeignKey(
CustomCompany,
related_name='custom_company_emails',
)
email = models.EmailField(
max_length=254,
)
class Meta:
        unique_together = (('custom_company', 'email',),)
class CustomCompanyPhoneNumber(models.Model):
company = models.ForeignKey(
Company,
related_name='company_phone_numbers',
)
phone_number = models.ForeignKey(
PhoneNumber,
related_name='company_phone_numbers',
) | mit | 3,243,802,286,183,120,400 | 20.680328 | 69 | 0.607413 | false |
Comikris/Assignment_Interpreter | FileManagement/filehandler.py | 1 | 7873 | from FileManagement.interface_filehandler import *
# Brendan
import pickle
import os
import sys
import math
# kate
import re
from datetime import *
# Kris Little design
class FileHandler(IFileHandler):
def __init__(self):
self.valid = True
# Kris
def load_file(self, file):
# put error handling here
contents = []
try:
the_file = open(file, 'r')
except FileNotFoundError:
print("file does not exist.")
else:
for line in the_file:
line = tuple(line.replace('\n', "").split(','))
contents.append(line)
the_file.close()
return contents
# Kris
def write_file(self, file, data):
the_file = open(file, 'w')
string = ""
for l in data:
new_data = [l[0], l[1], l[2], l[3], l[4], l[5], l[6]]
for i in range(len(new_data)):
string += str(new_data[i])
# prevent a space at the end of a line
if i != len(new_data) - 1:
string += ','
string += "\n"
the_file.write(string)
the_file.close()
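    # Rows handled by load_file/write_file are expected to look like
    # "a001,M,20,300,Normal,100,12-06-1998" -- this layout (id, gender, age,
    # sales, bmi, income, birthday) is inferred from the checks in validate()
    # below, not stated anywhere in the original file.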
# validate input for date type
# KATE
    def valid_date(self, birthday):
        minyear = 1000
        maxyear = date.today().year
        mydate = birthday.split('-')
        if len(mydate) == 3:
            birthdate = int(mydate[0])
            birthmonth = int(mydate[1])
            birthyear = int(mydate[2])
            if minyear <= birthyear <= maxyear:
                try:
                    # date() expects (year, month, day)
                    date(birthyear, birthmonth, birthdate)
                except ValueError:
                    return False
                return True
            else:
                print('Year is out of range')
        return False
# Validate date match year
# KATE
def valid_age(self, birthday):
today = date.today()
mydate = birthday
print(mydate)
try:
            born = datetime.strptime(mydate, '%d-%m-%Y')
except ValueError:
pass
else:
age = today.year - born.year - ((today.month, today.day) < (born.month, born.day))
return age
# Validate file data
def validate(self, data):
""" TestCase for validate
>>> aFileHandler = FileHandler()
>>> aFileHandler.validate([("e01","m","20","20","Normal","200","12-06-1998")])
invalidate data: e01
invalidate data: m
invalidate data: 20
invalidate data: 20
invalidate data: Normal
invalidate data: 200
invalidate data: 12-06-1998
"""
add_to = []
feedback = ""
for person in data:
feedback += "Feedback for data at: " + str(data.index(person) + 1) + "\n"
self.valid = True
print(person)
# check the format is a letter and 3 digit e.g A002 or a002
if re.match(r'[a-z][0-9]{3}', (person[0]).lower()):
# Kris
if len(str(person[0])) >= 5:
self.valid = False
else:
# Kris
feedback += "ID is incorrect; must contain a letter and 3 digits e.g. a001.\n"
self.valid = False
# check the format is either M/F/Male/Female
if person[1].upper() == "M" or (person[1]).upper() == "F":
print(person[1])
else:
# Kris
feedback += "Incorect Gender; must be M or F.\n"
self.valid = False
# CHECK DATE, THEN CHECK AGE..
# Kris
date_correct = True
try:
datetime.strptime(person[6], "%d-%m-%Y")
except ValueError:
date_correct = False
feedback += "Date is not corrent format! " + str(person[6])
self.valid = False
if date_correct:
the_date = datetime.strptime(person[6], "%d-%m-%Y")
test_age = math.floor(((datetime.today() - the_date).days/365))
if test_age == int(person[2]):
pass
else:
self.valid = False
feedback += "Age and birthday does not match. " + str(test_age) + ":" + str(int(person[2]))
# check sales is 3 interger value
if re.match(r'[0-9]{3}', person[3]):
print(person[3])
else:
feedback += "Incorrect sales number; must be a 3 digit whole number. \n"
self.valid = False
# check BMI is either Normal / Overweight / Obesity or Underweight
if re.match(r'\b(NORMAL|OVERWEIGHT|OBESITY|UNDERWEIGHT)\b', (person[4]).upper()):
print(person[4])
else:
feedback += "Incorrect BMI value; Choose from Normal, Overweight, Obesity or Underweight. \n"
self.valid = False
# check Income is float
try:
if int(person[5]):
if len(str(int(person[5]))) > 3:
feedback += "Income is too large."
self.valid = False
else:
pass
else:
feedback += "Incorrect income; must be an integer number. \n" + str(person[5])
self.valid = False
except ValueError:
self.valid = False
if self.valid:
add_to.append(person)
feedback += "Passed and added to database.\n"
else:
feedback += '\n\n'
print(feedback)
return add_to
# Brendan Holt
# Used to pickle the loaded graphs to default pickle file
def pack_pickle(self, graphs):
# Raises exception if the default file does not exits and creates it should this exception be raised
try:
realfilepath = os.path.dirname(os.path.realpath(sys.argv[0])) + "\\files\\pickle.dat"
if not os.path.exists(realfilepath):
raise IOError
except IOError:
os.makedirs(os.path.dirname(realfilepath))
pass
# The pickle process
pickleout = open(realfilepath, "wb")
pickle.dump(graphs, pickleout)
pickleout.close()
# Brendan Holt
# Used to unpickle graphs in the pickle file and return them to the interpreters graph list
def unpack_pickle(self, filepath):
# Raises exception if for some reason the default file has been deleted
try:
if os.path.exists(filepath) is False:
raise IOError
except IOError:
            print('File does not exist')
return
# The unpickle process
picklein = open(filepath, "rb")
graphs = pickle.load(picklein)
picklein.close()
# Return the graphs to the interpreter
return graphs
# Brendan Holt
# Used to pickle the entire database to default pickle file
def pickle_all(self, data):
# Raises exception if for some reason the default file has been deleted
try:
realfiledirectory = os.path.dirname(os.path.realpath(sys.argv[0])) + "\\files\\"
if os.path.exists(realfiledirectory) is False:
raise IOError
except IOError:
os.makedirs(os.path.dirname(realfiledirectory))
return
# The pickle process
pickleout = open(realfiledirectory + "\\db_backup.dat", "wb")
pickle.dump(data, pickleout)
pickleout.close()
| apache-2.0 | -2,066,208,822,728,837,600 | 32.682819 | 111 | 0.498666 | false |
rabipanda/tensorflow | tensorflow/contrib/py2tf/pyct/templates_test.py | 2 | 2067 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for templates module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.py2tf.pyct import compiler
from tensorflow.contrib.py2tf.pyct import templates
from tensorflow.python.platform import test
class TemplatesTest(test.TestCase):
def test_replace_variable(self):
template = """
def test_fn(a):
a += 1
a = 2 * a + 1
return b
"""
node = templates.replace(template, a='b')[0]
result = compiler.ast_to_object(node)
self.assertEquals(7, result.test_fn(2))
def test_replace_function_name(self):
template = """
def fname(a):
a += 1
a = 2 * a + 1
return a
"""
node = templates.replace(template, fname='test_fn')[0]
result = compiler.ast_to_object(node)
self.assertEquals(7, result.test_fn(2))
def test_code_block(self):
template = """
def test_fn(a):
block
return a
"""
node = templates.replace(
template,
block=[
gast.Assign([
gast.Name('a', None, None)
], gast.BinOp(gast.Name('a', None, None), gast.Add(), gast.Num(1))),
] * 2)[0]
result = compiler.ast_to_object(node)
self.assertEquals(3, result.test_fn(1))
if __name__ == '__main__':
test.main()
| apache-2.0 | 4,031,910,180,996,787,000 | 27.315068 | 80 | 0.616836 | false |
afrantzis/pixel-format-guide | tests/pfgtest.py | 1 | 3257 | # Copyright © 2017 Collabora Ltd.
#
# This file is part of pfg.
#
# pfg is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option)
# any later version.
#
# pfg is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pfg. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Alexandros Frantzis <[email protected]>
import unittest
import pfg
from pfg import util
class TestCase(unittest.TestCase):
def assertFormatMatches(self, format_str, data_type, native, memory_le, memory_be):
fd = pfg.describe(format_str)
self.assertEqual(data_type, fd.data_type)
self.assertEqual(native, fd.native)
self.assertEqual(memory_le, fd.memory_le)
self.assertEqual(memory_be, fd.memory_be)
def assertFormatMatchesUnorm(self, format_str, native, memory_le, memory_be):
self.assertFormatMatches(format_str, "UNORM", native, memory_le, memory_be)
def assertFormatIsUnknown(self, format_str):
self.assertIsNone(pfg.describe(format_str))
def assertFindCompatibleMatches(self, format_str, family_str,
everywhere, little_endian, big_endian,
treat_x_as_a=False,
treat_srgb_as_unorm=False,
ignore_data_types=False):
compatibility = pfg.find_compatible(
format_str, family_str,
treat_x_as_a=treat_x_as_a,
treat_srgb_as_unorm=treat_srgb_as_unorm,
ignore_data_types=ignore_data_types)
# assertCountEqual checks for the existence of items regardless
# of order (and has a misleading name...)
self.assertCountEqual(everywhere, compatibility.everywhere)
self.assertCountEqual(little_endian, compatibility.little_endian)
self.assertCountEqual(big_endian, compatibility.big_endian)
def assertHasDocumentationFor(self, family):
documentation = pfg.document(family)
self.assertEqual(util.read_documentation(family + ".md"), documentation)
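# Shorthand constructors for the expected component-bit tuples used when
# writing expected pixel-format descriptions in the tests.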
def R(m,l): return util.component_bits("R", m, l)
def G(m,l): return util.component_bits("G", m, l)
def B(m,l): return util.component_bits("B", m, l)
def A(m,l): return util.component_bits("A", m, l)
def X(m,l): return util.component_bits("X", m, l)
def Y(m,l): return util.component_bits("Y", m, l)
def U(m,l): return util.component_bits("U", m, l)
def V(m,l): return util.component_bits("V", m, l)
def C(m,l): return util.component_bits("C", m, l)
def Rn(n,m,l): return util.component_bits("(R+%d)" % n, m, l)
def Gn(n,m,l): return util.component_bits("(G+%d)" % n, m, l)
def Bn(n,m,l): return util.component_bits("(B+%d)" % n, m, l)
def An(n,m,l): return util.component_bits("(A+%d)" % n, m, l)
def Cn(n,m,l): return util.component_bits("(C+%d)" % n, m, l)
| lgpl-2.1 | -2,459,193,073,194,229,000 | 43.60274 | 87 | 0.663391 | false |
ypwalter/fxos-certsuite | mcts/webapi_tests/telephony/test_telephony_outgoing.py | 6 | 2891 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import time
from mcts.webapi_tests.semiauto import TestCase
from mcts.webapi_tests.telephony import TelephonyTestCommon
class TestTelephonyOutgoing(TestCase, TelephonyTestCommon):
"""
This is a test for the `WebTelephony API`_ which will:
- Disable the default gaia dialer, so that the test app can handle calls
- Ask the test user to specify a destination phone number for the test call
- Setup mozTelephonyCall event listeners for the outgoing call
- Use the API to initiate the outgoing call
- Ask the test user to answer the call on the destination phone
- Keep the call active for 5 seconds, then hang up the call via the API
- Verify that the corresponding mozTelephonyCall events were triggered
- Re-enable the default gaia dialer
.. _`WebTelephony API`: https://developer.mozilla.org/en-US/docs/Web/Guide/API/Telephony
"""
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
TelephonyTestCommon.__init__(self)
def setUp(self):
self.addCleanup(self.clean_up)
super(TestTelephonyOutgoing, self).setUp()
self.wait_for_obj("window.navigator.mozTelephony")
# disable the default dialer manager so it doesn't grab our calls
self.disable_dialer()
def test_telephony_outgoing(self):
# use the webapi to make an outgoing call to user-specified number
self.user_guided_outgoing_call()
# verify one outgoing call
self.calls = self.marionette.execute_script("return window.wrappedJSObject.get_returnable_calls()")
self.assertEqual(self.calls['length'], 1, "There should be 1 call")
self.assertEqual(self.calls['0'], self.outgoing_call)
# have user answer the call on target
self.answer_call(incoming=False)
# keep call active for a while
time.sleep(5)
# verify the active call
self.assertEqual(self.active_call_list[0]['number'], self.outgoing_call['number'])
self.calls = self.marionette.execute_script("return window.wrappedJSObject.get_returnable_calls()")
self.assertEqual(self.calls['length'], 1, "There should be 1 active call")
self.assertEqual(self.active_call_list[0]['state'], "connected", "Call state should be 'connected'")
# disconnect the active call
self.hangup_call()
self.calls = self.marionette.execute_script("return window.wrappedJSObject.get_returnable_calls()")
self.assertEqual(self.calls['length'], 0, "There should be 0 calls")
def clean_up(self):
# re-enable the default dialer manager
self.enable_dialer()
self.active_call_list = []
| mpl-2.0 | -3,349,398,181,666,251,300 | 42.80303 | 108 | 0.687997 | false |
Batterfii/django | django/contrib/gis/maps/google/overlays.py | 151 | 11955 | from __future__ import unicode_literals
from functools import total_ordering
from django.contrib.gis.geos import (
LinearRing, LineString, Point, Polygon, fromstr,
)
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import html_safe
@html_safe
@python_2_unicode_compatible
class GEvent(object):
"""
A Python wrapper for the Google GEvent object.
Events can be attached to any object derived from GOverlayBase with the
add_event() call.
For more information please see the Google Maps API Reference:
https://developers.google.com/maps/documentation/javascript/reference#event
Example:
from django.shortcuts import render_to_response
from django.contrib.gis.maps.google import GoogleMap, GEvent, GPolyline
def sample_request(request):
polyline = GPolyline('LINESTRING(101 26, 112 26, 102 31)')
event = GEvent('click',
'function() { location.href = "http://www.google.com"}')
polyline.add_event(event)
return render_to_response('mytemplate.html',
{'google' : GoogleMap(polylines=[polyline])})
"""
def __init__(self, event, action):
"""
Initializes a GEvent object.
Parameters:
event:
string for the event, such as 'click'. The event must be a valid
event for the object in the Google Maps API.
There is no validation of the event type within Django.
action:
string containing a Javascript function, such as
'function() { location.href = "newurl";}'
The string must be a valid Javascript function. Again there is no
            validation of the function within Django.
"""
self.event = event
self.action = action
def __str__(self):
"Returns the parameter part of a GEvent."
return '"%s", %s' % (self.event, self.action)
@html_safe
@python_2_unicode_compatible
class GOverlayBase(object):
def __init__(self):
self.events = []
def latlng_from_coords(self, coords):
"Generates a JavaScript array of GLatLng objects for the given coordinates."
return '[%s]' % ','.join('new GLatLng(%s,%s)' % (y, x) for x, y in coords)
def add_event(self, event):
"Attaches a GEvent to the overlay object."
self.events.append(event)
def __str__(self):
"The string representation is the JavaScript API call."
return '%s(%s)' % (self.__class__.__name__, self.js_params)
class GPolygon(GOverlayBase):
"""
A Python wrapper for the Google GPolygon object. For more information
please see the Google Maps API Reference:
https://developers.google.com/maps/documentation/javascript/reference#Polygon
"""
def __init__(self, poly,
stroke_color='#0000ff', stroke_weight=2, stroke_opacity=1,
fill_color='#0000ff', fill_opacity=0.4):
"""
The GPolygon object initializes on a GEOS Polygon or a parameter that
may be instantiated into GEOS Polygon. Please note that this will not
depict a Polygon's internal rings.
Keyword Options:
stroke_color:
The color of the polygon outline. Defaults to '#0000ff' (blue).
stroke_weight:
The width of the polygon outline, in pixels. Defaults to 2.
stroke_opacity:
The opacity of the polygon outline, between 0 and 1. Defaults to 1.
fill_color:
The color of the polygon fill. Defaults to '#0000ff' (blue).
fill_opacity:
The opacity of the polygon fill. Defaults to 0.4.
"""
if isinstance(poly, six.string_types):
poly = fromstr(poly)
if isinstance(poly, (tuple, list)):
poly = Polygon(poly)
if not isinstance(poly, Polygon):
raise TypeError('GPolygon may only initialize on GEOS Polygons.')
# Getting the envelope of the input polygon (used for automatically
# determining the zoom level).
self.envelope = poly.envelope
# Translating the coordinates into a JavaScript array of
# Google `GLatLng` objects.
self.points = self.latlng_from_coords(poly.shell.coords)
# Stroke settings.
self.stroke_color, self.stroke_opacity, self.stroke_weight = stroke_color, stroke_opacity, stroke_weight
# Fill settings.
self.fill_color, self.fill_opacity = fill_color, fill_opacity
super(GPolygon, self).__init__()
@property
def js_params(self):
return '%s, "%s", %s, %s, "%s", %s' % (self.points, self.stroke_color, self.stroke_weight, self.stroke_opacity,
self.fill_color, self.fill_opacity)
class GPolyline(GOverlayBase):
"""
A Python wrapper for the Google GPolyline object. For more information
please see the Google Maps API Reference:
https://developers.google.com/maps/documentation/javascript/reference#Polyline
"""
def __init__(self, geom, color='#0000ff', weight=2, opacity=1):
"""
        The GPolyline object may be initialized on GEOS LineString, LinearRing,
        and Polygon objects (internal rings not supported) or a parameter that
        may be instantiated into one of the above geometries.
Keyword Options:
color:
The color to use for the polyline. Defaults to '#0000ff' (blue).
weight:
The width of the polyline, in pixels. Defaults to 2.
opacity:
The opacity of the polyline, between 0 and 1. Defaults to 1.
"""
# If a GEOS geometry isn't passed in, try to construct one.
if isinstance(geom, six.string_types):
geom = fromstr(geom)
if isinstance(geom, (tuple, list)):
geom = Polygon(geom)
# Generating the lat/lng coordinate pairs.
if isinstance(geom, (LineString, LinearRing)):
self.latlngs = self.latlng_from_coords(geom.coords)
elif isinstance(geom, Polygon):
self.latlngs = self.latlng_from_coords(geom.shell.coords)
else:
raise TypeError('GPolyline may only initialize on GEOS LineString, LinearRing, and/or Polygon geometries.')
# Getting the envelope for automatic zoom determination.
self.envelope = geom.envelope
self.color, self.weight, self.opacity = color, weight, opacity
super(GPolyline, self).__init__()
@property
def js_params(self):
return '%s, "%s", %s, %s' % (self.latlngs, self.color, self.weight, self.opacity)
@total_ordering
class GIcon(object):
"""
Creates a GIcon object to pass into a Gmarker object.
The keyword arguments map to instance attributes of the same name. These,
in turn, correspond to a subset of the attributes of the official GIcon
javascript object:
https://developers.google.com/maps/documentation/javascript/reference#Icon
Because a Google map often uses several different icons, a name field has
been added to the required arguments.
Required Arguments:
varname:
        A string which will become the basis for the js variable name of
        the marker; for this reason, your code should assign a unique
        name to each GIcon you instantiate, otherwise there will be
        namespace collisions in your javascript.
Keyword Options:
image:
The url of the image to be used as the icon on the map defaults
to 'G_DEFAULT_ICON'
iconsize:
a tuple representing the pixel size of the foreground (not the
shadow) image of the icon, in the format: (width, height) ex.:
GIcon('fast_food',
image="/media/icon/star.png",
iconsize=(15,10))
            Would indicate your custom icon was 15px wide and 10px high.
shadow:
the url of the image of the icon's shadow
shadowsize:
a tuple representing the pixel size of the shadow image, format is
the same as ``iconsize``
iconanchor:
a tuple representing the pixel coordinate relative to the top left
corner of the icon image at which this icon is anchored to the map.
In (x, y) format. x increases to the right in the Google Maps
coordinate system and y increases downwards in the Google Maps
        coordinate system.
infowindowanchor:
The pixel coordinate relative to the top left corner of the icon
image at which the info window is anchored to this icon.
"""
def __init__(self, varname, image=None, iconsize=None,
shadow=None, shadowsize=None, iconanchor=None,
infowindowanchor=None):
self.varname = varname
self.image = image
self.iconsize = iconsize
self.shadow = shadow
self.shadowsize = shadowsize
self.iconanchor = iconanchor
self.infowindowanchor = infowindowanchor
def __eq__(self, other):
return self.varname == other.varname
def __lt__(self, other):
return self.varname < other.varname
def __hash__(self):
# XOR with hash of GIcon type so that hash('varname') won't
# equal hash(GIcon('varname')).
return hash(self.__class__) ^ hash(self.varname)
class GMarker(GOverlayBase):
"""
A Python wrapper for the Google GMarker object. For more information
please see the Google Maps API Reference:
https://developers.google.com/maps/documentation/javascript/reference#Marker
Example:
from django.shortcuts import render_to_response
from django.contrib.gis.maps.google.overlays import GMarker, GEvent
def sample_request(request):
marker = GMarker('POINT(101 26)')
event = GEvent('click',
'function() { location.href = "http://www.google.com"}')
marker.add_event(event)
return render_to_response('mytemplate.html',
{'google' : GoogleMap(markers=[marker])})
"""
def __init__(self, geom, title=None, draggable=False, icon=None):
"""
The GMarker object may initialize on GEOS Points or a parameter
that may be instantiated into a GEOS point. Keyword options map to
GMarkerOptions -- so far only the title option is supported.
Keyword Options:
title:
Title option for GMarker, will be displayed as a tooltip.
draggable:
Draggable option for GMarker, disabled by default.
"""
# If a GEOS geometry isn't passed in, try to construct one.
if isinstance(geom, six.string_types):
geom = fromstr(geom)
if isinstance(geom, (tuple, list)):
geom = Point(geom)
if isinstance(geom, Point):
self.latlng = self.latlng_from_coords(geom.coords)
else:
raise TypeError('GMarker may only initialize on GEOS Point geometry.')
# Getting the envelope for automatic zoom determination.
self.envelope = geom.envelope
# TODO: Add support for more GMarkerOptions
self.title = title
self.draggable = draggable
self.icon = icon
super(GMarker, self).__init__()
def latlng_from_coords(self, coords):
return 'new GLatLng(%s,%s)' % (coords[1], coords[0])
def options(self):
result = []
if self.title:
result.append('title: "%s"' % self.title)
if self.icon:
result.append('icon: %s' % self.icon.varname)
if self.draggable:
result.append('draggable: true')
return '{%s}' % ','.join(result)
@property
def js_params(self):
return '%s, %s' % (self.latlng, self.options())
| bsd-3-clause | 128,455,695,766,739,630 | 35.227273 | 119 | 0.623087 | false |
EricCline/CEM_inc | env/lib/python2.7/site-packages/south/tests/logic.py | 127 | 33513 | from south.tests import unittest
import datetime
import sys
try:
set # builtin, python >=2.6
except NameError:
from sets import Set as set # in stdlib, python >=2.3
from south import exceptions
from south.migration import migrate_app
from south.migration.base import all_migrations, Migrations
from south.creator.changes import ManualChanges
from south.migration.utils import depends, flatten, get_app_label
from south.models import MigrationHistory
from south.tests import Monkeypatcher
from south.db import db
class TestBrokenMigration(Monkeypatcher):
installed_apps = ["fakeapp", "otherfakeapp", "brokenapp"]
def test_broken_dependencies(self):
self.assertRaises(
exceptions.DependsOnUnmigratedApplication,
Migrations.calculate_dependencies,
force=True,
)
#depends_on_unknown = self.brokenapp['0002_depends_on_unknown']
#self.assertRaises(exceptions.DependsOnUnknownMigration,
# depends_on_unknown.dependencies)
#depends_on_higher = self.brokenapp['0003_depends_on_higher']
#self.assertRaises(exceptions.DependsOnHigherMigration,
# depends_on_higher.dependencies)
class TestMigration(Monkeypatcher):
installed_apps = ["fakeapp", "otherfakeapp"]
def setUp(self):
super(TestMigration, self).setUp()
self.fakeapp = Migrations('fakeapp')
self.otherfakeapp = Migrations('otherfakeapp')
Migrations.calculate_dependencies(force=True)
def test_str(self):
migrations = [str(m) for m in self.fakeapp]
self.assertEqual(['fakeapp:0001_spam',
'fakeapp:0002_eggs',
'fakeapp:0003_alter_spam'],
migrations)
def test_repr(self):
migrations = [repr(m) for m in self.fakeapp]
self.assertEqual(['<Migration: fakeapp:0001_spam>',
'<Migration: fakeapp:0002_eggs>',
'<Migration: fakeapp:0003_alter_spam>'],
migrations)
def test_app_label(self):
self.assertEqual(['fakeapp', 'fakeapp', 'fakeapp'],
[m.app_label() for m in self.fakeapp])
def test_name(self):
self.assertEqual(['0001_spam', '0002_eggs', '0003_alter_spam'],
[m.name() for m in self.fakeapp])
def test_full_name(self):
self.assertEqual(['fakeapp.migrations.0001_spam',
'fakeapp.migrations.0002_eggs',
'fakeapp.migrations.0003_alter_spam'],
[m.full_name() for m in self.fakeapp])
def test_migration(self):
# Can't use vanilla import, modules beginning with numbers aren't in grammar
M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration
M2 = __import__("fakeapp.migrations.0002_eggs", {}, {}, ['Migration']).Migration
M3 = __import__("fakeapp.migrations.0003_alter_spam", {}, {}, ['Migration']).Migration
self.assertEqual([M1, M2, M3],
[m.migration().Migration for m in self.fakeapp])
self.assertRaises(exceptions.UnknownMigration,
self.fakeapp['9999_unknown'].migration)
def test_previous(self):
self.assertEqual([None,
self.fakeapp['0001_spam'],
self.fakeapp['0002_eggs']],
[m.previous() for m in self.fakeapp])
def test_dependencies(self):
"Test that the dependency detection works."
self.assertEqual([
set([]),
set([self.fakeapp['0001_spam']]),
set([self.fakeapp['0002_eggs']])
],
[m.dependencies for m in self.fakeapp],
)
self.assertEqual([
set([self.fakeapp['0001_spam']]),
set([self.otherfakeapp['0001_first']]),
set([
self.otherfakeapp['0002_second'],
self.fakeapp['0003_alter_spam'],
])
],
[m.dependencies for m in self.otherfakeapp],
)
def test_forwards_plan(self):
self.assertEqual([
[self.fakeapp['0001_spam']],
[
self.fakeapp['0001_spam'],
self.fakeapp['0002_eggs']
],
[
self.fakeapp['0001_spam'],
self.fakeapp['0002_eggs'],
self.fakeapp['0003_alter_spam'],
]
],
[m.forwards_plan() for m in self.fakeapp],
)
self.assertEqual([
[
self.fakeapp['0001_spam'],
self.otherfakeapp['0001_first']
],
[
self.fakeapp['0001_spam'],
self.otherfakeapp['0001_first'],
self.otherfakeapp['0002_second']
],
[
self.fakeapp['0001_spam'],
self.otherfakeapp['0001_first'],
self.otherfakeapp['0002_second'],
self.fakeapp['0002_eggs'],
self.fakeapp['0003_alter_spam'],
self.otherfakeapp['0003_third'],
]
],
[m.forwards_plan() for m in self.otherfakeapp],
)
def test_is_before(self):
F1 = self.fakeapp['0001_spam']
F2 = self.fakeapp['0002_eggs']
F3 = self.fakeapp['0003_alter_spam']
O1 = self.otherfakeapp['0001_first']
O2 = self.otherfakeapp['0002_second']
O3 = self.otherfakeapp['0003_third']
self.assertTrue(F1.is_before(F2))
self.assertTrue(F1.is_before(F3))
self.assertTrue(F2.is_before(F3))
self.assertEqual(O3.is_before(O1), False)
self.assertEqual(O3.is_before(O2), False)
self.assertEqual(O2.is_before(O2), False)
self.assertEqual(O2.is_before(O1), False)
self.assertEqual(F2.is_before(O1), None)
self.assertEqual(F2.is_before(O2), None)
self.assertEqual(F2.is_before(O3), None)
class TestMigrationDependencies(Monkeypatcher):
installed_apps = ['deps_a', 'deps_b', 'deps_c']
def setUp(self):
super(TestMigrationDependencies, self).setUp()
self.deps_a = Migrations('deps_a')
self.deps_b = Migrations('deps_b')
self.deps_c = Migrations('deps_c')
Migrations.calculate_dependencies(force=True)
def test_dependencies(self):
self.assertEqual(
[
set([]),
set([self.deps_a['0001_a']]),
set([self.deps_a['0002_a']]),
set([
self.deps_a['0003_a'],
self.deps_b['0003_b'],
]),
set([self.deps_a['0004_a']]),
],
[m.dependencies for m in self.deps_a],
)
self.assertEqual(
[
set([]),
set([
self.deps_b['0001_b'],
self.deps_a['0002_a']
]),
set([
self.deps_b['0002_b'],
self.deps_a['0003_a']
]),
set([self.deps_b['0003_b']]),
set([self.deps_b['0004_b']]),
],
[m.dependencies for m in self.deps_b],
)
self.assertEqual(
[
set([]),
set([self.deps_c['0001_c']]),
set([self.deps_c['0002_c']]),
set([self.deps_c['0003_c']]),
set([
self.deps_c['0004_c'],
self.deps_a['0002_a']
]),
],
[m.dependencies for m in self.deps_c],
)
def test_dependents(self):
self.assertEqual([set([self.deps_a['0002_a']]),
set([self.deps_c['0005_c'],
self.deps_b['0002_b'],
self.deps_a['0003_a']]),
set([self.deps_b['0003_b'],
self.deps_a['0004_a']]),
set([self.deps_a['0005_a']]),
set([])],
[m.dependents for m in self.deps_a])
self.assertEqual([set([self.deps_b['0002_b']]),
set([self.deps_b['0003_b']]),
set([self.deps_b['0004_b'],
self.deps_a['0004_a']]),
set([self.deps_b['0005_b']]),
set([])],
[m.dependents for m in self.deps_b])
self.assertEqual([set([self.deps_c['0002_c']]),
set([self.deps_c['0003_c']]),
set([self.deps_c['0004_c']]),
set([self.deps_c['0005_c']]),
set([])],
[m.dependents for m in self.deps_c])
def test_forwards_plan(self):
self.assertEqual([[self.deps_a['0001_a']],
[self.deps_a['0001_a'],
self.deps_a['0002_a']],
[self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_a['0003_a']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_b['0003_b'],
self.deps_a['0004_a']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_b['0003_b'],
self.deps_a['0004_a'],
self.deps_a['0005_a']]],
[m.forwards_plan() for m in self.deps_a])
self.assertEqual([[self.deps_b['0001_b']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_b['0003_b']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_b['0003_b'],
self.deps_b['0004_b']],
[self.deps_b['0001_b'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_b['0003_b'],
self.deps_b['0004_b'],
self.deps_b['0005_b']]],
[m.forwards_plan() for m in self.deps_b])
self.assertEqual([[self.deps_c['0001_c']],
[self.deps_c['0001_c'],
self.deps_c['0002_c']],
[self.deps_c['0001_c'],
self.deps_c['0002_c'],
self.deps_c['0003_c']],
[self.deps_c['0001_c'],
self.deps_c['0002_c'],
self.deps_c['0003_c'],
self.deps_c['0004_c']],
[self.deps_c['0001_c'],
self.deps_c['0002_c'],
self.deps_c['0003_c'],
self.deps_c['0004_c'],
self.deps_a['0001_a'],
self.deps_a['0002_a'],
self.deps_c['0005_c']]],
[m.forwards_plan() for m in self.deps_c])
def test_backwards_plan(self):
self.assertEqual([
[
self.deps_c['0005_c'],
self.deps_b['0005_b'],
self.deps_b['0004_b'],
self.deps_a['0005_a'],
self.deps_a['0004_a'],
self.deps_b['0003_b'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_a['0002_a'],
self.deps_a['0001_a'],
],
[
self.deps_c['0005_c'],
self.deps_b['0005_b'],
self.deps_b['0004_b'],
self.deps_a['0005_a'],
self.deps_a['0004_a'],
self.deps_b['0003_b'],
self.deps_b['0002_b'],
self.deps_a['0003_a'],
self.deps_a['0002_a'],
],
[
self.deps_b['0005_b'],
self.deps_b['0004_b'],
self.deps_a['0005_a'],
self.deps_a['0004_a'],
self.deps_b['0003_b'],
self.deps_a['0003_a'],
],
[
self.deps_a['0005_a'],
self.deps_a['0004_a'],
],
[
self.deps_a['0005_a'],
]
], [m.backwards_plan() for m in self.deps_a])
self.assertEqual([
[
self.deps_b['0005_b'],
self.deps_b['0004_b'],
self.deps_a['0005_a'],
self.deps_a['0004_a'],
self.deps_b['0003_b'],
self.deps_b['0002_b'],
self.deps_b['0001_b'],
],
[
self.deps_b['0005_b'],
self.deps_b['0004_b'],
self.deps_a['0005_a'],
self.deps_a['0004_a'],
self.deps_b['0003_b'],
self.deps_b['0002_b'],
],
[
self.deps_b['0005_b'],
self.deps_b['0004_b'],
self.deps_a['0005_a'],
self.deps_a['0004_a'],
self.deps_b['0003_b'],
],
[
self.deps_b['0005_b'],
self.deps_b['0004_b'],
],
[
self.deps_b['0005_b'],
],
], [m.backwards_plan() for m in self.deps_b])
self.assertEqual([
[
self.deps_c['0005_c'],
self.deps_c['0004_c'],
self.deps_c['0003_c'],
self.deps_c['0002_c'],
self.deps_c['0001_c'],
],
[
self.deps_c['0005_c'],
self.deps_c['0004_c'],
self.deps_c['0003_c'],
self.deps_c['0002_c'],
],
[
self.deps_c['0005_c'],
self.deps_c['0004_c'],
self.deps_c['0003_c'],
],
[
self.deps_c['0005_c'],
self.deps_c['0004_c'],
],
[self.deps_c['0005_c']]
], [m.backwards_plan() for m in self.deps_c])
class TestCircularDependencies(Monkeypatcher):
installed_apps = ["circular_a", "circular_b"]
def test_plans(self):
Migrations.calculate_dependencies(force=True)
circular_a = Migrations('circular_a')
circular_b = Migrations('circular_b')
self.assertRaises(
exceptions.CircularDependency,
circular_a[-1].forwards_plan,
)
self.assertRaises(
exceptions.CircularDependency,
circular_b[-1].forwards_plan,
)
self.assertRaises(
exceptions.CircularDependency,
circular_a[-1].backwards_plan,
)
self.assertRaises(
exceptions.CircularDependency,
circular_b[-1].backwards_plan,
)
class TestMigrations(Monkeypatcher):
installed_apps = ["fakeapp", "otherfakeapp"]
def test_all(self):
M1 = Migrations(__import__("fakeapp", {}, {}, ['']))
M2 = Migrations(__import__("otherfakeapp", {}, {}, ['']))
self.assertEqual(
[M1, M2],
list(all_migrations()),
)
def test(self):
M1 = Migrations(__import__("fakeapp", {}, {}, ['']))
self.assertEqual(M1, Migrations("fakeapp"))
self.assertEqual(M1, Migrations(self.create_fake_app("fakeapp")))
def test_application(self):
fakeapp = Migrations("fakeapp")
application = __import__("fakeapp", {}, {}, [''])
self.assertEqual(application, fakeapp.application)
def test_migration(self):
# Can't use vanilla import, modules beginning with numbers aren't in grammar
M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration
M2 = __import__("fakeapp.migrations.0002_eggs", {}, {}, ['Migration']).Migration
migration = Migrations('fakeapp')
self.assertEqual(M1, migration['0001_spam'].migration().Migration)
self.assertEqual(M2, migration['0002_eggs'].migration().Migration)
self.assertRaises(exceptions.UnknownMigration,
migration['0001_jam'].migration)
def test_guess_migration(self):
# Can't use vanilla import, modules beginning with numbers aren't in grammar
M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration
migration = Migrations('fakeapp')
self.assertEqual(M1, migration.guess_migration("0001_spam").migration().Migration)
self.assertEqual(M1, migration.guess_migration("0001_spa").migration().Migration)
self.assertEqual(M1, migration.guess_migration("0001_sp").migration().Migration)
self.assertEqual(M1, migration.guess_migration("0001_s").migration().Migration)
self.assertEqual(M1, migration.guess_migration("0001_").migration().Migration)
self.assertEqual(M1, migration.guess_migration("0001").migration().Migration)
self.assertRaises(exceptions.UnknownMigration,
migration.guess_migration, "0001-spam")
self.assertRaises(exceptions.MultiplePrefixMatches,
migration.guess_migration, "000")
self.assertRaises(exceptions.MultiplePrefixMatches,
migration.guess_migration, "")
self.assertRaises(exceptions.UnknownMigration,
migration.guess_migration, "0001_spams")
self.assertRaises(exceptions.UnknownMigration,
migration.guess_migration, "0001_jam")
def test_app_label(self):
names = ['fakeapp', 'otherfakeapp']
self.assertEqual(names,
[Migrations(n).app_label() for n in names])
def test_full_name(self):
names = ['fakeapp', 'otherfakeapp']
self.assertEqual([n + '.migrations' for n in names],
[Migrations(n).full_name() for n in names])
class TestMigrationLogic(Monkeypatcher):
"""
Tests if the various logic functions in migration actually work.
"""
installed_apps = ["fakeapp", "otherfakeapp"]
def setUp(self):
super(TestMigrationLogic, self).setUp()
MigrationHistory.objects.all().delete()
def assertListEqual(self, list1, list2, msg=None):
list1 = set(list1)
list2 = set(list2)
return self.assert_(list1 == list2, "%s is not equal to %s" % (list1, list2))
def test_find_ghost_migrations(self):
pass
def test_apply_migrations(self):
migrations = Migrations("fakeapp")
# We should start with no migrations
self.assertEqual(list(MigrationHistory.objects.all()), [])
# Apply them normally
migrate_app(migrations, target_name=None, fake=False,
load_initial_data=True)
# We should finish with all migrations
self.assertListEqual(
(("fakeapp", "0001_spam"),
("fakeapp", "0002_eggs"),
("fakeapp", "0003_alter_spam"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# Now roll them backwards
migrate_app(migrations, target_name="zero", fake=False)
# Finish with none
self.assertEqual(list(MigrationHistory.objects.all()), [])
def test_migration_merge_forwards(self):
migrations = Migrations("fakeapp")
# We should start with no migrations
self.assertEqual(list(MigrationHistory.objects.all()), [])
# Insert one in the wrong order
MigrationHistory.objects.create(app_name = "fakeapp",
migration = "0002_eggs",
applied = datetime.datetime.now())
# Did it go in?
self.assertListEqual(
(("fakeapp", "0002_eggs"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# Apply them normally
self.assertRaises(exceptions.InconsistentMigrationHistory,
migrate_app,
migrations, target_name=None, fake=False)
self.assertRaises(exceptions.InconsistentMigrationHistory,
migrate_app,
migrations, target_name='zero', fake=False)
try:
migrate_app(migrations, target_name=None, fake=False)
except exceptions.InconsistentMigrationHistory as e:
self.assertEqual(
[
(
migrations['0002_eggs'],
migrations['0001_spam'],
)
],
e.problems,
)
try:
migrate_app(migrations, target_name="zero", fake=False)
except exceptions.InconsistentMigrationHistory as e:
self.assertEqual(
[
(
migrations['0002_eggs'],
migrations['0001_spam'],
)
],
e.problems,
)
# Nothing should have changed (no merge mode!)
self.assertListEqual(
(("fakeapp", "0002_eggs"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# Apply with merge
migrate_app(migrations, target_name=None, merge=True, fake=False)
# We should finish with all migrations
self.assertListEqual(
(("fakeapp", "0001_spam"),
("fakeapp", "0002_eggs"),
("fakeapp", "0003_alter_spam"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# Now roll them backwards
migrate_app(migrations, target_name="0002", fake=False)
migrate_app(migrations, target_name="0001", fake=True)
migrate_app(migrations, target_name="zero", fake=False)
# Finish with none
self.assertEqual(list(MigrationHistory.objects.all()), [])
def test_alter_column_null(self):
def null_ok(eat_exception=True):
from django.db import connection, transaction
# the DBAPI introspection module fails on postgres NULLs.
cursor = connection.cursor()
# SQLite has weird now()
if db.backend_name == "sqlite3":
now_func = "DATETIME('NOW')"
# So does SQLServer... should we be using a backend attribute?
elif db.backend_name == "pyodbc":
now_func = "GETDATE()"
elif db.backend_name == "oracle":
now_func = "SYSDATE"
else:
now_func = "NOW()"
try:
if db.backend_name == "pyodbc":
cursor.execute("SET IDENTITY_INSERT southtest_spam ON;")
cursor.execute("INSERT INTO southtest_spam (id, weight, expires, name) VALUES (100, NULL, %s, 'whatever');" % now_func)
except:
if eat_exception:
transaction.rollback()
return False
else:
raise
else:
cursor.execute("DELETE FROM southtest_spam")
transaction.commit()
return True
MigrationHistory.objects.all().delete()
migrations = Migrations("fakeapp")
# by default name is NOT NULL
migrate_app(migrations, target_name="0002", fake=False)
self.failIf(null_ok())
self.assertListEqual(
(("fakeapp", "0001_spam"),
("fakeapp", "0002_eggs"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# after 0003, it should be NULL
migrate_app(migrations, target_name="0003", fake=False)
self.assert_(null_ok(False))
self.assertListEqual(
(("fakeapp", "0001_spam"),
("fakeapp", "0002_eggs"),
("fakeapp", "0003_alter_spam"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# make sure it is NOT NULL again
migrate_app(migrations, target_name="0002", fake=False)
self.failIf(null_ok(), 'weight not null after migration')
self.assertListEqual(
(("fakeapp", "0001_spam"),
("fakeapp", "0002_eggs"),),
MigrationHistory.objects.values_list("app_name", "migration"),
)
# finish with no migrations, otherwise other tests fail...
migrate_app(migrations, target_name="zero", fake=False)
self.assertEqual(list(MigrationHistory.objects.all()), [])
def test_dependencies(self):
fakeapp = Migrations("fakeapp")
otherfakeapp = Migrations("otherfakeapp")
# Test a simple path
self.assertEqual([fakeapp['0001_spam'],
fakeapp['0002_eggs'],
fakeapp['0003_alter_spam']],
fakeapp['0003_alter_spam'].forwards_plan())
# And a complex one.
self.assertEqual(
[
fakeapp['0001_spam'],
otherfakeapp['0001_first'],
otherfakeapp['0002_second'],
fakeapp['0002_eggs'],
fakeapp['0003_alter_spam'],
otherfakeapp['0003_third']
],
otherfakeapp['0003_third'].forwards_plan(),
)
class TestMigrationUtils(Monkeypatcher):
installed_apps = ["fakeapp", "otherfakeapp"]
def test_get_app_label(self):
self.assertEqual(
"southtest",
get_app_label(self.create_fake_app("southtest.models")),
)
self.assertEqual(
"baz",
get_app_label(self.create_fake_app("foo.bar.baz.models")),
)
class TestUtils(unittest.TestCase):
def test_flatten(self):
self.assertEqual([], list(flatten(iter([]))))
self.assertEqual([], list(flatten(iter([iter([]), ]))))
self.assertEqual([1], list(flatten(iter([1]))))
self.assertEqual([1, 2], list(flatten(iter([1, 2]))))
self.assertEqual([1, 2], list(flatten(iter([iter([1]), 2]))))
self.assertEqual([1, 2], list(flatten(iter([iter([1, 2])]))))
self.assertEqual([1, 2, 3], list(flatten(iter([iter([1, 2]), 3]))))
self.assertEqual([1, 2, 3],
list(flatten(iter([iter([1]), iter([2]), 3]))))
self.assertEqual([1, 2, 3],
list(flatten([[1], [2], 3])))
def test_depends(self):
graph = {'A1': []}
self.assertEqual(['A1'],
depends('A1', lambda n: graph[n]))
graph = {'A1': [],
'A2': ['A1'],
'A3': ['A2']}
self.assertEqual(['A1', 'A2', 'A3'],
depends('A3', lambda n: graph[n]))
graph = {'A1': [],
'A2': ['A1'],
'A3': ['A2', 'A1']}
self.assertEqual(['A1', 'A2', 'A3'],
depends('A3', lambda n: graph[n]))
graph = {'A1': [],
'A2': ['A1'],
'A3': ['A2', 'A1', 'B1'],
'B1': []}
self.assertEqual(
['B1', 'A1', 'A2', 'A3'],
depends('A3', lambda n: graph[n]),
)
graph = {'A1': [],
'A2': ['A1'],
'A3': ['A2', 'A1', 'B2'],
'B1': [],
'B2': ['B1']}
self.assertEqual(
['B1', 'B2', 'A1', 'A2', 'A3'],
depends('A3', lambda n: graph[n]),
)
graph = {'A1': [],
'A2': ['A1', 'B1'],
'A3': ['A2'],
'B1': ['A1']}
self.assertEqual(['A1', 'B1', 'A2', 'A3'],
depends('A3', lambda n: graph[n]))
graph = {'A1': [],
'A2': ['A1'],
'A3': ['A2', 'A1', 'B2'],
'B1': [],
'B2': ['B1', 'C1'],
'C1': ['B1']}
self.assertEqual(
['B1', 'C1', 'B2', 'A1', 'A2', 'A3'],
depends('A3', lambda n: graph[n]),
)
graph = {'A1': [],
'A2': ['A1'],
'A3': ['A2', 'B2', 'A1', 'C1'],
'B1': ['A1'],
'B2': ['B1', 'C2', 'A1'],
'C1': ['B1'],
'C2': ['C1', 'A1'],
'C3': ['C2']}
self.assertEqual(
['A1', 'B1', 'C1', 'C2', 'B2', 'A2', 'A3'],
depends('A3', lambda n: graph[n]),
)
def assertCircularDependency(self, trace, target, graph):
"Custom assertion that checks a circular dependency is detected correctly."
self.assertRaises(
exceptions.CircularDependency,
depends,
target,
lambda n: graph[n],
)
try:
depends(target, lambda n: graph[n])
except exceptions.CircularDependency as e:
self.assertEqual(trace, e.trace)
def test_depends_cycle(self):
graph = {'A1': ['A1']}
self.assertCircularDependency(
['A1', 'A1'],
'A1',
graph,
)
graph = {'A1': [],
'A2': ['A1', 'A2'],
'A3': ['A2']}
self.assertCircularDependency(
['A2', 'A2'],
'A3',
graph,
)
graph = {'A1': [],
'A2': ['A1'],
'A3': ['A2', 'A3'],
'A4': ['A3']}
self.assertCircularDependency(
['A3', 'A3'],
'A4',
graph,
)
graph = {'A1': ['B1'],
'B1': ['A1']}
self.assertCircularDependency(
['A1', 'B1', 'A1'],
'A1',
graph,
)
graph = {'A1': [],
'A2': ['A1', 'B2'],
'A3': ['A2'],
'B1': [],
'B2': ['B1', 'A2'],
'B3': ['B2']}
self.assertCircularDependency(
['A2', 'B2', 'A2'],
'A3',
graph,
)
graph = {'A1': [],
'A2': ['A1', 'B3'],
'A3': ['A2'],
'B1': [],
'B2': ['B1', 'A2'],
'B3': ['B2']}
self.assertCircularDependency(
['A2', 'B3', 'B2', 'A2'],
'A3',
graph,
)
graph = {'A1': [],
'A2': ['A1'],
'A3': ['A2', 'B2'],
'A4': ['A3'],
'B1': ['A3'],
'B2': ['B1']}
self.assertCircularDependency(
['A3', 'B2', 'B1', 'A3'],
'A4',
graph,
)
class TestManualChanges(Monkeypatcher):
installed_apps = ["fakeapp", "otherfakeapp"]
def test_suggest_name(self):
migrations = Migrations('fakeapp')
change = ManualChanges(migrations,
[],
['fakeapp.slug'],
[])
self.assertEquals(change.suggest_name(),
'add_field_fakeapp_slug')
change = ManualChanges(migrations,
[],
[],
['fakeapp.slug'])
self.assertEquals(change.suggest_name(),
'add_index_fakeapp_slug')
| mit | -6,485,172,978,432,499,000 | 36.154102 | 135 | 0.449885 | false |
Coder-Yu/RecQ | algorithm/ranking/BPR.py | 2 | 5528 | #coding:utf8
from baseclass.IterativeRecommender import IterativeRecommender
from random import choice
from tool.qmath import sigmoid
from math import log
from collections import defaultdict
#import tensorflow as tf
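# NOTE: buildModel_tf() below relies on tensorflow (tf); with the import above
# commented out in this snapshot, the tf-based training path presumably needs
# tensorflow to be imported/enabled before it can be used.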
class BPR(IterativeRecommender):
# BPR:Bayesian Personalized Ranking from Implicit Feedback
# Steffen Rendle,Christoph Freudenthaler,Zeno Gantner and Lars Schmidt-Thieme
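    # BPR-OPT (as in the paper referenced above) maximizes
    #     sum_{(u,i,j)} ln sigmoid(x_ui - x_uj) - reg * (||P||^2 + ||Q||^2)
    # where x_ui = P[u].dot(Q[i]) and j is a sampled negative (unseen) item.
    # optimization() below performs the corresponding SGD step:
    #     s = sigmoid(x_ui - x_uj)
    #     P[u] += lRate * (1 - s) * (Q[i] - Q[j])
    #     Q[i] += lRate * (1 - s) * P[u];  Q[j] -= lRate * (1 - s) * P[u]
    # followed by the L2 weight-decay terms and loss += -log(s).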
def __init__(self,conf,trainingSet=None,testSet=None,fold='[1]'):
super(BPR, self).__init__(conf,trainingSet,testSet,fold)
# def readConfiguration(self):
# super(BPR, self).readConfiguration()
def initModel(self):
super(BPR, self).initModel()
def buildModel(self):
print 'Preparing item sets...'
self.PositiveSet = defaultdict(dict)
#self.NegativeSet = defaultdict(list)
for user in self.data.user:
for item in self.data.trainSet_u[user]:
if self.data.trainSet_u[user][item] >= 1:
self.PositiveSet[user][item] = 1
# else:
# self.NegativeSet[user].append(item)
print 'training...'
iteration = 0
itemList = self.data.item.keys()
while iteration < self.maxIter:
self.loss = 0
for user in self.PositiveSet:
u = self.data.user[user]
for item in self.PositiveSet[user]:
i = self.data.item[item]
item_j = choice(itemList)
while (self.PositiveSet[user].has_key(item_j)):
item_j = choice(itemList)
j = self.data.item[item_j]
self.optimization(u,i,j)
self.loss += self.regU * (self.P * self.P).sum() + self.regI * (self.Q * self.Q).sum()
iteration += 1
if self.isConverged(iteration):
break
def optimization(self,u,i,j):
s = sigmoid(self.P[u].dot(self.Q[i]) - self.P[u].dot(self.Q[j]))
self.P[u] += self.lRate * (1 - s) * (self.Q[i] - self.Q[j])
self.Q[i] += self.lRate * (1 - s) * self.P[u]
self.Q[j] -= self.lRate * (1 - s) * self.P[u]
self.P[u] -= self.lRate * self.regU * self.P[u]
self.Q[i] -= self.lRate * self.regI * self.Q[i]
self.Q[j] -= self.lRate * self.regI * self.Q[j]
self.loss += -log(s)
def predict(self,user,item):
if self.data.containsUser(user) and self.data.containsItem(item):
u = self.data.getUserId(user)
i = self.data.getItemId(item)
predictRating = sigmoid(self.Q[i].dot(self.P[u]))
return predictRating
else:
return sigmoid(self.data.globalMean)
def next_batch(self):
batch_id=0
while batch_id<self.train_size:
if batch_id+self.batch_size<=self.train_size:
users = [self.data.trainingData[idx][0] for idx in range(batch_id,self.batch_size+batch_id)]
items = [self.data.trainingData[idx][1] for idx in range(batch_id,self.batch_size+batch_id)]
batch_id+=self.batch_size
else:
users = [self.data.trainingData[idx][0] for idx in range(batch_id, self.train_size)]
items = [self.data.trainingData[idx][1] for idx in range(batch_id, self.train_size)]
batch_id=self.train_size
u_idx,i_idx,j_idx = [],[],[]
item_list = self.data.item.keys()
for i,user in enumerate(users):
i_idx.append(self.data.item[items[i]])
u_idx.append(self.data.user[user])
neg_item = choice(item_list)
while neg_item in self.data.trainSet_u[user]:
neg_item = choice(item_list)
j_idx.append(self.data.item[neg_item])
yield u_idx,i_idx,j_idx
def buildModel_tf(self):
super(BPR, self).buildModel_tf()
self.neg_idx = tf.placeholder(tf.int32, name="neg_holder")
self.neg_item_embedding = tf.nn.embedding_lookup(self.V, self.neg_idx)
y = tf.reduce_sum(tf.multiply(self.user_embedding,self.item_embedding),1)\
-tf.reduce_sum(tf.multiply(self.user_embedding,self.neg_item_embedding),1)
loss = -tf.reduce_sum(tf.log(tf.sigmoid(y))) + self.regU * (tf.nn.l2_loss(self.user_embedding) +
tf.nn.l2_loss(self.item_embedding) +
tf.nn.l2_loss(self.neg_item_embedding))
opt = tf.train.AdamOptimizer(self.lRate)
train = opt.minimize(loss)
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
for iteration in range(self.maxIter):
for n,batch in enumerate(self.next_batch()):
user_idx, i_idx, j_idx = batch
_, l = sess.run([train, loss], feed_dict={self.u_idx: user_idx, self.neg_idx: j_idx,self.v_idx: i_idx})
print 'training:', iteration + 1, 'batch', n, 'loss:', l
self.P,self.Q = sess.run([self.U,self.V])
def predictForRanking(self, u):
'invoked to rank all the items for the user'
if self.data.containsUser(u):
u = self.data.getUserId(u)
return self.Q.dot(self.P[u])
else:
return [self.data.globalMean] * self.num_items
| gpl-3.0 | 562,972,796,094,163,300 | 39.632353 | 123 | 0.55067 | false |
yatinkumbhare/openstack-nova | tools/regression_tester.py | 101 | 3538 | #!/usr/bin/env python
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tool for checking if patch contains a regression test.
By default runs against current patch but can be set to use any gerrit review
as specified by change number (uses 'git review -d').
Idea: take tests from patch to check, and run against code from previous patch.
If new tests pass, then no regression test; if new tests fail against old code
then either
* new tests depend on new code and cannot confirm regression test is valid
(false positive)
 * new tests detect the bug being fixed (detect valid regression test)
Due to the risk of false positives, the results from this need some human
interpretation.
"""
from __future__ import print_function
import optparse
import string
import subprocess
import sys
def run(cmd, fail_ok=False):
print("running: %s" % cmd)
obj = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True)
obj.wait()
if obj.returncode != 0 and not fail_ok:
print("The above command terminated with an error.")
sys.exit(obj.returncode)
return obj.stdout.read()
def main():
usage = """
Tool for checking if a patch includes a regression test.
Usage: %prog [options]"""
parser = optparse.OptionParser(usage)
parser.add_option("-r", "--review", dest="review",
help="gerrit review number to test")
(options, args) = parser.parse_args()
if options.review:
original_branch = run("git rev-parse --abbrev-ref HEAD")
run("git review -d %s" % options.review)
else:
print ("no gerrit review number specified, running on latest commit"
"on current branch.")
test_works = False
# run new tests with old code
run("git checkout HEAD^ nova")
run("git checkout HEAD nova/tests")
# identify which tests have changed
tests = run("git whatchanged --format=oneline -1 | grep \"nova/tests\" "
"| cut -f2").split()
test_list = []
for test in tests:
test_list.append(string.replace(test[0:-3], '/', '.'))
if test_list == []:
test_works = False
expect_failure = ""
else:
# run new tests, expect them to fail
expect_failure = run(("tox -epy27 %s 2>&1" % string.join(test_list)),
fail_ok=True)
if "FAILED (id=" in expect_failure:
test_works = True
# cleanup
run("git checkout HEAD nova")
if options.review:
new_branch = run("git status | head -1 | cut -d ' ' -f 4")
run("git checkout %s" % original_branch)
run("git branch -D %s" % new_branch)
print(expect_failure)
print("")
print("*******************************")
if test_works:
print("FOUND a regression test")
else:
print("NO regression test")
sys.exit(1)
if __name__ == "__main__":
main()
| apache-2.0 | -7,639,716,123,157,085,000 | 31.458716 | 79 | 0.633974 | false |
uni-peter-zheng/tp-qemu | qemu/tests/openflow_acl_test.py | 2 | 18485 | import logging
import re
import os
from autotest.client.shared import error
from autotest.client.shared import utils
from virttest import utils_net, utils_test, utils_misc
from virttest import aexpect
from virttest import remote
from virttest import data_dir
@error.context_aware
def run(test, params, env):
"""
Test Step:
1. Boot up guest using the openvswitch bridge
        2. Setup related service in test environment (http, ftp, etc.) (optional)
3. Access the service in guest
4. Setup access control rules in ovs to disable the access
5. Access the service in guest
6. Setup access control rules in ovs to enable the access
7. Access the service in guest
8. Delete the access control rules in ovs
9. Access the service in guest
Params:
:param test: QEMU test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
def access_service(access_sys, access_targets, disabled, host_ip,
ref=False):
err_msg = ""
err_type = ""
for asys in access_sys:
for atgt in access_targets:
logging.debug("Try to access target %s from %s" % (atgt, asys))
access_params = access_sys[asys]
atgt_disabled = access_params['disabled_%s' % atgt]
if asys in vms_tags:
vm = env.get_vm(asys)
session = vm.wait_for_login(timeout=timeout)
run_func = session.cmd
remote_src = vm
ssh_src_ip = vm.get_address()
else:
run_func = utils.system_output
remote_src = "localhost"
ssh_src_ip = host_ip
if atgt in vms_tags:
vm = env.get_vm(atgt)
access_re_sub_string = vm.wait_for_get_address(0)
else:
access_re_sub_string = host_ip
access_cmd = re.sub("ACCESS_TARGET", access_re_sub_string,
access_params['access_cmd'])
ref_cmd = re.sub("ACCESS_TARGET", access_re_sub_string,
access_params['ref_cmd'])
if access_cmd in ["ssh", "telnet"]:
if atgt in vms_tags:
target_vm = env.get_vm(atgt)
target_ip = target_vm.get_address()
else:
target_vm = "localhost"
target_ip = host_ip
out = ""
out_err = ""
try:
out = remote_login(access_cmd, target_ip,
remote_src, params, host_ip)
stat = 0
except remote.LoginError, err:
stat = 1
out_err = "Failed to login %s " % atgt
out_err += "from %s, err: %s" % (asys, err.output)
try:
out += remote_login(access_cmd, ssh_src_ip,
target_vm, params, host_ip)
except remote.LoginError, err:
stat += 1
out_err += "Failed to login %s " % asys
out_err += "from %s, err: %s" % (atgt, err.output)
if out_err:
out = out_err
else:
try:
out = run_func(access_cmd, timeout=op_timeout)
stat = 0
check_string = access_params.get("check_from_output")
if check_string and check_string in out:
stat = 1
except (aexpect.ShellCmdError, error.CmdError,
aexpect.ShellTimeoutError), err:
if isinstance(err, error.CmdError):
out = err.result_obj.stderr
stat = err.result_obj.exit_status
else:
out = err.output
if isinstance(err, aexpect.ShellTimeoutError):
stat = 1
session.close()
session = vm.wait_for_login(timeout=timeout)
run_func = session.cmd
else:
stat = err.status
if access_params.get("clean_cmd"):
try:
run_func(access_params['clean_cmd'])
except Exception:
pass
if disabled and atgt_disabled and stat == 0:
err_msg += "Still can access %s after" % atgt
err_msg += " disable it from ovs. "
err_msg += "Command: %s. " % access_cmd
err_msg += "Output: %s" % out
if disabled and atgt_disabled and stat != 0:
logging.debug("Can not access target as expect.")
if not disabled and stat != 0:
if ref:
err_msg += "Can not access %s at the" % atgt
err_msg += " beginning. Please check your setup."
err_type = "ref"
else:
err_msg += "Still can not access %s" % atgt
err_msg += " after enable the access"
err_msg += "Command: %s. " % access_cmd
err_msg += "Output: %s" % out
if err_msg:
session.close()
if err_type == "ref":
raise error.TestNAError(err_msg)
raise error.TestFail(err_msg)
if not ref_cmd:
session.close()
return
try:
out = run_func(ref_cmd, timeout=op_timeout)
stat = 0
except (aexpect.ShellCmdError, error.CmdError,
aexpect.ShellTimeoutError), err:
if isinstance(err, error.CmdError):
out = err.result_obj.stderr
stat = err.result_obj.exit_status
else:
out = err.output
if isinstance(err, aexpect.ShellTimeoutError):
stat = 1
else:
stat = err.status
if stat != 0:
if ref:
err_msg += "Refernce command failed at beginning."
err_type = "ref"
else:
err_msg += "Refernce command failed after setup"
err_msg += " the rules"
err_msg += "Command: %s. " % ref_cmd
err_msg += "Output: %s" % out
if err_msg:
session.close()
if err_type == "ref":
raise error.TestNAError(err_msg)
raise error.TestFail(err_msg)
session.close()
def get_acl_cmd(protocol, in_port, action, extra_options):
acl_cmd = protocol.strip()
acl_cmd += ",in_port=%s" % in_port.strip()
if extra_options.strip():
acl_cmd += ",%s" % ",".join(extra_options.strip().split())
if action.strip():
acl_cmd += ",action=%s" % action.strip()
return acl_cmd
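    # Illustrative result (hypothetical values): get_acl_cmd("tcp", "5", "drop",
    # "nw_dst=192.168.1.10,tp_dst=80") returns
    # "tcp,in_port=5,nw_dst=192.168.1.10,tp_dst=80,action=drop", i.e. a flow
    # spec suitable for the ovs-ofctl add-flow/mod-flows/del-flows calls below.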
def acl_rules_check(acl_rules, acl_setup_cmd):
acl_setup_cmd = re.sub("action=", "actions=", acl_setup_cmd)
acl_option = re.split(",", acl_setup_cmd)
for line in acl_rules.splitlines():
rule = [_.lower() for _ in re.split("[ ,]", line) if _]
item_in_rule = 0
for acl_item in acl_option:
if acl_item.lower() in rule:
item_in_rule += 1
if item_in_rule == len(acl_option):
return True
return False
def remote_login(client, host, src, params_login, host_ip):
src_name = src
if src != "localhost":
src_name = src.name
logging.info("Login %s from %s" % (host, src))
port = params_login["target_port"]
username = params_login["username"]
password = params_login["password"]
prompt = params_login["shell_prompt"]
linesep = eval("'%s'" % params_login.get("shell_linesep", r"\n"))
quit_cmd = params.get("quit_cmd", "exit")
if host == host_ip:
# Try to login from guest to host.
prompt = "^\[.*\][\#\$]\s*$"
linesep = "\n"
username = params_login["host_username"]
password = params_login["host_password"]
quit_cmd = "exit"
if client == "ssh":
# We only support ssh for Linux in this test
cmd = ("ssh -o UserKnownHostsFile=/dev/null "
"-o StrictHostKeyChecking=no "
"-o PreferredAuthentications=password -p %s %s@%s" %
(port, username, host))
elif client == "telnet":
cmd = "telnet -l %s %s %s" % (username, host, port)
else:
raise remote.LoginBadClientError(client)
if src == "localhost":
logging.debug("Login with command %s" % cmd)
session = aexpect.ShellSession(cmd, linesep=linesep, prompt=prompt)
else:
if params_login.get("os_type") == "windows":
if client == "telnet":
cmd = "C:\\telnet.py %s %s " % (host, username)
cmd += "%s \"%s\" && " % (password, prompt)
cmd += "C:\\wait_for_quit.py"
cmd = "%s || ping 127.0.0.1 -n 5 -w 1000 > nul" % cmd
else:
cmd += " || sleep 5"
session = src.wait_for_login()
logging.debug("Sending login command: %s" % cmd)
session.sendline(cmd)
try:
out = remote.handle_prompts(session, username, password,
prompt, timeout, debug=True)
except Exception, err:
session.close()
raise err
try:
session.cmd(quit_cmd)
session.close()
except Exception:
pass
return out
def setup_service(setup_target):
setup_timeout = int(params.get("setup_timeout", 360))
if setup_target == "localhost":
setup_func = utils.system_output
os_type = "linux"
else:
setup_vm = env.get_vm(setup_target)
setup_session = setup_vm.wait_for_login(timeout=timeout)
setup_func = setup_session.cmd
os_type = params["os_type"]
setup_params = params.object_params(os_type)
setup_cmd = setup_params.get("setup_cmd", "service SERVICE restart")
prepare_cmd = setup_params.get("prepare_cmd")
setup_cmd = re.sub("SERVICE", setup_params.get("service", ""),
setup_cmd)
error.context("Set up %s service in %s" % (setup_params.get("service"),
setup_target),
logging.info)
if prepare_cmd:
setup_func(prepare_cmd, timeout=setup_timeout)
setup_func(setup_cmd, timeout=setup_timeout)
if setup_target != "localhost":
setup_session.close()
def stop_service(setup_target):
setup_timeout = int(params.get("setup_timeout", 360))
if setup_target == "localhost":
setup_func = utils.system_output
os_type = "linux"
else:
setup_vm = env.get_vm(setup_target)
setup_session = setup_vm.wait_for_login(timeout=timeout)
setup_func = setup_session.cmd
os_type = params["os_type"]
setup_params = params.object_params(os_type)
stop_cmd = setup_params.get("stop_cmd", "service SERVICE stop")
cleanup_cmd = setup_params.get("cleanup_cmd")
stop_cmd = re.sub("SERVICE", setup_params.get("service", ""),
stop_cmd)
error.context("Stop %s service in %s" % (setup_params.get("service"),
setup_target),
logging.info)
if stop_cmd:
setup_func(stop_cmd, timeout=setup_timeout)
if cleanup_cmd:
setup_func(cleanup_cmd, timeout=setup_timeout)
if setup_target != "localhost":
setup_session.close()
timeout = int(params.get("login_timeout", '360'))
op_timeout = int(params.get("op_timeout", "360"))
acl_protocol = params['acl_protocol']
acl_extra_options = params.get("acl_extra_options", "")
for vm in env.get_all_vms():
session = vm.wait_for_login(timeout=timeout)
if params.get("disable_iptables") == "yes":
session.cmd("iptables -F")
#session.cmd_status_output("service iptables stop")
if params.get("copy_scripts"):
root_dir = data_dir.get_root_dir()
script_dir = os.path.join(root_dir, "shared", "scripts")
tmp_dir = params.get("tmp_dir", "C:\\")
for script in params.get("copy_scripts").split():
script_path = os.path.join(script_dir, script)
vm.copy_files_to(script_path, tmp_dir)
session.close()
vms_tags = params.objects("vms")
br_name = params.get("netdst")
if br_name == "private":
br_name = params.get("priv_brname", 'autotest-prbr0')
for setup_target in params.get("setup_targets", "").split():
setup_service(setup_target)
access_targets = params.get("access_targets", "localhost").split()
deny_target = params.get("deny_target", "localhost")
all_target = params.get("extra_target", "").split() + vms_tags
target_port = params["target_port"]
vm = env.get_vm(vms_tags[0])
nic = vm.virtnet[0]
if_name = nic.ifname
params_nic = params.object_params("nic1")
if params["netdst"] == "private":
params_nic["netdst"] = params_nic.get("priv_brname", "atbr0")
host_ip = utils_net.get_host_ip_address(params_nic)
if deny_target in vms_tags:
deny_vm = env.get_vm(deny_target)
deny_vm_ip = deny_vm.wait_for_get_address(0)
elif deny_target == "localhost":
deny_vm_ip = host_ip
if "NW_DST" in acl_extra_options:
acl_extra_options = re.sub("NW_DST", deny_vm_ip, acl_extra_options)
acl_extra_options = re.sub("TARGET_PORT", target_port, acl_extra_options)
access_sys = {}
for target in all_target:
if target not in access_targets:
if target in vms_tags:
os_type = params["os_type"]
else:
os_type = "linux"
os_params = params.object_params(os_type)
access_param = os_params.object_params(target)
check_from_output = access_param.get("check_from_output")
access_sys[target] = {}
access_sys[target]['access_cmd'] = access_param['access_cmd']
access_sys[target]['ref_cmd'] = access_param.get('ref_cmd', "")
access_sys[target]['clean_cmd'] = access_param.get('clean_guest',
"")
if check_from_output:
access_sys[target]['check_from_output'] = check_from_output
for tgt in access_targets:
tgt_param = access_param.object_params(tgt)
acl_disabled = tgt_param.get("acl_disabled") == "yes"
access_sys[target]['disabled_%s' % tgt] = acl_disabled
error.context("Try to access target before setup the rules", logging.info)
access_service(access_sys, access_targets, False, host_ip, ref=True)
error.context("Disable the access in ovs", logging.info)
br_infos = utils_net.openflow_manager(br_name, "show").stdout
if_port = re.findall("(\d+)\(%s\)" % if_name, br_infos)
if not if_port:
raise error.TestNAError("Can not find %s in bridge %s" % (if_name,
br_name))
if_port = if_port[0]
acl_cmd = get_acl_cmd(acl_protocol, if_port, "drop", acl_extra_options)
utils_net.openflow_manager(br_name, "add-flow", acl_cmd)
acl_rules = utils_net.openflow_manager(br_name, "dump-flows").stdout
if not acl_rules_check(acl_rules, acl_cmd):
raise error.TestFail("Can not find the rules from"
" ovs-ofctl: %s" % acl_rules)
error.context("Try to acess target to exam the disable rules",
logging.info)
access_service(access_sys, access_targets, True, host_ip)
error.context("Enable the access in ovs", logging.info)
acl_cmd = get_acl_cmd(acl_protocol, if_port, "normal", acl_extra_options)
utils_net.openflow_manager(br_name, "mod-flows", acl_cmd)
acl_rules = utils_net.openflow_manager(br_name, "dump-flows").stdout
if not acl_rules_check(acl_rules, acl_cmd):
raise error.TestFail("Can not find the rules from"
" ovs-ofctl: %s" % acl_rules)
error.context("Try to acess target to exam the enable rules",
logging.info)
access_service(access_sys, access_targets, False, host_ip)
error.context("Delete the access rules in ovs", logging.info)
acl_cmd = get_acl_cmd(acl_protocol, if_port, "", acl_extra_options)
utils_net.openflow_manager(br_name, "del-flows", acl_cmd)
acl_rules = utils_net.openflow_manager(br_name, "dump-flows").stdout
if acl_rules_check(acl_rules, acl_cmd):
raise error.TestFail("Still can find the rules from"
" ovs-ofctl: %s" % acl_rules)
error.context("Try to acess target to exam after delete the rules",
logging.info)
access_service(access_sys, access_targets, False, host_ip)
for setup_target in params.get("setup_targets", "").split():
stop_service(setup_target)
| gpl-2.0 | -4,274,354,512,471,735,300 | 42.803318 | 79 | 0.501758 | false |
HLFH/CouchPotatoServer | libs/suds/builder.py | 197 | 4220 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The I{builder} module provides a wsdl/xsd defined types factory
"""
from logging import getLogger
from suds import *
from suds.sudsobject import Factory
log = getLogger(__name__)
class Builder:
""" Builder used to construct an object for types defined in the schema """
def __init__(self, resolver):
"""
@param resolver: A schema object name resolver.
@type resolver: L{resolver.Resolver}
"""
self.resolver = resolver
def build(self, name):
""" build a an object for the specified typename as defined in the schema """
if isinstance(name, basestring):
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
else:
type = name
cls = type.name
if type.mixed():
data = Factory.property(cls)
else:
data = Factory.object(cls)
resolved = type.resolve()
md = data.__metadata__
md.sxtype = resolved
md.ordering = self.ordering(resolved)
history = []
self.add_attributes(data, resolved)
for child, ancestry in type.children():
if self.skip_child(child, ancestry):
continue
self.process(data, child, history[:])
return data
def process(self, data, type, history):
""" process the specified type then process its children """
if type in history:
return
if type.enum():
return
history.append(type)
resolved = type.resolve()
value = None
if type.unbounded():
value = []
else:
if len(resolved) > 0:
if resolved.mixed():
value = Factory.property(resolved.name)
md = value.__metadata__
md.sxtype = resolved
else:
value = Factory.object(resolved.name)
md = value.__metadata__
md.sxtype = resolved
md.ordering = self.ordering(resolved)
setattr(data, type.name, value)
if value is not None:
data = value
if not isinstance(data, list):
self.add_attributes(data, resolved)
for child, ancestry in resolved.children():
if self.skip_child(child, ancestry):
continue
self.process(data, child, history[:])
def add_attributes(self, data, type):
""" add required attributes """
for attr, ancestry in type.attributes():
name = '_%s' % attr.name
value = attr.get_default()
setattr(data, name, value)
def skip_child(self, child, ancestry):
""" get whether or not to skip the specified child """
if child.any(): return True
for x in ancestry:
if x.choice():
return True
return False
def ordering(self, type):
""" get the ordering """
result = []
for child, ancestry in type.resolve():
name = child.name
if child.name is None:
continue
if child.isattr():
name = '_%s' % child.name
result.append(name)
return result
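# Illustrative usage sketch (added for illustration; not part of the original
# module). Builder is normally constructed by suds with a schema object
# resolver; the stand-in resolver below only demonstrates the lookup-failure
# path and is an assumption, not a real suds resolver.
if __name__ == '__main__':
    class _MissingTypeResolver:
        def find(self, name):
            return None
    try:
        Builder(_MissingTypeResolver()).build('Person')
    except TypeNotFound as exc:
        print('could not build object: %s' % exc)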
| gpl-3.0 | 6,701,571,807,839,068,000 | 33.884298 | 85 | 0.565877 | false |
ashutosh-mishra/youtube-dl | youtube_dl/extractor/jeuxvideo.py | 1 | 1985 | # coding: utf-8
import json
import re
import xml.etree.ElementTree
from .common import InfoExtractor
class JeuxVideoIE(InfoExtractor):
_VALID_URL = r'http://.*?\.jeuxvideo\.com/.*/(.*?)-\d+\.htm'
_TEST = {
u'url': u'http://www.jeuxvideo.com/reportages-videos-jeux/0004/00046170/tearaway-playstation-vita-gc-2013-tearaway-nous-presente-ses-papiers-d-identite-00115182.htm',
u'file': u'5182.mp4',
u'md5': u'046e491afb32a8aaac1f44dd4ddd54ee',
u'info_dict': {
u'title': u'GC 2013 : Tearaway nous présente ses papiers d\'identité',
u'description': u'Lorsque les développeurs de LittleBigPlanet proposent un nouveau titre, on ne peut que s\'attendre à un résultat original et fort attrayant.\n',
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
title = mobj.group(1)
webpage = self._download_webpage(url, title)
xml_link = self._html_search_regex(
r'<param name="flashvars" value="config=(.*?)" />',
webpage, u'config URL')
video_id = self._search_regex(
r'http://www\.jeuxvideo\.com/config/\w+/\d+/(.*?)/\d+_player\.xml',
xml_link, u'video ID')
xml_config = self._download_webpage(
xml_link, title, u'Downloading XML config')
config = xml.etree.ElementTree.fromstring(xml_config.encode('utf-8'))
info_json = self._search_regex(
r'(?sm)<format\.json>(.*?)</format\.json>',
xml_config, u'JSON information')
info = json.loads(info_json)['versions'][0]
video_url = 'http://video720.jeuxvideo.com/' + info['file']
return {
'id': video_id,
'title': config.find('titre_video').text,
'ext': 'mp4',
'url': video_url,
'description': self._og_search_description(webpage),
'thumbnail': config.find('image').text,
}
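# Illustrative usage sketch (not part of the original file): extractors such as
# JeuxVideoIE are normally driven through youtube-dl's downloader rather than
# instantiated directly. A hedged example, assuming the YoutubeDL API of this
# vintage (requires network access, so left as a comment):
#
#     import youtube_dl
#     ydl = youtube_dl.YoutubeDL({'skip_download': True})
#     info = ydl.extract_info(JeuxVideoIE._TEST[u'url'], download=False)
#     print(info[u'title'])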
| unlicense | -5,161,699,879,710,836,000 | 37.076923 | 174 | 0.580808 | false |
getnikola/plugins | v7/mistune/mistune.py | 1 | 3824 | # -*- coding: utf-8 -*-
# Copyright © 2014 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Implementation of compile_html based on Mistune."""
from __future__ import unicode_literals
import codecs
import os
import re
try:
import mistune
except ImportError:
mistune = None # NOQA
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict # NOQA
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, req_missing, write_metadata
class CompileMistune(PageCompiler):
"""Compile Markdown into HTML using Mistune."""
name = "mistune"
demote_headers = True
def __init__(self, *args, **kwargs):
super(CompileMistune, self).__init__(*args, **kwargs)
if mistune is not None:
self.parser = mistune.Markdown()
def compile(self, source, dest, is_two_file=True, post=None, lang=None):
"""Compile the source file into HTML and save as dest."""
if mistune is None:
req_missing(['mistune'], 'build this site (compile with mistune)')
makedirs(os.path.dirname(dest))
with codecs.open(dest, "w+", "utf8") as out_file:
with codecs.open(source, "r", "utf8") as in_file:
data = in_file.read()
if not is_two_file:
data = re.split('(\n\n|\r\n\r\n)', data, maxsplit=1)[-1]
output = self.parser(data)
output, shortcode_deps = self.site.apply_shortcodes(output, filename=source, with_dependencies=True, extra_context=dict(post=post))
out_file.write(output)
if post is None:
if shortcode_deps:
self.logger.error(
"Cannot save dependencies for post {0} (post unknown)",
source)
else:
post._depfile[dest] += shortcode_deps
def compile_html(self, source, dest, is_two_file=True):
"""Compile the post into HTML (deprecated API)."""
try:
post = self.site.post_per_input_file[source]
except KeyError:
post = None
        return self.compile(source, dest, is_two_file, post, None)
def create_post(self, path, **kw):
content = kw.pop('content', 'Write your post here.')
onefile = kw.pop('onefile', False)
kw.pop('is_page', False)
metadata = OrderedDict()
metadata.update(self.default_metadata)
metadata.update(kw)
makedirs(os.path.dirname(path))
if not content.endswith('\n'):
content += '\n'
with codecs.open(path, "wb+", "utf8") as fd:
if onefile:
fd.write('<!-- \n')
fd.write(write_metadata(metadata))
fd.write('-->\n\n')
fd.write(content)
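# Illustrative sketch (not part of the original plugin): all Markdown rendering
# above is delegated to mistune.Markdown(), whose instances are callable on a
# Markdown string and return HTML.
if __name__ == '__main__':
    if mistune is not None:
        _md = mistune.Markdown()
        print(_md('# Title\n\nSome *emphasised* text.'))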
| mit | -6,196,318,237,792,976,000 | 35.759615 | 143 | 0.639289 | false |
RNAer/qiita | qiita_db/reference.py | 2 | 6342 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from os.path import join
from .base import QiitaObject
from .exceptions import QiitaDBDuplicateError
from .util import (insert_filepaths, convert_to_id,
get_mountpoint)
from .sql_connection import SQLConnectionHandler
class Reference(QiitaObject):
r"""Object to interact with reference sequence databases
Attributes
----------
sequence_fp
taxonomy_fp
tree_fp
Methods
-------
create
exists
See Also
--------
QiitaObject
"""
_table = "reference"
@classmethod
def create(cls, name, version, seqs_fp, tax_fp=None, tree_fp=None):
r"""Creates a new reference object with a new id on the storage system
Parameters
----------
name : str
The name of the reference database
version : str
The version of the reference database
seqs_fp : str
The path to the reference sequence file
tax_fp : str, optional
The path to the reference taxonomy file
tree_fp : str, optional
The path to the reference tree file
Returns
-------
A new instance of `cls` to access to the Reference stored in the DB
Raises
------
QiitaDBDuplicateError
If the reference database with name `name` and version `version`
already exists on the system
"""
if cls.exists(name, version):
raise QiitaDBDuplicateError("Reference",
"Name: %s, Version: %s"
% (name, version))
conn_handler = SQLConnectionHandler()
seq_id = insert_filepaths([(seqs_fp, convert_to_id("reference_seqs",
"filepath_type",
conn_handler))],
"%s_%s" % (name, version), "reference",
"filepath", conn_handler)[0]
# Check if the database has taxonomy file
tax_id = None
if tax_fp:
fps = [(tax_fp, convert_to_id("reference_tax", "filepath_type",
conn_handler))]
tax_id = insert_filepaths(fps, "%s_%s" % (name, version),
"reference", "filepath", conn_handler)[0]
# Check if the database has tree file
tree_id = None
if tree_fp:
fps = [(tree_fp, convert_to_id("reference_tree", "filepath_type",
conn_handler))]
tree_id = insert_filepaths(fps, "%s_%s" % (name, version),
"reference", "filepath",
conn_handler)[0]
# Insert the actual object to the db
ref_id = conn_handler.execute_fetchone(
"INSERT INTO qiita.{0} (reference_name, reference_version, "
"sequence_filepath, taxonomy_filepath, tree_filepath) VALUES "
"(%s, %s, %s, %s, %s) RETURNING reference_id".format(cls._table),
(name, version, seq_id, tax_id, tree_id))[0]
return cls(ref_id)
@classmethod
def exists(cls, name, version):
r"""Checks if a given object info is already present on the DB
Parameters
----------
name : str
The name of the reference database
version : str
The version of the reference database
Raises
------
QiitaDBNotImplementedError
If the method is not overwritten by a subclass
"""
conn_handler = SQLConnectionHandler()
return conn_handler.execute_fetchone(
"SELECT EXISTS(SELECT * FROM qiita.{0} WHERE "
"reference_name=%s AND reference_version=%s)".format(cls._table),
(name, version))[0]
@property
def name(self):
conn_handler = SQLConnectionHandler()
return conn_handler.execute_fetchone(
"SELECT reference_name FROM qiita.{0} WHERE "
"reference_id = %s".format(self._table), (self._id,))[0]
@property
def version(self):
conn_handler = SQLConnectionHandler()
return conn_handler.execute_fetchone(
"SELECT reference_version FROM qiita.{0} WHERE "
"reference_id = %s".format(self._table), (self._id,))[0]
@property
def sequence_fp(self):
conn_handler = SQLConnectionHandler()
rel_path = conn_handler.execute_fetchone(
"SELECT f.filepath FROM qiita.filepath f JOIN qiita.{0} r ON "
"r.sequence_filepath=f.filepath_id WHERE "
"r.reference_id=%s".format(self._table), (self._id,))[0]
_, basefp = get_mountpoint('reference', conn_handler=conn_handler)[0]
return join(basefp, rel_path)
@property
def taxonomy_fp(self):
conn_handler = SQLConnectionHandler()
rel_path = conn_handler.execute_fetchone(
"SELECT f.filepath FROM qiita.filepath f JOIN qiita.{0} r ON "
"r.taxonomy_filepath=f.filepath_id WHERE "
"r.reference_id=%s".format(self._table), (self._id,))[0]
_, basefp = get_mountpoint('reference', conn_handler=conn_handler)[0]
return join(basefp, rel_path)
@property
def tree_fp(self):
conn_handler = SQLConnectionHandler()
rel_path = conn_handler.execute_fetchone(
"SELECT f.filepath FROM qiita.filepath f JOIN qiita.{0} r ON "
"r.tree_filepath=f.filepath_id WHERE "
"r.reference_id=%s".format(self._table), (self._id,))[0]
_, basefp = get_mountpoint('reference', conn_handler=conn_handler)[0]
return join(basefp, rel_path)
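# Illustrative usage sketch (not part of the original module): creating and
# reading back a reference needs a configured Qiita database and real files,
# so this is a hedged, non-runnable example; all names and paths are
# assumptions.
#
#     ref = Reference.create('Greengenes', '13_8',
#                            seqs_fp='/path/to/gg_13_8.fna',
#                            tax_fp='/path/to/gg_13_8_taxonomy.txt',
#                            tree_fp='/path/to/gg_13_8.tre')
#     print(ref.name, ref.version, ref.sequence_fp)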
| bsd-3-clause | -7,243,398,489,543,433,000 | 36.305882 | 79 | 0.541312 | false |
Curso-OpenShift/Formulario | OverFlow/ProjectFormulario/env/lib/python2.7/site-packages/django/conf/locale/nb/formats.py | 504 | 1766 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
# '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
# '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
# '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
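# Illustrative sketch (not part of the original settings module): Django tries
# each *_INPUT_FORMATS entry with strptime-style parsing; the equivalent in
# plain Python for the first date format above:
#
#     from datetime import datetime
#     datetime.strptime('25.10.2006', '%d.%m.%Y').date()  # -> date(2006, 10, 25)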
| gpl-3.0 | -2,061,388,936,181,356,000 | 41.047619 | 81 | 0.524915 | false |
AthinaB/synnefo | snf-astakos-app/astakos/api/user.py | 4 | 11741 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from functools import wraps, partial
from django.views.decorators.csrf import csrf_exempt
from django import http
from astakos.im import transaction
from django.utils import simplejson as json
from django.forms.models import model_to_dict
from django.core.validators import validate_email, ValidationError
from snf_django.lib import api
from snf_django.lib.api import faults
from .util import (
get_uuid_displayname_catalogs as get_uuid_displayname_catalogs_util,
send_feedback as send_feedback_util,
user_from_token)
from astakos.im import settings
from astakos.admin import stats
from astakos.im.models import AstakosUser, get_latest_terms
from astakos.im.auth import make_local_user
from astakos.im import activation_backends
ADMIN_GROUPS = settings.ADMIN_API_PERMITTED_GROUPS
activation_backend = activation_backends.get_backend()
logger = logging.getLogger(__name__)
@csrf_exempt
@api.api_method(http_method="POST", token_required=True, user_required=False,
logger=logger)
@user_from_token # Authenticate user!!
def get_uuid_displayname_catalogs(request):
# Normal Response Codes: 200
# Error Response Codes: internalServerError (500)
# badRequest (400)
# unauthorised (401)
return get_uuid_displayname_catalogs_util(request)
@csrf_exempt
@api.api_method(http_method="POST", token_required=True, user_required=False,
logger=logger)
@user_from_token # Authenticate user!!
def send_feedback(request, email_template_name='im/feedback_mail.txt'):
# Normal Response Codes: 200
# Error Response Codes: internalServerError (500)
# badRequest (400)
# unauthorised (401)
return send_feedback_util(request, email_template_name)
# API ADMIN UTILS AND ENDPOINTS
def user_api_method(http_method):
"""
Common decorator for user admin api views.
"""
def wrapper(func):
@api.api_method(http_method=http_method, user_required=True,
token_required=True, logger=logger,
serializations=['json'])
@api.user_in_groups(permitted_groups=ADMIN_GROUPS,
logger=logger)
@wraps(func)
def method(*args, **kwargs):
return func(*args, **kwargs)
return method
return wrapper
def user_to_dict(user, detail=True):
user_fields = ['first_name', 'last_name', 'email']
date_fields = ['date_joined', 'moderated_at', 'verified_at',
'auth_token_expires']
status_fields = ['is_active', 'is_rejected', 'deactivated_reason',
'accepted_policy', 'rejected_reason']
if not detail:
fields = user_fields
date_fields = []
d = model_to_dict(user, fields=user_fields + status_fields)
d['id'] = user.uuid
for date_field in date_fields:
val = getattr(user, date_field)
if val:
d[date_field] = api.utils.isoformat(getattr(user, date_field))
else:
d[date_field] = None
methods = d['authentication_methods'] = []
d['roles'] = list(user.groups.values_list("name", flat=True))
for provider in user.auth_providers.filter():
method_fields = ['identifier', 'active', 'affiliation']
method = model_to_dict(provider, fields=method_fields)
method['backend'] = provider.auth_backend
method['metadata'] = provider.info
if provider.auth_backend == 'astakos':
method['identifier'] = user.email
methods.append(method)
return d
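# Illustrative sketch (not part of the original module): user_to_dict() above
# produces the payload serialised by the admin API endpoints below; a hedged,
# abridged example of its shape (all field values are assumptions):
#
#     {'id': '<uuid>', 'email': '[email protected]', 'first_name': 'Alice',
#      'last_name': 'Smith', 'is_active': True, 'is_rejected': False,
#      'date_joined': '2014-01-01T00:00:00+00:00', 'roles': ['academic'],
#      'authentication_methods': [{'backend': 'astakos',
#                                  'identifier': '[email protected]'}]}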
def users_demux(request):
if request.method == 'GET':
return users_list(request)
elif request.method == 'POST':
return users_create(request)
else:
return api.api_method_not_allowed(request)
def user_demux(request, user_id):
if request.method == 'GET':
return user_detail(request, user_id)
elif request.method == 'PUT':
return user_update(request, user_id)
else:
return api.api_method_not_allowed(request)
@user_api_method('GET')
def users_list(request, action='list', detail=False):
logger.debug('users_list detail=%s', detail)
users = AstakosUser.objects.filter()
dict_func = partial(user_to_dict, detail=detail)
users_dicts = map(dict_func, users)
data = json.dumps({'users': users_dicts})
return http.HttpResponse(data, status=200,
content_type='application/json')
@user_api_method('POST')
@transaction.commit_on_success
def users_create(request):
user_id = request.user_uniq
req = api.utils.get_json_body(request)
logger.info('users_create: %s request: %s', user_id, req)
user_data = req.get('user', {})
email = user_data.get('username', None)
first_name = user_data.get('first_name', None)
last_name = user_data.get('last_name', None)
affiliation = user_data.get('affiliation', None)
password = user_data.get('password', None)
metadata = user_data.get('metadata', {})
password_gen = AstakosUser.objects.make_random_password
if not password:
password = password_gen()
try:
validate_email(email)
except ValidationError:
raise faults.BadRequest("Invalid username (email format required)")
if AstakosUser.objects.verified_user_exists(email):
raise faults.Conflict("User '%s' already exists" % email)
if not first_name:
raise faults.BadRequest("Invalid first_name")
if not last_name:
raise faults.BadRequest("Invalid last_name")
has_signed_terms = not(get_latest_terms())
try:
user = make_local_user(email, first_name=first_name,
last_name=last_name, password=password,
has_signed_terms=has_signed_terms)
if metadata:
# we expect a unique local auth provider for the user
provider = user.auth_providers.get()
provider.info = metadata
provider.affiliation = affiliation
provider.save()
user = AstakosUser.objects.get(pk=user.pk)
code = user.verification_code
ver_res = activation_backend.handle_verification(user, code)
if ver_res.is_error():
raise Exception(ver_res.message)
mod_res = activation_backend.handle_moderation(user, accept=True)
if mod_res.is_error():
raise Exception(ver_res.message)
except Exception, e:
raise faults.BadRequest(e.message)
user_data = {
'id': user.uuid,
'password': password,
'auth_token': user.auth_token,
}
data = json.dumps({'user': user_data})
return http.HttpResponse(data, status=200, content_type='application/json')
@user_api_method('POST')
@transaction.commit_on_success
def user_action(request, user_id):
admin_id = request.user_uniq
req = api.utils.get_json_body(request)
logger.info('user_action: %s user: %s request: %s', admin_id, user_id, req)
if 'activate' in req:
try:
user = AstakosUser.objects.get(uuid=user_id)
except AstakosUser.DoesNotExist:
raise faults.ItemNotFound("User not found")
activation_backend.activate_user(user)
user = AstakosUser.objects.get(uuid=user_id)
user_data = {
'id': user.uuid,
'is_active': user.is_active
}
data = json.dumps({'user': user_data})
return http.HttpResponse(data, status=200,
content_type='application/json')
if 'deactivate' in req:
try:
user = AstakosUser.objects.get(uuid=user_id)
except AstakosUser.DoesNotExist:
raise faults.ItemNotFound("User not found")
activation_backend.deactivate_user(
user, reason=req['deactivate'].get('reason', None))
user = AstakosUser.objects.get(uuid=user_id)
user_data = {
'id': user.uuid,
'is_active': user.is_active
}
data = json.dumps({'user': user_data})
return http.HttpResponse(data, status=200,
content_type='application/json')
if 'renewToken' in req:
try:
user = AstakosUser.objects.get(uuid=user_id)
except AstakosUser.DoesNotExist:
raise faults.ItemNotFound("User not found")
user.renew_token()
user.save()
user_data = {
'id': user.uuid,
'auth_token': user.auth_token,
}
data = json.dumps({'user': user_data})
return http.HttpResponse(data, status=200,
content_type='application/json')
raise faults.BadRequest("Invalid action")
@user_api_method('PUT')
@transaction.commit_on_success
def user_update(request, user_id):
admin_id = request.user_uniq
req = api.utils.get_json_body(request)
logger.info('user_update: %s user: %s request: %s', admin_id, user_id, req)
user_data = req.get('user', {})
try:
user = AstakosUser.objects.get(uuid=user_id)
except AstakosUser.DoesNotExist:
raise faults.ItemNotFound("User not found")
email = user_data.get('username', None)
first_name = user_data.get('first_name', None)
last_name = user_data.get('last_name', None)
affiliation = user_data.get('affiliation', None)
password = user_data.get('password', None)
metadata = user_data.get('metadata', {})
if 'password' in user_data:
user.set_password(password)
if 'username' in user_data:
try:
validate_email(email)
except ValidationError:
raise faults.BadRequest("Invalid username (email format required)")
if AstakosUser.objects.verified_user_exists(email):
raise faults.Conflict("User '%s' already exists" % email)
user.email = email
if 'first_name' in user_data:
user.first_name = first_name
if 'last_name' in user_data:
user.last_name = last_name
try:
user.save()
if 'metadata' in user_data:
provider = user.auth_providers.get(auth_backend="astakos")
provider.info = metadata
            if 'affiliation' in user_data:
provider.affiliation = affiliation
provider.save()
    except Exception, e:
        raise faults.BadRequest(e.message)
data = json.dumps({'user': user_to_dict(user)})
return http.HttpResponse(data, status=200, content_type='application/json')
@user_api_method('GET')
def user_detail(request, user_id):
admin_id = request.user_uniq
logger.info('user_detail: %s user: %s', admin_id, user_id)
try:
user = AstakosUser.objects.get(uuid=user_id)
except AstakosUser.DoesNotExist:
raise faults.ItemNotFound("User not found")
user_data = user_to_dict(user, detail=True)
data = json.dumps({'user': user_data})
return http.HttpResponse(data, status=200, content_type='application/json')
| gpl-3.0 | 4,507,309,923,037,638,000 | 32.641834 | 79 | 0.633507 | false |
adw0rd/lettuce | tests/integration/lib/Django-1.3/tests/modeltests/transactions/tests_25.py | 51 | 5306 | from __future__ import with_statement
from django.db import connection, transaction, IntegrityError
from django.test import TransactionTestCase, skipUnlessDBFeature
from models import Reporter
class TransactionContextManagerTests(TransactionTestCase):
def create_reporter_and_fail(self):
Reporter.objects.create(first_name="Bob", last_name="Holtzman")
raise Exception
@skipUnlessDBFeature('supports_transactions')
def test_autocommit(self):
"""
The default behavior is to autocommit after each save() action.
"""
with self.assertRaises(Exception):
self.create_reporter_and_fail()
# The object created before the exception still exists
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_autocommit_context_manager(self):
"""
The autocommit context manager works exactly the same as the default
behavior.
"""
with self.assertRaises(Exception):
with transaction.autocommit():
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_autocommit_context_manager_with_using(self):
"""
The autocommit context manager also works with a using argument.
"""
with self.assertRaises(Exception):
with transaction.autocommit(using="default"):
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success(self):
"""
With the commit_on_success context manager, the transaction is only
committed if the block doesn't throw an exception.
"""
with self.assertRaises(Exception):
with transaction.commit_on_success():
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 0)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_with_using(self):
"""
The commit_on_success context manager also works with a using argument.
"""
with self.assertRaises(Exception):
with transaction.commit_on_success(using="default"):
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 0)
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_succeed(self):
"""
If there aren't any exceptions, the data will get saved.
"""
Reporter.objects.create(first_name="Alice", last_name="Smith")
with transaction.commit_on_success():
Reporter.objects.filter(first_name="Alice").delete()
self.assertQuerysetEqual(Reporter.objects.all(), [])
@skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_exit(self):
with transaction.autocommit():
with transaction.commit_on_success():
Reporter.objects.create(first_name="Bobby", last_name="Tables")
# Much more formal
r = Reporter.objects.get()
r.first_name = "Robert"
r.save()
r = Reporter.objects.get()
self.assertEqual(r.first_name, "Robert")
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed(self):
"""
You can manually manage transactions if you really want to, but you
have to remember to commit/rollback.
"""
with transaction.commit_manually():
Reporter.objects.create(first_name="Libby", last_name="Holtzman")
transaction.commit()
self.assertEqual(Reporter.objects.count(), 1)
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed_mistake(self):
"""
If you forget, you'll get bad errors.
"""
with self.assertRaises(transaction.TransactionManagementError):
with transaction.commit_manually():
Reporter.objects.create(first_name="Scott", last_name="Browning")
@skipUnlessDBFeature('supports_transactions')
def test_manually_managed_with_using(self):
"""
The commit_manually function also works with a using argument.
"""
with self.assertRaises(transaction.TransactionManagementError):
with transaction.commit_manually(using="default"):
Reporter.objects.create(first_name="Walter", last_name="Cronkite")
@skipUnlessDBFeature('requires_rollback_on_dirty_transaction')
def test_bad_sql(self):
"""
Regression for #11900: If a block wrapped by commit_on_success
writes a transaction that can't be committed, that transaction should
be rolled back. The bug is only visible using the psycopg2 backend,
though the fix is generally a good idea.
"""
with self.assertRaises(IntegrityError):
with transaction.commit_on_success():
cursor = connection.cursor()
cursor.execute("INSERT INTO transactions_reporter (first_name, last_name) VALUES ('Douglas', 'Adams');")
transaction.set_dirty()
transaction.rollback()
| gpl-3.0 | -4,988,310,388,361,887,000 | 37.729927 | 120 | 0.647569 | false |
wpoa/wiki-imports | lib/python2.7/site-packages/mwparserfromhell/nodes/extras/__init__.py | 1 | 1389 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 Ben Kurtovic <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This package contains objects used by
:py:class:`~.Node`\ s but which are not nodes themselves. This includes the
parameters of Templates or the attributes of HTML tags.
"""
from .attribute import Attribute
from .parameter import Parameter
| gpl-3.0 | 6,203,557,384,413,909,000 | 45.3 | 79 | 0.769618 | false |
botswana-harvard/edc-pharma | edc_pharmacy/old/dispense/prescription_creator.py | 1 | 3055 | from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from ..medications import medications
from ..models import Prescription, MedicationDefinition
class PrescriptionCreator:
"""Creates all prescription records after completing patient history model.
"""
def __init__(self, appointment=None, selected=None, medication_name=None,
options=None):
self.medication_name = medication_name
self.appointment = appointment
self.selected = selected
self.options = options
self.save_or_update()
def create_all(self):
"""Create prescription record per medication and update the next
refill datetime.
"""
if self.selected:
medication_definition = medications.get(self.medication_name)
            self.create_prescription(medication_definition=medication_definition)
else:
for medication_definition in self.appointment.profile_medications:
self.create_prescription(
medication_definition=medication_definition)
self.appointment.update_next_dispense_datetime()
def medication(self, medication_definition=None):
try:
medication_obj = MedicationDefinition.objects.get(
name=medication_definition.name)
except ObjectDoesNotExist:
medication_obj = MedicationDefinition.objects.create(
name=medication_definition.name,
unit=medication_definition.unit,
category=medication_definition.category,
description=medication_definition.description,
single_dose=medication_definition.single_dose,
use_body_weight=medication_definition.use_body_weight,
milligram=medication_definition.milligram,
strength=medication_definition.strength)
return medication_obj
def create_prescription(self, medication_definition=None):
medication_obj = self.medication(
medication_definition=medication_definition)
model_obj = self.prescription(
medication_definition=medication_obj)
model_obj.save()
def save_or_update(self):
self.create_all()
self.appointment.save()
def prescription(self, medication_definition=None):
try:
prescription = Prescription.objects.get(
appointment=self.appointment,
medication_definition=medication_definition)
except Prescription.DoesNotExist:
prescription = Prescription.objects.create(
appointment=self.appointment,
dispense_datetime=datetime.today(),
medication_definition=medication_definition,
subject_identifier=self.appointment.subject_identifier,
medication_description=medication_definition.description,
category=medication_definition.category,
** self.options)
return prescription
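# Illustrative usage sketch (not part of the original module): the creator is
# driven by an edc appointment instance, so this is a hedged, non-runnable
# example; the medication name and options below are assumptions.
#
#     PrescriptionCreator(
#         appointment=appointment,
#         selected=True,
#         medication_name='ampicillin',
#         options={'number_of_days': 7})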
| gpl-2.0 | -8,991,040,122,975,133,000 | 39.733333 | 79 | 0.654337 | false |
alabid/pycrunchbase | tests/test_node.py | 3 | 1070 | from unittest import TestCase
import json
from pycrunchbase.resource.node import Node
from pycrunchbase.resource.utils import parse_date
class TestNode(Node):
KNOWN_PROPERTIES = ['property1', 'property2']
def _coerce_values(self):
# intentionally coerce bad values for test purposes
attr = 'property1'
if getattr(self, attr, None):
setattr(self, attr, parse_date(getattr(self, attr)))
data = {
"type": "TestNode",
"uuid": "uuid",
'properties': {
'property1': 'one',
'property2': 'two'
},
'relationships': {
'unknown': {
'paging': {},
'items': {}
}
},
}
class NodeTestCase(TestCase):
def test_node_creation_from_dict(self):
node = TestNode(data)
self.assertEqual(node.property1, 'one')
self.assertEqual(node.property2, 'two')
def test_node_creation_from_string(self):
node = TestNode(json.dumps(data))
self.assertEqual(node.property1, 'one')
self.assertEqual(node.property2, 'two')
| mit | 7,323,904,174,145,346,000 | 23.883721 | 64 | 0.603738 | false |
b0ttl3z/SickRage | lib/synchronousdeluge/transfer.py | 114 | 1346 | import zlib
import struct
import socket
import ssl
from synchronousdeluge import rencode
__all__ = ["DelugeTransfer"]
class DelugeTransfer(object):
def __init__(self):
self.sock = None
self.conn = None
self.connected = False
def connect(self, hostport):
if self.connected:
self.disconnect()
self.sock = socket.create_connection(hostport)
self.conn = ssl.wrap_socket(self.sock, None, None, False, ssl.CERT_NONE, ssl.PROTOCOL_TLSv1)
self.connected = True
def disconnect(self):
if self.conn:
self.conn.close()
self.connected = False
def send_request(self, request):
data = (request.format(),)
payload = zlib.compress(rencode.dumps(data))
self.conn.sendall(payload)
buf = b""
while True:
data = self.conn.recv(1024)
if not data:
self.connected = False
break
buf += data
dobj = zlib.decompressobj()
try:
message = rencode.loads(dobj.decompress(buf))
except (ValueError, zlib.error, struct.error):
# Probably incomplete data, read more
continue
else:
buf = dobj.unused_data
yield message
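# Illustrative usage sketch (not part of the original module): a hedged example
# of driving the transfer layer directly. Host, port and the request object are
# assumptions; requests normally come from synchronousdeluge's client and must
# expose a format() method returning an rencode-serialisable value.
#
#     transfer = DelugeTransfer()
#     transfer.connect(('localhost', 58846))
#     for reply in transfer.send_request(request):
#         print(reply)
#     transfer.disconnect()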
| gpl-3.0 | 4,868,565,388,258,207,000 | 22.614035 | 100 | 0.552006 | false |
jymannob/CouchPotatoServer | libs/subliminal/services/thesubdb.py | 107 | 2775 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <[email protected]>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..language import language_set
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..videos import Episode, Movie, UnknownVideo
import logging
logger = logging.getLogger(__name__)
class TheSubDB(ServiceBase):
server_url = 'http://api.thesubdb.com'
user_agent = 'SubDB/1.0 (subliminal/0.6; https://github.com/Diaoul/subliminal)'
api_based = True
# Source: http://api.thesubdb.com/?action=languages
languages = language_set(['af', 'cs', 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'id', 'it',
'la', 'nl', 'no', 'oc', 'pl', 'pt', 'ro', 'ru', 'sl', 'sr', 'sv',
'tr'])
videos = [Movie, Episode, UnknownVideo]
require_video = True
def list_checked(self, video, languages):
return self.query(video.path, video.hashes['TheSubDB'], languages)
def query(self, filepath, moviehash, languages):
r = self.session.get(self.server_url, params={'action': 'search', 'hash': moviehash})
if r.status_code == 404:
logger.debug(u'Could not find subtitles for hash %s' % moviehash)
return []
if r.status_code != 200:
logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
return []
available_languages = language_set(r.content.split(','))
languages &= available_languages
if not languages:
logger.debug(u'Could not find subtitles for hash %s with languages %r (only %r available)' % (moviehash, languages, available_languages))
return []
subtitles = []
for language in languages:
path = get_subtitle_path(filepath, language, self.config.multi)
subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s?action=download&hash=%s&language=%s' % (self.server_url, moviehash, language.alpha2))
subtitles.append(subtitle)
return subtitles
Service = TheSubDB
| gpl-3.0 | 1,127,259,008,734,367,700 | 43.047619 | 176 | 0.651171 | false |
shangwuhencc/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm's convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of squared
distances to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (with a randomly generated dataset each) for each strategy,
# so as to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause | 943,628,025,795,480,200 | 35.336134 | 79 | 0.660962 | false |
inetCatapult/troposphere | troposphere/policies.py | 20 | 1033 | from . import AWSProperty, AWSAttribute, validate_pausetime
from .validators import positive_integer, integer, boolean
class AutoScalingRollingUpdate(AWSProperty):
props = {
'MaxBatchSize': (positive_integer, False),
'MinInstancesInService': (integer, False),
'PauseTime': (validate_pausetime, False),
'SuspendProcesses': ([basestring], False),
'WaitOnResourceSignals': (boolean, False),
}
class AutoScalingScheduledAction(AWSProperty):
props = {
'IgnoreUnmodifiedGroupSizeProperties': (boolean, False),
}
class UpdatePolicy(AWSAttribute):
props = {
'AutoScalingRollingUpdate': (AutoScalingRollingUpdate, False),
'AutoScalingScheduledAction': (AutoScalingScheduledAction, False),
}
class ResourceSignal(AWSProperty):
props = {
'Count': (positive_integer, False),
'Timeout': (validate_pausetime, False),
}
class CreationPolicy(AWSAttribute):
props = {
'ResourceSignal': (ResourceSignal, True),
}
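# Illustrative usage sketch (not part of the original module): these attribute
# and property classes are normally attached to troposphere resources; a hedged
# example, assuming the keyword-argument constructor style used elsewhere in
# troposphere:
#
#     update_policy = UpdatePolicy(
#         AutoScalingRollingUpdate=AutoScalingRollingUpdate(
#             MaxBatchSize=1,
#             MinInstancesInService=2,
#             PauseTime='PT5M',
#             WaitOnResourceSignals=True))
#     creation_policy = CreationPolicy(
#         ResourceSignal=ResourceSignal(Count=2, Timeout='PT10M'))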
| bsd-2-clause | -7,416,100,760,144,589,000 | 26.184211 | 74 | 0.67667 | false |
bakerlover/lab5 | main/admin.py | 7 | 2264 | # -*- coding: utf-8 -*-
from flask.ext import wtf
from google.appengine.api import app_identity
import flask
import auth
import util
import model
import config
from main import app
class ConfigUpdateForm(wtf.Form):
analytics_id = wtf.StringField('Analytics ID', filters=[util.strip_filter])
announcement_html = wtf.TextAreaField('Announcement HTML', filters=[util.strip_filter])
announcement_type = wtf.SelectField('Announcement Type', choices=[(t, t.title()) for t in model.Config.announcement_type._choices])
brand_name = wtf.StringField('Brand Name', [wtf.validators.required()], filters=[util.strip_filter])
facebook_app_id = wtf.StringField('Facebook App ID', filters=[util.strip_filter])
facebook_app_secret = wtf.StringField('Facebook App Secret', filters=[util.strip_filter])
feedback_email = wtf.StringField('Feedback Email', [wtf.validators.optional(), wtf.validators.email()], filters=[util.email_filter])
flask_secret_key = wtf.StringField('Flask Secret Key', [wtf.validators.required()], filters=[util.strip_filter])
twitter_consumer_key = wtf.StringField('Twitter Consumer Key', filters=[util.strip_filter])
twitter_consumer_secret = wtf.StringField('Twitter Consumer Secret', filters=[util.strip_filter])
@app.route('/_s/admin/config/', endpoint='admin_config_update_service')
@app.route('/admin/config/', methods=['GET', 'POST'])
@auth.admin_required
def admin_config_update():
config_db = model.Config.get_master_db()
form = ConfigUpdateForm(obj=config_db)
if form.validate_on_submit():
form.populate_obj(config_db)
config_db.put()
reload(config)
app.config.update(CONFIG_DB=config_db)
return flask.redirect(flask.url_for('welcome'))
if flask.request.path.startswith('/_s/'):
return util.jsonify_model_db(config_db)
instances_url = None
if config.PRODUCTION:
instances_url = '%s?app_id=%s&version_id=%s' % (
'https://appengine.google.com/instances',
app_identity.get_application_id(),
config.CURRENT_VERSION_ID,
)
return flask.render_template(
'admin/config_update.html',
title='Admin Config',
html_class='admin-config',
form=form,
config_db=config_db,
instances_url=instances_url,
has_json=True,
)
| mit | 1,860,260,801,247,866,000 | 36.733333 | 134 | 0.708922 | false |
tensorflow/federated | tensorflow_federated/python/simulation/baselines/cifar100/__init__.py | 1 | 1147 | # Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for constructing baseline tasks for the CIFAR-100 dataset."""
from tensorflow_federated.python.simulation.baselines.cifar100.image_classification_tasks import create_image_classification_task
from tensorflow_federated.python.simulation.baselines.cifar100.image_classification_tasks import DEFAULT_CROP_HEIGHT
from tensorflow_federated.python.simulation.baselines.cifar100.image_classification_tasks import DEFAULT_CROP_WIDTH
from tensorflow_federated.python.simulation.baselines.cifar100.image_classification_tasks import ResnetModel
| apache-2.0 | -2,652,925,411,633,662,000 | 59.368421 | 129 | 0.813426 | false |
kk7ds/luvs | unifi_stream_server.py | 1 | 8384 | import asyncio
import aiohttp
from aiohttp import web
import logging
from logging import handlers
import signal
import socket
import time
import unifi_ws_server
class StreamerContext(object):
pass
class RequestHandler(aiohttp.server.ServerHttpProtocol):
def __init__(self, **kwargs):
self._log = kwargs.pop('log')
super(RequestHandler, self).__init__(**kwargs)
def _do_stream(self, message, payload, camera_mac, stream):
response = aiohttp.Response(self.writer, 200,
http_version=message.version)
try:
self._context = yield from controller.stream_camera(camera_mac,
stream,
response)
except NoSuchCamera:
response = aiohttp.Response(self.writer, 404)
response.send_headers()
response.write_eof()
return
except CameraInUse:
response = aiohttp.Response(self.writer, 409)
response.send_headers()
response.write_eof()
return
while (self._context.streaming
and controller.ws_server.is_camera_managed(camera_mac)):
yield from asyncio.sleep(1)
self._log.debug('Closing HTTP streaming connection for %s' % camera_mac)
response.write_eof()
self._context.controller.streaming_stopped(self._context)
    def connection_lost(self, exc):
        # _context only exists once a stream was successfully started
        context = getattr(self, '_context', None)
        if context is not None:
            context.controller.streaming_stopped(context)
        super(RequestHandler, self).connection_lost(exc)
@asyncio.coroutine
def handle_request(self, message, payload):
self._log.debug('GET %s' % message.path)
path_elements = message.path.split('/')
self._log.debug('Path: %s' % path_elements)
if len(path_elements) == 4 and path_elements[1] == 'stream':
camera_mac = path_elements[2]
stream = path_elements[3]
self._log.debug('Requested stream %s for %s' % (stream,
camera_mac))
yield from self._do_stream(message, payload, camera_mac, stream)
else:
response = aiohttp.Response(self.writer, 403)
response.send_headers()
response.write_eof()
class Streamer(asyncio.Protocol):
def __init__(self):
super(Streamer, self).__init__()
@classmethod
def factory(cls, context):
def make_thing():
instance = cls()
instance._context = context
instance.log = context.controller.log.getChild('strm')
return instance
return make_thing
def connection_made(self, transport):
peername = transport.get_extra_info('peername')
self.log.info('Connection from %s:%i' % peername)
self.transport = transport
self.bytes = 0
self.last_report = 0
if not self._context.response.is_headers_sent():
self._context.response.send_headers()
def _cleanup_everything(self):
try:
result = self._context.controller.streaming_stopped(self._context)
except:
self.log.exception('While stopping streaming')
try:
self.transport.close()
except:
pass
self.log.debug('Total data proxied: %i KB' % (self.bytes / 1024))
def connection_lost(self, exc):
self._cleanup_everything()
def data_received(self, data):
try:
self._context.response.write(data)
self.bytes += len(data)
except socket.error:
self.log.debug('Receiver vanished')
self._cleanup_everything()
except Exception as e:
self.log.exception('Unexpected error: %s' % e)
self._cleanup_everything()
if (time.time() - self.last_report) > 10:
self.log.debug('Proxied %i KB for %s/%s' % (
self.bytes / 1024,
self._context.camera_mac,
self._context.stream))
self.last_report = time.time()
class NoSuchCamera(Exception):
pass
class CameraInUse(Exception):
pass
class UVCController(object):
def __init__(self, my_ip, baseport=7000):
self._cameras = {}
self.my_ip = my_ip
self.baseport = baseport
self.log = logging.getLogger('ctrl')
self.ws_server = unifi_ws_server.UVCWebsocketServer(
log=self.log.getChild('ws'))
@asyncio.coroutine
def init_server(self, loop):
port = 9999
srv = yield from loop.create_server(
lambda: RequestHandler(log=self.log.getChild('http'), debug=True),
'0.0.0.0', port)
self.log.info('HTTP stream server started on port %i' % port)
return srv
def start(self):
loop = self.loop = asyncio.get_event_loop()
loop.add_signal_handler(signal.SIGUSR1,
self.ws_server.reload_all_configs)
ws_server_server = loop.run_until_complete(
self.ws_server.make_server(7443))
http_server = loop.run_until_complete(self.init_server(loop))
loop.run_forever()
def get_free_port(self):
ports_in_use = [x.streamer_port for x in self._cameras.values()]
for i in range(self.baseport, self.baseport + 100):
if i not in ports_in_use:
return i
raise Exception('Too many ports')
def stream_camera(self, camera_mac, stream, response):
if not self.ws_server.is_camera_managed(camera_mac):
raise NoSuchCamera('No such camera')
if (camera_mac, stream) in self._cameras:
raise CameraInUse('Camera in use')
context = StreamerContext()
context.streaming = True
context.controller = self
context.camera_mac = camera_mac
context.stream = stream
context.response = response
context.streamer_port = self.get_free_port()
self.log.debug('Starting stream listener on port %i for camera %s' % (
context.streamer_port, camera_mac))
context.streamer = yield from self.loop.create_server(
Streamer.factory(context), '0.0.0.0', context.streamer_port)
self._cameras[(camera_mac, stream)] = context
yield from self.ws_server.start_video(camera_mac, self.my_ip,
context.streamer_port,
stream=context.stream)
return context
def streaming_stopped(self, context):
if not context.streaming:
# We've already done cleanup here
return
context.streaming = False
self.log.info('Stopping %s camera streaming' % context.camera_mac)
try:
context.streamer.close()
except:
self.log.exception('Failed to stop streaming server')
@asyncio.coroutine
def stop():
try:
yield from self.ws_server.stop_video(context.camera_mac,
stream=context.stream)
except unifi_ws_server.NoSuchCamera:
pass
asyncio.async(stop())
del self._cameras[(context.camera_mac, context.stream)]
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
print('You must specify the IP of this server')
sys.exit(1)
log_format = '%(asctime)s %(name)s/%(levelname)s: %(message)s'
date_format = '%Y-%m-%dT%H:%M:%S'
logging.getLogger(None).setLevel(logging.DEBUG)
logging.getLogger('asyncio').setLevel(logging.ERROR)
logging.getLogger('websockets').setLevel(logging.WARNING)
lf = logging.Formatter(log_format, datefmt=date_format)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(lf)
logging.getLogger(None).addHandler(console)
debuglg = handlers.RotatingFileHandler('debug.log',
maxBytes=5*1024*1024,
backupCount=4)
debuglg.setLevel(logging.DEBUG)
debuglg.setFormatter(lf)
logging.getLogger(None).addHandler(debuglg)
controller = UVCController(sys.argv[1])
controller.start()
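# Illustrative usage sketch (not part of the original file): once started,
# cameras managed over the websocket port (7443) can be proxied over HTTP on
# port 9999 via the /stream/<camera_mac>/<stream> path handled above; the MAC
# address and stream name below are assumptions.
#
#     python unifi_stream_server.py 192.168.1.10
#     curl -o capture.ts http://192.168.1.10:9999/stream/00de4d9a8f00/video1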
| gpl-3.0 | 7,281,761,001,856,700,000 | 32.806452 | 80 | 0.576574 | false |
RuudBurger/CouchPotatoServer | couchpotato/core/media/movie/providers/metadata/wdtv.py | 39 | 7626 | from xml.etree.ElementTree import Element, SubElement, tostring
import os
import re
import traceback
import xml.dom.minidom
from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getTitle
from couchpotato.core.logger import CPLog
autoload = 'WdtvLive'
log = CPLog(__name__)
class WdtvLive(MovieMetaData):
def getThumbnailName(self, name, root, i):
return self.createMetaName('%s.jpg', name, root)
def createMetaName(self, basename, name, root):
return os.path.join(root, basename.replace('%s', name))
def getNfoName(self, name, root, i):
return self.createMetaName('%s.xml', name, root)
def getNfo(self, movie_info=None, data=None, i=0):
if not data: data = {}
if not movie_info: movie_info = {}
nfoxml = Element('details')
# Title
try:
el = SubElement(nfoxml, 'title')
el.text = toUnicode(getTitle(data))
except:
pass
# IMDB id
try:
el = SubElement(nfoxml, 'id')
el.text = toUnicode(data['identifier'])
except:
pass
# Runtime
try:
runtime = SubElement(nfoxml, 'runtime')
runtime.text = '%s min' % movie_info.get('runtime')
except:
pass
# Other values
types = ['year', 'mpaa', 'originaltitle:original_title', 'outline', 'plot', 'tagline', 'premiered:released']
for type in types:
if ':' in type:
name, type = type.split(':')
else:
name = type
try:
if movie_info.get(type):
el = SubElement(nfoxml, name)
el.text = toUnicode(movie_info.get(type, ''))
except:
pass
# Rating
for rating_type in ['imdb', 'rotten', 'tmdb']:
try:
r, v = movie_info['rating'][rating_type]
rating = SubElement(nfoxml, 'rating')
rating.text = str(r)
votes = SubElement(nfoxml, 'votes')
votes.text = str(v)
break
except:
log.debug('Failed adding rating info from %s: %s', (rating_type, traceback.format_exc()))
# Genre
for genre in movie_info.get('genres', []):
genres = SubElement(nfoxml, 'genre')
genres.text = toUnicode(genre)
# Actors
for actor_name in movie_info.get('actor_roles', {}):
role_name = movie_info['actor_roles'][actor_name]
actor = SubElement(nfoxml, 'actor')
name = SubElement(actor, 'name')
name.text = toUnicode(actor_name)
if role_name:
role = SubElement(actor, 'role')
role.text = toUnicode(role_name)
if movie_info['images']['actors'].get(actor_name):
thumb = SubElement(actor, 'thumb')
thumb.text = toUnicode(movie_info['images']['actors'].get(actor_name))
# Directors
for director_name in movie_info.get('directors', []):
director = SubElement(nfoxml, 'director')
director.text = toUnicode(director_name)
# Writers
for writer in movie_info.get('writers', []):
writers = SubElement(nfoxml, 'credits')
writers.text = toUnicode(writer)
# Sets or collections
collection_name = movie_info.get('collection')
if collection_name:
collection = SubElement(nfoxml, 'set')
collection.text = toUnicode(collection_name)
sorttitle = SubElement(nfoxml, 'sorttitle')
sorttitle.text = '%s %s' % (toUnicode(collection_name), movie_info.get('year'))
# Images
for image_url in movie_info['images']['poster_original']:
image = SubElement(nfoxml, 'thumb')
image.text = toUnicode(image_url)
image_types = [
('fanart', 'backdrop_original'),
('banner', 'banner'),
('discart', 'disc_art'),
('logo', 'logo'),
('clearart', 'clear_art'),
('landscape', 'landscape'),
('extrathumb', 'extra_thumbs'),
('extrafanart', 'extra_fanart'),
]
for image_type in image_types:
sub, type = image_type
sub_element = SubElement(nfoxml, sub)
for image_url in movie_info['images'][type]:
image = SubElement(sub_element, 'thumb')
image.text = toUnicode(image_url)
# Add trailer if found
trailer_found = False
if data.get('renamed_files'):
for filename in data.get('renamed_files'):
if 'trailer' in filename:
trailer = SubElement(nfoxml, 'trailer')
trailer.text = toUnicode(filename)
trailer_found = True
if not trailer_found and data['files'].get('trailer'):
trailer = SubElement(nfoxml, 'trailer')
trailer.text = toUnicode(data['files']['trailer'][0])
# Add file metadata
fileinfo = SubElement(nfoxml, 'fileinfo')
streamdetails = SubElement(fileinfo, 'streamdetails')
# Video data
if data['meta_data'].get('video'):
video = SubElement(streamdetails, 'video')
codec = SubElement(video, 'codec')
codec.text = toUnicode(data['meta_data']['video'])
aspect = SubElement(video, 'aspect')
aspect.text = str(data['meta_data']['aspect'])
width = SubElement(video, 'width')
width.text = str(data['meta_data']['resolution_width'])
height = SubElement(video, 'height')
height.text = str(data['meta_data']['resolution_height'])
# Audio data
if data['meta_data'].get('audio'):
audio = SubElement(streamdetails, 'audio')
codec = SubElement(audio, 'codec')
codec.text = toUnicode(data['meta_data'].get('audio'))
channels = SubElement(audio, 'channels')
channels.text = toUnicode(data['meta_data'].get('audio_channels'))
# Clean up the xml and return it
nfoxml = xml.dom.minidom.parseString(tostring(nfoxml))
xml_string = nfoxml.toprettyxml(indent = ' ')
text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)
xml_string = text_re.sub('>\g<1></', xml_string)
return xml_string.encode('utf-8')
config = [{
'name': 'wdtvlive',
'groups': [
{
'tab': 'renamer',
'subtab': 'metadata',
'name': 'wdtvlive_metadata',
'label': 'WDTV Live',
'description': 'Metadata for WDTV',
'options': [
{
'name': 'meta_enabled',
'default': False,
'type': 'enabler',
},
{
'name': 'meta_nfo',
'label': 'NFO',
'default': True,
'type': 'bool',
'description': 'Generate metadata xml',
},
{
'name': 'meta_thumbnail',
'label': 'Thumbnail',
'default': True,
'type': 'bool',
'description': 'Generate thumbnail jpg',
}
],
},
],
}]
| gpl-3.0 | -8,191,716,866,975,516,000 | 33.506787 | 116 | 0.515998 | false |
k1203/meeting | public/app/assets/plugins/vector-map/converter/simplifier.py | 234 | 5985 | import argparse
import sys
import os
from osgeo import ogr
from osgeo import osr
import anyjson
import shapely.geometry
import shapely.ops
import shapely.wkb
import codecs
import time
format = '%.8f %.8f'
tolerance = 0.01
infile = '/Users/kirilllebedev/Maps/50m-admin-0-countries/ne_50m_admin_0_countries.shp'
outfile = 'map.shp'
# Open the datasource to operate on.
in_ds = ogr.Open( infile, update = 0 )
in_layer = in_ds.GetLayer( 0 )
in_defn = in_layer.GetLayerDefn()
# Create output file with similar information.
shp_driver = ogr.GetDriverByName( 'ESRI Shapefile' )
if os.path.exists('map.shp'):
shp_driver.DeleteDataSource( outfile )
shp_ds = shp_driver.CreateDataSource( outfile )
shp_layer = shp_ds.CreateLayer( in_defn.GetName(),
geom_type = in_defn.GetGeomType(),
srs = in_layer.GetSpatialRef() )
in_field_count = in_defn.GetFieldCount()
for fld_index in range(in_field_count):
src_fd = in_defn.GetFieldDefn( fld_index )
fd = ogr.FieldDefn( src_fd.GetName(), src_fd.GetType() )
fd.SetWidth( src_fd.GetWidth() )
fd.SetPrecision( src_fd.GetPrecision() )
shp_layer.CreateField( fd )
# Load geometries
geometries = []
for feature in in_layer:
geometry = feature.GetGeometryRef()
geometryType = geometry.GetGeometryType()
if geometryType == ogr.wkbPolygon or geometryType == ogr.wkbMultiPolygon:
shapelyGeometry = shapely.wkb.loads( geometry.ExportToWkb() )
#if not shapelyGeometry.is_valid:
#buffer to fix selfcrosses
#shapelyGeometry = shapelyGeometry.buffer(0)
if shapelyGeometry:
geometries.append(shapelyGeometry)
in_layer.ResetReading()
start = int(round(time.time() * 1000))
# Simplification
points = []
connections = {}
counter = 0
for geom in geometries:
counter += 1
polygons = []
if isinstance(geom, shapely.geometry.Polygon):
polygons.append(geom)
else:
for polygon in geom:
polygons.append(polygon)
for polygon in polygons:
if polygon.area > 0:
lines = []
lines.append(polygon.exterior)
for line in polygon.interiors:
lines.append(line)
for line in lines:
for i in range(len(line.coords)-1):
indexFrom = i
indexTo = i+1
pointFrom = format % line.coords[indexFrom]
pointTo = format % line.coords[indexTo]
if pointFrom == pointTo:
continue
if not (pointFrom in connections):
connections[pointFrom] = {}
connections[pointFrom][pointTo] = 1
if not (pointTo in connections):
connections[pointTo] = {}
connections[pointTo][pointFrom] = 1
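# Note on the structure built above (illustrative, hypothetical values):
# 'connections' maps each formatted coordinate to the coordinates it is linked
# to along any ring. A point with more than two connections lies on a border
# shared by several polygons, so it is later treated as a pivot point and the
# shared border segments are simplified once and reused, e.g.:
#   connections['10.00000000 50.00000000'] ->
#       {'10.10000000 50.00000000': 1, '9.90000000 50.00000000': 1, '10.00000000 49.90000000': 1}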
print int(round(time.time() * 1000)) - start
simplifiedLines = {}
pivotPoints = {}
def simplifyRing(ring):
coords = list(ring.coords)[0:-1]
simpleCoords = []
isPivot = False
pointIndex = 0
while not isPivot and pointIndex < len(coords):
pointStr = format % coords[pointIndex]
pointIndex += 1
isPivot = ((len(connections[pointStr]) > 2) or (pointStr in pivotPoints))
pointIndex = pointIndex - 1
if not isPivot:
simpleRing = shapely.geometry.LineString(coords).simplify(tolerance)
if len(simpleRing.coords) <= 2:
return None
else:
pivotPoints[format % coords[0]] = True
pivotPoints[format % coords[-1]] = True
simpleLineKey = format % coords[0]+':'+format % coords[1]+':'+format % coords[-1]
simplifiedLines[simpleLineKey] = simpleRing.coords
return simpleRing
else:
points = coords[pointIndex:len(coords)]
points.extend(coords[0:pointIndex+1])
iFrom = 0
for i in range(1, len(points)):
pointStr = format % points[i]
if ((len(connections[pointStr]) > 2) or (pointStr in pivotPoints)):
line = points[iFrom:i+1]
lineKey = format % line[-1]+':'+format % line[-2]+':'+format % line[0]
if lineKey in simplifiedLines:
simpleLine = simplifiedLines[lineKey]
simpleLine = list(reversed(simpleLine))
else:
simpleLine = shapely.geometry.LineString(line).simplify(tolerance).coords
lineKey = format % line[0]+':'+format % line[1]+':'+format % line[-1]
simplifiedLines[lineKey] = simpleLine
simpleCoords.extend( simpleLine[0:-1] )
iFrom = i
if len(simpleCoords) <= 2:
return None
else:
return shapely.geometry.LineString(simpleCoords)
def simplifyPolygon(polygon):
simpleExtRing = simplifyRing(polygon.exterior)
if simpleExtRing is None:
return None
simpleIntRings = []
for ring in polygon.interiors:
simpleIntRing = simplifyRing(ring)
if simpleIntRing is not None:
simpleIntRings.append(simpleIntRing)
return shapely.geometry.Polygon(simpleExtRing, simpleIntRings)
results = []
for geom in geometries:
polygons = []
simplePolygons = []
if isinstance(geom, shapely.geometry.Polygon):
polygons.append(geom)
else:
for polygon in geom:
polygons.append(polygon)
for polygon in polygons:
simplePolygon = simplifyPolygon(polygon)
if not (simplePolygon is None or simplePolygon._geom is None):
simplePolygons.append(simplePolygon)
if len(simplePolygons) > 0:
results.append(shapely.geometry.MultiPolygon(simplePolygons))
else:
results.append(None)
# Process all features in input layer.
in_feat = in_layer.GetNextFeature()
counter = 0
while in_feat is not None:
if results[counter] is not None:
out_feat = ogr.Feature( feature_def = shp_layer.GetLayerDefn() )
out_feat.SetFrom( in_feat )
out_feat.SetGeometryDirectly(
ogr.CreateGeometryFromWkb(
shapely.wkb.dumps(
results[counter]
)
)
)
shp_layer.CreateFeature( out_feat )
out_feat.Destroy()
else:
print 'geometry is too small: '+in_feat.GetField(16)
in_feat.Destroy()
in_feat = in_layer.GetNextFeature()
counter += 1
# Cleanup
shp_ds.Destroy()
in_ds.Destroy()
print int(round(time.time() * 1000)) - start | mit | 6,680,298,553,978,392,000 | 28.2 | 87 | 0.664495 | false |
aequitas/home-assistant | tests/components/zha/test_switch.py | 7 | 2904 | """Test zha switch."""
from unittest.mock import call, patch
from homeassistant.components.switch import DOMAIN
from homeassistant.const import STATE_ON, STATE_OFF, STATE_UNAVAILABLE
from tests.common import mock_coro
from .common import (
async_init_zigpy_device, make_attribute, make_entity_id,
async_test_device_join, async_enable_traffic
)
ON = 1
OFF = 0
async def test_switch(hass, config_entry, zha_gateway):
"""Test zha switch platform."""
from zigpy.zcl.clusters.general import OnOff, Basic
from zigpy.zcl.foundation import Status
# create zigpy device
zigpy_device = await async_init_zigpy_device(
hass, [OnOff.cluster_id, Basic.cluster_id], [], None, zha_gateway)
# load up switch domain
await hass.config_entries.async_forward_entry_setup(
config_entry, DOMAIN)
await hass.async_block_till_done()
cluster = zigpy_device.endpoints.get(1).on_off
entity_id = make_entity_id(DOMAIN, zigpy_device, cluster)
zha_device = zha_gateway.get_device(zigpy_device.ieee)
# test that the switch was created and that its state is unavailable
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and device
await async_enable_traffic(hass, zha_gateway, [zha_device])
# test that the state has changed from unavailable to off
assert hass.states.get(entity_id).state == STATE_OFF
# turn on at switch
attr = make_attribute(0, 1)
cluster.handle_message(False, 1, 0x0a, [[attr]])
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ON
# turn off at switch
attr.value.value = 0
cluster.handle_message(False, 0, 0x0a, [[attr]])
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_OFF
# turn on from HA
with patch(
'zigpy.zcl.Cluster.request',
return_value=mock_coro([Status.SUCCESS, Status.SUCCESS])):
# turn on via UI
await hass.services.async_call(DOMAIN, 'turn_on', {
'entity_id': entity_id
}, blocking=True)
assert len(cluster.request.mock_calls) == 1
assert cluster.request.call_args == call(
False, ON, (), expect_reply=True, manufacturer=None)
# turn off from HA
with patch(
'zigpy.zcl.Cluster.request',
return_value=mock_coro([Status.SUCCESS, Status.SUCCESS])):
# turn off via UI
await hass.services.async_call(DOMAIN, 'turn_off', {
'entity_id': entity_id
}, blocking=True)
assert len(cluster.request.mock_calls) == 1
assert cluster.request.call_args == call(
False, OFF, (), expect_reply=True, manufacturer=None)
# test joining a new switch to the network and HA
await async_test_device_join(
hass, zha_gateway, OnOff.cluster_id, DOMAIN)
| apache-2.0 | -5,212,971,974,287,757,000 | 35.3 | 74 | 0.664601 | false |
secretsquirrel/the-backdoor-factory | osslsigncode/misc/pagehash.py | 7 | 3180 | #!/usr/bin/python
import struct
import sys
import hashlib
from pyasn1.type import univ
from pyasn1.codec.ber import encoder, decoder
f = open(sys.argv[1], 'rb')
filehdr = f.read(1024)
if filehdr[0:2] != 'MZ':
print "Not a DOS file."
sys.exit(0)
pepos = struct.unpack('<I', filehdr[60:64])[0]
if filehdr[pepos:pepos+4] != 'PE\0\0':
print "Not a PE file."
sys.exit(0)
pepos += 4
nsections = struct.unpack('<H', filehdr[pepos+2:pepos+4])[0]
print "#sections", nsections
magic = struct.unpack('<H', filehdr[pepos+20:pepos+22])[0]
pe32plus = 0
if magic == 0x20b:
pe32plus = 1
elif magic == 0x10b:
pe32plus = 0
else:
print "Unknown magic", magic
sys.exit(0)
sectoralign = struct.unpack('<I', filehdr[pepos+52:pepos+56])[0]
print "Sector alignment", sectoralign
pos = pepos + 112 + pe32plus*16
nrvas = struct.unpack('<I', filehdr[pos:pos+4])[0]
print "#rvas", nrvas
pos += 4
tpos = pos
rvas = []
for i in range(0, nrvas):
(p1,p2) = struct.unpack('<II', filehdr[pos:pos+8])
rvas.append((p1,p2))
pos += 8
sections = []
for i in range(0, nsections):
(vsize,vaddr,rsize,raddr) = struct.unpack('<IIII', filehdr[pos+8:pos+24])
pos += 40
sections.append((vsize,vaddr,rsize,raddr))
hdrend = pos
print "End of headers", pos
print rvas
print sections
sigpos,siglen = rvas[4]
if sigpos == 0:
print "No signature found"
sys.exit(0)
f.seek(sigpos)
sigblob = f.read(siglen)
cid_page_hash = "\xa6\xb5\x86\xd5\xb4\xa1\x24\x66\xae\x05\xa2\x17\xda\x8e\x60\xd6"
oid_ph_v1 = "\x06\x01\x04\x01\x82\x37\x02\x03\x01"
oid_ph_v2 = "\x06\x01\x04\x01\x82\x37\x02\x03\x02"
p = sigblob.find(cid_page_hash)
if p == -1:
print "No page hash present"
sys.exit(0)
p += len(cid_page_hash)
sha1 = True
i = sigblob.find(oid_ph_v1)
if i == -1:
i = sigblob.find(oid_ph_v2)
if i == -1:
print "No page hash found"
sys.exit(0)
sha1 = False
p = i + len(oid_ph_v1)
blob = str(decoder.decode(sigblob[p:])[0].getComponentByPosition(0))
ph = []
i = 0
hashlen = 20
if not sha1:
hashlen = 24
while i < len(blob):
offset = struct.unpack('<I', blob[i:i+4])[0]
i += 4
data = blob[i:i+hashlen]
ph.append((offset,data.encode("hex")))
i += hashlen
if sha1:
md = hashlib.sha1()
else:
md = hashlib.sha256()
b = filehdr[0:pepos+84]
b += filehdr[pepos+88:tpos+4*8]
b += filehdr[tpos+5*8:1024]
b += '\0'*(4096-1024)
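# The ranges skipped when assembling 'b' above appear to be the standard
# Authenticode exclusions: pepos+84..pepos+88 is the optional header CheckSum
# field and tpos+4*8..tpos+5*8 is data directory entry 4 (the Certificate
# Table). The header is then zero-padded to one 4096-byte page, so conceptually:
#   md.update(header_without_checksum_and_cert_entry + '\0' * padding)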
md.update(b)
digest = md.hexdigest()
print ""
print "Checking page hash..."
print ""
nph = [(0,digest)]
lastpos = 0
pagesize = sectoralign # ???
for vs,vo,rs,ro in sections:
l = 0
while l < rs:
f.seek(ro+l)
howmuch = pagesize
if rs - l < pagesize:
howmuch = rs - l
b = f.read(howmuch)
if howmuch < pagesize:
b = b + '\0' * (pagesize - (rs - l))
if sha1:
d = hashlib.sha1(b).hexdigest()
else:
d = hashlib.sha256(b).hexdigest()
nph.append((ro+l, d))
l += pagesize
lastpos = ro + rs
nph.append((lastpos,'0'*(2*hashlen)))
for i in range(0,len(nph)):
x=ph[i]
y=nph[i]
if x[0] != y[0] or x[1] != y[1]:
print "Not matching:", x, "!=", y
| bsd-3-clause | 592,326,657,579,592,700 | 21.877698 | 82 | 0.597484 | false |
kosz85/django | django/db/models/sql/query.py | 1 | 92537 | """
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
from collections import Counter, Iterator, Mapping, OrderedDict, namedtuple
from contextlib import suppress
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Col, Ref
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.lookups import Lookup
from django.db.models.query_utils import (
Q, check_rel_lookup_compatibility, refs_expression,
)
from django.db.models.sql.constants import (
INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, SINGLE,
)
from django.db.models.sql.datastructures import (
BaseTable, Empty, EmptyResultSet, Join, MultiJoin,
)
from django.db.models.sql.where import (
AND, OR, ExtraWhere, NothingNode, WhereNode,
)
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.tree import Node
__all__ = ['Query', 'RawQuery']
def get_field_names_from_opts(opts):
return set(chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,)
for f in opts.get_fields()
))
JoinInfo = namedtuple(
'JoinInfo',
('final_field', 'targets', 'opts', 'joins', 'path')
)
class RawQuery:
"""A single raw SQL query."""
def __init__(self, sql, using, params=None):
self.params = params or ()
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
def chain(self, using):
return self.clone(using)
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.column_name_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
@property
def params_type(self):
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
return self.sql % self.params_type(self.params)
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = {key: adapter(val) for key, val in self.params.items()}
else:
raise RuntimeError("Unexpected params type: %s" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
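# Illustrative usage of RawQuery (hypothetical table/param names): the params
# type decides the placeholder style handled in _execute_query() above, with
# sequence params matching '%s' and dict params matching '%(name)s':
#   RawQuery('SELECT * FROM app_author WHERE id = %s', using='default', params=[1])
#   RawQuery('SELECT * FROM app_author WHERE id = %(pk)s', using='default', params={'pk': 1})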
class Query:
"""A single SQL query."""
alias_prefix = 'T'
subq_aliases = frozenset([alias_prefix])
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = OrderedDict()
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
self.external_aliases = set()
self.table_map = {} # Maps table names to list of aliases.
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.used_aliases = set()
self.filter_is_sticky = False
self.subquery = False
# SQL-related attributes
# Select and related select clauses are expressions to use in the
# SELECT clause of the query.
# The select is used for cases where we want to set up the select
# clause to contain other than default fields (values(), subqueries...)
# Note that annotations go to annotations dictionary.
self.select = ()
self.where = where()
self.where_class = where
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A tuple of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
self.group_by = None
self.order_by = ()
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.distinct_fields = ()
self.select_for_update = False
self.select_for_update_nowait = False
self.select_for_update_skip_locked = False
self.select_for_update_of = ()
self.select_related = False
# Arbitrary limit for select_related to prevent infinite recursion.
self.max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
self.values_select = ()
# SQL annotation-related attributes
# The _annotations will be an OrderedDict when used. Due to the cost
# of creating OrderedDict this attribute is created lazily (in
# self.annotations property).
self._annotations = None # Maps alias -> Annotation Expression
self.annotation_select_mask = None
self._annotation_select_cache = None
# Set combination attributes
self.combinator = None
self.combinator_all = False
self.combined_queries = ()
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
# The _extra attribute is an OrderedDict, lazily created similarly to
# .annotations
self._extra = None # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (frozenset(), True)
@property
def extra(self):
if self._extra is None:
self._extra = OrderedDict()
return self._extra
@property
def annotations(self):
if self._annotations is None:
self._annotations = OrderedDict()
return self._annotations
@property
def has_select_fields(self):
return bool(self.select or self.annotation_select_mask or self.extra_select_mask)
@cached_property
def base_table(self):
for alias in self.alias_map:
return alias
def __str__(self):
"""
Return the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Return the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
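# Example of the difference between the two methods (hypothetical queryset):
# for qs = Author.objects.filter(name='Bob'), str(qs.query) interpolates the
# parameters into the SQL string, while qs.query.sql_with_params() returns
# roughly ('SELECT ... WHERE "app_author"."name" = %s', ('Bob',)) so the
# parameters can still be passed separately to the database driver.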
def __deepcopy__(self, memo):
"""Limit the amount of work when a Query is deepcopied."""
result = self.clone()
memo[id(self)] = result
return result
def _prepare(self, field):
return self
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
return connection.ops.compiler(self.compiler)(self, connection, using)
def get_meta(self):
"""
Return the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self):
"""
Return a copy of the current Query. A lightweight alternative to
deepcopy().
"""
obj = Empty()
obj.__class__ = self.__class__
# Copy references to everything.
obj.__dict__ = self.__dict__.copy()
# Clone attributes that can't use shallow copy.
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.external_aliases = self.external_aliases.copy()
obj.table_map = self.table_map.copy()
obj.where = self.where.clone()
obj._annotations = self._annotations.copy() if self._annotations is not None else None
if self.annotation_select_mask is None:
obj.annotation_select_mask = None
else:
obj.annotation_select_mask = self.annotation_select_mask.copy()
# _annotation_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both annotations and
# _annotation_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._annotation_select_cache = None
obj._extra = self._extra.copy() if self._extra is not None else None
if self.extra_select_mask is None:
obj.extra_select_mask = None
else:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is None:
obj._extra_select_cache = None
else:
obj._extra_select_cache = self._extra_select_cache.copy()
if 'subq_aliases' in self.__dict__:
obj.subq_aliases = self.subq_aliases.copy()
obj.used_aliases = self.used_aliases.copy()
# Clear the cached_property
with suppress(AttributeError):
del obj.base_table
return obj
def chain(self, klass=None):
"""
Return a copy of the current Query that's ready for another operation.
The klass argument changes the type of the Query, e.g. UpdateQuery.
"""
obj = self.clone()
if klass and obj.__class__ != klass:
obj.__class__ = klass
if not obj.filter_is_sticky:
obj.used_aliases = set()
obj.filter_is_sticky = False
if hasattr(obj, '_setup_query'):
obj._setup_query()
return obj
def relabeled_clone(self, change_map):
clone = self.clone()
clone.change_aliases(change_map)
return clone
def rewrite_cols(self, annotation, col_cnt):
# We must make sure the inner query has the referred columns in it.
# If we are aggregating over an annotation, then Django uses Ref()
# instances to note this. However, if we are annotating over a column
# of a related model, then it might be that column isn't part of the
# SELECT clause of the inner query, and we must manually make sure
# the column is selected. An example case is:
# .aggregate(Sum('author__awards'))
# Resolving this expression results in a join to author, but there
# is no guarantee the awards column of author is in the select clause
# of the query. Thus we must manually add the column to the inner
# query.
orig_exprs = annotation.get_source_expressions()
new_exprs = []
for expr in orig_exprs:
# FIXME: These conditions are fairly arbitrary. Identify a better
# method of having expressions decide which code path they should
# take.
if isinstance(expr, Ref):
# It's already a Ref to a subquery (see resolve_ref() for
# details)
new_exprs.append(expr)
elif isinstance(expr, (WhereNode, Lookup)):
# Decompose the subexpressions further. The code here is
# copied from the else clause, but this condition must appear
# before the contains_aggregate/is_summary condition below.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
elif isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary):
# Reference to column. Make sure the referenced column
# is selected.
col_cnt += 1
col_alias = '__col%d' % col_cnt
self.annotations[col_alias] = expr
self.append_annotation_mask([col_alias])
new_exprs.append(Ref(col_alias, expr))
else:
# Some other expression not referencing database values
# directly. Its subexpression might contain Cols.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
annotation.set_source_expressions(new_exprs)
return annotation, col_cnt
def get_aggregation(self, using, added_aggregate_names):
"""
Return the dictionary with the values of the existing aggregations.
"""
if not self.annotation_select:
return {}
has_limit = self.low_mark != 0 or self.high_mark is not None
has_existing_annotations = any(
annotation for alias, annotation
in self.annotations.items()
if alias not in added_aggregate_names
)
# Decide if we need to use a subquery.
#
# Existing annotations would cause incorrect results as get_aggregation()
# must produce just one result and thus must not use GROUP BY. But we
# aren't smart enough to remove the existing annotations from the
# query, so those would force us to use GROUP BY.
#
# If the query has limit or distinct, or uses set operations, then
# those operations must be done in a subquery so that the query
# aggregates on the limit and/or distinct results instead of applying
# the distinct and limit after the aggregation.
if (isinstance(self.group_by, tuple) or has_limit or has_existing_annotations or
self.distinct or self.combinator):
from django.db.models.sql.subqueries import AggregateQuery
outer_query = AggregateQuery(self.model)
inner_query = self.clone()
inner_query.select_for_update = False
inner_query.select_related = False
if not has_limit and not self.distinct_fields:
# Queries with distinct_fields need ordering and when a limit
# is applied we must take the slice from the ordered query.
# Otherwise no need for ordering.
inner_query.clear_ordering(True)
if not inner_query.distinct:
# If the inner query uses default select and it has some
# aggregate annotations, then we must make sure the inner
# query is grouped by the main model's primary key. However,
# clearing the select clause can alter results if distinct is
# used.
if inner_query.default_cols and has_existing_annotations:
inner_query.group_by = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
inner_query.default_cols = False
relabels = {t: 'subquery' for t in inner_query.alias_map}
relabels[None] = 'subquery'
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
col_cnt = 0
for alias, expression in list(inner_query.annotation_select.items()):
if expression.is_summary:
expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
outer_query.annotations[alias] = expression.relabeled_clone(relabels)
del inner_query.annotations[alias]
# Make sure the annotation_select won't use cached results.
inner_query.set_annotation_mask(inner_query.annotation_select_mask)
if inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask:
# In case of Model.objects[0:3].count(), there would be no
# field selected in the inner query, yet we must use a subquery.
# So, make sure at least one field is selected.
inner_query.select = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
try:
outer_query.add_subquery(inner_query, using)
except EmptyResultSet:
return {
alias: None
for alias in outer_query.annotation_select
}
else:
outer_query = self
self.select = ()
self.default_cols = False
self._extra = {}
outer_query.clear_ordering(True)
outer_query.clear_limits()
outer_query.select_for_update = False
outer_query.select_related = False
compiler = outer_query.get_compiler(using)
result = compiler.execute_sql(SINGLE)
if result is None:
result = [None for q in outer_query.annotation_select.items()]
converters = compiler.get_converters(outer_query.annotation_select.values())
result = next(compiler.apply_converters((result,), converters))
return {
alias: val
for (alias, annotation), val
in zip(outer_query.annotation_select.items(), result)
}
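# Illustrative consequence of the subquery decision above (hypothetical
# queryset): Book.objects.order_by('name')[:5].aggregate(total=Sum('pages'))
# cannot simply drop the slice, so the sliced query is pushed into a subquery
# and the outer AggregateQuery sums over just those five rows, roughly:
#   SELECT SUM(pages) FROM (SELECT ... ORDER BY name LIMIT 5) subquery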
def get_count(self, using):
"""
Perform a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count('*'), alias='__count', is_summary=True)
number = obj.get_aggregation(using, ['__count'])['__count']
if number is None:
number = 0
return number
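# Sketch of what get_count() amounts to (hypothetical queryset):
# Book.objects.filter(pages__gt=100).count() clones the query, annotates it
# with Count('*') under the reserved alias '__count' and reads that single
# aggregate back, roughly: SELECT COUNT(*) FROM app_book WHERE pages > 100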
def has_filters(self):
return self.where
def has_results(self, using):
q = self.clone()
if not q.distinct:
if q.group_by is True:
q.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
q.set_group_by()
q.clear_select_clause()
q.clear_ordering(True)
q.set_limits(high=1)
compiler = q.get_compiler(using=using)
return compiler.has_results()
def combine(self, rhs, connector):
"""
Merge the 'rhs' query into the current one (with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
current query. 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
assert self.model == rhs.model, \
"Cannot combine queries on two different base models."
assert self.can_filter(), \
"Cannot combine queries once a slice has been taken."
assert self.distinct == rhs.distinct, \
"Cannot combine a unique query with a non-unique query."
assert self.distinct_fields == rhs.distinct_fields, \
"Cannot combine queries with different distinct fields."
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = (connector == AND)
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
# combining with OR we can reuse joins. The reason is that in AND
# case a single row can't fulfill a condition like:
# revrel__col=1 & revrel__col=2
# But, there might be two different related rows matching this
# condition. In OR case a single True is enough, so single row is
# enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.alias_map)
# Base table must be present in the query - this is the same
# table on both sides.
self.get_initial_alias()
joinpromoter = JoinPromoter(connector, 2, False)
joinpromoter.add_votes(
j for j in self.alias_map if self.alias_map[j].join_type == INNER)
rhs_votes = set()
# Now, add the joins from rhs query into the new query (skipping base
# table).
rhs_tables = list(rhs.alias_map)[1:]
for alias in rhs_tables:
join = rhs.alias_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
join = join.relabeled_clone(change_map)
new_alias = self.join(join, reuse=reuse)
if join.join_type == INNER:
rhs_votes.add(new_alias)
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
if alias != new_alias:
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
joinpromoter.add_votes(rhs_votes)
joinpromoter.update_join_types(self)
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
w = rhs.where.clone()
w.relabel_aliases(change_map)
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
if rhs.select:
self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
else:
self.select = ()
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self._extra and rhs._extra:
raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.")
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by if rhs.order_by else self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
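# combine() is what backs queryset combination with & and | (hypothetical
# querysets): (qs1 | qs2) ends up calling qs1.query.combine(qs2.query, OR),
# reusing joins where possible and OR-ing the two where clauses, while
# (qs1 & qs2) uses AND and recreates m2m joins as described above.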
def deferred_to_data(self, target, callback):
"""
Convert the self.deferred_loading data structure to an alternate data
structure, describing the fields that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
pair need to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
orig_opts = self.get_meta()
seen = {}
must_include = {orig_opts.concrete_model: {orig_opts.pk}}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model._meta.concrete_model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
source = opts.get_field(name)
if is_reverse_o2o(source):
cur_model = source.related_model
else:
cur_model = source.remote_field.model
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# (if it's not a reverse relation) to the things we select.
if not is_reverse_o2o(source):
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field = opts.get_field(parts[-1])
is_reverse_object = field.auto_created and not field.concrete
model = field.related_model if is_reverse_object else field.model
model = model._meta.concrete_model
if model == opts.model:
model = cur_model
if not is_reverse_o2o(field):
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in seen.items():
for field in model._meta.fields:
if field in values:
continue
m = field.model._meta.concrete_model
add_to_dict(workset, m, field)
for model, values in must_include.items():
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in workset.items():
callback(target, model, values)
else:
for model, values in must_include.items():
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
if model not in seen:
seen[model] = set()
for model, values in seen.items():
callback(target, model, values)
def table_alias(self, table_name, create=False):
"""
Return a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = table_name
self.table_map[alias] = [alias]
self.alias_refcount[alias] = 1
return alias, True
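# Example of the aliasing scheme above (hypothetical table names): the first
# time 'app_book' is used it is aliased as 'app_book' itself; when the same
# table has to be joined again with create=True it gets a generated alias such
# as 'T2' or 'T3', built from alias_prefix and the current size of alias_map.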
def ref_alias(self, alias):
"""Increases the reference count for this alias."""
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
"""Decreases the reference count for this alias."""
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
Recursively promote the join type of the given aliases and their children
to an outer join. Only promote a join if it is nullable or the parent
join is an outer join.
The children promotion is done to avoid join chains that contain a LOUTER
b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted,
then we must also promote b->c automatically, or otherwise the promotion
of a->b doesn't actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
# Only the first alias (skipped above) should have None join_type
assert self.alias_map[alias].join_type is not None
parent_alias = self.alias_map[alias].parent_alias
parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER
already_louter = self.alias_map[alias].join_type == LOUTER
if ((self.alias_map[alias].nullable or parent_louter) and
not already_louter):
self.alias_map[alias] = self.alias_map[alias].promote()
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join for join in self.alias_map
if self.alias_map[join].parent_alias == alias and join not in aliases
)
def demote_joins(self, aliases):
"""
Change join type from LOUTER to INNER for all joins in aliases.
Similarly to promote_joins(), this method must ensure no join chains
containing first an outer, then an inner join are generated. If we
are demoting b->c join in chain a LOUTER b LOUTER c then we must
demote a->b automatically, or otherwise the demotion of b->c doesn't
actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == LOUTER:
self.alias_map[alias] = self.alias_map[alias].demote()
parent_alias = self.alias_map[alias].parent_alias
if self.alias_map[parent_alias].join_type == INNER:
aliases.append(parent_alias)
def reset_refcounts(self, to_counts):
"""
Reset reference counts for aliases so that they match the value passed
in `to_counts`.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Change the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
assert set(change_map).isdisjoint(change_map.values())
# 1. Update references in "select" (normal columns plus aliases),
# "group by" and "where".
self.where.relabel_aliases(change_map)
if isinstance(self.group_by, tuple):
self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by])
self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
if self._annotations:
self._annotations = OrderedDict(
(key, col.relabeled_clone(change_map)) for key, col in self._annotations.items())
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in change_map.items():
if old_alias not in self.alias_map:
continue
alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
self.alias_map[new_alias] = alias_data
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
self.external_aliases = {change_map.get(alias, alias)
for alias in self.external_aliases}
def bump_prefix(self, outer_query):
"""
Change the alias prefix to the next letter in the alphabet in a way
that the outer query's aliases and this query's aliases will not
conflict. Even tables that previously had no alias will get an alias
after this call.
"""
def prefix_gen():
"""
Generate a sequence of characters in alphabetical order:
-> 'A', 'B', 'C', ...
When the alphabet is finished, the sequence will continue with the
Cartesian product:
-> 'AA', 'AB', 'AC', ...
"""
alphabet = ascii_uppercase
prefix = chr(ord(self.alias_prefix) + 1)
yield prefix
for n in count(1):
seq = alphabet[alphabet.index(prefix):] if prefix else alphabet
for s in product(seq, repeat=n):
yield ''.join(s)
prefix = None
if self.alias_prefix != outer_query.alias_prefix:
# No clashes between self and outer query should be possible.
return
local_recursion_limit = 127 # explicitly avoid infinite loop
for pos, prefix in enumerate(prefix_gen()):
if prefix not in self.subq_aliases:
self.alias_prefix = prefix
break
if pos > local_recursion_limit:
raise RuntimeError(
'Maximum recursion depth exceeded: too many subqueries.'
)
self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases)
change_map = OrderedDict()
for pos, alias in enumerate(self.alias_map):
new_alias = '%s%d' % (self.alias_prefix, pos)
change_map[alias] = new_alias
self.change_aliases(change_map)
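# Illustrative effect of bump_prefix() (hypothetical aliases): if the outer
# query uses the default prefix 'T', the subquery switches to the next free
# prefix and every alias in it is renamed positionally, e.g.
#   {'app_book': 'U0', 'app_author': 'U1'}
# so references from the two queries can never clash.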
def get_initial_alias(self):
"""
Return the first alias for this query, after increasing its reference
count.
"""
if self.alias_map:
alias = self.base_table
self.ref_alias(alias)
else:
alias = self.join(BaseTable(self.get_meta().db_table, None))
return alias
def count_active_tables(self):
"""
Return the number of tables in this query with a non-zero reference
count. After execution, the reference counts are zeroed, so tables
added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, join, reuse=None):
"""
Return an alias for the 'join', either reusing an existing alias for
that join or creating a new one. 'join' is either a
sql.datastructures.BaseTable or Join.
The 'reuse' parameter can be either None which means all joins are
reusable, or it can be a set containing the aliases that can be reused.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new
joins are created as LOUTER if the join is nullable.
"""
reuse = [a for a, j in self.alias_map.items()
if (reuse is None or a in reuse) and j == join]
if reuse:
self.ref_alias(reuse[0])
return reuse[0]
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(join.table_name, create=True)
if join.join_type:
if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
join_type = LOUTER
else:
join_type = INNER
join.join_type = join_type
join.table_alias = alias
self.alias_map[alias] = join
return alias
def join_parent_model(self, opts, model, alias, seen):
"""
Make sure the given 'model' is joined in the query. If 'model' isn't
a parent of 'opts' or if it is None this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if not chain:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
curr_opts = int_model._meta
alias = seen[int_model]
continue
# Proxy model have elements in base chain
# with no parents, assign the new options
# object and skip to the next base in that
# case
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
join_info = self.setup_joins([link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = join_info.joins[-1]
return alias or seen[None]
def add_annotation(self, annotation, alias, is_summary=False):
"""Add a single annotation expression to the Query."""
annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None,
summarize=is_summary)
self.append_annotation_mask([alias])
self.annotations[alias] = annotation
def resolve_expression(self, query, *args, **kwargs):
clone = self.clone()
# Subqueries need to use a different set of aliases than the outer query.
clone.bump_prefix(query)
clone.subquery = True
# It's safe to drop ordering if the queryset isn't using slicing,
# distinct(*fields) or select_for_update().
if (self.low_mark == 0 and self.high_mark is None and
not self.distinct_fields and
not self.select_for_update):
clone.clear_ordering(True)
return clone
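# Illustrative use of resolve_expression() on a whole Query (hypothetical
# querysets): when a queryset is used as a filter value, e.g.
#   Book.objects.filter(author__in=Author.objects.filter(age__gt=40))
# the inner query is resolved as a subquery: its alias prefix is bumped so the
# aliases of the two queries cannot collide, and its ordering is dropped when
# it is safe to do so.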
def as_sql(self, compiler, connection):
return self.get_compiler(connection=connection).as_sql()
def resolve_lookup_value(self, value, can_reuse, allow_joins):
used_joins = set()
if hasattr(value, 'resolve_expression'):
pre_joins = self.alias_refcount.copy()
value = value.resolve_expression(self, reuse=can_reuse, allow_joins=allow_joins)
used_joins = {k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)}
elif isinstance(value, (list, tuple)):
# The items of the iterable may be expressions and therefore need
# to be resolved independently.
processed_values = []
for sub_value in value:
if hasattr(sub_value, 'resolve_expression'):
pre_joins = self.alias_refcount.copy()
processed_values.append(
sub_value.resolve_expression(self, reuse=can_reuse, allow_joins=allow_joins)
)
# The used_joins for a tuple of expressions is the union of
# the used_joins for the individual expressions.
used_joins.update(k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0))
return value, used_joins
def solve_lookup_type(self, lookup):
"""
Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self._annotations:
expression, expression_lookups = refs_expression(lookup_splitted, self.annotations)
if expression:
return expression_lookups, (), expression
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]
if len(lookup_parts) > 1 and not field_parts:
raise FieldError(
'Invalid lookup "%s" for model %s".' %
(lookup, self.get_meta().model.__name__)
)
return lookup_parts, field_parts, False
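# Example of the split performed above (hypothetical model fields), roughly:
#   solve_lookup_type('author__name__icontains')
#   -> (['icontains'], ['author', 'name'], False)
# and after .annotate(total=Sum('pages')):
#   solve_lookup_type('total__gt') -> (['gt'], (), <resolved Sum expression>)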
def check_query_object_type(self, value, opts, field):
"""
Check whether the object passed while querying is of the correct type.
If not, raise a ValueError specifying the wrong object.
"""
if hasattr(value, '_meta'):
if not check_rel_lookup_compatibility(value._meta.model, opts, field):
raise ValueError(
'Cannot query "%s": Must be "%s" instance.' %
(value, opts.object_name))
def check_related_objects(self, field, value, opts):
"""Check the type of object passed to query relations."""
if field.is_relation:
# Check that the field and the queryset use the same model in a
# query like .filter(author=Author.objects.all()). For example, the
# opts would be Author's (from the author field) and value.model
# would be Author.objects.all() queryset's .model (Author also).
# The field is the related field on the lhs side.
if (isinstance(value, Query) and not value.has_select_fields and
not check_rel_lookup_compatibility(value.model, opts, field)):
raise ValueError(
'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' %
(value.model._meta.object_name, opts.object_name)
)
elif hasattr(value, '_meta'):
self.check_query_object_type(value, opts, field)
elif hasattr(value, '__iter__'):
for v in value:
self.check_query_object_type(v, opts, field)
def build_lookup(self, lookups, lhs, rhs):
"""
Try to extract transforms and lookup from given lhs.
The lhs value is something that works like SQLExpression.
The rhs value is what the lookup is going to compare against.
'lookups' is a list of names to extract using get_lookup()
and get_transform().
"""
# __exact is the default lookup if one isn't given.
if len(lookups) == 0:
lookups = ['exact']
for name in lookups[:-1]:
lhs = self.try_transform(lhs, name)
# First try get_lookup() so that the lookup takes precedence if the lhs
# supports both transform and lookup for the name.
lookup_class = lhs.get_lookup(lookups[-1])
if not lookup_class:
if lhs.field.is_relation:
raise FieldError('Related Field got invalid lookup: {}'.format(lookups[-1]))
# A lookup wasn't found. Try to interpret the name as a transform
# and do an Exact lookup against it.
lhs = self.try_transform(lhs, lookups[-1])
lookup_class = lhs.get_lookup('exact')
if not lookup_class:
return
lookup = lookup_class(lhs, rhs)
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value.
if lookup.rhs is None:
if lookup.lookup_name not in ('exact', 'iexact'):
raise ValueError("Cannot use None as a query value")
return lhs.get_lookup('isnull')(lhs, True)
# For Oracle '' is equivalent to null. The check must be done at this
# stage because join promotion can't be done in the compiler. Using
# DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
# A similar thing is done in is_nullable(), too.
if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
lookup.lookup_name == 'exact' and lookup.rhs == ''):
return lhs.get_lookup('isnull')(lhs, True)
return lookup
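# Worked example of the transform/lookup resolution above (hypothetical field):
# for filter(pub_date__year__gt=2005) the lookups are ['year', 'gt'], so 'year'
# is applied as a transform on the column and 'gt' becomes the final lookup.
# A value of None with the default 'exact' lookup is rewritten to an isnull
# lookup, which is why filter(author=None) emits "author_id IS NULL".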
def try_transform(self, lhs, name):
"""
Helper method for build_lookup(). Try to fetch and initialize
a transform for name parameter from lhs.
"""
transform_class = lhs.get_transform(name)
if transform_class:
return transform_class(lhs)
else:
raise FieldError(
"Unsupported lookup '%s' for %s or join on the field not "
"permitted." %
(name, lhs.output_field.__class__.__name__))
def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
can_reuse=None, allow_joins=True, split_subq=True):
"""
Build a WhereNode for a single filter clause but don't add it
to this Query. Query.add_q() will then add this filter to the where
Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
Note that add_filter will not do any negating itself; that is done
higher up in the code by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
if isinstance(filter_expr, dict):
raise FieldError("Cannot parse keyword query as dict")
arg, value = filter_expr
if not arg:
raise FieldError("Cannot parse keyword query %r" % arg)
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
if not allow_joins and len(parts) > 1:
raise FieldError("Joined field references are not permitted in this query")
# Work out the lookup type and remove it from the end of 'parts',
# if necessary.
value, used_joins = self.resolve_lookup_value(value, can_reuse, allow_joins)
clause = self.where_class()
if reffed_expression:
condition = self.build_lookup(lookups, reffed_expression, value)
clause.add(condition, AND)
return clause, []
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated or not split_subq
try:
join_info = self.setup_joins(parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many)
# Prevent iterator from being consumed by check_related_objects()
if isinstance(value, Iterator):
value = list(value)
self.check_related_objects(join_info.final_field, value, join_info.opts)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_info.joins
except MultiJoin as e:
return self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
can_reuse, e.names_with_path)
# Update used_joins before trimming since they are reused to determine
# which joins could be later promoted to INNER.
used_joins.update(join_info.joins)
targets, alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path)
if can_reuse is not None:
can_reuse.update(join_list)
if join_info.final_field.is_relation:
# No support for transforms for relational fields
num_lookups = len(lookups)
if num_lookups > 1:
raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0]))
if len(targets) == 1:
col = targets[0].get_col(alias, join_info.final_field)
else:
col = MultiColSource(alias, targets, join_info.targets, join_info.final_field)
else:
col = targets[0].get_col(alias, join_info.final_field)
condition = self.build_lookup(lookups, col, value)
lookup_type = condition.lookup_name
clause.add(condition, AND)
require_outer = lookup_type == 'isnull' and condition.rhs is True and not current_negated
if current_negated and (lookup_type != 'isnull' or condition.rhs is False):
require_outer = True
if (lookup_type != 'isnull' and (
self.is_nullable(targets[0]) or
self.alias_map[join_list[-1]].join_type == LOUTER)):
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
lookup_class = targets[0].get_lookup('isnull')
clause.add(lookup_class(targets[0].get_col(alias, join_info.targets[0]), False), AND)
return clause, used_joins if not require_outer else ()
def add_filter(self, filter_clause):
self.add_q(Q(**{filter_clause[0]: filter_clause[1]}))
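# Equivalent formulation of the shortcut above (hypothetical filter): calling
#   query.add_filter(('name__icontains', 'django'))
# is the same as query.add_q(Q(name__icontains='django')); add_q() then takes
# care of join promotion before the clause is added to the where tree.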
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for doing final
join promotion.
"""
# For join promotion this case is doing an AND for the added q_object
# and existing conditions. So, any existing inner join forces the join
# type to remain inner. Existing outer joins can however be demoted.
# (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if
# rel_a doesn't produce any rows, then the whole condition must fail.
# So, demotion is OK.
existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER}
clause, _ = self._add_q(q_object, self.used_aliases)
if clause:
self.where.add(clause, AND)
self.demote_joins(existing_inner)
def _add_q(self, q_object, used_aliases, branch_negated=False,
current_negated=False, allow_joins=True, split_subq=True):
"""Add a Q-object to the current filter."""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = self.where_class(connector=connector,
negated=q_object.negated)
joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause, needed_inner = self._add_q(
child, used_aliases, branch_negated,
current_negated, allow_joins, split_subq)
joinpromoter.add_votes(needed_inner)
else:
child_clause, needed_inner = self.build_filter(
child, can_reuse=used_aliases, branch_negated=branch_negated,
current_negated=current_negated, allow_joins=allow_joins,
split_subq=split_subq,
)
joinpromoter.add_votes(needed_inner)
if child_clause:
target_clause.add(child_clause, connector)
needed_inner = joinpromoter.update_join_types(self)
return target_clause, needed_inner
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
Walk the list of names and turn them into PathInfo tuples. A single
name in 'names' can generate multiple PathInfos (m2m, for example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Return a list of PathInfo tuples. In addition return the final field
(the last used join field) and target (which is a field guaranteed to
contain the same value as the final field). Finally, return those names
that weren't found (which are likely transforms and the final lookup).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == 'pk':
name = opts.pk.name
field = None
try:
field = opts.get_field(name)
except FieldDoesNotExist:
if name in self.annotation_select:
field = self.annotation_select[name].output_field
if field is not None:
# Fields that contain one-to-many relations with a generic
# model (like a GenericForeignKey) cannot generate reverse
# relations and therefore cannot be used for reverse querying.
if field.is_relation and not field.related_model:
raise FieldError(
"Field %r does not generate an automatic reverse "
"relation and therefore cannot be used for reverse "
"querying. If it is a GenericForeignKey, consider "
"adding a GenericRelation." % name
)
try:
model = field.model._meta.concrete_model
except AttributeError:
# QuerySet.annotate() may introduce fields that aren't
# attached to a model.
model = None
else:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
field_names = list(get_field_names_from_opts(opts))
available = sorted(field_names + list(self.annotation_select))
raise FieldError("Cannot resolve keyword '%s' into field. "
"Choices are: %s" % (name, ", ".join(available)))
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if model is not opts.model:
path_to_parent = opts.get_path_to_parent(model)
if path_to_parent:
path.extend(path_to_parent)
cur_names_with_path[1].extend(path_to_parent)
opts = path_to_parent[-1].to_opts
if hasattr(field, 'get_path_info'):
pathinfos = field.get_path_info()
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name))
break
return path, final_field, targets, names[pos + 1:]
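# Hypothetical walk-through (added for illustration; model and field names are
# assumptions): calling names_to_path() without fail_on_missing for the lookup
# 'author__hometown__name__icontains' starting from a Book model returns roughly
#   path        -> [PathInfo(book -> author), PathInfo(author -> hometown)]
#   final_field -> the 'name' field on the hometown model
#   targets     -> (that same 'name' field,)
#   rest        -> ['icontains']   # trailing names left for transforms/lookups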
def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
The 'can_reuse' defines the reverse foreign key joins we can reuse. It
can be None in which case all joins are reusable or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Return the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins and the
field path travelled to generate the joins.
The target field is the field containing the concrete value. The final
field can be something different, for example a foreign key pointing to
that value. The final field is needed, for example, in some value
conversions (converting 'obj' in fk__id=obj to its pk value using the
foreign key field).
"""
joins = [alias]
# First, generate the path for the names
path, final_field, targets, rest = self.names_to_path(
names, opts, allow_many, fail_on_missing=True)
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for join in path:
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = Join(opts.db_table, alias, None, INNER, join.join_field, nullable)
reuse = can_reuse if join.m2m else None
alias = self.join(connection, reuse=reuse)
joins.append(alias)
return JoinInfo(final_field, targets, opts, joins, path)
def trim_joins(self, targets, joins, path):
"""
The 'target' parameter is the final field being joined to, 'joins'
is the full list of join aliases. The 'path' contain the PathInfos
used to create the joins.
Return the final target field and table alias and the new active
joins.
Always trim any direct join if the target column is already in the
previous table. Can't trim reverse joins as it's unknown if there's
anything on the other side of the join.
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
join_targets = {t.column for t in info.join_field.foreign_related_fields}
cur_targets = {t.column for t in targets}
if not cur_targets.issubset(join_targets):
break
targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets}
targets = tuple(targets_dict[t.column] for t in targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
if not allow_joins and LOOKUP_SEP in name:
raise FieldError("Joined field references are not permitted in this query")
if name in self.annotations:
if summarize:
# Summarize currently means we are doing an aggregate() query
# which is executed as a wrapped subquery if any of the
# aggregate() elements reference an existing annotation. In
# that case we need to return a Ref to the subquery's annotation.
return Ref(name, self.annotation_select[name])
else:
return self.annotation_select[name]
else:
field_list = name.split(LOOKUP_SEP)
join_info = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), reuse)
targets, _, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path)
if len(targets) > 1:
raise FieldError("Referencing multicolumn fields with F() objects "
"isn't supported")
if reuse is not None:
reuse.update(join_list)
col = targets[0].get_col(join_list[-1], join_info.targets[0])
return col
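# Sketch of a typical caller (added; the Entry/Blog models are hypothetical):
# an F() expression such as
#   Entry.objects.filter(n_comments__gt=F('blog__n_pingbacks'))
# resolves through resolve_ref('blog__n_pingbacks'): the join to blog is set up,
# trimmed where possible, and the returned Col points at the n_pingbacks column
# of the final alias. A multicolumn target would raise FieldError as shown above.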
def split_exclude(self, filter_expr, prefix, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
As an example we could have original filter ~Q(child__name='foo').
We would get here with filter_expr = child__name, prefix = child and
can_reuse is a set of joins usable for filters in the original query.
We will turn this into the equivalent of:
WHERE NOT (pk IN (SELECT parent_id FROM thetable
WHERE name = 'foo' AND parent_id IS NOT NULL))
It might be worth it to consider using WHERE NOT EXISTS as that has
saner null handling, and is easier for the backend's optimizer to
handle.
"""
# Generate the inner query.
query = Query(self.model)
query.add_filter(filter_expr)
query.clear_ordering(True)
# Try to have as simple as possible subquery -> trim leading joins from
# the subquery.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
# Add extra check to make sure the selected field will not be null
# since we are adding an IN <subquery> clause. This prevents the
# database from tripping over IN (...,NULL,...) selects and returning
# nothing
col = query.select[0]
select_field = col.target
alias = col.alias
if self.is_nullable(select_field):
lookup_class = select_field.get_lookup('isnull')
lookup = lookup_class(select_field.get_col(alias), False)
query.where.add(lookup, AND)
if alias in can_reuse:
pk = select_field.model._meta.pk
# Need to add a restriction so that outer query's filters are in effect for
# the subquery, too.
query.bump_prefix(self)
lookup_class = select_field.get_lookup('exact')
# Note that the query.select[0].alias is different from alias
# due to bump_prefix above.
lookup = lookup_class(pk.get_col(query.select[0].alias),
pk.get_col(alias))
query.where.add(lookup, AND)
query.external_aliases.add(alias)
condition, needed_inner = self.build_filter(
('%s__in' % trimmed_prefix, query),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
if contains_louter:
or_null_condition, _ = self.build_filter(
('%s__isnull' % trimmed_prefix, True),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition, needed_inner
def set_empty(self):
self.where.add(NothingNode(), AND)
def is_empty(self):
return any(isinstance(c, NothingNode) for c in self.where.children)
def set_limits(self, low=None, high=None):
"""
Adjust the limits on the rows retrieved. Use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, convert them to the appropriate offset and limit values.
Apply any limits passed in here to the existing constraints. Add low
to the current low value and clamp both to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
if self.low_mark == self.high_mark:
self.set_empty()
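# Worked example (added; assumes a fresh query with low_mark=0, high_mark=None):
# slicing a queryset as qs[5:10] calls set_limits(5, 10), giving low_mark=5 and
# high_mark=10, which the compiler renders as "LIMIT 5 OFFSET 5". A further
# slice clamps against the existing marks:
#   query.set_limits(5, 10)   # low_mark=5, high_mark=10
#   query.set_limits(2, 4)    # low_mark=7, high_mark=9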
def clear_limits(self):
"""Clear any existing limits."""
self.low_mark, self.high_mark = 0, None
def can_filter(self):
"""
Return True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def clear_select_clause(self):
"""Remove all fields from SELECT clause."""
self.select = ()
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_annotation_mask(())
def clear_select_fields(self):
"""
Clear the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = ()
self.values_select = ()
def set_select(self, cols):
self.default_cols = False
self.select = tuple(cols)
def add_distinct_fields(self, *field_names):
"""
Add and resolve the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Add the given (model) fields to the select set. Add the field names in
the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
cols = []
for name in field_names:
# Join promotion note - we must not remove any rows here, so
# if there are no existing joins, use an outer join.
join_info = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m)
targets, final_alias, joins = self.trim_joins(
join_info.targets,
join_info.joins,
join_info.path,
)
for target in targets:
cols.append(target.get_col(final_alias))
if cols:
self.set_select(cols)
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
else:
names = sorted(list(get_field_names_from_opts(opts)) + list(self.extra) + list(self.annotation_select))
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
def add_ordering(self, *ordering):
"""
Add items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or OrderBy
expressions.
If 'ordering' is empty, clear all ordering from the query.
"""
errors = []
for item in ordering:
if not hasattr(item, 'resolve_expression') and not ORDER_PATTERN.match(item):
errors.append(item)
if getattr(item, 'contains_aggregate', False):
raise FieldError(
'Using an aggregate in order_by() without also including '
'it in annotate() is not allowed: %s' % item
)
if errors:
raise FieldError('Invalid order_by arguments: %s' % errors)
if ordering:
self.order_by += ordering
else:
self.default_ordering = False
def clear_ordering(self, force_empty):
"""
Remove any ordering settings. If 'force_empty' is True, there will be
no ordering in the resulting query (not even the model's default).
"""
self.order_by = ()
self.extra_order_by = ()
if force_empty:
self.default_ordering = False
def set_group_by(self):
"""
Expand the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
group_by = list(self.select)
if self.annotation_select:
for alias, annotation in self.annotation_select.items():
for col in annotation.get_group_by_cols():
group_by.append(col)
self.group_by = tuple(group_by)
def add_select_related(self, fields):
"""
Set up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
if isinstance(self.select_related, bool):
field_dict = {}
else:
field_dict = self.select_related
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Add data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = OrderedDict()
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = force_text(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
if pos == 0 or entry[pos - 1] != '%':
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
# This is order preserving, since self.extra_select is an OrderedDict.
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
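# Usage sketch (added; column and table names are hypothetical). The public
# QuerySet.extra() API is what feeds this method, e.g.
#   qs.extra(select={'gross': 'price * %s'}, select_params=(1.07,),
#            where=['price > %s'], params=(10,))
# pairs the '%s' in the select entry with 1.07 so later changes to the select
# dict keep parameters aligned, and adds "price > %s" with its parameter to the
# WHERE clause through ExtraWhere.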
def clear_deferred_loading(self):
"""Remove any fields from the deferred loading set."""
self.deferred_loading = (frozenset(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. Add the new field names to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set datastructure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
self.deferred_loading = existing.difference(field_names), False
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, remove
those names from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if 'pk' in field_names:
field_names.remove('pk')
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = frozenset(field_names), False
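# Example of how the two loading modes interact (added sketch; field names are
# hypothetical):
#   query.add_deferred_loading({'body'})     # defer('body')  -> ({'body'}, True)
#   query.add_immediate_loading(['title'])   # only('title')  -> ({'title'}, False)
# The second call switches to "immediate load" mode, dropping any requested names
# that were previously deferred; a literal 'pk' entry is replaced by the concrete
# primary key field name before being stored.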
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, return a dictionary mapping
models to a set of names in those fields that will be loaded. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, return an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
try:
return self._loaded_field_names_cache
except AttributeError:
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
self._loaded_field_names_cache = collection
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""Callback used by get_deferred_field_names()."""
target[model] = {f.attname for f in fields}
def set_annotation_mask(self, names):
"""Set the mask of annotations that will be returned by the SELECT."""
if names is None:
self.annotation_select_mask = None
else:
self.annotation_select_mask = set(names)
self._annotation_select_cache = None
def append_annotation_mask(self, names):
if self.annotation_select_mask is not None:
self.set_annotation_mask(set(names).union(self.annotation_select_mask))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
Don't remove them from the Query since they might be used later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def set_values(self, fields):
self.select_related = False
self.clear_deferred_loading()
self.clear_select_fields()
if self.group_by is True:
self.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
self.set_group_by()
self.clear_select_fields()
if fields:
field_names = []
extra_names = []
annotation_names = []
if not self._extra and not self._annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
self.default_cols = False
for f in fields:
if f in self.extra_select:
extra_names.append(f)
elif f in self.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
self.set_extra_mask(extra_names)
self.set_annotation_mask(annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
self.values_select = tuple(field_names)
self.add_fields(field_names, True)
@property
def annotation_select(self):
"""
Return the OrderedDict of aggregate columns that are not masked and
should be used in the SELECT clause. Cache this result for performance.
"""
if self._annotation_select_cache is not None:
return self._annotation_select_cache
elif not self._annotations:
return {}
elif self.annotation_select_mask is not None:
self._annotation_select_cache = OrderedDict(
(k, v) for k, v in self.annotations.items()
if k in self.annotation_select_mask
)
return self._annotation_select_cache
else:
return self.annotations
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self._extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = OrderedDict(
(k, v) for k, v in self.extra.items()
if k in self.extra_select_mask
)
return self._extra_select_cache
else:
return self.extra
def trim_start(self, names_with_path):
"""
Trim joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also set the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Return a lookup usable for doing outerq.filter(lookup=self) and a
boolean indicating if the joins in the prefix contain a LEFT OUTER join.
_"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [
t for t in self.alias_map
if t in self._lookup_joins or t == self.base_table
]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
contains_louter = True
alias = lookup_tables[trimmed_paths]
self.unref_alias(alias)
# The path.join_field is a Rel; let's get the other side's field.
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(
join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
# Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for LEFT JOINs because we would
# miss those rows that have nothing on the outer side.
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type != LOUTER:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(
self.where_class, None, lookup_tables[trimmed_paths + 1])
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
# values in select_fields. Let's punt this one for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
# The found starting point is likely a Join instead of a BaseTable reference.
# But the first entry in the query's FROM clause must not be a JOIN.
for table in self.alias_map:
if self.alias_refcount[table] > 0:
self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table)
break
self.set_select([f.get_col(select_alias) for f in select_fields])
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
Check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
if connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and field.empty_strings_allowed:
return True
else:
return field.null
def get_order_dir(field, default='ASC'):
"""
Return the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == '-':
return field[1:], dirn[1]
return field, dirn[0]
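# Quick examples (added for illustration):
#   get_order_dir('-created')         -> ('created', 'DESC')
#   get_order_dir('created')          -> ('created', 'ASC')
#   get_order_dir('created', 'DESC')  -> ('created', 'DESC')   # no prefix uses the default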
def add_to_dict(data, key, value):
"""
Add "value" to the set of values for "key", whether or not "key" already
exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = {value}
def is_reverse_o2o(field):
"""
Check if the given field is reverse-o2o. The field is expected to be some
sort of relation field or related object.
"""
return field.is_relation and field.one_to_one and not field.concrete
class JoinPromoter:
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.votes = Counter()
def add_votes(self, votes):
"""
Add single vote per item to self.votes. Parameter can be any
iterable.
"""
self.votes.update(votes)
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.votes.items():
# We must use outer joins in OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
# if the rel_a join doesn't produce any rows (for example a reverse
# foreign key with no match, or a null direct foreign key), and
# there is a matching row in rel_b with col=2, then an INNER join
# to rel_a would remove a valid match from the query. So, we need
# to promote any existing INNER to LOUTER (it is possible this
# promotion in turn will be demoted later on).
if self.effective_connector == 'OR' and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
# (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == 'AND' or (
self.effective_connector == 'OR' and votes == self.num_children):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
# vote is enough). The demotion is OK, if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
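# Vote-counting sketch (added; rel_a/rel_b are the hypothetical relations from
# the comments above): for Q(rel_a__col=1) | Q(rel_b__col=2) the promoter is
# created with connector=OR and num_children=2, and each child votes only for
# the join it needs, so rel_a and rel_b get one vote each. Because 1 < 2, both
# joins are promoted to LEFT OUTER. If a later AND-ed filter votes for rel_a
# again, a single vote is enough for update_join_types() to demote it back to INNER.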
| bsd-3-clause | -946,797,654,559,931,900 | 43.488942 | 119 | 0.593968 | false |
bigplus/thefuck | thefuck/rules/dirty_unzip.py | 5 | 1044 | import os
import zipfile
from thefuck.utils import for_app
def _is_bad_zip(file):
with zipfile.ZipFile(file, 'r') as archive:
return len(archive.namelist()) > 1
def _zip_file(command):
# unzip works this way:
#   unzip [-flags] file[.zip] [file(s) ...] [-x file(s) ...]
# where file[.zip] is the archive to unzip and the trailing file(s)
# are specific members to extract from it.
for c in command.script.split()[1:]:
if not c.startswith('-'):
if c.endswith('.zip'):
return c
else:
return '{}.zip'.format(c)
@for_app('unzip')
def match(command, settings):
return ('-d' not in command.script
and _is_bad_zip(_zip_file(command)))
def get_new_command(command, settings):
return '{} -d {}'.format(command.script, _zip_file(command)[:-4])
def side_effect(old_cmd, command, settings):
with zipfile.ZipFile(_zip_file(old_cmd), 'r') as archive:
for file in archive.namelist():
os.remove(file)
requires_output = False
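# Walk-through (added sketch, not part of the original rule; the command object
# shown is hypothetical): given foo.zip that spills several members into the
# current directory, and command.script == 'unzip foo.zip',
#   match(command, None)              # True: no -d flag and the archive has >1 entry
#   get_new_command(command, None)    # 'unzip foo.zip -d foo'
#   side_effect(command, None, None)  # removes the files extracted by mistake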
| mit | 7,080,517,918,691,066,000 | 25.1 | 69 | 0.566092 | false |
daevaorn/sentry | src/sentry/migrations/0026_auto__add_field_project_status.py | 36 | 11026 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.status'
db.add_column('sentry_project', 'status', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, db_index=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Project.status'
db.delete_column('sentry_project', 'status')
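# Rough SQL equivalent (added note; exact DDL varies by database backend):
# forwards(): ALTER TABLE sentry_project ADD COLUMN status integer NOT NULL DEFAULT 0,
# plus an index on the new column; backwards(): ALTER TABLE sentry_project DROP COLUMN status.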
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectdomain': {
'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'permissions': ('django.db.models.fields.BigIntegerField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'project_set'", 'to': "orm['sentry.User']"})
}
}
complete_apps = ['sentry']
| bsd-3-clause | 5,013,566,886,725,600,000 | 78.323741 | 167 | 0.5575 | false |
tbeadle/django | tests/prefetch_related/test_prefetch_related_objects.py | 35 | 4734 | from django.db.models import Prefetch, prefetch_related_objects
from django.test import TestCase
from .models import Author, Book, Reader
class PrefetchRelatedObjectsTests(TestCase):
"""
Since prefetch_related_objects() is just the inner part of
prefetch_related(), only do basic tests to ensure its API hasn't changed.
"""
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title='Poems')
cls.book2 = Book.objects.create(title='Jane Eyre')
cls.book3 = Book.objects.create(title='Wuthering Heights')
cls.book4 = Book.objects.create(title='Sense and Sensibility')
cls.author1 = Author.objects.create(name='Charlotte', first_book=cls.book1)
cls.author2 = Author.objects.create(name='Anne', first_book=cls.book1)
cls.author3 = Author.objects.create(name='Emily', first_book=cls.book1)
cls.author4 = Author.objects.create(name='Jane', first_book=cls.book4)
cls.book1.authors.add(cls.author1, cls.author2, cls.author3)
cls.book2.authors.add(cls.author1)
cls.book3.authors.add(cls.author3)
cls.book4.authors.add(cls.author4)
cls.reader1 = Reader.objects.create(name='Amy')
cls.reader2 = Reader.objects.create(name='Belinda')
cls.reader1.books_read.add(cls.book1, cls.book4)
cls.reader2.books_read.add(cls.book2, cls.book4)
def test_unknown(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertRaises(AttributeError):
prefetch_related_objects([book1], 'unknown_attribute')
def test_m2m_forward(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertNumQueries(1):
prefetch_related_objects([book1], 'authors')
with self.assertNumQueries(0):
self.assertEqual(set(book1.authors.all()), {self.author1, self.author2, self.author3})
def test_m2m_reverse(self):
author1 = Author.objects.get(id=self.author1.id)
with self.assertNumQueries(1):
prefetch_related_objects([author1], 'books')
with self.assertNumQueries(0):
self.assertEqual(set(author1.books.all()), {self.book1, self.book2})
def test_foreignkey_forward(self):
authors = list(Author.objects.all())
with self.assertNumQueries(1):
prefetch_related_objects(authors, 'first_book')
with self.assertNumQueries(0):
[author.first_book for author in authors]
def test_foreignkey_reverse(self):
books = list(Book.objects.all())
with self.assertNumQueries(1):
prefetch_related_objects(books, 'first_time_authors')
with self.assertNumQueries(0):
[list(book.first_time_authors.all()) for book in books]
def test_m2m_then_m2m(self):
"""
We can follow a m2m and another m2m.
"""
authors = list(Author.objects.all())
with self.assertNumQueries(2):
prefetch_related_objects(authors, 'books__read_by')
with self.assertNumQueries(0):
self.assertEqual(
[
[[str(r) for r in b.read_by.all()] for b in a.books.all()]
for a in authors
],
[
[['Amy'], ['Belinda']], # Charlotte - Poems, Jane Eyre
[['Amy']], # Anne - Poems
[['Amy'], []], # Emily - Poems, Wuthering Heights
[['Amy', 'Belinda']], # Jane - Sense and Sensibility
]
)
def test_prefetch_object(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertNumQueries(1):
prefetch_related_objects([book1], Prefetch('authors'))
with self.assertNumQueries(0):
self.assertEqual(set(book1.authors.all()), {self.author1, self.author2, self.author3})
def test_prefetch_object_to_attr(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertNumQueries(1):
prefetch_related_objects([book1], Prefetch('authors', to_attr='the_authors'))
with self.assertNumQueries(0):
self.assertEqual(set(book1.the_authors), {self.author1, self.author2, self.author3})
def test_prefetch_queryset(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertNumQueries(1):
prefetch_related_objects(
[book1],
Prefetch('authors', queryset=Author.objects.filter(id__in=[self.author1.id, self.author2.id]))
)
with self.assertNumQueries(0):
self.assertEqual(set(book1.authors.all()), {self.author1, self.author2})
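# Usage sketch of the function under test (added; mirrors the patterns exercised
# above rather than introducing new behaviour):
#   books = list(Book.objects.all())            # plain instances, nothing cached
#   prefetch_related_objects(books, 'authors')  # one extra query for all authors
#   books[0].authors.all()                      # now served from the prefetch cache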
| bsd-3-clause | 2,251,514,227,585,012,000 | 38.781513 | 110 | 0.610689 | false |
huang4fstudio/django | tests/forms_tests/tests/test_fields.py | 5 | 86091 | # -*- coding: utf-8 -*-
"""
##########
# Fields #
##########
Each Field class does some sort of validation. Each Field has a clean() method,
which either raises django.forms.ValidationError or returns the "clean"
data -- usually a Unicode object, but, in some rare cases, a list.
Each Field's __init__() takes at least these parameters:
required -- Boolean that specifies whether the field is required.
True by default.
widget -- A Widget class, or instance of a Widget class, that should be
used for this Field when displaying it. Each Field has a default
Widget that it'll use if you don't specify this. In most cases,
the default widget is TextInput.
label -- A verbose name for this field, for use in displaying this field in
a form. By default, Django will use a "pretty" version of the form
field name, if the Field is part of a Form.
initial -- A value to use in this Field's initial display. This value is
*not* used as a fallback if data isn't given.
Other than that, the Field subclasses have class-specific options for
__init__(). For example, CharField has a max_length option.
"""
from __future__ import unicode_literals
import datetime
import os
import pickle
import re
import uuid
from decimal import Decimal
from unittest import skipIf
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import (
BooleanField, CharField, ChoiceField, ComboField, DateField, DateTimeField,
DecimalField, DurationField, EmailField, Field, FileField, FilePathField,
FloatField, Form, GenericIPAddressField, HiddenInput, ImageField,
IntegerField, MultipleChoiceField, NullBooleanField, NumberInput,
PasswordInput, RadioSelect, RegexField, SlugField, SplitDateTimeField,
Textarea, TextInput, TimeField, TypedChoiceField, TypedMultipleChoiceField,
URLField, UUIDField, ValidationError, Widget, forms,
)
from django.test import SimpleTestCase, ignore_warnings
from django.utils import formats, six, translation
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.duration import duration_string
try:
from PIL import Image
except ImportError:
Image = None
def fix_os_paths(x):
if isinstance(x, six.string_types):
return x.replace('\\', '/')
elif isinstance(x, tuple):
return tuple(fix_os_paths(list(x)))
elif isinstance(x, list):
return [fix_os_paths(y) for y in x]
else:
return x
class FieldsTests(SimpleTestCase):
def assertWidgetRendersTo(self, field, to):
class _Form(Form):
f = field
self.assertHTMLEqual(str(_Form()['f']), to)
def test_field_sets_widget_is_required(self):
self.assertTrue(Field(required=True).widget.is_required)
self.assertFalse(Field(required=False).widget.is_required)
def test_cooperative_multiple_inheritance(self):
class A(object):
def __init__(self):
self.class_a_var = True
super(A, self).__init__()
class ComplexField(Field, A):
def __init__(self):
super(ComplexField, self).__init__()
f = ComplexField()
self.assertTrue(f.class_a_var)
# CharField ###################################################################
def test_charfield_1(self):
f = CharField()
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, None)
def test_charfield_2(self):
f = CharField(required=False)
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertEqual('', f.clean(None))
self.assertEqual('', f.clean(''))
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, None)
def test_charfield_3(self):
f = CharField(max_length=10, required=False)
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 10 characters (it has 11).'", f.clean, '1234567890a')
self.assertEqual(f.max_length, 10)
self.assertEqual(f.min_length, None)
def test_charfield_4(self):
f = CharField(min_length=10, required=False)
self.assertEqual('', f.clean(''))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 5).'", f.clean, '12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, 10)
def test_charfield_5(self):
f = CharField(min_length=10, required=True)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 5).'", f.clean, '12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, 10)
def test_charfield_length_not_int(self):
"""
Ensure that setting min_length or max_length to something that is not a
number raises an exception.
"""
self.assertRaises(ValueError, CharField, min_length='a')
self.assertRaises(ValueError, CharField, max_length='a')
self.assertRaises(ValueError, CharField, 'a')
def test_charfield_widget_attrs(self):
"""
Ensure that CharField.widget_attrs() always returns a dictionary.
Refs #15912
"""
# Return an empty dictionary if max_length is None
f = CharField()
self.assertEqual(f.widget_attrs(TextInput()), {})
self.assertEqual(f.widget_attrs(Textarea()), {})
# Otherwise, return a maxlength attribute equal to max_length
f = CharField(max_length=10)
self.assertEqual(f.widget_attrs(TextInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(Textarea()), {'maxlength': '10'})
def test_charfield_strip(self):
"""
Ensure that values have whitespace stripped and that strip=False works.
"""
f = CharField()
self.assertEqual(f.clean(' 1'), '1')
self.assertEqual(f.clean('1 '), '1')
f = CharField(strip=False)
self.assertEqual(f.clean(' 1'), ' 1')
self.assertEqual(f.clean('1 '), '1 ')
# IntegerField ################################################################
def test_integerfield_1(self):
f = IntegerField()
self.assertWidgetRendersTo(f, '<input type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1, f.clean('1'))
self.assertIsInstance(f.clean('1'), int)
self.assertEqual(23, f.clean('23'))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 'a')
self.assertEqual(42, f.clean(42))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 3.14)
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_integerfield_2(self):
f = IntegerField(required=False)
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertEqual(1, f.clean('1'))
self.assertIsInstance(f.clean('1'), int)
self.assertEqual(23, f.clean('23'))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 'a')
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_integerfield_3(self):
f = IntegerField(max_value=10)
self.assertWidgetRendersTo(f, '<input max="10" type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1, f.clean(1))
self.assertEqual(10, f.clean(10))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'", f.clean, 11)
self.assertEqual(10, f.clean('10'))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'", f.clean, '11')
self.assertEqual(f.max_value, 10)
self.assertEqual(f.min_value, None)
def test_integerfield_4(self):
f = IntegerField(min_value=10)
self.assertWidgetRendersTo(f, '<input id="id_f" type="number" name="f" min="10" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'", f.clean, 1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, 10)
def test_integerfield_5(self):
f = IntegerField(min_value=10, max_value=20)
self.assertWidgetRendersTo(f, '<input id="id_f" max="20" type="number" name="f" min="10" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'", f.clean, 1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertEqual(20, f.clean(20))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 20.'", f.clean, 21)
self.assertEqual(f.max_value, 20)
self.assertEqual(f.min_value, 10)
def test_integerfield_localized(self):
"""
Make sure localized IntegerField's widget renders to a text input with
no number input specific attributes.
"""
f1 = IntegerField(localize=True)
self.assertWidgetRendersTo(f1, '<input id="id_f" name="f" type="text" />')
def test_integerfield_float(self):
f = IntegerField()
self.assertEqual(1, f.clean(1.0))
self.assertEqual(1, f.clean('1.0'))
self.assertEqual(1, f.clean(' 1.0 '))
self.assertEqual(1, f.clean('1.'))
self.assertEqual(1, f.clean(' 1. '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1.5')
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '…')
def test_integerfield_big_num(self):
f = IntegerField()
self.assertEqual(9223372036854775808, f.clean(9223372036854775808))
self.assertEqual(9223372036854775808, f.clean('9223372036854775808'))
self.assertEqual(9223372036854775808, f.clean('9223372036854775808.0'))
def test_integerfield_subclass(self):
"""
Test that class-defined widget is not overwritten by __init__ (#22245).
"""
class MyIntegerField(IntegerField):
widget = Textarea
f = MyIntegerField()
self.assertEqual(f.widget.__class__, Textarea)
f = MyIntegerField(localize=True)
self.assertEqual(f.widget.__class__, Textarea)
# FloatField ##################################################################
def test_floatfield_1(self):
f = FloatField()
self.assertWidgetRendersTo(f, '<input step="any" type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1.0, f.clean('1'))
self.assertIsInstance(f.clean('1'), float)
self.assertEqual(23.0, f.clean('23'))
self.assertEqual(3.1400000000000001, f.clean('3.14'))
self.assertEqual(3.1400000000000001, f.clean(3.14))
self.assertEqual(42.0, f.clean(42))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'a')
self.assertEqual(1.0, f.clean('1.0 '))
self.assertEqual(1.0, f.clean(' 1.0'))
self.assertEqual(1.0, f.clean(' 1.0 '))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '1.0a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'Infinity')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'NaN')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '-Inf')
def test_floatfield_2(self):
f = FloatField(required=False)
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(None))
self.assertEqual(1.0, f.clean('1'))
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_floatfield_3(self):
f = FloatField(max_value=1.5, min_value=0.5)
self.assertWidgetRendersTo(f, '<input step="any" name="f" min="0.5" max="1.5" type="number" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 1.5.'", f.clean, '1.6')
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 0.5.'", f.clean, '0.4')
self.assertEqual(1.5, f.clean('1.5'))
self.assertEqual(0.5, f.clean('0.5'))
self.assertEqual(f.max_value, 1.5)
self.assertEqual(f.min_value, 0.5)
def test_floatfield_widget_attrs(self):
f = FloatField(widget=NumberInput(attrs={'step': 0.01, 'max': 1.0, 'min': 0.0}))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" min="0.0" max="1.0" type="number" id="id_f" />')
def test_floatfield_localized(self):
"""
Make sure localized FloatField's widget renders to a text input with
no number input specific attributes.
"""
f = FloatField(localize=True)
self.assertWidgetRendersTo(f, '<input id="id_f" name="f" type="text" />')
def test_floatfield_changed(self):
f = FloatField()
n = 4.35
self.assertFalse(f.has_changed(n, '4.3500'))
with translation.override('fr'), self.settings(USE_L10N=True):
f = FloatField(localize=True)
localized_n = formats.localize_input(n) # -> '4,35' in French
self.assertFalse(f.has_changed(n, localized_n))
# DecimalField ################################################################
def test_decimalfield_1(self):
f = DecimalField(max_digits=4, decimal_places=2)
self.assertWidgetRendersTo(f, '<input id="id_f" step="0.01" type="number" name="f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean('1'), Decimal("1"))
self.assertIsInstance(f.clean('1'), Decimal)
self.assertEqual(f.clean('23'), Decimal("23"))
self.assertEqual(f.clean('3.14'), Decimal("3.14"))
self.assertEqual(f.clean(3.14), Decimal("3.14"))
self.assertEqual(f.clean(Decimal('3.14')), Decimal("3.14"))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'NaN')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'Inf')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '-Inf')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'a')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'łąść')
self.assertEqual(f.clean('1.0 '), Decimal("1.0"))
self.assertEqual(f.clean(' 1.0'), Decimal("1.0"))
self.assertEqual(f.clean(' 1.0 '), Decimal("1.0"))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '1.0a')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '123.45')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '1.234')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 digits before the decimal point.'", f.clean, '123.4')
self.assertEqual(f.clean('-12.34'), Decimal("-12.34"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '-123.45')
self.assertEqual(f.clean('-.12'), Decimal("-0.12"))
self.assertEqual(f.clean('-00.12'), Decimal("-0.12"))
self.assertEqual(f.clean('-000.12'), Decimal("-0.12"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '-000.123')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '-000.12345')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '--0.12')
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_decimalfield_2(self):
f = DecimalField(max_digits=4, decimal_places=2, required=False)
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(None))
self.assertEqual(f.clean('1'), Decimal("1"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_decimalfield_3(self):
f = DecimalField(max_digits=4, decimal_places=2, max_value=Decimal('1.5'), min_value=Decimal('0.5'))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" min="0.5" max="1.5" type="number" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 1.5.'", f.clean, '1.6')
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 0.5.'", f.clean, '0.4')
self.assertEqual(f.clean('1.5'), Decimal("1.5"))
self.assertEqual(f.clean('0.5'), Decimal("0.5"))
self.assertEqual(f.clean('.5'), Decimal("0.5"))
self.assertEqual(f.clean('00.50'), Decimal("0.50"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, Decimal('1.5'))
self.assertEqual(f.min_value, Decimal('0.5'))
def test_decimalfield_4(self):
f = DecimalField(decimal_places=2)
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '0.00000001')
def test_decimalfield_5(self):
f = DecimalField(max_digits=3)
# Leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean('0000000.10'), Decimal("0.1"))
# But a leading 0 before the . doesn't count towards max_digits
self.assertEqual(f.clean('0000000.100'), Decimal("0.100"))
# Only leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean('000000.02'), Decimal('0.02'))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 3 digits in total.'", f.clean, '000000.0002')
self.assertEqual(f.clean('.002'), Decimal("0.002"))
def test_decimalfield_6(self):
f = DecimalField(max_digits=2, decimal_places=2)
self.assertEqual(f.clean('.01'), Decimal(".01"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 0 digits before the decimal point.'", f.clean, '1.1')
def test_decimalfield_scientific(self):
f = DecimalField(max_digits=2, decimal_places=2)
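# Scientific notation is accepted; '0.546e+2' expands to more digits than allowed and is rejected.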
self.assertEqual(f.clean('1E+2'), Decimal('1E+2'))
self.assertEqual(f.clean('1e+2'), Decimal('1E+2'))
with self.assertRaisesMessage(ValidationError, "Ensure that there are no more"):
f.clean('0.546e+2')
def test_decimalfield_widget_attrs(self):
f = DecimalField(max_digits=6, decimal_places=2)
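# A generic Widget gets no extra attrs; NumberInput gets a step derived from decimal_places.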
self.assertEqual(f.widget_attrs(Widget()), {})
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '0.01'})
f = DecimalField(max_digits=10, decimal_places=0)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '1'})
f = DecimalField(max_digits=19, decimal_places=19)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '1e-19'})
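# Without decimal_places, any step is allowed.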
f = DecimalField(max_digits=20)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': 'any'})
f = DecimalField(max_digits=6, widget=NumberInput(attrs={'step': '0.01'}))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" type="number" id="id_f" />')
def test_decimalfield_localized(self):
"""
Make sure localized DecimalField's widget renders to a text input with
no number input specific attributes.
"""
f = DecimalField(localize=True)
self.assertWidgetRendersTo(f, '<input id="id_f" name="f" type="text" />')
def test_decimalfield_changed(self):
f = DecimalField(max_digits=2, decimal_places=2)
d = Decimal("0.1")
self.assertFalse(f.has_changed(d, '0.10'))
self.assertTrue(f.has_changed(d, '0.101'))
with translation.override('fr'), self.settings(USE_L10N=True):
f = DecimalField(max_digits=2, decimal_places=2, localize=True)
localized_d = formats.localize_input(d) # -> '0,1' in French
self.assertFalse(f.has_changed(d, localized_d))
# DateField ###################################################################
def test_datefield_1(self):
f = DateField()
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('2006-10-25'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('10/25/2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('10/25/06'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('Oct 25 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('October 25 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('October 25, 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('25 October 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('25 October, 2006'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '2006-4-31')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '200a-10-25')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '25/10/06')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
def test_datefield_2(self):
f = DateField(required=False)
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datefield_3(self):
f = DateField(input_formats=['%Y %m %d'])
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('2006 10 25'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '2006-10-25')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '10/25/2006')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '10/25/06')
def test_datefield_4(self):
# Test whitespace stripping behavior (#5714)
f = DateField()
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 10/25/2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 10/25/06 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' Oct 25 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' October 25 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' October 25, 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 25 October 2006 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ' ')
def test_datefield_5(self):
# Test null bytes (#18982)
f = DateField()
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, 'a\x00b')
@ignore_warnings(category=RemovedInDjango110Warning) # for _has_changed
def test_datefield_changed(self):
format = '%d/%m/%Y'
f = DateField(input_formats=[format])
d = datetime.date(2007, 9, 17)
self.assertFalse(f.has_changed(d, '17/09/2007'))
# Also test the deprecated _has_changed alias.
self.assertFalse(f._has_changed(d, '17/09/2007'))
def test_datefield_strptime(self):
"""Test that field.strptime doesn't raise an UnicodeEncodeError (#16123)"""
f = DateField()
try:
f.strptime('31 мая 2011', '%d-%b-%y')
except Exception as e:
# assertIsInstance or assertRaises cannot be used because UnicodeEncodeError
# is a subclass of ValueError
self.assertEqual(e.__class__, ValueError)
# TimeField ###################################################################
def test_timefield_1(self):
f = TimeField()
self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
self.assertEqual(datetime.time(14, 25), f.clean('14:25'))
self.assertEqual(datetime.time(14, 25, 59), f.clean('14:25:59'))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, '1:24 p.m.')
def test_timefield_2(self):
f = TimeField(input_formats=['%I:%M %p'])
self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
self.assertEqual(datetime.time(4, 25), f.clean('4:25 AM'))
self.assertEqual(datetime.time(16, 25), f.clean('4:25 PM'))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, '14:30:45')
def test_timefield_3(self):
f = TimeField()
# Test whitespace stripping behavior (#5714)
self.assertEqual(datetime.time(14, 25), f.clean(' 14:25 '))
self.assertEqual(datetime.time(14, 25, 59), f.clean(' 14:25:59 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ' ')
def test_timefield_changed(self):
t1 = datetime.time(12, 51, 34, 482548)
t2 = datetime.time(12, 51)
f = TimeField(input_formats=['%H:%M', '%H:%M %p'])
self.assertTrue(f.has_changed(t1, '12:51'))
self.assertFalse(f.has_changed(t2, '12:51'))
self.assertFalse(f.has_changed(t2, '12:51 PM'))
# DateTimeField ###############################################################
def test_datetimefield_1(self):
f = DateTimeField()
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59, 200), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.0002'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('2006-10-25 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('2006-10-25'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/2006 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/2006 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/2006'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/06 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/06 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/06'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, '2006-10-25 4:30 p.m.')
def test_datetimefield_2(self):
f = DateTimeField(input_formats=['%Y %m %d %I:%M %p'])
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59, 200), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006 10 25 2:30 PM'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, '2006-10-25 14:30:45')
def test_datetimefield_3(self):
f = DateTimeField(required=False)
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datetimefield_4(self):
f = DateTimeField()
# Test whitespace stripping behavior (#5714)
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 2006-10-25 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 2006-10-25 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/2006 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(' 10/25/2006 14:30 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/2006 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/06 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/06 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, ' ')
def test_datetimefield_5(self):
f = DateTimeField(input_formats=['%Y.%m.%d %H:%M:%S.%f'])
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006.10.25 14:30:45.0002'))
def test_datetimefield_changed(self):
format = '%Y %m %d %I:%M %p'
f = DateTimeField(input_formats=[format])
d = datetime.datetime(2006, 9, 17, 14, 30, 0)
self.assertFalse(f.has_changed(d, '2006 09 17 2:30 PM'))
# DurationField ###########################################################
def test_durationfield_1(self):
f = DurationField()
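# Accepts plain seconds, 'MM:SS', 'HH:MM:SS', and 'D HH:MM:SS.f' inputs.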
self.assertEqual(datetime.timedelta(seconds=30), f.clean('30'))
self.assertEqual(
datetime.timedelta(minutes=15, seconds=30),
f.clean('15:30')
)
self.assertEqual(
datetime.timedelta(hours=1, minutes=15, seconds=30),
f.clean('1:15:30')
)
self.assertEqual(
datetime.timedelta(
days=1, hours=1, minutes=15, seconds=30, milliseconds=300),
f.clean('1 1:15:30.3')
)
def test_durationfield_2(self):
class DurationForm(Form):
duration = DurationField(initial=datetime.timedelta(hours=1))
f = DurationForm()
self.assertHTMLEqual(
'<input id="id_duration" type="text" name="duration" value="01:00:00">',
str(f['duration'])
)
def test_durationfield_prepare_value(self):
field = DurationField()
td = datetime.timedelta(minutes=15, seconds=30)
self.assertEqual(field.prepare_value(td), duration_string(td))
self.assertEqual(field.prepare_value('arbitrary'), 'arbitrary')
self.assertIsNone(field.prepare_value(None))
# RegexField ##################################################################
def test_regexfield_1(self):
f = RegexField('^[0-9][A-F][0-9]$')
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, ' 2A2')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '2A2 ')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
def test_regexfield_2(self):
f = RegexField('^[0-9][A-F][0-9]$', required=False)
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertEqual('', f.clean(''))
def test_regexfield_3(self):
f = RegexField(re.compile('^[0-9][A-F][0-9]$'))
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, ' 2A2')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '2A2 ')
@ignore_warnings(category=RemovedInDjango110Warning) # error_message deprecation
def test_regexfield_4(self):
f = RegexField('^[0-9][0-9][0-9][0-9]$', error_message='Enter a four-digit number.')
self.assertEqual('1234', f.clean('1234'))
self.assertRaisesMessage(ValidationError, "'Enter a four-digit number.'", f.clean, '123')
self.assertRaisesMessage(ValidationError, "'Enter a four-digit number.'", f.clean, 'abcd')
def test_regexfield_5(self):
f = RegexField('^[0-9]+$', min_length=5, max_length=10)
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 5 characters (it has 3).'", f.clean, '123')
six.assertRaisesRegex(self, ValidationError, "'Ensure this value has at least 5 characters \(it has 3\)\.', u?'Enter a valid value\.'", f.clean, 'abc')
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 10 characters (it has 11).'", f.clean, '12345678901')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '12345a')
def test_regexfield_6(self):
"""
Ensure that RegexField works with unicode characters.
Refs #.
"""
f = RegexField('^\w+$')
self.assertEqual('éèøçÎÎ你好', f.clean('éèøçÎÎ你好'))
def test_change_regex_after_init(self):
f = RegexField('^[a-z]+$')
f.regex = '^[0-9]+$'
self.assertEqual('1234', f.clean('1234'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, 'abcd')
# EmailField ##################################################################
# See also validators tests for validate_email specific tests
def test_emailfield_1(self):
f = EmailField()
self.assertWidgetRendersTo(f, '<input type="email" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'foo')
self.assertEqual('[email protected]\xe4\xf6\xfc\xdfabc.part.com',
f.clean('[email protected]äöüßabc.part.com'))
def test_email_regexp_for_performance(self):
f = EmailField()
# Check for a runaway (catastrophic backtracking) regex security problem.
# This will take an extremely long time if the security fix isn't in place.
addr = '[email protected]'
self.assertEqual(addr, f.clean(addr))
def test_emailfield_not_required(self):
f = EmailField(required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertEqual('[email protected]', f.clean(' [email protected] \t \t '))
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'foo')
def test_emailfield_min_max_length(self):
f = EmailField(min_length=10, max_length=15)
self.assertWidgetRendersTo(f, '<input id="id_f" type="email" name="f" maxlength="15" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 9).'", f.clean, '[email protected]')
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 15 characters (it has 20).'", f.clean, '[email protected]')
# FileField ##################################################################
def test_filefield_1(self):
f = FileField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '', '')
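# When no new file is submitted, the initial value (the second argument to clean()) is returned.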
self.assertEqual('files/test1.pdf', f.clean('', 'files/test1.pdf'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None, '')
self.assertEqual('files/test2.pdf', f.clean(None, 'files/test2.pdf'))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, SimpleUploadedFile('', b''))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, SimpleUploadedFile('', b''), '')
self.assertEqual('files/test3.pdf', f.clean(None, 'files/test3.pdf'))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, 'some content that is not a file')
self.assertRaisesMessage(ValidationError, "'The submitted file is empty.'", f.clean, SimpleUploadedFile('name', None))
self.assertRaisesMessage(ValidationError, "'The submitted file is empty.'", f.clean, SimpleUploadedFile('name', b''))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'))))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8')))))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'), 'files/test4.pdf')))
def test_filefield_2(self):
f = FileField(max_length=5)
self.assertRaisesMessage(ValidationError, "'Ensure this filename has at most 5 characters (it has 18).'", f.clean, SimpleUploadedFile('test_maxlength.txt', b'hello world'))
self.assertEqual('files/test1.pdf', f.clean('', 'files/test1.pdf'))
self.assertEqual('files/test2.pdf', f.clean(None, 'files/test2.pdf'))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'))))
def test_filefield_3(self):
f = FileField(allow_empty_file=True)
self.assertEqual(SimpleUploadedFile,
type(f.clean(SimpleUploadedFile('name', b''))))
def test_filefield_changed(self):
"""
Test the behavior of has_changed for FileField. The value of data will
most likely come from request.FILES. The value of initial data will
likely be a filename stored in the database. Since its value is of no use
to a FileField, it is ignored.
"""
f = FileField()
# No file was uploaded and no initial data.
self.assertFalse(f.has_changed('', None))
# A file was uploaded and no initial data.
self.assertTrue(f.has_changed('', {'filename': 'resume.txt', 'content': 'My resume'}))
# A file was not uploaded, but there is initial data
self.assertFalse(f.has_changed('resume.txt', None))
# A file was uploaded and there is initial data (file identity is not dealt
# with here)
self.assertTrue(f.has_changed('resume.txt', {'filename': 'resume.txt', 'content': 'My resume'}))
# ImageField ##################################################################
@skipIf(Image is None, "Pillow is required to test ImageField")
def test_imagefield_annotate_with_image_after_clean(self):
f = ImageField()
img_path = os.path.dirname(upath(__file__)) + '/filepath_test_files/1x1.png'
with open(img_path, 'rb') as img_file:
img_data = img_file.read()
img_file = SimpleUploadedFile('1x1.png', img_data)
img_file.content_type = 'text/plain'
uploaded_file = f.clean(img_file)
self.assertEqual('PNG', uploaded_file.image.format)
self.assertEqual('image/png', uploaded_file.content_type)
@skipIf(Image is None, "Pillow is required to test ImageField")
def test_imagefield_annotate_with_bitmap_image_after_clean(self):
"""
This also tests the situation when Pillow doesn't detect the MIME type
of the image (#24948).
"""
from PIL.BmpImagePlugin import BmpImageFile
try:
Image.register_mime(BmpImageFile.format, None)
f = ImageField()
img_path = os.path.dirname(upath(__file__)) + '/filepath_test_files/1x1.bmp'
with open(img_path, 'rb') as img_file:
img_data = img_file.read()
img_file = SimpleUploadedFile('1x1.bmp', img_data)
img_file.content_type = 'text/plain'
uploaded_file = f.clean(img_file)
self.assertEqual('BMP', uploaded_file.image.format)
self.assertIsNone(uploaded_file.content_type)
finally:
Image.register_mime(BmpImageFile.format, 'image/bmp')
# URLField ##################################################################
def test_urlfield_1(self):
f = URLField()
self.assertWidgetRendersTo(f, '<input type="url" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('http://localhost', f.clean('http://localhost'))
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://example.com.', f.clean('http://example.com.'))
self.assertEqual('http://www.example.com', f.clean('http://www.example.com'))
self.assertEqual('http://www.example.com:8000/test', f.clean('http://www.example.com:8000/test'))
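# URLs without a scheme are normalized with an http:// prefix.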
self.assertEqual('http://valid-with-hyphens.com', f.clean('valid-with-hyphens.com'))
self.assertEqual('http://subdomain.domain.com', f.clean('subdomain.domain.com'))
self.assertEqual('http://200.8.9.10', f.clean('http://200.8.9.10'))
self.assertEqual('http://200.8.9.10:8000/test', f.clean('http://200.8.9.10:8000/test'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'com.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, '.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://invalid-.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://-invalid.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://inv-.alid-.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://inv-.-alid.com')
self.assertEqual('http://valid-----hyphens.com', f.clean('http://valid-----hyphens.com'))
self.assertEqual('http://some.idn.xyz\xe4\xf6\xfc\xdfabc.domain.com:123/blah', f.clean('http://some.idn.xyzäöüßabc.domain.com:123/blah'))
self.assertEqual('http://www.example.com/s/http://code.djangoproject.com/ticket/13804', f.clean('www.example.com/s/http://code.djangoproject.com/ticket/13804'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, '[a')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://[a')
def test_url_regex_ticket11198(self):
f = URLField()
# Hangs "forever" if the catastrophic backtracking from ticket #11198 is not fixed.
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://%s' % ("X" * 200,))
# A second test to make sure the problem is really addressed, even on
# domains that don't fail the domain label length check in the regex.
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://%s' % ("X" * 60,))
def test_urlfield_2(self):
f = URLField(required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://www.example.com', f.clean('http://www.example.com'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://.com')
def test_urlfield_5(self):
f = URLField(min_length=15, max_length=20)
self.assertWidgetRendersTo(f, '<input id="id_f" type="url" name="f" maxlength="20" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 15 characters (it has 12).'", f.clean, 'http://f.com')
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 37).'", f.clean, 'http://abcdefghijklmnopqrstuvwxyz.com')
def test_urlfield_6(self):
f = URLField(required=False)
self.assertEqual('http://example.com', f.clean('example.com'))
self.assertEqual('', f.clean(''))
self.assertEqual('https://example.com', f.clean('https://example.com'))
def test_urlfield_7(self):
f = URLField()
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://example.com/test', f.clean('http://example.com/test'))
self.assertEqual('http://example.com?some_param=some_value',
f.clean('http://example.com?some_param=some_value'))
def test_urlfield_9(self):
f = URLField()
urls = (
'http://עברית.idn.icann.org/',
'http://sãopaulo.com/',
'http://sãopaulo.com.br/',
'http://пример.испытание/',
'http://مثال.إختبار/',
'http://例子.测试/',
'http://例子.測試/',
'http://उदाहरण.परीक्षा/',
'http://例え.テスト/',
'http://مثال.آزمایشی/',
'http://실례.테스트/',
'http://العربية.idn.icann.org/',
)
for url in urls:
# Valid IDN
self.assertEqual(url, f.clean(url))
def test_urlfield_10(self):
"""Test URLField correctly validates IPv6 (#18779)."""
f = URLField()
urls = (
'http://[12:34::3a53]/',
'http://[a34:9238::]:8080/',
)
for url in urls:
self.assertEqual(url, f.clean(url))
def test_urlfield_not_string(self):
f = URLField(required=False)
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 23)
def test_urlfield_normalization(self):
f = URLField()
self.assertEqual(f.clean('http://example.com/ '), 'http://example.com/')
# BooleanField ################################################################
def test_booleanfield_1(self):
f = BooleanField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
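# A required BooleanField rejects values that clean to False.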
self.assertEqual(True, f.clean(True))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, False)
self.assertEqual(True, f.clean(1))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, 0)
self.assertEqual(True, f.clean('Django rocks'))
self.assertEqual(True, f.clean('True'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, 'False')
def test_booleanfield_2(self):
f = BooleanField(required=False)
self.assertEqual(False, f.clean(''))
self.assertEqual(False, f.clean(None))
self.assertEqual(True, f.clean(True))
self.assertEqual(False, f.clean(False))
self.assertEqual(True, f.clean(1))
self.assertEqual(False, f.clean(0))
self.assertEqual(True, f.clean('1'))
self.assertEqual(False, f.clean('0'))
self.assertEqual(True, f.clean('Django rocks'))
self.assertEqual(False, f.clean('False'))
self.assertEqual(False, f.clean('false'))
self.assertEqual(False, f.clean('FaLsE'))
def test_boolean_picklable(self):
self.assertIsInstance(pickle.loads(pickle.dumps(BooleanField())), BooleanField)
def test_booleanfield_changed(self):
f = BooleanField()
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed(None, ''))
self.assertFalse(f.has_changed('', None))
self.assertFalse(f.has_changed('', ''))
self.assertTrue(f.has_changed(False, 'on'))
self.assertFalse(f.has_changed(True, 'on'))
self.assertTrue(f.has_changed(True, ''))
# Initial value may have mutated to a string due to show_hidden_initial (#19537)
self.assertTrue(f.has_changed('False', 'on'))
# ChoiceField #################################################################
def test_choicefield_1(self):
f = ChoiceField(choices=[('1', 'One'), ('2', 'Two')])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, '3')
def test_choicefield_2(self):
f = ChoiceField(choices=[('1', 'One'), ('2', 'Two')], required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, '3')
def test_choicefield_3(self):
f = ChoiceField(choices=[('J', 'John'), ('P', 'Paul')])
self.assertEqual('J', f.clean('J'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. John is not one of the available choices.'", f.clean, 'John')
def test_choicefield_4(self):
f = ChoiceField(choices=[('Numbers', (('1', 'One'), ('2', 'Two'))), ('Letters', (('3', 'A'), ('4', 'B'))), ('5', 'Other')])
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertEqual('3', f.clean(3))
self.assertEqual('3', f.clean('3'))
self.assertEqual('5', f.clean(5))
self.assertEqual('5', f.clean('5'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, '6')
def test_choicefield_callable(self):
choices = lambda: [('J', 'John'), ('P', 'Paul')]
f = ChoiceField(choices=choices)
self.assertEqual('J', f.clean('J'))
def test_choicefield_callable_may_evaluate_to_different_values(self):
choices = []
def choices_as_callable():
return choices
class ChoiceFieldForm(Form):
choicefield = ChoiceField(choices=choices_as_callable)
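# The callable is re-evaluated, so a new form instance sees the updated choices.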
choices = [('J', 'John')]
form = ChoiceFieldForm()
self.assertEqual([('J', 'John')], list(form.fields['choicefield'].choices))
choices = [('P', 'Paul')]
form = ChoiceFieldForm()
self.assertEqual([('P', 'Paul')], list(form.fields['choicefield'].choices))
# TypedChoiceField ############################################################
# TypedChoiceField is just like ChoiceField, except that coerced types will
# be returned:
def test_typedchoicefield_1(self):
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual(1, f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, '2')
def test_typedchoicefield_2(self):
# Different coercion, same validation.
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=float)
self.assertEqual(1.0, f.clean('1'))
def test_typedchoicefield_3(self):
# This can also cause weirdness: be careful (bool(-1) == True, remember)
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool)
self.assertEqual(True, f.clean('-1'))
def test_typedchoicefield_4(self):
# Even more weirdness: if you have a valid choice but your coercion function
# can't coerce, you'll still get a validation error. Don't do this!
f = TypedChoiceField(choices=[('A', 'A'), ('B', 'B')], coerce=int)
self.assertRaisesMessage(ValidationError, "'Select a valid choice. B is not one of the available choices.'", f.clean, 'B')
# Required fields require values
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
def test_typedchoicefield_5(self):
# Non-required fields aren't required
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False)
self.assertEqual('', f.clean(''))
# If you want cleaning an empty value to return a different type, tell the field
def test_typedchoicefield_6(self):
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False, empty_value=None)
self.assertIsNone(f.clean(''))
def test_typedchoicefield_has_changed(self):
# has_changed should not trigger required validation
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=True)
self.assertFalse(f.has_changed(None, ''))
self.assertFalse(f.has_changed(1, '1'))
self.assertFalse(f.has_changed('1', '1'))
def test_typedchoicefield_special_coerce(self):
"""
Test a coerce function which results in a value not present in choices.
Refs #21397.
"""
def coerce_func(val):
return Decimal('1.%s' % val)
f = TypedChoiceField(choices=[(1, "1"), (2, "2")], coerce=coerce_func, required=True)
self.assertEqual(Decimal('1.2'), f.clean('2'))
self.assertRaisesMessage(ValidationError,
"'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError,
"'Select a valid choice. 3 is not one of the available choices.'",
f.clean, '3')
# NullBooleanField ############################################################
def test_nullbooleanfield_1(self):
f = NullBooleanField()
self.assertIsNone(f.clean(''))
self.assertEqual(True, f.clean(True))
self.assertEqual(False, f.clean(False))
self.assertIsNone(f.clean(None))
self.assertEqual(False, f.clean('0'))
self.assertEqual(True, f.clean('1'))
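# Unrecognized values are cleaned to None.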
self.assertIsNone(f.clean('2'))
self.assertIsNone(f.clean('3'))
self.assertIsNone(f.clean('hello'))
self.assertEqual(True, f.clean('true'))
self.assertEqual(False, f.clean('false'))
def test_nullbooleanfield_2(self):
# Make sure that the internal value is preserved if using HiddenInput (#7753)
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm()
self.assertHTMLEqual('<input type="hidden" name="hidden_nullbool1" value="True" id="id_hidden_nullbool1" /><input type="hidden" name="hidden_nullbool2" value="False" id="id_hidden_nullbool2" />', str(f))
def test_nullbooleanfield_3(self):
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm({'hidden_nullbool1': 'True', 'hidden_nullbool2': 'False'})
self.assertIsNone(f.full_clean())
self.assertEqual(True, f.cleaned_data['hidden_nullbool1'])
self.assertEqual(False, f.cleaned_data['hidden_nullbool2'])
def test_nullbooleanfield_4(self):
# Make sure we're compatible with MySQL, which uses 0 and 1 for its boolean
# values. (#9609)
NULLBOOL_CHOICES = (('1', 'Yes'), ('0', 'No'), ('', 'Unknown'))
class MySQLNullBooleanForm(Form):
nullbool0 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool1 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool2 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
f = MySQLNullBooleanForm({'nullbool0': '1', 'nullbool1': '0', 'nullbool2': ''})
self.assertIsNone(f.full_clean())
self.assertEqual(True, f.cleaned_data['nullbool0'])
self.assertEqual(False, f.cleaned_data['nullbool1'])
self.assertIsNone(f.cleaned_data['nullbool2'])
def test_nullbooleanfield_changed(self):
f = NullBooleanField()
self.assertTrue(f.has_changed(False, None))
self.assertTrue(f.has_changed(None, False))
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed(False, False))
self.assertTrue(f.has_changed(True, False))
self.assertTrue(f.has_changed(True, None))
self.assertTrue(f.has_changed(True, False))
# MultipleChoiceField #########################################################
def test_multiplechoicefield_1(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two')])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '2'], f.clean(['1', '2']))
self.assertEqual(['1', '2'], f.clean([1, '2']))
self.assertEqual(['1', '2'], f.clean((1, '2')))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ())
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, ['3'])
def test_multiplechoicefield_2(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two')], required=False)
self.assertEqual([], f.clean(''))
self.assertEqual([], f.clean(None))
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '2'], f.clean(['1', '2']))
self.assertEqual(['1', '2'], f.clean([1, '2']))
self.assertEqual(['1', '2'], f.clean((1, '2')))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
self.assertEqual([], f.clean([]))
self.assertEqual([], f.clean(()))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, ['3'])
def test_multiplechoicefield_3(self):
f = MultipleChoiceField(choices=[('Numbers', (('1', 'One'), ('2', 'Two'))), ('Letters', (('3', 'A'), ('4', 'B'))), ('5', 'Other')])
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '5'], f.clean([1, 5]))
self.assertEqual(['1', '5'], f.clean([1, '5']))
self.assertEqual(['1', '5'], f.clean(['1', 5]))
self.assertEqual(['1', '5'], f.clean(['1', '5']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, ['6'])
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, ['1', '6'])
def test_multiplechoicefield_changed(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two'), ('3', 'Three')])
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed([], None))
self.assertTrue(f.has_changed(None, ['1']))
self.assertFalse(f.has_changed([1, 2], ['1', '2']))
self.assertFalse(f.has_changed([2, 1], ['1', '2']))
self.assertTrue(f.has_changed([1, 2], ['1']))
self.assertTrue(f.has_changed([1, 2], ['1', '3']))
# TypedMultipleChoiceField ############################################################
# TypedMultipleChoiceField is just like MultipleChoiceField, except that coerced types
# will be returned:
def test_typedmultiplechoicefield_1(self):
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual([1], f.clean(['1']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, ['2'])
def test_typedmultiplechoicefield_2(self):
# Different coercion, same validation.
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=float)
self.assertEqual([1.0], f.clean(['1']))
def test_typedmultiplechoicefield_3(self):
# This can also cause weirdness: be careful (bool(-1) == True, remember)
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool)
self.assertEqual([True], f.clean(['-1']))
def test_typedmultiplechoicefield_4(self):
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual([1, -1], f.clean(['1', '-1']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, ['1', '2'])
def test_typedmultiplechoicefield_5(self):
# Even more weirdness: if you have a valid choice but your coercion function
# can't coerce, you'll still get a validation error. Don't do this!
f = TypedMultipleChoiceField(choices=[('A', 'A'), ('B', 'B')], coerce=int)
self.assertRaisesMessage(ValidationError, "'Select a valid choice. B is not one of the available choices.'", f.clean, ['B'])
# Required fields require values
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
def test_typedmultiplechoicefield_6(self):
# Non-required fields aren't required
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False)
self.assertEqual([], f.clean([]))
def test_typedmultiplechoicefield_7(self):
# If you want cleaning an empty value to return a different type, tell the field
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False, empty_value=None)
self.assertIsNone(f.clean([]))
def test_typedmultiplechoicefield_has_changed(self):
# has_changed should not trigger required validation
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=True)
self.assertFalse(f.has_changed(None, ''))
def test_typedmultiplechoicefield_special_coerce(self):
"""
Test a coerce function which results in a value not present in choices.
Refs #21397.
"""
def coerce_func(val):
return Decimal('1.%s' % val)
f = TypedMultipleChoiceField(
choices=[(1, "1"), (2, "2")], coerce=coerce_func, required=True)
self.assertEqual([Decimal('1.2')], f.clean(['2']))
self.assertRaisesMessage(ValidationError,
"'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError,
"'Select a valid choice. 3 is not one of the available choices.'",
f.clean, ['3'])
# ComboField ##################################################################
def test_combofield_1(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()])
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 28).'", f.clean, '[email protected]')
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'not an email')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
def test_combofield_2(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()], required=False)
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 28).'", f.clean, '[email protected]')
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'not an email')
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
# FilePathField ###############################################################
def test_filepathfield_1(self):
path = os.path.abspath(upath(forms.__file__))
path = os.path.dirname(path) + '/'
self.assertTrue(fix_os_paths(path).endswith('/django/forms/'))
def test_filepathfield_2(self):
path = upath(forms.__file__)
path = os.path.dirname(os.path.abspath(path)) + '/'
f = FilePathField(path=path)
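# Restrict the choices to .py files so the expected list stays stable.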
f.choices = [p for p in f.choices if p[0].endswith('.py')]
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. fields.py is not one of the available choices.'", f.clean, 'fields.py')
self.assertTrue(fix_os_paths(f.clean(path + 'fields.py')).endswith('/django/forms/fields.py'))
def test_filepathfield_3(self):
path = upath(forms.__file__)
path = os.path.dirname(os.path.abspath(path)) + '/'
f = FilePathField(path=path, match='^.*?\.py$')
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
def test_filepathfield_4(self):
path = os.path.abspath(upath(forms.__file__))
path = os.path.dirname(path) + '/'
f = FilePathField(path=path, recursive=True, match='^.*?\.py$')
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/extras/__init__.py', 'extras/__init__.py'),
('/django/forms/extras/widgets.py', 'extras/widgets.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
def test_filepathfield_folders(self):
path = os.path.dirname(upath(__file__)) + '/filepath_test_files/'
f = FilePathField(path=path, allow_folders=True, allow_files=False)
f.choices.sort()
expected = [
('/tests/forms_tests/tests/filepath_test_files/directory', 'directory'),
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
f = FilePathField(path=path, allow_folders=True, allow_files=True)
f.choices.sort()
expected = [
('/tests/forms_tests/tests/filepath_test_files/.dot-file', '.dot-file'),
('/tests/forms_tests/tests/filepath_test_files/1x1.bmp', '1x1.bmp'),
('/tests/forms_tests/tests/filepath_test_files/1x1.png', '1x1.png'),
('/tests/forms_tests/tests/filepath_test_files/directory', 'directory'),
('/tests/forms_tests/tests/filepath_test_files/fake-image.jpg', 'fake-image.jpg'),
('/tests/forms_tests/tests/filepath_test_files/real-text-file.txt', 'real-text-file.txt'),
]
actual = fix_os_paths(f.choices)
self.assertEqual(len(expected), len(actual))
for exp, got in zip(expected, actual):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
# SplitDateTimeField ##########################################################
def test_splitdatetimefield_1(self):
from django.forms.widgets import SplitDateTimeWidget
f = SplitDateTimeField()
self.assertIsInstance(f.widget, SplitDateTimeWidget)
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)]))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
six.assertRaisesRegex(self, ValidationError, "'Enter a valid date\.', u?'Enter a valid time\.'", f.clean, ['hello', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['hello', '07:30'])
def test_splitdatetimefield_2(self):
f = SplitDateTimeField(required=False)
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)]))
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean(['2006-01-10', '07:30']))
self.assertIsNone(f.clean(None))
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(['']))
self.assertIsNone(f.clean(['', '']))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
six.assertRaisesRegex(self, ValidationError, "'Enter a valid date\.', u?'Enter a valid time\.'", f.clean, ['hello', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['hello', '07:30'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', ''])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['', '07:30'])
def test_splitdatetimefield_changed(self):
f = SplitDateTimeField(input_date_formats=['%d/%m/%Y'])
self.assertFalse(f.has_changed(['11/01/2012', '09:18:15'], ['11/01/2012', '09:18:15']))
self.assertTrue(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['2008-05-06', '12:40:00']))
self.assertFalse(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['06/05/2008', '12:40']))
self.assertTrue(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['06/05/2008', '12:41']))
# GenericIPAddressField #######################################################
def test_generic_ipaddress_invalid_arguments(self):
self.assertRaises(ValueError, GenericIPAddressField, protocol="hamster")
self.assertRaises(ValueError, GenericIPAddressField, protocol="ipv4", unpack_ipv4=True)
def test_generic_ipaddress_as_generic(self):
# The edge cases of the IPv6 validation code are not deeply tested
# here; they are covered in the tests for django.utils.ipv6.
f = GenericIPAddressField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_as_ipv4_only(self):
f = GenericIPAddressField(protocol="IPv4")
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '256.125.1.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, 'fe80::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '2a02::223:6cff:fe8a:2e8a')
def test_generic_ipaddress_as_ipv6_only(self):
f = GenericIPAddressField(protocol="IPv6")
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_as_generic_not_required(self):
f = GenericIPAddressField(required=False)
self.assertEqual(f.clean(''), '')
self.assertEqual(f.clean(None), '')
self.assertEqual(f.clean('127.0.0.1'), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_normalization(self):
# Test the normalizing code
f = GenericIPAddressField()
self.assertEqual(f.clean(' ::ffff:0a0a:0a0a '), '::ffff:10.10.10.10')
self.assertEqual(f.clean(' ::ffff:10.10.10.10 '), '::ffff:10.10.10.10')
self.assertEqual(f.clean(' 2001:000:a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
self.assertEqual(f.clean(' 2001::a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
f = GenericIPAddressField(unpack_ipv4=True)
self.assertEqual(f.clean(' ::ffff:0a0a:0a0a'), '10.10.10.10')
# SlugField ###################################################################
def test_slugfield_normalization(self):
f = SlugField()
self.assertEqual(f.clean(' aa-bb-cc '), 'aa-bb-cc')
# UUIDField ###################################################################
def test_uuidfield_1(self):
field = UUIDField()
value = field.clean('550e8400e29b41d4a716446655440000')
self.assertEqual(value, uuid.UUID('550e8400e29b41d4a716446655440000'))
def test_uuidfield_2(self):
field = UUIDField(required=False)
value = field.clean('')
self.assertEqual(value, None)
def test_uuidfield_3(self):
field = UUIDField()
with self.assertRaises(ValidationError) as cm:
field.clean('550e8400')
self.assertEqual(cm.exception.messages[0], 'Enter a valid UUID.')
def test_uuidfield_4(self):
field = UUIDField()
value = field.prepare_value(uuid.UUID('550e8400e29b41d4a716446655440000'))
self.assertEqual(value, '550e8400e29b41d4a716446655440000')
| bsd-3-clause | 1,439,469,414,719,323,400 | 53.686624 | 211 | 0.621072 | false |
Amechi101/concepteur-market-app | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/universaldetector.py | 200 | 6664 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2122, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(b'[\x80-\xFF]')
self._escDetector = re.compile(b'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = False
self._mStart = True
self._mGotData = False
self._mInputState = ePureAscii
self._mLastChar = b''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done:
return
aLen = len(aBuf)
if not aLen:
return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
if aBuf[:3] == codecs.BOM:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8", 'confidence': 1.0}
elif aBuf[:4] in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
# FF FE 00 00 UTF-32, little-endian BOM
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0
}
elif aBuf[:4] == b'\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE or aBuf[:2] == codecs.BOM_BE:
# FF FE UTF-16, little endian BOM
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif ((self._mInputState == ePureAscii) and
self._escDetector.search(self._mLastChar + aBuf)):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1:]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {
'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()
}
self.done = True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
if self.done:
return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
sys.stderr.write('no probers hit minimum threshhold\n')
for prober in self._mCharSetProbers[0].mProbers:
if not prober:
continue
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(),
prober.get_confidence()))
| mit | -5,147,442,634,639,192,000 | 38.666667 | 78 | 0.557173 | false |
Alwnikrotikz/l5rcm | dialogs/managedatapack.py | 3 | 5873 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2011 Daniele Simonetti
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from PySide import QtCore, QtGui
import dal
class DataPackModel(QtCore.QAbstractTableModel):
def __init__(self, parent = None):
super(DataPackModel, self).__init__(parent)
self.items = []
self.headers = [self.tr('Name' ),
self.tr('Language'),
self.tr('Version'),
self.tr('Authors' ) ]
self.text_color = QtGui.QBrush(QtGui.QColor(0x15, 0x15, 0x15))
self.bg_color = [ QtGui.QBrush(QtGui.QColor(0xFF, 0xEB, 0x82)),
QtGui.QBrush(QtGui.QColor(0xEB, 0xFF, 0x82)) ]
self.item_size = QtCore.QSize(28, 28)
def rowCount(self, parent = QtCore.QModelIndex()):
return len(self.items)
def columnCount(self, parent = QtCore.QModelIndex()):
return len(self.headers)
def headerData(self, section, orientation, role = QtCore.Qt.ItemDataRole.DisplayRole):
if orientation != QtCore.Qt.Orientation.Horizontal:
return None
if role == QtCore.Qt.DisplayRole:
return self.headers[section]
return None
def data(self, index, role = QtCore.Qt.UserRole):
if not index.isValid() or index.row() >= len(self.items):
return None
item = self.items[index.row()]
if role == QtCore.Qt.DisplayRole:
if index.column() == 0:
return item.display_name
if index.column() == 1:
return item.language or self.tr("All")
if index.column() == 2:
return item.version or self.tr("N/A")
if index.column() == 3:
return ", ".join(item.authors) if ( item.authors is not None ) else ""
elif role == QtCore.Qt.ForegroundRole:
return self.text_color
elif role == QtCore.Qt.BackgroundRole:
return self.bg_color[ index.row() % 2 ]
elif role == QtCore.Qt.SizeHintRole:
return self.item_size
elif role == QtCore.Qt.UserRole:
return item
elif role == QtCore.Qt.CheckStateRole:
return self.__checkstate_role(item, index.column())
return None
def setData(self, index, value, role):
if not index.isValid():
return False
ret = False
item = self.items[index.row()]
self.dirty = True
if index.column() == 0 and role == QtCore.Qt.CheckStateRole:
item.active = (value == QtCore.Qt.Checked)
ret = True
else:
ret = super(DataPackModel, self).setData(index, value, role)
return ret
def flags(self, index):
if not index.isValid():
return QtCore.Qt.ItemIsDropEnabled
flags = QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
if index.column() == 0:
flags |= QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEditable
return flags
def __checkstate_role(self, item, column):
if column == 0:
return QtCore.Qt.Checked if item.active else QtCore.Qt.Unchecked
return None
def add_item(self, item):
row = self.rowCount()
self.beginInsertRows(QtCore.QModelIndex(), row, row)
self.items.append(item)
self.endInsertRows()
def clean(self):
self.beginResetModel()
self.items = []
self.endResetModel()
class ManageDataPackDlg(QtGui.QDialog):
def __init__(self, dstore, parent = None):
super(ManageDataPackDlg, self).__init__(parent)
self.dstore = dstore
self.build_ui ()
self.load_data()
def build_ui(self):
self.setWindowTitle(self.tr("Data Pack Manager"))
vbox = QtGui.QVBoxLayout(self)
grp = QtGui.QGroupBox (self.tr("Available data packs"))
self.view = QtGui.QTableView (self)
vbox2 = QtGui.QVBoxLayout(grp)
vbox2.addWidget(self.view)
bts = QtGui.QDialogButtonBox()
bts.addButton(self.tr("Discard"), QtGui.QDialogButtonBox.RejectRole)
bts.addButton(self.tr("Save"), QtGui.QDialogButtonBox.AcceptRole)
vbox.addWidget(grp)
vbox.addWidget(bts)
bts.accepted.connect( self.on_accept )
bts.rejected.connect( self.reject )
self.setMinimumSize( QtCore.QSize(440, 330) )
def load_data(self):
from copy import deepcopy
self.packs = deepcopy(self.dstore.packs)
model = DataPackModel(self)
for pack in self.packs:
model.add_item(pack)
self.view.setModel(model)
def on_accept(self):
self.dstore.packs = self.packs
self.accept()
| gpl-3.0 | -2,336,718,140,894,636,500 | 35.420382 | 90 | 0.558658 | false |
dbaxa/django | tests/model_regress/models.py | 281 | 2293 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
CHOICES = (
(1, 'first'),
(2, 'second'),
)
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100, default='Default headline')
pub_date = models.DateTimeField()
status = models.IntegerField(blank=True, null=True, choices=CHOICES)
misc_data = models.CharField(max_length=100, blank=True)
article_text = models.TextField()
class Meta:
ordering = ('pub_date', 'headline')
# A utf-8 verbose name (Ångström's Articles) to test they are valid.
verbose_name = "\xc3\x85ngstr\xc3\xb6m's Articles"
def __str__(self):
return self.headline
class Movie(models.Model):
# Test models with non-default primary keys / AutoFields #5218
movie_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
class Party(models.Model):
when = models.DateField(null=True)
class Event(models.Model):
when = models.DateTimeField()
@python_2_unicode_compatible
class Department(models.Model):
id = models.PositiveIntegerField(primary_key=True)
name = models.CharField(max_length=200)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Worker(models.Model):
department = models.ForeignKey(Department, models.CASCADE)
name = models.CharField(max_length=200)
def __str__(self):
return self.name
@python_2_unicode_compatible
class BrokenUnicodeMethod(models.Model):
name = models.CharField(max_length=7)
def __str__(self):
# Intentionally broken (invalid start byte in byte string).
return b'Name\xff: %s'.decode() % self.name
class NonAutoPK(models.Model):
name = models.CharField(max_length=10, primary_key=True)
# Chained foreign keys with to_field produce incorrect query #18432
class Model1(models.Model):
pkey = models.IntegerField(unique=True, db_index=True)
class Model2(models.Model):
model1 = models.ForeignKey(Model1, models.CASCADE, unique=True, to_field='pkey')
class Model3(models.Model):
model2 = models.ForeignKey(Model2, models.CASCADE, unique=True, to_field='model1')
| bsd-3-clause | -8,449,453,279,391,274,000 | 25.952941 | 86 | 0.698385 | false |
brianrock/brianrock-ringo | handlers/poll.py | 1 | 7906 | # Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Needed to avoid ambiguity in imports
from __future__ import absolute_import
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.api.labs import taskqueue
import logging
import os.path
import yaml
import time
import random
import re
import oauth
import buzz
import web.helper
import models.tokens
import models.board
OAUTH_CONFIG = yaml.load(open('oauth.yaml').read())
OAUTH_CONSUMER_KEY = OAUTH_CONFIG['oauth_consumer_key']
OAUTH_CONSUMER_SECRET = OAUTH_CONFIG['oauth_consumer_secret']
OAUTH_TOKEN_KEY = OAUTH_CONFIG['oauth_token_key']
OAUTH_TOKEN_SECRET = OAUTH_CONFIG['oauth_token_secret']
PRIORITY_PROFILES = yaml.load(open('polling.yaml').read())
BUZZ_BINGO_ID = '103831860836738334913'
class PollHandler(webapp.RequestHandler):
@property
def client(self):
if not hasattr(self, '_client') or not self._client:
access_token = oauth.OAuthToken(OAUTH_TOKEN_KEY, OAUTH_TOKEN_SECRET)
self._client = buzz.Client()
self._client.build_oauth_consumer(
OAUTH_CONSUMER_KEY, OAUTH_CONSUMER_SECRET
)
self._client.oauth_access_token = access_token
self._client.oauth_scopes.append(buzz.FULL_ACCESS_SCOPE)
return self._client
@property
def combined_results(self):
if not hasattr(self, '_combined_results') or not self._combined_results:
self._combined_results = []
try:
# Ignore the Buzz Bingo game itself
for post in self.client.posts(type_id='@consumption'):
if post.actor.id != BUZZ_BINGO_ID:
self._combined_results.append(post)
for post in self.client.search(query="buzzbingo"):
if post.actor.id != BUZZ_BINGO_ID:
self._combined_results.append(post)
except buzz.RetrieveError, e:
logging.warning(str(e))
logging.info('%d posts will be scored.' % len(self._combined_results))
return self._combined_results
def get(self):
cron = False
if self.request.headers.get('X-AppEngine-Cron') == 'true':
cron = True
elif self.request.headers.get('Referer') and \
self.request.headers.get('Referer').find('/_ah/admin/cron') != -1:
cron = True
if cron:
try:
result_task = taskqueue.Task(url='/worker/poll/')
result_task.add()
logging.info('Polling task enqueued...')
except (taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError), e:
logging.error(str(e))
result_task = None
template_values = {
'http_get': True,
'message': None
}
path = os.path.join(
os.path.dirname(__file__), '..', 'templates', 'poll.html'
)
self.response.out.write(template.render(path, template_values))
def scan_post(self, post_id):
logging.info('Scanning post: %s' % post_id)
topics_found = set([])
players = set([])
nonexistent_players = set([])
ignored_players = set([])
scoring_players = set([])
post = self.client.post(post_id).data
if post.actor.id == BUZZ_BINGO_ID:
return None
post_uri = post.uri
comments = post.comments()
retrieved_comments = []
post_content = post.content.lower()
post_content = re.sub('<br />|\\r|\\n', ' ', post_content)
# Avoid false positive
post_content = re.sub('buzz ?bingo', 'BUZZBINGO', post_content)
if post_content.find('BUZZBINGO') != -1:
players.add(post.actor.id)
for topic in models.board.TOPIC_LIST:
if post_content.find(topic.lower()) != -1:
topics_found.add(topic)
if post_content.find('taco'.lower()) != -1:
topics_found.add('taco')
for comment in comments:
# Need to avoid making unnecessary HTTP requests
retrieved_comments.append(comment)
comment_content = comment.content.lower()
comment_content = re.sub('<br />|\\r|\\n', ' ', comment_content)
# Avoid false positive
comment_content = re.sub('buzz ?bingo', 'BUZZBINGO', comment_content)
if comment_content.find('BUZZBINGO') != -1:
players.add(comment.actor.id)
for topic in models.board.TOPIC_LIST:
if comment_content.find(topic.lower()) != -1:
topics_found.add(topic)
if comment_content.find('taco'.lower()) != -1:
topics_found.add('taco')
for player_id in players:
player = models.player.Player.get_by_key_name(player_id)
if player:
intersection = [
topic for topic in player.topics if topic in topics_found
]
if player.has_post_scored(post_id):
logging.info("Player already scored this.")
# Sometimes a bingo gets missed by retrying a transaction
db.run_in_transaction(player.verify_bingo)
elif intersection:
scoring_players.add(player)
scoring_topic = random.choice(intersection)
db.run_in_transaction(
player.score_post, post, scoring_topic
)
# Can't be run in the transaction, hopefully there won't be
# any nasty race conditions
player.award_leader_badge()
else:
ignored_players.add(player)
else:
nonexistent_players.add(player_id)
# Lots of logging, because this turns out to be tricky to get right.
topics_log_message = 'Topics found:\n'
for topic in topics_found:
topics_log_message += topic + '\n'
logging.info(topics_log_message)
scoring_log_message = 'Players scoring:\n'
for player in scoring_players:
scoring_log_message += '%s\n' % repr(player)
logging.info(scoring_log_message)
ignored_log_message = 'Players ignored and not scoring:\n'
for player in ignored_players:
ignored_log_message += '%s\n' % repr(player)
logging.info(ignored_log_message)
nonexistent_log_message = 'Players who might score if they signed up:\n'
for player_id in nonexistent_players:
nonexistent_log_message += '%s\n' % player_id
logging.info(nonexistent_log_message)
def post(self):
post_id = self.request.get('post_id')
message = ''
if post_id:
self.scan_post(post_id)
else:
for result in self.combined_results:
try:
if result.actor.profile_name in PRIORITY_PROFILES:
# Give priority access to profiles used in any demo.
countdown = 0
logging.info('Priority scan: %s' % result.id)
else:
# One second delay for everyone else, which should be fine.
countdown = 1
result_task = taskqueue.Task(
name="%s-%d" % (result.id[25:], int(time.time())),
params={
'post_id': result.id
},
url='/worker/poll/',
countdown=countdown
)
result_task.add()
logging.info('Scanning task enqueued: %s' % result.id)
except (taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError), e:
logging.error(str(e))
result_task = None
message = 'Retrieved %d posts.' % len(self.combined_results)
template_values = {
'http_get': False,
'message': message
}
path = os.path.join(
os.path.dirname(__file__), '..', 'templates', 'poll.html'
)
self.response.out.write(template.render(path, template_values))
| apache-2.0 | -2,346,677,965,826,823,000 | 35.266055 | 84 | 0.641159 | false |
crowdata/crowdata | crowdataapp/migrations/0020_auto__add_field_documentset_header_image.py | 2 | 12999 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'DocumentSet.header_image'
db.add_column(u'crowdataapp_documentset', 'header_image',
self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'DocumentSet.header_image'
db.delete_column(u'crowdataapp_documentset', 'header_image')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'crowdataapp.document': {
'Meta': {'object_name': 'Document'},
'document_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documents'", 'to': u"orm['crowdataapp.DocumentSet']"}),
'entries_threshold_override': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': "'512'"}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'crowdataapp.documentset': {
'Meta': {'object_name': 'DocumentSet'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entries_threshold': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'head_html': ('django.db.models.fields.TextField', [], {'default': '\'<!-- <script> or <link rel="stylesheet"> tags go here -->\'', 'null': 'True'}),
'header_image': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'128'"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'template_function': ('django.db.models.fields.TextField', [], {'default': "'// Javascript function to insert the document into the DOM.\\n// Receives the URL of the document as its only parameter.\\n// Must be called insertDocument\\n// JQuery is available\\n// resulting element should be inserted into div#document-viewer-container\\nfunction insertDocument(document_url) {\\n}\\n'"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'crowdataapp.documentsetfieldentry': {
'Meta': {'object_name': 'DocumentSetFieldEntry'},
'entry': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['crowdataapp.DocumentSetFormEntry']"}),
'field_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True'}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'crowdataapp.documentsetform': {
'Meta': {'object_name': 'DocumentSetForm'},
'button_text': ('django.db.models.fields.CharField', [], {'default': "u'Submit'", 'max_length': '50'}),
'document_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'form'", 'unique': 'True', 'to': u"orm['crowdataapp.DocumentSet']"}),
'email_copies': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'email_from': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_subject': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intro': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'send_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'default': '[1]', 'to': u"orm['sites.Site']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'crowdataapp.documentsetformentry': {
'Meta': {'object_name': 'DocumentSetFormEntry'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'form_entries'", 'null': 'True', 'to': u"orm['crowdataapp.Document']"}),
'entry_time': ('django.db.models.fields.DateTimeField', [], {}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': u"orm['crowdataapp.DocumentSetForm']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'crowdataapp.documentsetformfield': {
'Meta': {'object_name': 'DocumentSetFormField'},
'autocomplete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'choices': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'default': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'field_type': ('django.db.models.fields.IntegerField', [], {}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['crowdataapp.DocumentSetForm']"}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'placeholder_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'verify': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'crowdataapp.documentsetrankingdefinition': {
'Meta': {'object_name': 'DocumentSetRankingDefinition'},
'document_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rankings'", 'to': u"orm['crowdataapp.DocumentSet']"}),
'grouping_function': ('django.db.models.fields.CharField', [], {'default': "'SUM'", 'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_field': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'label_fields'", 'to': u"orm['crowdataapp.DocumentSetFormField']"}),
'magnitude_field': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'magnitude_fields'", 'null': 'True', 'to': u"orm['crowdataapp.DocumentSetFormField']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'sort_order': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'crowdataapp.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'128'"}),
'show_in_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['crowdataapp'] | mit | 2,916,883,732,037,892,600 | 80.25 | 400 | 0.562889 | false |
scenarios/tensorflow | tensorflow/python/ops/logging_ops.py | 10 | 13502 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging and Summary Operations."""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_logging_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_logging_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util.deprecation import deprecated
# The python wrapper for Assert is in control_flow_ops, as the Assert
# call relies on certain conditionals for its dependencies. Use
# control_flow_ops.Assert.
# Assert and Print are special symbols in python, so we must
# use an upper-case version of them.
def Print(input_, data, message=None, first_n=None, summarize=None,
name=None):
"""Prints a list of tensors.
This is an identity op with the side effect of printing `data` when
evaluating.
Args:
input_: A tensor passed through this op.
data: A list of tensors to print out when op is evaluated.
message: A string, prefix of the error message.
first_n: Only log `first_n` number of times. Negative numbers log always;
this is the default.
summarize: Only print this many entries of each tensor. If None, then a
maximum of 3 elements are printed per input tensor.
name: A name for the operation (optional).
Returns:
Same tensor as `input_`.
"""
return gen_logging_ops._print(input_, data, message, first_n, summarize, name)
@ops.RegisterGradient("Print")
def _PrintGrad(op, *grad):
return list(grad) + [None] * (len(op.inputs) - 1)
def _Collect(val, collections, default_collections):
if collections is None:
collections = default_collections
for key in collections:
ops.add_to_collection(key, val)
def histogram_summary(tag, values, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with a histogram.
This ops is deprecated. Please switch to tf.summary.histogram.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://www.tensorflow.org/code/tensorflow/contrib/deprecated/__init__.py)
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.
This op reports an `InvalidArgument` error if any value is not finite.
Args:
tag: A `string` `Tensor`. 0-D. Tag to use for the summary value.
values: A real numeric `Tensor`. Any shape. Values to use to
build the histogram.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "HistogramSummary", [tag, values]) as scope:
val = gen_logging_ops._histogram_summary(
tag=tag, values=values, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
@deprecated(
"2016-11-30", "Please switch to tf.summary.image. Note that "
"tf.summary.image uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in. Also, the max_images "
"argument was renamed to max_outputs.")
def image_summary(tag, tensor, max_images=3, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with images.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://www.tensorflow.org/code/tensorflow/contrib/deprecated/__init__.py)
The summary has up to `max_images` summary values containing images. The
images are built from `tensor` which must be 4-D with shape `[batch_size,
height, width, channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The images have the same number of channels as the input tensor. For float
input, the values are normalized one image at a time to fit in the range
`[0, 255]`. `uint8` values are unchanged. The op uses two different
normalization algorithms:
* If the input values are all positive, they are rescaled so the largest one
is 255.
* If any input value is negative, the values are shifted so input value 0.0
is at 127. They are then rescaled so that either the smallest value is 0,
or the largest one is 255.
The `tag` argument is a scalar `Tensor` of type `string`. It is used to
build the `tag` of the summary values:
* If `max_images` is 1, the summary value tag is '*tag*/image'.
* If `max_images` is greater than 1, the summary value tags are
generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
Args:
tag: A scalar `Tensor` of type `string`. Used to build the `tag`
of the summary values.
tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
width, channels]` where `channels` is 1, 3, or 4.
max_images: Max number of batch elements to generate images for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [ops.GraphKeys.SUMMARIES]
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "ImageSummary", [tag, tensor]) as scope:
val = gen_logging_ops._image_summary(
tag=tag, tensor=tensor, max_images=max_images, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
@deprecated(
"2016-11-30", "Please switch to tf.summary.audio. Note that "
"tf.summary.audio uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in.")
def audio_summary(tag,
tensor,
sample_rate,
max_outputs=3,
collections=None,
name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with audio.
This op is deprecated. Please switch to tf.summary.audio.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://www.tensorflow.org/code/tensorflow/contrib/deprecated/__init__.py)
The summary has up to `max_outputs` summary values containing audio. The
audio is built from `tensor` which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
`sample_rate`.
The `tag` argument is a scalar `Tensor` of type `string`. It is used to
build the `tag` of the summary values:
* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
Args:
tag: A scalar `Tensor` of type `string`. Used to build the `tag`
of the summary values.
tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [ops.GraphKeys.SUMMARIES]
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "AudioSummary", [tag, tensor]) as scope:
sample_rate = ops.convert_to_tensor(sample_rate, dtype=dtypes.float32,
name="sample_rate")
val = gen_logging_ops._audio_summary_v2(tag=tag,
tensor=tensor,
max_outputs=max_outputs,
sample_rate=sample_rate,
name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
@deprecated("2016-11-30", "Please switch to tf.summary.merge.")
def merge_summary(inputs, collections=None, name=None):
# pylint: disable=line-too-long
"""Merges summaries.
This op is deprecated. Please switch to tf.summary.merge, which has identical
behavior.
This op creates a
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffer that contains the union of all the values in the input
summaries.
When the Op is run, it reports an `InvalidArgument` error if multiple values
in the summaries to merge use the same tag.
Args:
inputs: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer resulting from the merging.
"""
with ops.name_scope(name, "MergeSummary", inputs):
val = gen_logging_ops._merge_summary(inputs=inputs, name=name)
_Collect(val, collections, [])
return val
@deprecated("2016-11-30", "Please switch to tf.summary.merge_all.")
def merge_all_summaries(key=ops.GraphKeys.SUMMARIES):
"""Merges all summaries collected in the default graph.
This op is deprecated. Please switch to tf.summary.merge_all, which has
identical behavior.
Args:
key: `GraphKey` used to collect the summaries. Defaults to
`GraphKeys.SUMMARIES`.
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_ops = ops.get_collection(key)
if not summary_ops:
return None
else:
return merge_summary(summary_ops)
def get_summary_op():
"""Returns a single Summary op that would run all summaries.
Either existing one from `SUMMARY_OP` collection or merges all existing
summaries.
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_op = ops.get_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is not None:
if summary_op:
summary_op = summary_op[0]
else:
summary_op = None
if summary_op is None:
summary_op = merge_all_summaries()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
return summary_op
def scalar_summary(tags, values, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with scalar values.
This ops is deprecated. Please switch to tf.summary.scalar.
For an explanation of why this op was deprecated, and information on how to
migrate, look ['here'](https://www.tensorflow.org/code/tensorflow/contrib/deprecated/__init__.py)
The input `tags` and `values` must have the same shape. The generated
summary has a summary value for each tag-value pair in `tags` and `values`.
Args:
tags: A `string` `Tensor`. Tags for the summaries.
values: A real numeric Tensor. Values for the summaries.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "ScalarSummary", [tags, values]) as scope:
val = gen_logging_ops._scalar_summary(tags=tags, values=values, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
ops.NotDifferentiable("HistogramAccumulatorSummary")
ops.NotDifferentiable("HistogramSummary")
ops.NotDifferentiable("ImageSummary")
ops.NotDifferentiable("AudioSummary")
ops.NotDifferentiable("AudioSummaryV2")
ops.NotDifferentiable("MergeSummary")
ops.NotDifferentiable("ScalarSummary")
| apache-2.0 | 5,820,675,684,226,990,000 | 38.595308 | 99 | 0.696712 | false |
lopopolo/hyperbola | hyperbola/urls.py | 1 | 1226 | """
hyperbola URL Configuration.
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.urls import include, path
from .blog import urls as blog
from .contact import urls as contact
from .core.views import not_found
from .frontpage import urls as frontpage
from .lifestream import urls as lifestream
from .shortlinks import urls as shortlinks
urlpatterns = [
path("", include(frontpage)),
path("w/", include(blog)),
path("contact/", include(contact)),
path("lifestream/", include(lifestream)),
path("s/", include(shortlinks)),
path("404.html", not_found),
] + settings.ENVIRONMENT.additional_urls
| mit | 6,656,134,100,177,578,000 | 34.028571 | 79 | 0.711256 | false |
SergeyPirogov/selene | tests/integration/inner_selement_waiting_search_on_actions_like_click_test.py | 1 | 4742 | import pytest
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selene import config
from selene.common.none_object import NoneObject
from selene.driver import SeleneDriver
from tests.acceptance.helpers.helper import get_test_driver
from tests.integration.helpers.givenpage import GivenPage
__author__ = 'yashaka'
driver = NoneObject('driver') # type: SeleneDriver
GIVEN_PAGE = NoneObject('GivenPage') # type: GivenPage
WHEN = GIVEN_PAGE # type: GivenPage
original_timeout = config.timeout
def setup_module(m):
global driver
driver = SeleneDriver.wrap(get_test_driver())
global GIVEN_PAGE
GIVEN_PAGE = GivenPage(driver)
global WHEN
WHEN = GIVEN_PAGE
def teardown_module(m):
driver.quit()
def setup_function(fn):
global original_timeout
def test_waits_for_inner_visibility():
GIVEN_PAGE\
.opened_with_body(
'''
<p>
<a href="#second" style="display:none">go to Heading 2</a>
<h2 id="second">Heading 2</h2>
</p>''')\
.execute_script_with_timeout(
'document.getElementsByTagName("a")[0].style = "display:block";',
250)
driver.element('p').element('a').click()
assert ('second' in driver.current_url) is True
def test_waits_for_inner_presence_in_dom_and_visibility():
GIVEN_PAGE.opened_with_body(
'''
<p>
<h2 id="second">Heading 2</h2>
</p>''')
WHEN.load_body_with_timeout(
'''
<p>
<a href="#second">go to Heading 2</a>
<h2 id="second">Heading 2</h2>
</p>''',
250)
driver.element('p').element('a').click()
assert ('second' in driver.current_url) is True
def test_waits_first_for_inner_presence_in_dom_then_visibility():
GIVEN_PAGE.opened_with_body(
'''
<p>
<h2 id="second">Heading 2</h2>
</p>''')
WHEN.load_body_with_timeout(
'''
<p>
<a href="#second" style="display:none">go to Heading 2</a>
<h2 id="second">Heading 2</h2>
</p>''',
250)\
.execute_script_with_timeout(
'document.getElementsByTagName("a")[0].style = "display:block";',
500)
driver.element('p').element('a').click()
assert ('second' in driver.current_url) is True
def test_waits_first_for_parent_in_dom_then_inner_in_dom_then_visibility():
GIVEN_PAGE.opened_empty()
WHEN.load_body_with_timeout(
'''
<p>
<h2 id="second">Heading 2</h2>
</p>''',
250)
WHEN.load_body_with_timeout(
'''
<p>
<a href="#second" style="display:none">go to Heading 2</a>
<h2 id="second">Heading 2</h2>
</p>''',
500)\
.execute_script_with_timeout(
'document.getElementsByTagName("a")[0].style = "display:block";',
750)
driver.element('p').element('a').click()
assert ('second' in driver.current_url) is True
def test_waits_first_for_parent_in_dom_then_visible_then_inner_in_dom_then_visibility():
GIVEN_PAGE.opened_empty()
WHEN.load_body_with_timeout(
'''
<p style="display:none">
<h2 id="second">Heading 2</h2>
</p>''',
250)\
.execute_script_with_timeout(
'document.getElementsByTagName("p")[0].style = "display:block";',
500)
WHEN.load_body_with_timeout(
'''
<p>
<a href="#second" style="display:none">go to Heading 2</a>
<h2 id="second">Heading 2</h2>
</p>''',
750)\
.execute_script_with_timeout(
'document.getElementsByTagName("a")[0].style = "display:block";',
1000)
driver.element('p').element('a').click()
assert ('second' in driver.current_url) is True
# todo: there should be each such test method for each "passing" test from above...
def test_fails_on_timeout_during_waiting_for_inner_visibility():
config.timeout = 0.25
GIVEN_PAGE\
.opened_with_body(
'''
<p>
<a href='#second' style='display:none'>go to Heading 2</a>
<h2 id='second'>Heading 2</h2>
</p>''')\
.execute_script_with_timeout(
'document.getElementsByTagName("a")[0].style = "display:block";',
500)
with pytest.raises(TimeoutException):
driver.element('p').element('a').click()
assert ('second' in driver.current_url) is False
| mit | -5,820,012,015,551,275,000 | 29.203822 | 88 | 0.551877 | false |
mitre/multiscanner | multiscanner/tests/test_parse_reports.py | 2 | 1531 | # -*- coding: utf-8 -*-
import multiscanner
def test_valid_reports_string():
reportlist = [([('file', 'result')], {'Name': 'Test', 'Type': 'Test'})]
r = multiscanner.parse_reports(reportlist, python=False)
assert r == '{"file":{"Test":"result"}}'
def test_valid_reports_python():
reportlist = [([('file', 'result')], {'Name': 'Test', 'Type': 'Test'})]
r = multiscanner.parse_reports(reportlist, python=True)
assert r == {"file": {"Test": "result"}}
def test_valid_utf8_string():
reportlist = [([('file', '안녕하세요')], {'Name': 'Test', 'Type': 'Test'})]
r = multiscanner.parse_reports(reportlist, python=False)
assert r == u'{"file":{"Test":"안녕하세요"}}'
def test_valid_utf8_python():
reportlist = [([('file', '안녕하세요')], {'Name': 'Test', 'Type': 'Test'})]
r = multiscanner.parse_reports(reportlist, python=True)
assert r == {"file": {"Test": "안녕하세요"}}
def test_invalid_utf8_string():
reportlist = [([('file', '\x97안녕하세요')], {'Name': 'Test', 'Type': 'Test'})]
r = multiscanner.parse_reports(reportlist, python=False)
assert r == u'{"file":{"Test":"\x97안녕하세요"}}' or r == u'{"file":{"Test":"\ufffd안녕하세요"}}'
def test_invalid_utf8_python():
reportlist = [([('file', '\x97안녕하세요')], {'Name': 'Test', 'Type': 'Test'})]
r = multiscanner.parse_reports(reportlist, python=True)
assert r == {"file": {"Test": "\x97안녕하세요"}} or r == {"file": {"Test": u"\ufffd안녕하세요"}}
| mpl-2.0 | 6,658,379,538,375,173,000 | 36.657895 | 91 | 0.583508 | false |
ddurst/zamboni | manage.py | 17 | 2113 | #!/usr/bin/env python
import logging
import os
import sys
from django.core.management import execute_from_command_line
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
if len(sys.argv) > 1 and sys.argv[1] == 'test':
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings_test'
else:
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mkt.settings')
# waffle and mkt form an import cycle because mkt patches waffle and
# waffle loads the user model, so we have to make sure mkt gets
# imported before anything else imports waffle.
import mkt # noqa
import session_csrf # noqa
session_csrf.monkeypatch()
# Fix jinja's Markup class to not crash when localizers give us bad format
# strings.
from jinja2 import Markup # noqa
mod = Markup.__mod__
trans_log = logging.getLogger('z.trans')
# Load this early so that anything else you import will use these log settings.
# Mostly to shut Raven the hell up.
from lib.log_settings_base import log_configure # noqa
log_configure()
def new(self, arg):
try:
return mod(self, arg)
except Exception:
trans_log.error(unicode(self))
return ''
Markup.__mod__ = new
# Import for side-effect: configures our logging handlers.
# pylint: disable-msg=W0611
from lib.utils import update_csp, validate_modules, validate_settings # noqa
update_csp()
validate_modules()
validate_settings()
import django.conf # noqa
newrelic_ini = getattr(django.conf.settings, 'NEWRELIC_INI', None)
load_newrelic = False
# Monkey patches DRF to not use fqdn urls.
from mkt.api.patch import patch # noqa
patch()
if newrelic_ini:
import newrelic.agent # noqa
try:
newrelic.agent.initialize(newrelic_ini)
load_newrelic = True
except:
startup_logger = logging.getLogger('z.startup')
startup_logger.exception('Failed to load new relic config.')
# Alter zamboni to run on a particular port as per the
# marketplace docs, unless overridden.
from django.core.management.commands import runserver # noqa
runserver.DEFAULT_PORT = 2600
if __name__ == '__main__':
execute_from_command_line(sys.argv)
| bsd-3-clause | 7,407,909,184,496,092,000 | 27.945205 | 79 | 0.716517 | false |
rjschwei/WALinuxAgent | azurelinuxagent/common/utils/restutil.py | 1 | 18482 | # Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import os
import re
import threading
import time
import traceback
import socket
import struct
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.textutil as textutil
from azurelinuxagent.common.exception import HttpError, ResourceGoneError, InvalidContainerError
from azurelinuxagent.common.future import httpclient, urlparse, ustr
from azurelinuxagent.common.version import PY_VERSION_MAJOR, AGENT_NAME, GOAL_STATE_AGENT_VERSION
SECURE_WARNING_EMITTED = False
DEFAULT_RETRIES = 6
DELAY_IN_SECONDS = 1
THROTTLE_RETRIES = 25
THROTTLE_DELAY_IN_SECONDS = 1
REDACTED_TEXT = "<SAS_SIGNATURE>"
SAS_TOKEN_RETRIEVAL_REGEX = re.compile(r'^(https?://[a-zA-Z0-9.].*sig=)([a-zA-Z0-9%-]*)(.*)$')
RETRY_CODES = [
httpclient.RESET_CONTENT,
httpclient.PARTIAL_CONTENT,
httpclient.FORBIDDEN,
httpclient.INTERNAL_SERVER_ERROR,
httpclient.NOT_IMPLEMENTED,
httpclient.BAD_GATEWAY,
httpclient.SERVICE_UNAVAILABLE,
httpclient.GATEWAY_TIMEOUT,
httpclient.INSUFFICIENT_STORAGE,
429, # Request Rate Limit Exceeded
]
RESOURCE_GONE_CODES = [
httpclient.GONE
]
OK_CODES = [
httpclient.OK,
httpclient.CREATED,
httpclient.ACCEPTED
]
NOT_MODIFIED_CODES = [
httpclient.NOT_MODIFIED
]
HOSTPLUGIN_UPSTREAM_FAILURE_CODES = [
502
]
THROTTLE_CODES = [
httpclient.FORBIDDEN,
httpclient.SERVICE_UNAVAILABLE,
429, # Request Rate Limit Exceeded
]
RETRY_EXCEPTIONS = [
httpclient.NotConnected,
httpclient.IncompleteRead,
httpclient.ImproperConnectionState,
httpclient.BadStatusLine
]
# http://www.gnu.org/software/wget/manual/html_node/Proxies.html
HTTP_PROXY_ENV = "http_proxy"
HTTPS_PROXY_ENV = "https_proxy"
NO_PROXY_ENV = "no_proxy"
HTTP_USER_AGENT = "{0}/{1}".format(AGENT_NAME, GOAL_STATE_AGENT_VERSION)
HTTP_USER_AGENT_HEALTH = "{0}+health".format(HTTP_USER_AGENT)
INVALID_CONTAINER_CONFIGURATION = "InvalidContainerConfiguration"
REQUEST_ROLE_CONFIG_FILE_NOT_FOUND = "RequestRoleConfigFileNotFound"
KNOWN_WIRESERVER_IP = '168.63.129.16'
HOST_PLUGIN_PORT = 32526
class IOErrorCounter(object):
_lock = threading.RLock()
_protocol_endpoint = KNOWN_WIRESERVER_IP
_counts = {"hostplugin":0, "protocol":0, "other":0}
@staticmethod
def increment(host=None, port=None):
with IOErrorCounter._lock:
if host == IOErrorCounter._protocol_endpoint:
if port == HOST_PLUGIN_PORT:
IOErrorCounter._counts["hostplugin"] += 1
else:
IOErrorCounter._counts["protocol"] += 1
else:
IOErrorCounter._counts["other"] += 1
@staticmethod
def get_and_reset():
with IOErrorCounter._lock:
counts = IOErrorCounter._counts.copy()
IOErrorCounter.reset()
return counts
@staticmethod
def reset():
with IOErrorCounter._lock:
IOErrorCounter._counts = {"hostplugin":0, "protocol":0, "other":0}
@staticmethod
def set_protocol_endpoint(endpoint=KNOWN_WIRESERVER_IP):
IOErrorCounter._protocol_endpoint = endpoint
def _compute_delay(retry_attempt=1, delay=DELAY_IN_SECONDS):
fib = (1, 1)
for n in range(retry_attempt):
fib = (fib[1], fib[0]+fib[1])
return delay*fib[1]
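# Worked example, assuming the default DELAY_IN_SECONDS of 1: the delay grows
# along the Fibonacci series.
#     _compute_delay(retry_attempt=1)  # -> 2
#     _compute_delay(retry_attempt=2)  # -> 3
#     _compute_delay(retry_attempt=3)  # -> 5
#     _compute_delay(retry_attempt=4)  # -> 8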
def _is_retry_status(status, retry_codes=RETRY_CODES):
return status in retry_codes
def _is_retry_exception(e):
return len([x for x in RETRY_EXCEPTIONS if isinstance(e, x)]) > 0
def _is_throttle_status(status):
return status in THROTTLE_CODES
def _parse_url(url):
"""
    Parse a URL into its component parts: hostname, port, secure flag, and relative URI
:rtype: string, int, bool, string
"""
o = urlparse(url)
rel_uri = o.path
if o.fragment:
rel_uri = "{0}#{1}".format(rel_uri, o.fragment)
if o.query:
rel_uri = "{0}?{1}".format(rel_uri, o.query)
secure = False
if o.scheme.lower() == "https":
secure = True
return o.hostname, o.port, secure, rel_uri
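# Example of the returned tuple (a sketch; the URL is a placeholder):
#     _parse_url("https://example.com:8080/path/to/item?x=1")
#     # -> ("example.com", 8080, True, "/path/to/item?x=1")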
def is_valid_cidr(string_network):
"""
Very simple check of the cidr format in no_proxy variable.
:rtype: bool
"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
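# For illustration:
#     is_valid_cidr("10.0.0.0/8")    # True
#     is_valid_cidr("10.0.0.0/40")   # False -- mask outside 1..32
#     is_valid_cidr("10.0.0.1")      # False -- no '/mask' part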
def dotted_netmask(mask):
"""Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
:rtype: str
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
def address_in_network(ip, net):
"""This function allows you to check if an IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
:rtype: bool
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def is_ipv4_address(string_ip):
"""
:rtype: bool
"""
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def get_no_proxy():
no_proxy = os.environ.get(NO_PROXY_ENV) or os.environ.get(NO_PROXY_ENV.upper())
if no_proxy:
no_proxy = [host for host in no_proxy.replace(' ', '').split(',') if host]
# no_proxy in the proxies argument takes precedence
return no_proxy
def bypass_proxy(host):
no_proxy = get_no_proxy()
if no_proxy:
if is_ipv4_address(host):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(host, proxy_ip):
return True
elif host == proxy_ip:
                    # If the no_proxy entry was defined in plain IP notation instead of
                    # CIDR notation and exactly matches the host IP
return True
else:
for proxy_domain in no_proxy:
if host.lower().endswith(proxy_domain.lower()):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
return False
def _get_http_proxy(secure=False):
# Prefer the configuration settings over environment variables
host = conf.get_httpproxy_host()
port = None
if not host is None:
port = conf.get_httpproxy_port()
else:
http_proxy_env = HTTPS_PROXY_ENV if secure else HTTP_PROXY_ENV
http_proxy_url = None
for v in [http_proxy_env, http_proxy_env.upper()]:
if v in os.environ:
http_proxy_url = os.environ[v]
break
if not http_proxy_url is None:
host, port, _, _ = _parse_url(http_proxy_url)
return host, port
def redact_sas_tokens_in_urls(url):
return SAS_TOKEN_RETRIEVAL_REGEX.sub(r"\1" + REDACTED_TEXT + r"\3", url)
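# Example of the redaction (the URL is a made-up placeholder):
#     redact_sas_tokens_in_urls("https://acct.blob.core.windows.net/c?sv=2018&sig=abc123&se=x")
#     # -> "https://acct.blob.core.windows.net/c?sv=2018&sig=<SAS_SIGNATURE>&se=x"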
def _http_request(method, host, rel_uri, port=None, data=None, secure=False,
headers=None, proxy_host=None, proxy_port=None):
headers = {} if headers is None else headers
headers['Connection'] = 'close'
use_proxy = proxy_host is not None and proxy_port is not None
if port is None:
port = 443 if secure else 80
if 'User-Agent' not in headers:
headers['User-Agent'] = HTTP_USER_AGENT
if use_proxy:
conn_host, conn_port = proxy_host, proxy_port
scheme = "https" if secure else "http"
url = "{0}://{1}:{2}{3}".format(scheme, host, port, rel_uri)
else:
conn_host, conn_port = host, port
url = rel_uri
if secure:
conn = httpclient.HTTPSConnection(conn_host,
conn_port,
timeout=10)
if use_proxy:
conn.set_tunnel(host, port)
else:
conn = httpclient.HTTPConnection(conn_host,
conn_port,
timeout=10)
logger.verbose("HTTP connection [{0}] [{1}] [{2}] [{3}]",
method,
redact_sas_tokens_in_urls(url),
data,
headers)
conn.request(method=method, url=url, body=data, headers=headers)
return conn.getresponse()
def http_request(method,
url, data, headers=None,
use_proxy=False,
max_retry=DEFAULT_RETRIES,
retry_codes=RETRY_CODES,
retry_delay=DELAY_IN_SECONDS):
global SECURE_WARNING_EMITTED
host, port, secure, rel_uri = _parse_url(url)
# Use the HTTP(S) proxy
proxy_host, proxy_port = (None, None)
if use_proxy and not bypass_proxy(host):
proxy_host, proxy_port = _get_http_proxy(secure=secure)
if proxy_host or proxy_port:
logger.verbose("HTTP proxy: [{0}:{1}]", proxy_host, proxy_port)
# If httplib module is not built with ssl support,
    # fall back to HTTP if allowed
if secure and not hasattr(httpclient, "HTTPSConnection"):
if not conf.get_allow_http():
raise HttpError("HTTPS is unavailable and required")
secure = False
if not SECURE_WARNING_EMITTED:
logger.warn("Python does not include SSL support")
SECURE_WARNING_EMITTED = True
# If httplib module doesn't support HTTPS tunnelling,
    # fall back to HTTP if allowed
if secure and \
proxy_host is not None and \
proxy_port is not None \
and not hasattr(httpclient.HTTPSConnection, "set_tunnel"):
if not conf.get_allow_http():
raise HttpError("HTTPS tunnelling is unavailable and required")
secure = False
if not SECURE_WARNING_EMITTED:
logger.warn("Python does not support HTTPS tunnelling")
SECURE_WARNING_EMITTED = True
msg = ''
attempt = 0
delay = 0
was_throttled = False
while attempt < max_retry:
if attempt > 0:
# Compute the request delay
# -- Use a fixed delay if the server ever rate-throttles the request
# (with a safe, minimum number of retry attempts)
# -- Otherwise, compute a delay that is the product of the next
# item in the Fibonacci series and the initial delay value
delay = THROTTLE_DELAY_IN_SECONDS \
if was_throttled \
else _compute_delay(retry_attempt=attempt,
delay=retry_delay)
logger.verbose("[HTTP Retry] "
"Attempt {0} of {1} will delay {2} seconds: {3}",
attempt+1,
max_retry,
delay,
msg)
time.sleep(delay)
attempt += 1
try:
resp = _http_request(method,
host,
rel_uri,
port=port,
data=data,
secure=secure,
headers=headers,
proxy_host=proxy_host,
proxy_port=proxy_port)
logger.verbose("[HTTP Response] Status Code {0}", resp.status)
if request_failed(resp):
if _is_retry_status(resp.status, retry_codes=retry_codes):
msg = '[HTTP Retry] {0} {1} -- Status Code {2}'.format(method, url, resp.status)
# Note if throttled and ensure a safe, minimum number of
# retry attempts
if _is_throttle_status(resp.status):
was_throttled = True
max_retry = max(max_retry, THROTTLE_RETRIES)
continue
# If we got a 410 (resource gone) for any reason, raise an exception. The caller will handle it by
# forcing a goal state refresh and retrying the call.
if resp.status in RESOURCE_GONE_CODES:
response_error = read_response_error(resp)
raise ResourceGoneError(response_error)
# If we got a 400 (bad request) because the container id is invalid, it could indicate a stale goal
# state. The caller will handle this exception by forcing a goal state refresh and retrying the call.
if resp.status == httpclient.BAD_REQUEST:
response_error = read_response_error(resp)
if INVALID_CONTAINER_CONFIGURATION in response_error:
raise InvalidContainerError(response_error)
return resp
except httpclient.HTTPException as e:
clean_url = redact_sas_tokens_in_urls(url)
msg = '[HTTP Failed] {0} {1} -- HttpException {2}'.format(method, clean_url, e)
if _is_retry_exception(e):
continue
break
except IOError as e:
IOErrorCounter.increment(host=host, port=port)
clean_url = redact_sas_tokens_in_urls(url)
msg = '[HTTP Failed] {0} {1} -- IOError {2}'.format(method, clean_url, e)
continue
raise HttpError("{0} -- {1} attempts made".format(msg, attempt))
def http_get(url,
headers=None,
use_proxy=False,
max_retry=DEFAULT_RETRIES,
retry_codes=RETRY_CODES,
retry_delay=DELAY_IN_SECONDS):
return http_request("GET",
url, None, headers=headers,
use_proxy=use_proxy,
max_retry=max_retry,
retry_codes=retry_codes,
retry_delay=retry_delay)
def http_head(url,
headers=None,
use_proxy=False,
max_retry=DEFAULT_RETRIES,
retry_codes=RETRY_CODES,
retry_delay=DELAY_IN_SECONDS):
return http_request("HEAD",
url, None, headers=headers,
use_proxy=use_proxy,
max_retry=max_retry,
retry_codes=retry_codes,
retry_delay=retry_delay)
def http_post(url,
data,
headers=None,
use_proxy=False,
max_retry=DEFAULT_RETRIES,
retry_codes=RETRY_CODES,
retry_delay=DELAY_IN_SECONDS):
return http_request("POST",
url, data, headers=headers,
use_proxy=use_proxy,
max_retry=max_retry,
retry_codes=retry_codes,
retry_delay=retry_delay)
def http_put(url,
data,
headers=None,
use_proxy=False,
max_retry=DEFAULT_RETRIES,
retry_codes=RETRY_CODES,
retry_delay=DELAY_IN_SECONDS):
return http_request("PUT",
url, data, headers=headers,
use_proxy=use_proxy,
max_retry=max_retry,
retry_codes=retry_codes,
retry_delay=retry_delay)
def http_delete(url,
headers=None,
use_proxy=False,
max_retry=DEFAULT_RETRIES,
retry_codes=RETRY_CODES,
retry_delay=DELAY_IN_SECONDS):
return http_request("DELETE",
url, None, headers=headers,
use_proxy=use_proxy,
max_retry=max_retry,
retry_codes=retry_codes,
retry_delay=retry_delay)
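# Illustrative usage sketch (the URL below is a placeholder, not part of this module):
#     resp = http_get("http://example.invalid/status", max_retry=3)
#     if request_succeeded(resp):
#         body = resp.read()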
def request_failed(resp, ok_codes=OK_CODES):
return not request_succeeded(resp, ok_codes=ok_codes)
def request_succeeded(resp, ok_codes=OK_CODES):
return resp is not None and resp.status in ok_codes
def request_not_modified(resp):
return resp is not None and resp.status in NOT_MODIFIED_CODES
def request_failed_at_hostplugin(resp, upstream_failure_codes=HOSTPLUGIN_UPSTREAM_FAILURE_CODES):
"""
Host plugin will return 502 for any upstream issue, so a failure is any 5xx except 502
"""
return resp is not None and resp.status >= 500 and resp.status not in upstream_failure_codes
def read_response_error(resp):
result = ''
if resp is not None:
try:
result = "[HTTP Failed] [{0}: {1}] {2}".format(
resp.status,
resp.reason,
resp.read())
# this result string is passed upstream to several methods
# which do a raise HttpError() or a format() of some kind;
# as a result it cannot have any unicode characters
if PY_VERSION_MAJOR < 3:
result = ustr(result, encoding='ascii', errors='ignore')
else:
result = result\
.encode(encoding='ascii', errors='ignore')\
.decode(encoding='ascii', errors='ignore')
result = textutil.replace_non_ascii(result)
except Exception:
logger.warn(traceback.format_exc())
return result
| apache-2.0 | -7,687,376,563,869,384,000 | 31.031196 | 113 | 0.573964 | false |
gregdek/ansible | lib/ansible/modules/packaging/os/apk.py | 82 | 11119 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Kevin Brebanov <https://github.com/kbrebanov>
# Based on pacman (Afterburn <https://github.com/afterburn>, Aaron Bull Schaefer <[email protected]>)
# and apt (Matthew Williams <[email protected]>) modules.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: apk
short_description: Manages apk packages
description:
- Manages I(apk) packages for Alpine Linux.
author: "Kevin Brebanov (@kbrebanov)"
version_added: "2.0"
options:
available:
description:
- During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them)
if the currently installed package is no longer available from any repository.
type: bool
default: 'no'
version_added: "2.4"
name:
description:
- A package name, like C(foo), or multiple packages, like C(foo, bar).
repository:
description:
- A package repository or multiple repositories.
Unlike with the underlying apk command, this list will override the system repositories rather than supplement them.
version_added: "2.4"
state:
description:
- Indicates the desired package(s) state.
- C(present) ensures the package(s) is/are present.
- C(absent) ensures the package(s) is/are absent.
- C(latest) ensures the package(s) is/are present and the latest version(s).
default: present
choices: [ "present", "absent", "latest" ]
update_cache:
description:
      - Update repository indexes. Can be run with other steps or on its own.
type: bool
default: 'no'
upgrade:
description:
- Upgrade all installed packages to their latest version.
type: bool
default: 'no'
notes:
- '"name" and "upgrade" are mutually exclusive.'
   - When used with a `loop:`, each package will be processed individually; it is much more efficient to pass the list directly to the `name` option.
'''
EXAMPLES = '''
# Update repositories and install "foo" package
- apk:
name: foo
update_cache: yes
# Update repositories and install "foo" and "bar" packages
- apk:
name: foo,bar
update_cache: yes
# Remove "foo" package
- apk:
name: foo
state: absent
# Remove "foo" and "bar" packages
- apk:
name: foo,bar
state: absent
# Install the package "foo"
- apk:
name: foo
state: present
# Install the packages "foo" and "bar"
- apk:
name: foo,bar
state: present
# Update repositories and update package "foo" to latest version
- apk:
name: foo
state: latest
update_cache: yes
# Update repositories and update packages "foo" and "bar" to latest versions
- apk:
name: foo,bar
state: latest
update_cache: yes
# Update all installed packages to the latest versions
- apk:
upgrade: yes
# Upgrade / replace / downgrade / uninstall all installed packages to the latest versions available
- apk:
available: yes
upgrade: yes
# Update repositories as a separate step
- apk:
update_cache: yes
# Install package from a specific repository
- apk:
name: foo
state: latest
update_cache: yes
repository: http://dl-3.alpinelinux.org/alpine/edge/main
'''
RETURN = '''
packages:
description: a list of packages that have been changed
returned: when packages have changed
type: list
sample: ['package', 'other-package']
'''
import re
# Import module snippets.
from ansible.module_utils.basic import AnsibleModule
def parse_for_packages(stdout):
packages = []
data = stdout.split('\n')
regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)')
for l in data:
p = regex.search(l)
if p:
packages.append(p.group(1))
return packages
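# For illustration, on an apk output line such as
#     "(1/2) Installing libfoo (1.0.0-r0)"
# the capture group above yields "libfoo" (the sample line is an assumed
# example of apk's progress output, not taken from a real run).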
def update_package_db(module, exit):
cmd = "%s update" % (APK_PATH)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr)
elif exit:
module.exit_json(changed=True, msg='updated repository indexes', stdout=stdout, stderr=stderr)
else:
return True
def query_toplevel(module, name):
# /etc/apk/world contains a list of top-level packages separated by ' ' or \n
# packages may contain repository (@) or version (=<>~) separator characters or start with negation !
regex = re.compile(r'^' + re.escape(name) + r'([@=<>~].+)?$')
with open('/etc/apk/world') as f:
content = f.read().split()
for p in content:
if regex.search(p):
return True
return False
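# Sketch of the matching behaviour, assuming /etc/apk/world contained
# "busybox foo@edge bar>1.2": query_toplevel(module, "foo") and
# query_toplevel(module, "bar") would both be True, while
# query_toplevel(module, "baz") would be False.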
def query_package(module, name):
cmd = "%s -v info --installed %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return True
else:
return False
def query_latest(module, name):
cmd = "%s version %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name))
match = re.search(search_pattern, stdout)
if match and match.group(2) == "<":
return False
return True
def query_virtual(module, name):
cmd = "%s -v info --description %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
search_pattern = r"^%s: virtual meta package" % (re.escape(name))
if re.search(search_pattern, stdout):
return True
return False
def get_dependencies(module, name):
cmd = "%s -v info --depends %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
dependencies = stdout.split()
if len(dependencies) > 1:
return dependencies[1:]
else:
return []
def upgrade_packages(module, available):
if module.check_mode:
cmd = "%s upgrade --simulate" % (APK_PATH)
else:
cmd = "%s upgrade" % (APK_PATH)
if available:
cmd = "%s --available" % cmd
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
if rc != 0:
module.fail_json(msg="failed to upgrade packages", stdout=stdout, stderr=stderr, packages=packagelist)
if re.search(r'^OK', stdout):
module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist)
def install_packages(module, names, state):
upgrade = False
to_install = []
to_upgrade = []
for name in names:
# Check if virtual package
if query_virtual(module, name):
# Get virtual package dependencies
dependencies = get_dependencies(module, name)
for dependency in dependencies:
if state == 'latest' and not query_latest(module, dependency):
to_upgrade.append(dependency)
else:
if not query_toplevel(module, name):
to_install.append(name)
elif state == 'latest' and not query_latest(module, name):
to_upgrade.append(name)
if to_upgrade:
upgrade = True
if not to_install and not upgrade:
module.exit_json(changed=False, msg="package(s) already installed")
packages = " ".join(to_install + to_upgrade)
if upgrade:
if module.check_mode:
cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages)
else:
cmd = "%s add --upgrade %s" % (APK_PATH, packages)
else:
if module.check_mode:
cmd = "%s add --simulate %s" % (APK_PATH, packages)
else:
cmd = "%s add %s" % (APK_PATH, packages)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
if rc != 0:
module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
def remove_packages(module, names):
installed = []
for name in names:
if query_package(module, name):
installed.append(name)
if not installed:
module.exit_json(changed=False, msg="package(s) already removed")
names = " ".join(installed)
if module.check_mode:
cmd = "%s del --purge --simulate %s" % (APK_PATH, names)
else:
cmd = "%s del --purge %s" % (APK_PATH, names)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
# Check to see if packages are still present because of dependencies
for name in installed:
if query_package(module, name):
rc = 1
break
if rc != 0:
module.fail_json(msg="failed to remove %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="removed %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
# ==========================================
# Main control flow.
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
name=dict(type='list'),
repository=dict(type='list'),
update_cache=dict(default='no', type='bool'),
upgrade=dict(default='no', type='bool'),
available=dict(default='no', type='bool'),
),
required_one_of=[['name', 'update_cache', 'upgrade']],
mutually_exclusive=[['name', 'upgrade']],
supports_check_mode=True
)
# Set LANG env since we parse stdout
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
global APK_PATH
APK_PATH = module.get_bin_path('apk', required=True)
p = module.params
# add repositories to the APK_PATH
if p['repository']:
for r in p['repository']:
APK_PATH = "%s --repository %s --repositories-file /dev/null" % (APK_PATH, r)
# normalize the state parameter
if p['state'] in ['present', 'installed']:
p['state'] = 'present'
if p['state'] in ['absent', 'removed']:
p['state'] = 'absent'
if p['update_cache']:
update_package_db(module, not p['name'] and not p['upgrade'])
if p['upgrade']:
upgrade_packages(module, p['available'])
if p['state'] in ['present', 'latest']:
install_packages(module, p['name'], p['state'])
elif p['state'] == 'absent':
remove_packages(module, p['name'])
if __name__ == '__main__':
main()
| gpl-3.0 | 7,164,721,391,530,359,000 | 31.322674 | 147 | 0.624786 | false |
blaze/dask | dask/base.py | 1 | 37839 | from collections import OrderedDict
from collections.abc import Mapping, Iterator
from contextlib import contextmanager
from functools import partial
from hashlib import md5
from operator import getitem
import inspect
import pickle
import os
import threading
import uuid
from distutils.version import LooseVersion
from tlz import merge, groupby, curry, identity
from tlz.functoolz import Compose
from .compatibility import is_dataclass, dataclass_fields
from .context import thread_state
from .core import flatten, quote, get as simple_get, literal
from .hashing import hash_buffer_hex
from .utils import Dispatch, ensure_dict, apply
from . import config, local, threaded
__all__ = (
"DaskMethodsMixin",
"annotate",
"is_dask_collection",
"compute",
"persist",
"optimize",
"visualize",
"tokenize",
"normalize_token",
)
@contextmanager
def annotate(**annotations):
"""Context Manager for setting HighLevelGraph Layer annotations.
Annotations are metadata or soft constraints associated with
tasks that dask schedulers may choose to respect: They signal intent
without enforcing hard constraints. As such, they are
primarily designed for use with the distributed scheduler.
Almost any object can serve as an annotation, but small Python objects
are preferred, while large objects such as NumPy arrays are discouraged.
Callables supplied as an annotation should take a single *key* argument and
produce the appropriate annotation. Individual task keys in the annotated collection
are supplied to the callable.
Parameters
----------
**annotations : key-value pairs
Examples
--------
All tasks within array A should have priority 100 and be retried 3 times
on failure.
>>> import dask
>>> import dask.array as da
>>> with dask.annotate(priority=100, retries=3):
... A = da.ones((10000, 10000))
Prioritise tasks within Array A on flattened block ID.
>>> nblocks = (10, 10)
>>> with dask.annotate(priority=lambda k: k[1]*nblocks[1] + k[2]):
... A = da.ones((1000, 1000), chunks=(100, 100))
Annotations may be nested.
>>> with dask.annotate(priority=1):
... with dask.annotate(retries=3):
... A = da.ones((1000, 1000))
... B = A + 1
"""
prev_annotations = config.get("annotations", {})
new_annotations = {
**prev_annotations,
**{f"annotations.{k}": v for k, v in annotations.items()},
}
with config.set(new_annotations):
yield
def is_dask_collection(x):
"""Returns ``True`` if ``x`` is a dask collection"""
try:
return x.__dask_graph__() is not None
except (AttributeError, TypeError):
return False
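# For illustration (assumes dask.array is available; a sketch, not a doctest):
#     import dask.array as da
#     is_dask_collection(da.ones(5))   # True  -- the object defines __dask_graph__()
#     is_dask_collection([1, 2, 3])    # False -- plain python objects are not collections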
class DaskMethodsMixin(object):
"""A mixin adding standard dask collection methods"""
__slots__ = ()
def visualize(self, filename="mydask", format=None, optimize_graph=False, **kwargs):
"""Render the computation of this object's task graph using graphviz.
Requires ``graphviz`` to be installed.
Parameters
----------
filename : str or None, optional
The name of the file to write to disk. If the provided `filename`
doesn't include an extension, '.png' will be used by default.
If `filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
color: {None, 'order'}, optional
Options to color nodes. Provide ``cmap=`` keyword for additional
colormap
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Examples
--------
>>> x.visualize(filename='dask.pdf') # doctest: +SKIP
>>> x.visualize(filename='dask.pdf', color='order') # doctest: +SKIP
Returns
-------
        result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See Also
--------
dask.base.visualize
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
https://docs.dask.org/en/latest/optimize.html
"""
return visualize(
self,
filename=filename,
format=format,
optimize_graph=optimize_graph,
**kwargs,
)
def persist(self, **kwargs):
"""Persist this dask collection into memory
This turns a lazy Dask collection into a Dask collection with the same
metadata, but now with the results fully computed or actively computing
in the background.
        The action of this function differs significantly depending on the active
task scheduler. If the task scheduler supports asynchronous computing,
such as is the case of the dask.distributed scheduler, then persist
will return *immediately* and the return value's task graph will
contain Dask Future objects. However if the task scheduler only
supports blocking computation then the call to persist will *block*
and the return value's task graph will contain concrete Python results.
This function is particularly useful when using distributed systems,
because the results will be kept in distributed memory, rather than
returned to the local process as with compute.
Parameters
----------
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
**kwargs
Extra keywords to forward to the scheduler function.
Returns
-------
New dask collections backed by in-memory data
See Also
--------
dask.base.persist
"""
(result,) = persist(self, traverse=False, **kwargs)
return result
def compute(self, **kwargs):
"""Compute this dask collection
This turns a lazy Dask collection into its in-memory equivalent.
For example a Dask array turns into a NumPy array and a Dask dataframe
turns into a Pandas dataframe. The entire dataset must fit into memory
before calling this operation.
Parameters
----------
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
kwargs
Extra keywords to forward to the scheduler function.
See Also
--------
dask.base.compute
"""
(result,) = compute(self, traverse=False, **kwargs)
return result
def __await__(self):
try:
from distributed import wait, futures_of
except ImportError as e:
raise ImportError(
"Using async/await with dask requires the `distributed` package"
) from e
from tornado import gen
@gen.coroutine
def f():
if futures_of(self):
yield wait(self)
raise gen.Return(self)
return f().__await__()
def compute_as_if_collection(cls, dsk, keys, scheduler=None, get=None, **kwargs):
"""Compute a graph as if it were of type cls.
Allows for applying the same optimizations and default scheduler."""
schedule = get_scheduler(scheduler=scheduler, cls=cls, get=get)
dsk2 = optimization_function(cls)(ensure_dict(dsk), keys, **kwargs)
return schedule(dsk2, keys, **kwargs)
def dont_optimize(dsk, keys, **kwargs):
return dsk
def optimization_function(x):
return getattr(x, "__dask_optimize__", dont_optimize)
def collections_to_dsk(collections, optimize_graph=True, **kwargs):
"""
Convert many collections into a single dask graph, after optimization
"""
from .highlevelgraph import HighLevelGraph
optimizations = kwargs.pop("optimizations", None) or config.get("optimizations", [])
if optimize_graph:
groups = groupby(optimization_function, collections)
_opt_list = []
for opt, val in groups.items():
dsk, keys = _extract_graph_and_keys(val)
groups[opt] = (dsk, keys)
_opt = opt(dsk, keys, **kwargs)
_opt_list.append(_opt)
for opt in optimizations:
_opt_list = []
group = {}
for k, (dsk, keys) in groups.items():
_opt = opt(dsk, keys, **kwargs)
group[k] = (_opt, keys)
_opt_list.append(_opt)
groups = group
# Merge all graphs
if any(isinstance(graph, HighLevelGraph) for graph in _opt_list):
dsk = HighLevelGraph.merge(*_opt_list)
else:
dsk = merge(*map(ensure_dict, _opt_list))
else:
dsk, _ = _extract_graph_and_keys(collections)
return dsk
def _extract_graph_and_keys(vals):
"""Given a list of dask vals, return a single graph and a list of keys such
that ``get(dsk, keys)`` is equivalent to ``[v.compute() for v in vals]``."""
from .highlevelgraph import HighLevelGraph
graphs, keys = [], []
for v in vals:
graphs.append(v.__dask_graph__())
keys.append(v.__dask_keys__())
if any(isinstance(graph, HighLevelGraph) for graph in graphs):
graph = HighLevelGraph.merge(*graphs)
else:
graph = merge(*map(ensure_dict, graphs))
return graph, keys
def unpack_collections(*args, **kwargs):
"""Extract collections in preparation for compute/persist/etc...
Intended use is to find all collections in a set of (possibly nested)
python objects, do something to them (compute, etc...), then repackage them
in equivalent python objects.
Parameters
----------
*args
Any number of objects. If it is a dask collection, it's extracted and
added to the list of collections returned. By default, python builtin
collections are also traversed to look for dask collections (for more
information see the ``traverse`` keyword).
traverse : bool, optional
If True (default), builtin python collections are traversed looking for
any dask collections they might contain.
Returns
-------
collections : list
A list of all dask collections contained in ``args``
repack : callable
A function to call on the transformed collections to repackage them as
they were in the original ``args``.
"""
traverse = kwargs.pop("traverse", True)
collections = []
repack_dsk = {}
collections_token = uuid.uuid4().hex
def _unpack(expr):
if is_dask_collection(expr):
tok = tokenize(expr)
if tok not in repack_dsk:
repack_dsk[tok] = (getitem, collections_token, len(collections))
collections.append(expr)
return tok
tok = uuid.uuid4().hex
if not traverse:
tsk = quote(expr)
else:
# Treat iterators like lists
typ = list if isinstance(expr, Iterator) else type(expr)
if typ in (list, tuple, set):
tsk = (typ, [_unpack(i) for i in expr])
elif typ in (dict, OrderedDict):
tsk = (typ, [[_unpack(k), _unpack(v)] for k, v in expr.items()])
elif is_dataclass(expr) and not isinstance(expr, type):
tsk = (
apply,
typ,
(),
(
dict,
[
[f.name, _unpack(getattr(expr, f.name))]
for f in dataclass_fields(expr)
],
),
)
else:
return expr
repack_dsk[tok] = tsk
return tok
out = uuid.uuid4().hex
repack_dsk[out] = (tuple, [_unpack(i) for i in args])
def repack(results):
dsk = repack_dsk.copy()
dsk[collections_token] = quote(results)
return simple_get(dsk, out)
return collections, repack
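# Minimal sketch of the contract above, assuming ``x`` and ``y`` are dask collections:
#     collections, repack = unpack_collections([x, 1, {"a": y}])
#     # collections == [x, y]; after computing them,
#     # repack([x_result, y_result]) == ([x_result, 1, {"a": y_result}],)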
def optimize(*args, **kwargs):
"""Optimize several dask collections at once.
Returns equivalent dask collections that all share the same merged and
optimized underlying graph. This can be useful if converting multiple
collections to delayed objects, or to manually apply the optimizations at
strategic points.
Note that in most cases you shouldn't need to call this method directly.
Parameters
----------
*args : objects
Any number of objects. If a dask object, its graph is optimized and
merged with all those of all other dask objects before returning an
equivalent dask collection. Non-dask arguments are passed through
unchanged.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``optimize``. For large collections this can be
expensive. If none of the arguments contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
optimizations : list of callables, optional
Additional optimization passes to perform.
**kwargs
Extra keyword arguments to forward to the optimization passes.
Examples
--------
>>> import dask as d
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> a2, b2 = d.optimize(a, b)
>>> a2.compute() == a.compute()
True
>>> b2.compute() == b.compute()
True
"""
collections, repack = unpack_collections(*args, **kwargs)
if not collections:
return args
dsk = collections_to_dsk(collections, **kwargs)
postpersists = []
for a in collections:
r, s = a.__dask_postpersist__()
postpersists.append(r(dsk, *s))
return repack(postpersists)
def compute(*args, **kwargs):
"""Compute several dask collections at once.
Parameters
----------
args : object
Any number of objects. If it is a dask object, it's computed and the
result is returned. By default, python builtin collections are also
traversed to look for dask objects (for more information see the
``traverse`` keyword). Non-dask arguments are passed through unchanged.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``compute``. For large collections this can be
expensive. If none of the arguments contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
optimize_graph : bool, optional
If True [default], the optimizations for each collection are applied
before computation. Otherwise the graph is run as is. This can be
useful for debugging.
kwargs
Extra keywords to forward to the scheduler function.
Examples
--------
>>> import dask as d
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> d.compute(a, b)
(45, 4.5)
By default, dask objects inside python collections will also be computed:
>>> d.compute({'a': a, 'b': b, 'c': 1})
({'a': 45, 'b': 4.5, 'c': 1},)
"""
traverse = kwargs.pop("traverse", True)
optimize_graph = kwargs.pop("optimize_graph", True)
collections, repack = unpack_collections(*args, traverse=traverse)
if not collections:
return args
schedule = get_scheduler(
scheduler=kwargs.pop("scheduler", None),
collections=collections,
get=kwargs.pop("get", None),
)
dsk = collections_to_dsk(collections, optimize_graph, **kwargs)
keys, postcomputes = [], []
for x in collections:
keys.append(x.__dask_keys__())
postcomputes.append(x.__dask_postcompute__())
results = schedule(dsk, keys, **kwargs)
return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
def visualize(*args, **kwargs):
"""
Visualize several dask graphs at once.
Requires ``graphviz`` to be installed. All options that are not the dask
graph(s) should be passed as keyword arguments.
Parameters
----------
dsk : dict(s) or collection(s)
The dask graph(s) to visualize.
filename : str or None, optional
The name of the file to write to disk. If the provided `filename`
doesn't include an extension, '.png' will be used by default.
If `filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
color : {None, 'order'}, optional
Options to color nodes. Provide ``cmap=`` keyword for additional
colormap
collapse_outputs : bool, optional
Whether to collapse output boxes, which often have empty labels.
Default is False.
verbose : bool, optional
Whether to label output and input boxes even if the data aren't chunked.
Beware: these labels can get very long. Default is False.
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Examples
--------
>>> x.visualize(filename='dask.pdf') # doctest: +SKIP
>>> x.visualize(filename='dask.pdf', color='order') # doctest: +SKIP
Returns
-------
    result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See Also
--------
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
https://docs.dask.org/en/latest/optimize.html
"""
from dask.dot import dot_graph
filename = kwargs.pop("filename", "mydask")
optimize_graph = kwargs.pop("optimize_graph", False)
dsks = []
args3 = []
for arg in args:
if isinstance(arg, (list, tuple, set)):
for a in arg:
if isinstance(a, Mapping):
dsks.append(a)
if is_dask_collection(a):
args3.append(a)
else:
if isinstance(arg, Mapping):
dsks.append(arg)
if is_dask_collection(arg):
args3.append(arg)
dsk = dict(collections_to_dsk(args3, optimize_graph=optimize_graph))
for d in dsks:
dsk.update(d)
color = kwargs.get("color")
if color == "order":
from .order import order
import matplotlib.pyplot as plt
o = order(dsk)
try:
cmap = kwargs.pop("cmap")
except KeyError:
cmap = plt.cm.RdBu
if isinstance(cmap, str):
import matplotlib.pyplot as plt
cmap = getattr(plt.cm, cmap)
mx = max(o.values()) + 1
colors = {k: _colorize(cmap(v / mx, bytes=True)) for k, v in o.items()}
kwargs["function_attributes"] = {
k: {"color": v, "label": str(o[k])} for k, v in colors.items()
}
kwargs["data_attributes"] = {k: {"color": v} for k, v in colors.items()}
elif color:
raise NotImplementedError("Unknown value color=%s" % color)
return dot_graph(dsk, filename=filename, **kwargs)
def persist(*args, **kwargs):
"""Persist multiple Dask collections into memory
This turns lazy Dask collections into Dask collections with the same
metadata, but now with their results fully computed or actively computing
in the background.
For example a lazy dask.array built up from many lazy calls will now be a
dask.array of the same shape, dtype, chunks, etc., but now with all of
those previously lazy tasks either computed in memory as many small :class:`numpy.array`
(in the single-machine case) or asynchronously running in the
background on a cluster (in the distributed case).
This function operates differently if a ``dask.distributed.Client`` exists
and is connected to a distributed scheduler. In this case this function
will return as soon as the task graph has been submitted to the cluster,
but before the computations have completed. Computations will continue
asynchronously in the background. When using this function with the single
machine scheduler it blocks until the computations have finished.
When using Dask on a single machine you should ensure that the dataset fits
entirely within memory.
Examples
--------
>>> df = dd.read_csv('/path/to/*.csv') # doctest: +SKIP
>>> df = df[df.name == 'Alice'] # doctest: +SKIP
>>> df['in-debt'] = df.balance < 0 # doctest: +SKIP
>>> df = df.persist() # triggers computation # doctest: +SKIP
>>> df.value().min() # future computations are now fast # doctest: +SKIP
-10
>>> df.value().max() # doctest: +SKIP
100
>>> from dask import persist # use persist function on multiple collections
>>> a, b = persist(a, b) # doctest: +SKIP
Parameters
----------
*args: Dask collections
scheduler : string, optional
Which scheduler to use like "threads", "synchronous" or "processes".
If not provided, the default is to check the global settings first,
and then fall back to the collection defaults.
traverse : bool, optional
By default dask traverses builtin python collections looking for dask
objects passed to ``persist``. For large collections this can be
expensive. If none of the arguments contain any dask objects, set
``traverse=False`` to avoid doing this traversal.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
**kwargs
Extra keywords to forward to the scheduler function.
Returns
-------
New dask collections backed by in-memory data
"""
traverse = kwargs.pop("traverse", True)
optimize_graph = kwargs.pop("optimize_graph", True)
collections, repack = unpack_collections(*args, traverse=traverse)
if not collections:
return args
schedule = get_scheduler(
scheduler=kwargs.pop("scheduler", None), collections=collections
)
if inspect.ismethod(schedule):
try:
from distributed.client import default_client
except ImportError:
pass
else:
try:
client = default_client()
except ValueError:
pass
else:
if client.get == schedule:
results = client.persist(
collections, optimize_graph=optimize_graph, **kwargs
)
return repack(results)
dsk = collections_to_dsk(collections, optimize_graph, **kwargs)
keys, postpersists = [], []
for a in collections:
a_keys = list(flatten(a.__dask_keys__()))
rebuild, state = a.__dask_postpersist__()
keys.extend(a_keys)
postpersists.append((rebuild, a_keys, state))
results = schedule(dsk, keys, **kwargs)
d = dict(zip(keys, results))
results2 = [r({k: d[k] for k in ks}, *s) for r, ks, s in postpersists]
return repack(results2)
############
# Tokenize #
############
def tokenize(*args, **kwargs):
"""Deterministic token
>>> tokenize([1, 2, '3'])
'7d6a880cd9ec03506eee6973ff551339'
>>> tokenize('Hello') == tokenize('Hello')
True
"""
if kwargs:
args = args + (kwargs,)
return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()
normalize_token = Dispatch()
normalize_token.register(
(int, float, str, bytes, type(None), type, slice, complex, type(Ellipsis)), identity
)
@normalize_token.register(dict)
def normalize_dict(d):
return normalize_token(sorted(d.items(), key=str))
@normalize_token.register(OrderedDict)
def normalize_ordered_dict(d):
return type(d).__name__, normalize_token(list(d.items()))
@normalize_token.register(set)
def normalize_set(s):
return normalize_token(sorted(s, key=str))
@normalize_token.register((tuple, list))
def normalize_seq(seq):
def func(seq):
try:
return list(map(normalize_token, seq))
except RecursionError:
return str(uuid.uuid4())
return type(seq).__name__, func(seq)
@normalize_token.register(literal)
def normalize_literal(lit):
return "literal", normalize_token(lit())
@normalize_token.register(range)
def normalize_range(r):
return list(map(normalize_token, [r.start, r.stop, r.step]))
@normalize_token.register(object)
def normalize_object(o):
method = getattr(o, "__dask_tokenize__", None)
if method is not None:
return method()
return normalize_function(o) if callable(o) else uuid.uuid4().hex
function_cache = {}
function_cache_lock = threading.Lock()
def normalize_function(func):
try:
return function_cache[func]
except KeyError:
result = _normalize_function(func)
if len(function_cache) >= 500: # clear half of cache if full
with function_cache_lock:
if len(function_cache) >= 500:
for k in list(function_cache)[::2]:
del function_cache[k]
function_cache[func] = result
return result
except TypeError: # not hashable
return _normalize_function(func)
def _normalize_function(func):
if isinstance(func, Compose):
first = getattr(func, "first", None)
funcs = reversed((first,) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, (partial, curry)):
args = tuple(normalize_token(i) for i in func.args)
if func.keywords:
kws = tuple(
(k, normalize_token(v)) for k, v in sorted(func.keywords.items())
)
else:
kws = None
return (normalize_function(func.func), args, kws)
else:
try:
result = pickle.dumps(func, protocol=0)
if b"__main__" not in result: # abort on dynamic functions
return result
except Exception:
pass
try:
import cloudpickle
return cloudpickle.dumps(func, protocol=0)
except Exception:
return str(func)
@normalize_token.register_lazy("pandas")
def register_pandas():
import pandas as pd
# Intentionally not importing PANDAS_GT_0240 from dask.dataframe._compat
# to avoid ImportErrors from extra dependencies
PANDAS_GT_0240 = LooseVersion(pd.__version__) >= LooseVersion("0.24.0")
@normalize_token.register(pd.Index)
def normalize_index(ind):
if PANDAS_GT_0240:
values = ind.array
else:
values = ind.values
return [ind.name, normalize_token(values)]
@normalize_token.register(pd.MultiIndex)
def normalize_index(ind):
codes = ind.codes if PANDAS_GT_0240 else ind.levels
return (
[ind.name]
+ [normalize_token(x) for x in ind.levels]
+ [normalize_token(x) for x in codes]
)
@normalize_token.register(pd.Categorical)
def normalize_categorical(cat):
return [normalize_token(cat.codes), normalize_token(cat.dtype)]
if PANDAS_GT_0240:
@normalize_token.register(pd.arrays.PeriodArray)
@normalize_token.register(pd.arrays.DatetimeArray)
@normalize_token.register(pd.arrays.TimedeltaArray)
def normalize_period_array(arr):
return [normalize_token(arr.asi8), normalize_token(arr.dtype)]
@normalize_token.register(pd.arrays.IntervalArray)
def normalize_interval_array(arr):
return [
normalize_token(arr.left),
normalize_token(arr.right),
normalize_token(arr.closed),
]
@normalize_token.register(pd.Series)
def normalize_series(s):
return [
s.name,
s.dtype,
normalize_token(s._data.blocks[0].values),
normalize_token(s.index),
]
@normalize_token.register(pd.DataFrame)
def normalize_dataframe(df):
data = [block.values for block in df._data.blocks]
data.extend([df.columns, df.index])
return list(map(normalize_token, data))
@normalize_token.register(pd.api.extensions.ExtensionArray)
def normalize_extension_array(arr):
import numpy as np
return normalize_token(np.asarray(arr))
# Dtypes
@normalize_token.register(pd.api.types.CategoricalDtype)
def normalize_categorical_dtype(dtype):
return [normalize_token(dtype.categories), normalize_token(dtype.ordered)]
@normalize_token.register(pd.api.extensions.ExtensionDtype)
def normalize_period_dtype(dtype):
return normalize_token(dtype.name)
@normalize_token.register_lazy("numpy")
def register_numpy():
import numpy as np
@normalize_token.register(np.ndarray)
def normalize_array(x):
if not x.shape:
return (x.item(), x.dtype)
if hasattr(x, "mode") and getattr(x, "filename", None):
if hasattr(x.base, "ctypes"):
offset = (
x.ctypes.get_as_parameter().value
- x.base.ctypes.get_as_parameter().value
)
else:
                offset = 0  # root memmaps have the mmap object as base
if hasattr(
x, "offset"
        ): # the offset numpy used when opening the file, not the offset to the beginning of the file
offset += getattr(x, "offset")
return (
x.filename,
os.path.getmtime(x.filename),
x.dtype,
x.shape,
x.strides,
offset,
)
if x.dtype.hasobject:
try:
try:
# string fast-path
data = hash_buffer_hex(
"-".join(x.flat).encode(
encoding="utf-8", errors="surrogatepass"
)
)
except UnicodeDecodeError:
# bytes fast-path
data = hash_buffer_hex(b"-".join(x.flat))
except (TypeError, UnicodeDecodeError):
try:
data = hash_buffer_hex(pickle.dumps(x, pickle.HIGHEST_PROTOCOL))
except Exception:
# pickling not supported, use UUID4-based fallback
data = uuid.uuid4().hex
else:
try:
data = hash_buffer_hex(x.ravel(order="K").view("i1"))
except (BufferError, AttributeError, ValueError):
data = hash_buffer_hex(x.copy().ravel(order="K").view("i1"))
return (data, x.dtype, x.shape, x.strides)
@normalize_token.register(np.matrix)
def normalize_matrix(x):
return type(x).__name__, normalize_array(x.view(type=np.ndarray))
normalize_token.register(np.dtype, repr)
normalize_token.register(np.generic, repr)
@normalize_token.register(np.ufunc)
def normalize_ufunc(x):
try:
name = x.__name__
if getattr(np, name) is x:
return "np." + name
except AttributeError:
return normalize_function(x)
@normalize_token.register_lazy("scipy")
def register_scipy():
import scipy.sparse as sp
def normalize_sparse_matrix(x, attrs):
return (
type(x).__name__,
normalize_seq((normalize_token(getattr(x, key)) for key in attrs)),
)
for cls, attrs in [
(sp.dia_matrix, ("data", "offsets", "shape")),
(sp.bsr_matrix, ("data", "indices", "indptr", "blocksize", "shape")),
(sp.coo_matrix, ("data", "row", "col", "shape")),
(sp.csr_matrix, ("data", "indices", "indptr", "shape")),
(sp.csc_matrix, ("data", "indices", "indptr", "shape")),
(sp.lil_matrix, ("data", "rows", "shape")),
]:
normalize_token.register(cls, partial(normalize_sparse_matrix, attrs=attrs))
@normalize_token.register(sp.dok_matrix)
def normalize_dok_matrix(x):
return type(x).__name__, normalize_token(sorted(x.items()))
def _colorize(t):
"""Convert (r, g, b) triple to "#RRGGBB" string
For use with ``visualize(color=...)``
Examples
--------
>>> _colorize((255, 255, 255))
'#FFFFFF'
>>> _colorize((0, 32, 128))
'#002080'
"""
t = t[:3]
i = sum(v * 256 ** (len(t) - i - 1) for i, v in enumerate(t))
h = hex(int(i))[2:].upper()
h = "0" * (6 - len(h)) + h
return "#" + h
named_schedulers = {
"sync": local.get_sync,
"synchronous": local.get_sync,
"single-threaded": local.get_sync,
"threads": threaded.get,
"threading": threaded.get,
}
try:
from dask import multiprocessing as dask_multiprocessing
except ImportError:
pass
else:
named_schedulers.update(
{
"processes": dask_multiprocessing.get,
"multiprocessing": dask_multiprocessing.get,
}
)
get_err_msg = """
The get= keyword has been removed.
Please use the scheduler= keyword instead with the name of
the desired scheduler like 'threads' or 'processes'
x.compute(scheduler='single-threaded')
x.compute(scheduler='threads')
x.compute(scheduler='processes')
or with a function that takes the graph and keys
x.compute(scheduler=my_scheduler_function)
or with a Dask client
x.compute(scheduler=client)
""".strip()
def get_scheduler(get=None, scheduler=None, collections=None, cls=None):
"""Get scheduler function
There are various ways to specify the scheduler to use:
1. Passing in scheduler= parameters
2. Passing these into global configuration
3. Using defaults of a dask collection
This function centralizes the logic to determine the right scheduler to use
from those many options
"""
if get:
raise TypeError(get_err_msg)
if scheduler is not None:
if callable(scheduler):
return scheduler
elif "Client" in type(scheduler).__name__ and hasattr(scheduler, "get"):
return scheduler.get
elif scheduler.lower() in named_schedulers:
return named_schedulers[scheduler.lower()]
elif scheduler.lower() in ("dask.distributed", "distributed"):
from distributed.worker import get_client
return get_client().get
else:
raise ValueError(
"Expected one of [distributed, %s]"
% ", ".join(sorted(named_schedulers))
)
# else: # try to connect to remote scheduler with this name
# return get_client(scheduler).get
if config.get("scheduler", None):
return get_scheduler(scheduler=config.get("scheduler", None))
if config.get("get", None):
raise ValueError(get_err_msg)
if getattr(thread_state, "key", False):
from distributed.worker import get_worker
return get_worker().client.get
if cls is not None:
return cls.__dask_scheduler__
if collections:
collections = [c for c in collections if c is not None]
if collections:
get = collections[0].__dask_scheduler__
if not all(c.__dask_scheduler__ == get for c in collections):
raise ValueError(
"Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler=` parameter explicitly in compute or "
"globally with `dask.config.set`."
)
return get
return None
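# Illustration of the precedence described above (a sketch, not an exhaustive spec):
#     get_scheduler(scheduler="threads")       # -> dask.threaded.get
#     get_scheduler(scheduler="synchronous")   # -> dask.local.get_sync
#     with config.set(scheduler="processes"):
#         get_scheduler()                      # -> dask.multiprocessing.get, if installed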
def wait(x, timeout=None, return_when="ALL_COMPLETED"):
"""Wait until computation has finished
This is a compatibility alias for ``dask.distributed.wait``.
If it is applied onto Dask collections without Dask Futures or if Dask
distributed is not installed then it is a no-op
"""
try:
from distributed import wait
return wait(x, timeout=timeout, return_when=return_when)
except (ImportError, ValueError):
return x
| bsd-3-clause | -8,081,680,606,068,015,000 | 31.960801 | 98 | 0.608261 | false |
sjohannes/exaile | xl/player/__init__.py | 3 | 1566 | # Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
"""
Allows for playback and queue control
"""
__all__ = ['adapters', 'gst', 'queue', 'PLAYER', 'QUEUE']
import os
from xl import xdg
from . import player
from . import queue
PLAYER = player.ExailePlayer('player')
QUEUE = queue.PlayQueue(
PLAYER, 'queue', location=os.path.join(xdg.get_data_dir(), 'queue.state')
)
| gpl-2.0 | 6,328,799,349,692,610,000 | 35.418605 | 81 | 0.747765 | false |
SDSG-Invenio/invenio | invenio/modules/messages/testsuite/test_messages.py | 15 | 5465 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for WebMessage."""
__revision__ = \
"$Id$"
from invenio.base.wrappers import lazy_import
from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase
webmessage_mailutils = lazy_import('invenio.utils.mail')
class TestQuotingMessage(InvenioTestCase):
"""Test for quoting messages."""
def test_simple_quoting_per_block(self):
"""webmessage - test quoting simple message (HTML, per block)"""
text = """Dear romeo
I received your mail
>>Would you like to come with me to the restaurant?
Of course!
>>>>When could we get together?
Reply to my question please.
see you..."""
expected_text = """Dear romeo<br/>
I received your mail<br/>
<div class="commentbox">
\tWould you like to come with me to the restaurant?<br/>
</div>
Of course!<br/>
<div class="commentbox">
\t<div class="commentbox">
\t\tWhen could we get together?<br/>
\t</div>
</div>
Reply to my question please.<br/>
see you...<br/>
"""
res = webmessage_mailutils.email_quoted_txt2html(text,
tabs_before=0,
indent_txt='>>',
linebreak_txt="\n",
indent_html=('<div class="commentbox">', "</div>"),
linebreak_html='<br/>')
self.assertEqual(res, expected_text)
def test_simple_quoting_per_line(self):
"""webmessage - test quoting simple message (HTML, per line)"""
text = """Dear romeo
I received your mail
>>Would you like to come with me to the restaurant?
>>I discovered a really nice one.
Of course!
>>>>When could we get together?
Reply to my question please.
see you..."""
expected_text = """Dear romeo <br/>
I received your mail <br/>
<blockquote><div>Would you like to come with me to the restaurant? </div></blockquote> <br/>
<blockquote><div>I discovered a really nice one. </div></blockquote> <br/>
Of course! <br/>
<blockquote><div><blockquote><div>When could we get together? </div></blockquote> </div></blockquote> <br/>
Reply to my question please. <br/>
see you... <br/>
"""
res = webmessage_mailutils.email_quoted_txt2html(text,
tabs_before=0,
indent_txt='>>',
linebreak_txt="\n",
indent_html=('<blockquote><div>', ' </div></blockquote>'),
linebreak_html=" <br/>",
indent_block=False)
self.assertEqual(res, expected_text)
def test_quoting_message(self):
"""webmessage - test quoting message (text)"""
text = """C'est un lapin, lapin de bois.
>>Quoi?
Un cadeau.
>>What?
A present.
>>Oh, un cadeau"""
expected_text = """>>C'est un lapin, lapin de bois.
>>>>Quoi?
>>Un cadeau.
>>>>What?
>>A present.
>>>>Oh, un cadeau
"""
res = webmessage_mailutils.email_quote_txt(text,
indent_txt='>>',
linebreak_input="\n",
linebreak_output="\n")
self.assertEqual(res, expected_text)
def test_indenting_rule_message(self):
"""webmessage - return email-like indenting rule"""
text = """>>Brave Sir Robin ran away...
<img src="malicious_script"/>*No!*
>>bravely ran away away...
I didn't!*<script>malicious code</script>
>>When danger reared its ugly head, he bravely turned his tail and fled.
<form onload="malicious"></form>*I never did!*
"""
expected_text = """>>Brave Sir Robin ran away...
<img src="malicious_script" />*No!*
>>bravely ran away away...
I didn't!*<script>malicious code</script>
>>When danger reared its ugly head, he bravely turned his tail and fled.
<form onload="malicious"></form>*I never did!*
"""
res = webmessage_mailutils.escape_email_quoted_text(text,
indent_txt='>>',
linebreak_txt='\n')
self.assertEqual(res, expected_text)
TEST_SUITE = make_test_suite(TestQuotingMessage)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| gpl-2.0 | -913,659,442,418,672,500 | 37.216783 | 122 | 0.563403 | false |
ccn-2m/django | django/contrib/localflavor/be/forms.py | 194 | 2910 | """
Belgium-specific Form helpers
"""
from __future__ import absolute_import
from django.contrib.localflavor.be.be_provinces import PROVINCE_CHOICES
from django.contrib.localflavor.be.be_regions import REGION_CHOICES
from django.forms.fields import RegexField, Select
from django.utils.translation import ugettext_lazy as _
class BEPostalCodeField(RegexField):
"""
A form field that validates its input as a belgium postal code.
Belgium postal code is a 4 digits string. The first digit indicates
the province (except for the 3ddd numbers that are shared by the
eastern part of Flemish Brabant and Limburg and the and 1ddd that
are shared by the Brussels Capital Region, the western part of
Flemish Brabant and Walloon Brabant)
"""
default_error_messages = {
'invalid': _(
'Enter a valid postal code in the range and format 1XXX - 9XXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(BEPostalCodeField, self).__init__(r'^[1-9]\d{3}$',
max_length, min_length, *args, **kwargs)
class BEPhoneNumberField(RegexField):
"""
    A form field that validates its input as a Belgian phone number.
Landlines have a seven-digit subscriber number and a one-digit area code,
while smaller cities have a six-digit subscriber number and a two-digit
area code. Cell phones have a six-digit subscriber number and a two-digit
    area code preceded by the number 4.
0d ddd dd dd, 0d/ddd.dd.dd, 0d.ddd.dd.dd,
0dddddddd - dialling a bigger city
0dd dd dd dd, 0dd/dd.dd.dd, 0dd.dd.dd.dd,
0dddddddd - dialling a smaller city
04dd ddd dd dd, 04dd/ddd.dd.dd,
04dd.ddd.dd.dd, 04ddddddddd - dialling a mobile number
"""
default_error_messages = {
'invalid': _('Enter a valid phone number in one of the formats '
'0x xxx xx xx, 0xx xx xx xx, 04xx xx xx xx, '
'0x/xxx.xx.xx, 0xx/xx.xx.xx, 04xx/xx.xx.xx, '
'0x.xxx.xx.xx, 0xx.xx.xx.xx, 04xx.xx.xx.xx, '
'0xxxxxxxx or 04xxxxxxxx.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(BEPhoneNumberField, self).__init__(r'^[0]\d{1}[/. ]?\d{3}[. ]\d{2}[. ]?\d{2}$|^[0]\d{2}[/. ]?\d{2}[. ]?\d{2}[. ]?\d{2}$|^[0][4]\d{2}[/. ]?\d{2}[. ]?\d{2}[. ]?\d{2}$',
max_length, min_length, *args, **kwargs)
class BERegionSelect(Select):
"""
    A Select widget that uses a list of Belgian regions as its choices.
"""
def __init__(self, attrs=None):
super(BERegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class BEProvinceSelect(Select):
"""
    A Select widget that uses a list of Belgian provinces as its choices.
"""
def __init__(self, attrs=None):
super(BEProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
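# Editor's note: illustrative usage sketch, not part of Django's localflavor
# package. It only exercises the fields defined above; the sample postal code
# and phone number are made-up example values.
def _example_be_validation():
    postal = BEPostalCodeField().clean('1050')           # accepts 1XXX - 9XXX
    phone = BEPhoneNumberField().clean('02 123 45 67')   # one accepted format
    return postal, phone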
| bsd-3-clause | 5,681,554,138,726,975,000 | 39.985915 | 180 | 0.639175 | false |
alessandro-aglietti/rosdep | src/rosdep2/catkin_support.py | 7 | 4024 | """
Helper routines for catkin. These are distributed inside of rosdep2
to protect catkin against future rosdep2 API updates. These helper
routines are assumed to run in an interactive mode with an end-user
and thus return end-user oriented error messages.
Errors are returned as arguments to raised :exc:`ValidationFailed`
exceptions.
Workflow::
installer = get_installer(APT_INSTALLER)
view = get_catkin_view(rosdistro_name, 'ubuntu', 'lucid')
resolve_for_os(rosdep_key, view, installer, 'ubuntu', 'lucid')
"""
from __future__ import print_function
import os
from subprocess import Popen, PIPE, CalledProcessError
from . import create_default_installer_context
from .lookup import RosdepLookup
from .platforms.debian import APT_INSTALLER
from .platforms.osx import BREW_INSTALLER
from .platforms.pip import PIP_INSTALLER
from .platforms.redhat import YUM_INSTALLER
from .rep3 import download_targets_data
from .rosdistrohelper import get_targets
from .rospkg_loader import DEFAULT_VIEW_KEY
from .sources_list import get_sources_list_dir, DataSourceMatcher, SourcesListLoader
class ValidationFailed(Exception):
pass
def call(command, pipe=None):
"""
Copy of call() function from catkin-generate-debian to mimic output
"""
working_dir = '.'
#print('+ cd %s && ' % working_dir + ' '.join(command))
process = Popen(command, stdout=pipe, stderr=pipe, cwd=working_dir)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
raise CalledProcessError(retcode, command)
if pipe:
return output
def get_ubuntu_targets(rosdistro):
"""
Get a list of Ubuntu distro codenames for the specified ROS
distribution. This method blocks on an HTTP download.
:raises: :exc:`ValidationFailed`
"""
targets_data = get_targets()
legacy_targets = download_targets_data()
if 'fuerte' in legacy_targets:
targets_data['fuerte'] = {'ubuntu': legacy_targets['fuerte']}
if 'electric' in legacy_targets:
targets_data['electric'] = {'ubuntu': legacy_targets['electric']}
return targets_data[rosdistro]['ubuntu']
def get_installer(installer_name):
""" Expected installers APT_INSTALLER, YUM_INSTALLER, ..."""
installer_context = create_default_installer_context()
return installer_context.get_installer(installer_name)
def resolve_for_os(rosdep_key, view, installer, os_name, os_version):
"""
Resolve rosdep key to dependencies.
:param os_name: OS name, e.g. 'ubuntu'
:raises: :exc:`rosdep2.ResolutionError`
"""
d = view.lookup(rosdep_key)
ctx = create_default_installer_context()
os_installers = ctx.get_os_installer_keys(os_name)
default_os_installer = ctx.get_default_os_installer_key(os_name)
inst_key, rule = d.get_rule_for_platform(os_name, os_version, os_installers, default_os_installer)
assert inst_key in os_installers
return installer.resolve(rule)
def update_rosdep():
call(('rosdep', 'update'), pipe=PIPE)
def get_catkin_view(rosdistro_name, os_name, os_version, update=True):
"""
:raises: :exc:`ValidationFailed`
"""
sources_list_dir = get_sources_list_dir()
if not os.path.exists(sources_list_dir):
raise ValidationFailed("""rosdep database is not initialized, please run:
\tsudo rosdep init
""")
if update:
update_rosdep()
sources_matcher = DataSourceMatcher([rosdistro_name, os_name, os_version])
sources_loader = SourcesListLoader.create_default(matcher=sources_matcher)
if not (sources_loader.sources):
raise ValidationFailed("""rosdep database does not have any sources.
Please make sure you have a valid configuration in:
\t%s
"""%(sources_list_dir))
# for vestigial reasons, using the roskg loader, but we're only
# actually using the backend db as resolution is not resource-name based
lookup = RosdepLookup.create_from_rospkg(sources_loader=sources_loader)
return lookup.get_rosdep_view(DEFAULT_VIEW_KEY)
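# Editor's note: illustrative sketch, not part of the original module. It just
# chains the workflow from the module docstring; 'groovy', 'ubuntu'/'precise'
# and the 'boost' key are example inputs only.
def _example_resolve(rosdep_key='boost'):
    installer = get_installer(APT_INSTALLER)
    view = get_catkin_view('groovy', 'ubuntu', 'precise', update=False)
    return resolve_for_os(rosdep_key, view, installer, 'ubuntu', 'precise')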
| bsd-3-clause | -526,288,577,649,642,560 | 33.101695 | 102 | 0.713221 | false |
mammique/django | django/template/loaders/filesystem.py | 225 | 1851 | """
Wrapper for loading templates from the filesystem.
"""
from django.conf import settings
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.utils._os import safe_join
class Loader(BaseLoader):
is_usable = True
def get_template_sources(self, template_name, template_dirs=None):
"""
Returns the absolute paths to "template_name", when appended to each
directory in "template_dirs". Any paths that don't lie inside one of the
template dirs are excluded from the result set, for security reasons.
"""
if not template_dirs:
template_dirs = settings.TEMPLATE_DIRS
for template_dir in template_dirs:
try:
yield safe_join(template_dir, template_name)
except UnicodeDecodeError:
# The template dir name was a bytestring that wasn't valid UTF-8.
raise
except ValueError:
# The joined path was located outside of this particular
# template_dir (it might be inside another one, so this isn't
# fatal).
pass
def load_template_source(self, template_name, template_dirs=None):
tried = []
for filepath in self.get_template_sources(template_name, template_dirs):
try:
with open(filepath, 'rb') as fp:
return (fp.read().decode(settings.FILE_CHARSET), filepath)
except IOError:
tried.append(filepath)
if tried:
error_msg = "Tried %s" % tried
else:
error_msg = "Your TEMPLATE_DIRS setting is empty. Change it to point to at least one template directory."
raise TemplateDoesNotExist(error_msg)
load_template_source.is_usable = True
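# Editor's note: illustrative sketch, not part of Django. It shows the two
# entry points of the loader above; the template name is an arbitrary example
# and resolution depends on the TEMPLATE_DIRS setting.
def _example_lookup(template_name='index.html'):
    loader = Loader()
    candidates = list(loader.get_template_sources(template_name))
    source, origin = loader.load_template_source(template_name)
    return candidates, origin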
| bsd-3-clause | -1,620,274,751,021,421,300 | 39.23913 | 117 | 0.621286 | false |
AndroidOpenDevelopment/android_external_chromium_org | tools/idl_parser/idl_lexer_test.py | 116 | 2758 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from idl_lexer import IDLLexer
from idl_ppapi_lexer import IDLPPAPILexer
#
# FileToTokens
#
# From a source file generate a list of tokens.
#
def FileToTokens(lexer, filename):
with open(filename, 'rb') as srcfile:
lexer.Tokenize(srcfile.read(), filename)
return lexer.GetTokens()
#
# TextToTokens
#
# From a string of source text, generate a list of tokens.
#
def TextToTokens(lexer, text):
lexer.Tokenize(text)
return lexer.GetTokens()
class WebIDLLexer(unittest.TestCase):
def setUp(self):
self.lexer = IDLLexer()
self.filenames = [
'test_lexer/values.in',
'test_lexer/keywords.in'
]
#
# testRebuildText
#
  # From a set of tokens, generate a new source text by joining token values
  # with newlines. The new source is then tokenized and compared against the
# old set.
#
def testRebuildText(self):
for filename in self.filenames:
tokens1 = FileToTokens(self.lexer, filename)
to_text = '\n'.join(['%s' % t.value for t in tokens1])
tokens2 = TextToTokens(self.lexer, to_text)
count1 = len(tokens1)
count2 = len(tokens2)
self.assertEqual(count1, count2)
for i in range(count1):
msg = 'Value %s does not match original %s on line %d of %s.' % (
tokens2[i].value, tokens1[i].value, tokens1[i].lineno, filename)
self.assertEqual(tokens1[i].value, tokens2[i].value, msg)
#
# testExpectedType
#
# From a set of tokens pairs, verify the type field of the second matches
# the value of the first, so that:
# integer 123 float 1.1 ...
# will generate a passing test, when the first token has both the type and
# value of the keyword integer and the second has the type of integer and
# value of 123 and so on.
#
def testExpectedType(self):
for filename in self.filenames:
tokens = FileToTokens(self.lexer, filename)
count = len(tokens)
self.assertTrue(count > 0)
self.assertFalse(count & 1)
index = 0
while index < count:
expect_type = tokens[index].value
actual_type = tokens[index + 1].type
msg = 'Type %s does not match expected %s on line %d of %s.' % (
actual_type, expect_type, tokens[index].lineno, filename)
index += 2
self.assertEqual(expect_type, actual_type, msg)
class PepperIDLLexer(WebIDLLexer):
def setUp(self):
self.lexer = IDLPPAPILexer()
self.filenames = [
'test_lexer/values_ppapi.in',
'test_lexer/keywords_ppapi.in'
]
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -7,567,070,696,298,513,000 | 26.858586 | 78 | 0.656998 | false |
rversteegen/commandergenius | project/jni/python/src/Lib/lib2to3/pgen2/tokenize.py | 52 | 16184 | # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
__author__ = 'Ka-Ping Yee <[email protected]>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from lib2to3.pgen2.token import *
from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
"generate_tokens", "untokenize"]
del token
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?", r"->",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = map(
re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None,
'u': None, 'U': None,
'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"b'''", 'b"""', "B'''", 'B"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""',):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"b'", 'b"', "B'", 'B"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"', ):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
print "%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
def add_whitespace(self, start):
row, col = start
assert row <= self.prev_row
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
for t in iterable:
if len(t) == 2:
self.compat(t, iterable)
break
tok_type, token, start, end, line = t
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
startline = False
indents = []
toks_append = self.tokens.append
toknum, tokval = token
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
for tok in iterable:
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
def generate_tokens(readline):
"""
The generate_tokens() generator requires one argment, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError, ("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError, ("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
if parenlev > 0:
newline = NL
yield (newline, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
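# Editor's note: illustrative sketch, not part of the original module. It shows
# the documented round trip: generate_tokens() turns a readline callable into
# 5-tuples, and untokenize() rebuilds comparable source from (type, value) pairs.
def _example_roundtrip(source):
    from StringIO import StringIO   # Python 2, matching the rest of this module
    pairs = [tok[:2] for tok in generate_tokens(StringIO(source).readline)]
    return untokenize(pairs)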
| lgpl-2.1 | 5,145,212,820,851,544,000 | 38.960494 | 79 | 0.50655 | false |
gioman/QGIS | python/plugins/processing/algs/examplescripts/ProcessingExampleScriptsPlugin.py | 9 | 1441 | # -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : May 2016
Copyright : (C) 2016 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import object
__author__ = 'Victor Olaya'
__date__ = 'May 2016'
__copyright__ = '(C) 2016, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from processing.core.Processing import Processing
class ProcessingExampleScriptsPlugin(object):
def initGui(self):
Processing.addScripts(os.path.join(os.path.dirname(__file__), "scripts"))
def unload(self):
Processing.removeScripts(os.path.join(os.path.dirname(__file__), "scripts"))
| gpl-2.0 | 3,228,148,530,997,548,000 | 35.025 | 84 | 0.447606 | false |
salomanders/NbodyPythonTools | nbdpt/nchiladareader.py | 1 | 4326 | import numpy as np
import glob
import struct
import pdb
import re
class Nchilada(object):
def __init__(self, filename):
self.codedict = {1: 'int8',
2: 'uint8',
3: 'int16',
4: 'uint16',
5: 'int32',
6: 'uint32',
7: 'int64',
8: 'uint64',
9: 'float32',
10: 'float64'}
self.fmt_codedict = {1: 'h',
2: 'H',
3: 'i',
4: 'I',
5: 'l',
6: 'L',
7: 'q',
8: 'Q',
9: 'f',
10: 'd'}
        self.codedictlen = {1: 1}
self.filename = filename
def read_param(self):
try:
paramfilename = [f for f in glob.glob('*.param') if re.match('^(cosmo|h)', f)][0]
except IndexError:
try:
paramfilename = [f for f in glob.glob('../*.param') if re.match('^(cosmo|h)', f)][0]
print 'There is no param file in this directory, trying one up'
except IndexError:
print "Can't find param file"
return
f = open(paramfilename, 'rb')
paramfile = {}
for line in f:
try:
if line[0] != '#':
s = line.split('#')[0].split()
paramfile[s[0]] = "".join(s[2:])
            except (IndexError, ValueError):
pass
self.paramfile = paramfile
dKpcUnit = np.float(paramfile['dKpcUnit'])
dMsolUnit = np.float(paramfile['dMsolUnit'])
self.timeunit=np.sqrt((dKpcUnit*3.086e21)**3/
(6.67e-8*dMsolUnit*1.99e33)
)/(3600.*24.*365.24*1e9)
try: hub = np.float(paramfile['dHubble0'])
except KeyError: hub=0.
dunit = np.float(paramfile['dKpcUnit'])
munit = np.float(paramfile['dMsolUnit'])
denunit = munit/dunit**3.
self.velunit = 8.0285*np.sqrt(6.6743e-8*denunit)*dunit
hubunit = 10.*self.velunit/dunit
self.h = hub*hubunit
f.close()
def unpack_header(self, family, file):
f = open(self.filename+'/'+family+'/'+file)
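        # Header layout ('>idiiii', 28 bytes): magic (int), time (double),
        # iHighWord, nbodies, ndim and the data-type code, big-endian unless
        # the byte-swapped retry below succeeds instead.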
(magic, time, iHighWord, nbodies, ndim, code) = struct.unpack('>idiiii', f.read(28))
if (ndim < 1) or (ndim > 3):
f.seek(0)
(magic, time, iHighWord, nbodies, ndim, code) = struct.unpack('<idiiii', f.read(28))
self.byte_swap = True
f.close()
return(time, nbodies, ndim, code)
def unpack_file(self, family, file):
time, nbodies, ndim, code = self.unpack_header(family, file)
self.time = time
self.nbodies = nbodies
self.ndim = ndim
f = open(self.filename+'/'+family+'/'+file)
f.seek(28)
minvalue = struct.unpack('>'+self.fmt_codedict[code],f.read(4))[0]
maxvalue = struct.unpack('>'+self.fmt_codedict[code],f.read(4))[0]
dtype = np.dtype({'names':('array',),'formats':('>'+self.fmt_codedict[code],)})
ar = np.core.records.fromfile(f, dtype=dtype, shape=self.nbodies)
return ar['array']
def minvalue(self, family, file):
time, nbodies, ndim, code = self.unpack_header(family, file)
self.time = time
self.nbodies = nbodies
self.ndim = ndim
f = open(self.filename+'/'+family+'/'+file)
f.seek(28)
minvalue = struct.unpack('>'+self.fmt_codedict[code],f.read(4))[0]
maxvalue = struct.unpack('>'+self.fmt_codedict[code],f.read(4))[0]
return minvalue
def maxvalue(self, family, file):
time, nbodies, ndim, code = self.unpack_header(family, file)
self.time = time
self.nbodies = nbodies
self.ndim = ndim
f = open(self.filename+'/'+family+'/'+file)
f.seek(28)
minvalue = struct.unpack('>'+self.fmt_codedict[code],f.read(4))[0]
maxvalue = struct.unpack('>'+self.fmt_codedict[code],f.read(4))[0]
return maxvalue
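# Editor's note: illustrative sketch, not part of the original module. It shows
# the intended call pattern for the reader above; the snapshot name and the
# 'gas'/'mass' family/field names are hypothetical examples.
def _example_usage():
    sim = Nchilada('snapshot.000128')
    sim.read_param()                        # expects a *.param file nearby
    mass = sim.unpack_file('gas', 'mass')   # one field as a numpy array
    return mass, sim.minvalue('gas', 'mass'), sim.maxvalue('gas', 'mass')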
| mit | 9,126,079,551,243,223,000 | 36.617391 | 100 | 0.473648 | false |