code | repo_name | path | language | license | size
---|---|---|---|---|---|
import mimetypes
import posixpath
import urllib
import os
from operator import concat
from django.http import HttpResponse
from django.views.static import serve as django_serve
from django_bundles.core import get_bundles
from django_bundles.processors import processor_pipeline
from django_bundles.utils.files import FileChunkGenerator
from django.conf import settings
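# Module-level cache: maps the real path of every bundle member file to its bundle
# file object plus, filled in lazily on first request, the processed contents and
# guessed mimetype.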
file_cache = {}
def get_file(path):
global file_cache
if not file_cache:
for bundle in get_bundles():
for bundle_file in bundle.files:
file_cache[os.path.realpath(bundle_file.file_path)] = {
'bundle_file': bundle_file,
'cache': None
}
if path in file_cache:
if not file_cache[path]['cache']:
mimetype, encoding = mimetypes.guess_type(path)
mimetype = mimetype or 'application/octet-stream'
# TODO: less files need to change the way they are rendered in the template
print "Generating", path
file_cache[path]['cache'] = {
'contents': reduce(concat, (chunk for chunk in processor_pipeline(file_cache[path]['bundle_file'].processors, FileChunkGenerator(open(file_cache[path]['bundle_file'].file_path, 'rb'))))),
'mimetype': mimetype,
}
return file_cache[path]['cache']
return None
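# Development stand-in for django.views.static.serve: when USE_BUNDLES is off, requests
# for files that belong to a bundle are answered with their individually processed
# contents (run through the bundle's processor pipeline); anything else falls back to
# Django's stock static serve.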
def serve(request, path, document_root=None, show_indexes=False):
if not settings.USE_BUNDLES:
path = posixpath.normpath(urllib.unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
fullpath = os.path.join(document_root, newpath)
cached = get_file(fullpath)
if cached:
return HttpResponse(cached['contents'], content_type=cached['mimetype'])
return django_serve(request, path, document_root=document_root, show_indexes=show_indexes)
| sdcooke/django_bundles | django_bundles/views.py | Python | mit | 2,308 |
#!/usr/bin/env python
#
# entropy_plot.py
#
import matplotlib.pyplot as plt
import numpy as np
# import csv
fig = plt.figure()
# Reading data
redent = (np.loadtxt('RED-ENTROPY.dat')).T
cpent = (np.loadtxt( 'CP-ENTROPY.dat')).T
redprod = (np.loadtxt('RED-ENTROPY-PROD.dat')).T
cpprod = (np.loadtxt('CP-ENTROPY-PROD.dat')).T
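# Each .dat file is transposed so that, assuming two-column (time, value) data, row 0
# holds the time axis and row 1 the entropy / entropy-production values plotted below.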
#
# Subplot n.1 : Entropy evolution
#
plt.subplot(211)
plt.title('Entropy time-evolution', fontsize=40)
# Setup labels
plt.xlabel('$t \Delta$', fontsize=40)
plt.ylabel('$S(t)$', rotation='horizontal', fontsize=40)
#
# Text box
#
plt.text(100, 0.1, '$\Omega/\Delta=2$\n$\kappa_B T/\hbar \Delta=0.1$\n$\\alpha=0.005$\n$\\rho_0=\\vert z;-\\rangle$', bbox={'facecolor':'white'})
#plt.text(250, 0.5, '$\Omega/\Delta=2$\n$\kappa_B T/\hbar \Delta=0.1$\n$\\alpha=0.005$\n$\\rho_0=\{1, 0, 0.5, -0.4\}$', bbox={'facecolor':'white'})
# Plotting
rfig = plt.plot(redent[0], redent[1], color='red',
label='Redfield dynamics entropy')
cfig = plt.plot(cpent[0], cpent[1], color='blue',
label='Completely positive dynamics entropy')
# Maximum entropy
maxent = np.log(2.0)*np.ones_like(redent[0])
plt.plot(redent[0], maxent)
plt.grid(True)
plt.legend(('Redfield dynamics entropy',
'Completely positive dynamics entropy'), loc='upper left',
bbox_to_anchor=(0.2, 0.95))
#ax = plt.twinx()
#
# Subplot n.2 : Entropy production
#
plt.subplot(212)
plt.title('Internal entropy production')
plt.xlabel('$t \Delta$')
plt.ylabel('$\sigma(t)$', rotation='horizontal')
rpfig = plt.plot(redprod[0], redprod[1], 'y-', label='Redfield entropy prod.')
cpfig = plt.plot(cpprod[0], cpprod[1], 'c-', label='Completely posit. entropy prod.')
plt.grid(True)
plt.legend(('Redfield entropy prod.', 'Completely posit. entropy prod.'),
loc='upper left', bbox_to_anchor=(0.2, 0.8))
plt.show()
| j-silver/quantum_dots | entropy_plot.py | Python | bsd-2-clause | 1,809 |
# -*- coding: utf-8 -*-
"""
@brief test log(time=92s)
"""
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, add_missing_development_version
import ensae_teaching_cs
class TestNotebookRunner1a_soft_sql(unittest.TestCase):
def setUp(self):
add_missing_development_version(["pymyinstall", "pyensae", "pymmails", "jyquickhelper", "mlstatpy"],
__file__, hide=True)
def test_notebook_runner_soft_sql(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_notebook1a_soft_sql")
from ensae_teaching_cs.automation.notebook_test_helper import ls_notebooks, execute_notebooks, clean_function_1a
keepnote = ls_notebooks("td1a_soft")
for n in keepnote:
fLOG(n)
execute_notebooks(temp, keepnote,
lambda i, n: "csharp" not in n and "cython" not in n,
fLOG=fLOG,
clean_function=clean_function_1a,
dump=ensae_teaching_cs)
if __name__ == "__main__":
unittest.main()
| sdpython/ensae_teaching_cs | _unittests/ut_dnotebooks/test_1A_notebook_soft_sql.py | Python | mit | 1,241 |
#PBS -N UnBalance
#PBS -m ae
#PBS -q long
#PBS -l nodes=1:opteron:ppn=2
"""Test handling of extreme load-unbalancing."""
from asap3 import *
from asap3.md import MDLogger
from ase.lattice.cubic import FaceCenteredCubic
import numpy as np
from asap3.mpi import world
#DebugOutput("UnBalance.%d.out")
#set_verbose(1)
print_version(1)
fast = False
#AsapThreads()
cpulayout = (1,1,2)
element = 'Pt'
size = (20,20,100)
master = world.rank == 0
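# Only the MPI master rank builds the initial slab and tags the single atom that is
# later displaced; MakeParallelAtoms then distributes the system over the requested
# CPU layout for the load-balancing test.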
if master:
atoms = FaceCenteredCubic(symbol=element, size=size, pbc=(True, True, False))
atoms.center(vacuum=10.0, axis=2)
atoms.set_momenta(np.zeros((len(atoms),3)))
# Select an atom to get a kick
r = atoms.get_positions()
uc = atoms.get_cell()
x = r[:,0] - 0.5 * uc[0,0]
y = r[:,1] - 0.5 * uc[1,1]
z = r[:,2]
zprime = z - 0.01 * (x * x + y * y)
n = np.argmax(zprime)
#a = atoms[n]
#dp = np.sqrt(2 * a.mass * 1000.0)
#a.momentum = np.array([0, 0, dp])
t = np.zeros(len(atoms), int)
t[n] = 1
atoms.set_tags(t)
else:
atoms = None
atoms = MakeParallelAtoms(atoms, cpulayout)
print len(atoms), atoms.get_number_of_atoms()
atoms.set_calculator(EMT())
traj = PickleTrajectory("UnBalance.traj", "w", atoms)
if fast:
atoms.get_forces()
traj.write()
for i in range(50):
print "\n\n\n\n*** STEP %i ***\n\n\n\n\n" % (i,)
r = atoms.get_positions()
r += atoms.get_tags().reshape((-1,1)) * np.array([[0, 0, 20.0],])
atoms.set_positions(r)
atoms.get_forces()
traj.write()
else:
dyn = VelocityVerlet(atoms, 5*units.fs)
logger = MDLogger(dyn, atoms, 'UnBalance.log', stress=True, peratom=True)
dyn.attach(logger, interval=10)
dyn.attach(traj, interval=100)
dyn.run(10000)
| auag92/n2dm | Asap-3.8.4/Debug/UnBalance.py | Python | mit | 1,763 |
# Django settings for testproject
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
#BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ae0g37((kp655_*4wbtj999azzo)xw6)9*mv3n@&e6k&7&*#z3'
# SECURITY WARNING: don't run with debug turned on in production!
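# Note: os.environ.get returns a string here, and bool() of any non-empty string is
# True, so even DJANGO_DEBUG=False in the environment enables debug mode.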
DEBUG = bool(os.environ.get("DJANGO_DEBUG", False))
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'testproject.admin',
'aws_manager',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'testproject.urls'
WSGI_APPLICATION = 'testproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.path.dirname(os.path.dirname(__file__)), 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| jromich/django-aws-manager | testproject/settings.py | Python | mit | 1,753 |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from frappe.utils import cstr, cint
from frappe.model.document import Document
from erpnext.healthcare.page.patient_history.patient_history import get_patient_history_doctypes
class PatientHistorySettings(Document):
def validate(self):
self.validate_submittable_doctypes()
self.validate_date_fieldnames()
def validate_submittable_doctypes(self):
for entry in self.custom_doctypes:
if not cint(frappe.db.get_value('DocType', entry.document_type, 'is_submittable')):
msg = _('Row #{0}: Document Type {1} is not submittable. ').format(
entry.idx, frappe.bold(entry.document_type))
msg += _('Patient Medical Record can only be created for submittable document types.')
frappe.throw(msg)
def validate_date_fieldnames(self):
for entry in self.custom_doctypes:
field = frappe.get_meta(entry.document_type).get_field(entry.date_fieldname)
if not field:
frappe.throw(_('Row #{0}: No such Field named {1} found in the Document Type {2}.').format(
entry.idx, frappe.bold(entry.date_fieldname), frappe.bold(entry.document_type)))
if field.fieldtype not in ['Date', 'Datetime']:
frappe.throw(_('Row #{0}: Field {1} in Document Type {2} is not a Date / Datetime field.').format(
entry.idx, frappe.bold(entry.date_fieldname), frappe.bold(entry.document_type)))
def get_doctype_fields(self, document_type, fields):
multicheck_fields = []
doc_fields = frappe.get_meta(document_type).fields
for field in doc_fields:
if field.fieldtype not in frappe.model.no_value_fields or \
field.fieldtype in frappe.model.table_fields and not field.hidden:
multicheck_fields.append({
'label': field.label,
'value': field.fieldname,
'checked': 1 if field.fieldname in fields else 0
})
return multicheck_fields
def get_date_field_for_dt(self, document_type):
meta = frappe.get_meta(document_type)
date_fields = meta.get('fields', {
'fieldtype': ['in', ['Date', 'Datetime']]
})
if date_fields:
return date_fields[0].get('fieldname')
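# The module-level functions below act as document-event handlers (presumably wired up
# through the app's hooks): they create, update or delete the linked Patient Medical
# Record whenever a configured healthcare document is submitted, amended or cancelled.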
def create_medical_record(doc, method=None):
medical_record_required = validate_medical_record_required(doc)
if not medical_record_required:
return
if frappe.db.exists('Patient Medical Record', { 'reference_name': doc.name }):
return
subject = set_subject_field(doc)
date_field = get_date_field(doc.doctype)
medical_record = frappe.new_doc('Patient Medical Record')
medical_record.patient = doc.patient
medical_record.subject = subject
medical_record.status = 'Open'
medical_record.communication_date = doc.get(date_field)
medical_record.reference_doctype = doc.doctype
medical_record.reference_name = doc.name
medical_record.reference_owner = doc.owner
medical_record.save(ignore_permissions=True)
def update_medical_record(doc, method=None):
medical_record_required = validate_medical_record_required(doc)
if not medical_record_required:
return
medical_record_id = frappe.db.exists('Patient Medical Record', { 'reference_name': doc.name })
if medical_record_id:
subject = set_subject_field(doc)
frappe.db.set_value('Patient Medical Record', medical_record_id[0][0], 'subject', subject)
else:
create_medical_record(doc)
def delete_medical_record(doc, method=None):
medical_record_required = validate_medical_record_required(doc)
if not medical_record_required:
return
record = frappe.db.exists('Patient Medical Record', { 'reference_name': doc.name })
if record:
frappe.delete_doc('Patient Medical Record', record, force=1)
def set_subject_field(doc):
from frappe.utils.formatters import format_value
meta = frappe.get_meta(doc.doctype)
subject = ''
patient_history_fields = get_patient_history_fields(doc)
for entry in patient_history_fields:
fieldname = entry.get('fieldname')
if entry.get('fieldtype') == 'Table' and doc.get(fieldname):
formatted_value = get_formatted_value_for_table_field(doc.get(fieldname), meta.get_field(fieldname))
subject += frappe.bold(_(entry.get('label')) + ': ') + '<br>' + cstr(formatted_value) + '<br>'
else:
if doc.get(fieldname):
formatted_value = format_value(doc.get(fieldname), meta.get_field(fieldname), doc)
subject += frappe.bold(_(entry.get('label')) + ': ') + cstr(formatted_value) + '<br>'
return subject
def get_date_field(doctype):
dt = get_patient_history_config_dt(doctype)
return frappe.db.get_value(dt, { 'document_type': doctype }, 'date_fieldname')
def get_patient_history_fields(doc):
dt = get_patient_history_config_dt(doc.doctype)
patient_history_fields = frappe.db.get_value(dt, { 'document_type': doc.doctype }, 'selected_fields')
if patient_history_fields:
return json.loads(patient_history_fields)
def get_formatted_value_for_table_field(items, df):
child_meta = frappe.get_meta(df.options)
table_head = ''
table_row = ''
html = ''
create_head = True
for item in items:
table_row += '<tr>'
for cdf in child_meta.fields:
if cdf.in_list_view:
if create_head:
table_head += '<td>' + cdf.label + '</td>'
if item.get(cdf.fieldname):
table_row += '<td>' + str(item.get(cdf.fieldname)) + '</td>'
else:
table_row += '<td></td>'
create_head = False
table_row += '</tr>'
html += "<table class='table table-condensed table-bordered'>" + table_head + table_row + "</table>"
return html
def get_patient_history_config_dt(doctype):
if frappe.db.get_value('DocType', doctype, 'custom'):
return 'Patient History Custom Document Type'
else:
return 'Patient History Standard Document Type'
def validate_medical_record_required(doc):
if frappe.flags.in_patch or frappe.flags.in_install or frappe.flags.in_setup_wizard \
or get_module(doc) != 'Healthcare':
return False
if doc.doctype not in get_patient_history_doctypes():
return False
return True
def get_module(doc):
module = doc.meta.module
if not module:
module = frappe.db.get_value('DocType', doc.doctype, 'module')
return module | saurabh6790/erpnext | erpnext/healthcare/doctype/patient_history_settings/patient_history_settings.py | Python | gpl-3.0 | 6,153 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Merges 3 CSV files into 1.
The first two columns identify the records.
First file: numerators
Columns: user, template, nc, na
Second file: denominators
Columns: user, template, denominator
Third file: strengths
Columns: user, template, strength
**Example of usage**
``python3 -m barbante.scripts.merge_user_user_collections num.csv denom.csv strengths.csv output_file.csv``
**Output**
It saves a CSV file with the following columns: user, template, nc, na, denominator, strength.
"""
import json
import sys
import traceback
from time import time
import barbante.utils.logging as barbante_logging
log = barbante_logging.get_logger(__name__)
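# Illustrative merge of hypothetical rows keyed by (user, template_user):
# numerators: u1,u2,3,5 denominators: u1,u2,10 strengths: u1,u2,0.3
# merged output row: u1,u2,3,5,10,0.3 (parts missing from an input file are left empty)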
def merge_collections(numerators_file, denominators_file, strengths_file, output_file):
log.info("----------")
log.info("Start.")
start = time()
f_numerators = open(numerators_file, 'rU')
f_denominators = open(denominators_file, 'rU')
f_strengths = open(strengths_file, 'rU')
# skips the headers
next(f_numerators)
next(f_denominators)
next(f_strengths)
f_output = open(output_file, 'w')
f_output.write("user,template_user,nc,na,denominator,strength\n")
numerator_key, nc, na = yield_numerator(f_numerators)
denominator_key, denominator = yield_denominator(f_denominators)
strength_key, strength = yield_strength(f_strengths)
done = 0
while True:
keys = []
if numerator_key is not None:
keys += [numerator_key]
if denominator_key is not None:
keys += [denominator_key]
if strength_key is not None:
keys += [strength_key]
if len(keys) == 0:
break # exhausted all files
min_key = min(keys)
merged_doc = {"user": min_key[0],
"template_user": min_key[1]}
if numerator_key == min_key:
merged_doc["nc"] = nc
merged_doc["na"] = na
numerator_key, nc, na = yield_numerator(f_numerators)
else:
merged_doc["nc"] = ""
merged_doc["na"] = ""
if denominator_key == min_key:
merged_doc["denominator"] = denominator
denominator_key, denominator = yield_denominator(f_denominators)
else:
merged_doc["denominator"] = ""
if strength_key == min_key:
merged_doc["strength"] = strength
strength_key, strength = yield_strength(f_strengths)
else:
merged_doc["strength"] = ""
write_to_file(merged_doc, f_output)
done += 1
if done % 100000 == 0:
log.info("Done writing %d lines." % done)
f_numerators.close()
f_denominators.close()
f_strengths.close()
f_output.close()
log.info("End. Took %d seconds." % (time() - start))
def yield_numerator(numerators_handler):
try:
numerator_line = next(numerators_handler).split(",")
numerator_key = (numerator_line[0], numerator_line[1])
nc = int(numerator_line[2])
na = int(numerator_line[3])
except:
numerator_key, nc, na = None, None, None
return numerator_key, nc, na
def yield_denominator(denominators_handler):
try:
denominator_line = next(denominators_handler).split(",")
denominator_key = (denominator_line[0], denominator_line[1])
denominator = int(denominator_line[2])
except:
denominator_key, denominator = None, None
return denominator_key, denominator
def yield_strength(strengths_handler):
try:
strength_line = next(strengths_handler).split(",")
strength_key = (strength_line[0], strength_line[1])
strength = float(strength_line[2])
except:
strength_key, strength = None, None
return strength_key, strength
def write_to_file(document, output_handler):
line = ','.join([str(document["user"]),
str(document["template_user"]),
str(document["nc"]),
str(document["na"]),
str(document["denominator"]),
str(document["strength"])]) + '\n'
output_handler.write(line)
def main(argv):
if len(argv) < 4:
msg = "You must specify the numerators file, the denominators file, " \
"the strengths file and the output file."
log.error(msg)
return json.dumps({"success": False, "message": msg})
try:
# command-line arguments
numerators_file = argv[0]
denominators_file = argv[1]
strengths_file = argv[2]
output_file = argv[3]
merge_collections(numerators_file, denominators_file, strengths_file, output_file)
except Exception:
log.exception('Exception on {0}'.format(__name__))
return json.dumps({"success": False,
"message": traceback.format_exc()})
return_json = json.dumps({"success": True})
return return_json
if __name__ == '__main__':
print(main(sys.argv[1:]))
| hypermindr/barbante | barbante/scripts/merge_user_user_collections.py | Python | mit | 5,107 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import time
from openerp.report import report_sxw
from openerp import pooler
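# Report parser for the disability half-page RML template: the helpers registered in
# localcontext below format the patient's address, phone numbers, sex, age unit and
# diagnosis status/type for rendering.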
class doctor_disability(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(doctor_disability, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'select_type': self.select_type,
'select_age': self.select_age,
'select_diseases': self.select_diseases,
'select_diseases_type': self.select_diseases_type,
'return_street_home': self.return_street_home,
'return_number_phone': self.return_number_phone,
'return_sex': self.return_sex
})
def return_street_home(self, country, state, city):
street = ""
if country:
street += country.title() + " - "
if state:
street += state.title() + " - "
if city:
street += city.title() + " - "
return street[:len(street) -2]
def return_number_phone(self, phone, mobile):
return_phone = ""
if phone:
return_phone += phone + " - "
if mobile:
return_phone += mobile + " - "
return return_phone[:len(return_phone)-2]
def return_sex(self, sex):
if sex == 'm':
return "Masculino"
return "Femenino"
def select_type(self, tipo_usuario):
if tipo_usuario:
tipo = self.pool.get('doctor.tipousuario.regimen').browse(self.cr, self.uid, tipo_usuario).name
else:
tipo= None
return tipo
def select_age(self, age):
context = {}
context.update({'lang' : self.pool.get('res.users').browse(self.cr, self.uid, self.uid, context=context).lang})
attentions = self.pool.get('doctor.attentions')
age_unit = dict(attentions.fields_get(self.cr, self.uid, 'age_unit',context=context).get('age_unit').get('selection')).get(
str(age))
return age_unit
def select_diseases(self, status):
if status== 'presumptive':
return "Impresión Diagnóstica"
if status== 'confirm':
return "Confirmado"
if status== 'recurrent':
return "Recurrente"
return ""
def select_diseases_type(self, diseases_type):
if diseases_type== 'main':
return "Principal"
if diseases_type== 'related':
return "Relacionado"
return ""
report_sxw.report_sxw('report.doctor_disability_half', 'doctor.attentions',
'addons/l10n_co_doctor/report/doctor_disability_half.rml',
parser=doctor_disability, header=False)
| hivam/l10n_co_doctor | report/doctor_disability_half.py | Python | agpl-3.0 | 3,267 |
# -*- coding: utf-8 -*-
u"""
.. module:: offers
"""
from django.conf import settings
from django.contrib import messages
from django.contrib.admin.models import ADDITION, CHANGE
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseForbidden
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.text import slugify
from django.views.generic import View
from apps.volontulo.forms import (
CreateOfferForm, OfferApplyForm, OfferImageForm
)
from apps.volontulo.lib.email import send_mail
from apps.volontulo.models import Offer, OfferImage, UserProfile
from apps.volontulo.utils import correct_slug, save_history
from apps.volontulo.views import logged_as_admin
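# Class-based views covering the offer lifecycle: listing, creation, admin reordering,
# editing, acceptance/rejection, public preview, volunteer sign-up and the archive page.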
class OffersList(View):
u"""View that handle list of offers."""
@staticmethod
def get(request):
u"""List offers: active ones for volunteers, all of them for admins.
:param request: WSGIRequest instance
"""
if logged_as_admin(request):
offers = Offer.objects.all()
else:
offers = Offer.objects.get_active()
return render(request, "offers/offers_list.html", context={
'offers': offers,
})
@staticmethod
def post(request):
u"""Activate an offer when the status-change form is submitted.
:param request: WSGIRequest instance
"""
if (
request.POST.get('edit_type') == 'status_change' and
request.POST.get('offer_id')
):
offer = get_object_or_404(Offer, id=request.POST.get('offer_id'))
offer.publish()
messages.success(request,
u"Aktywowałeś ofertę '%s'" % offer.title)
return redirect('offers_list')
class OffersCreate(View):
u"""Class view supporting creation of new offer."""
@staticmethod
def get(request):
u"""Method responsible for rendering form for new offer.
:param request: WSGIRequest instance
"""
if request.user.userprofile.is_administrator:
messages.info(
request,
u"Administrator nie może tworzyć nowych ofert."
)
return redirect('offers_list')
organizations = request.user.userprofile.organizations.all()
if not organizations.exists():
messages.info(
request,
u"Nie masz jeszcze żadnej założonej organizacji"
u" na volontuloapp.org. Aby założyć organizację,"
u" <a href='{}'>kliknij tu.</a>".format(
reverse('organizations_create')
)
)
return redirect('offers_list')
return render(
request,
'offers/offer_form.html',
{
'offer': Offer(),
'form': CreateOfferForm(),
'organizations': organizations,
}
)
@staticmethod
def post(request):
u"""Method responsible for saving new offer.
:param request: WSGIRequest instance
"""
form = CreateOfferForm(request.POST)
if form.is_valid():
offer = form.save()
offer.create_new()
offer.save()
save_history(request, offer, action=ADDITION)
send_mail(
request,
'offer_creation',
['[email protected]'],
{'offer': offer}
)
messages.success(request, u"Dziękujemy za dodanie oferty.")
return redirect(
'offers_view',
slug=slugify(offer.title),
id_=offer.id,
)
messages.error(
request,
u"Formularz zawiera niepoprawnie wypełnione pola <br />{0}".format(
'<br />'.join(form.errors)),
)
return render(
request,
'offers/offer_form.html',
{
'form': form,
'offer': Offer(),
'organizations': request.user.userprofile.organizations.all(),
}
)
class OffersReorder(View):
u"""Class view supporting change of a offer."""
@staticmethod
def get(request, id_):
u"""Display offer list with weights GET request.
:param request: WSGIRequest instance
:param id_:
:return:
"""
offers = Offer.objects.get_weightened()
return render(request, 'offers/reorder.html', {
'offers': offers, 'id': id_})
@staticmethod
def post(request, id_):
u"""Save the new offer ordering (weights) submitted via POST.
:param request:
:param id_: Integer newly created offer id
:return:
"""
if request.POST.get('submit') == 'reorder':
items = [item
for item
in request.POST.items()
if item[0].startswith('weight_')]
weights = {id_.split('_')[1]: weight
for id_, weight in items}
for id_, weight in weights.items():
Offer.objects.filter(id=id_).update(weight=weight)
messages.success(
request,
u"Uporządkowano oferty."
)
return redirect('offers_list')
class OffersEdit(View):
u"""Class view supporting change of a offer."""
def dispatch(self, request, *args, **kwargs):
u"""Dispatch method overriden to check offer edit permission"""
try:
is_edit_allowed = request.user.userprofile.can_edit_offer(
offer_id=kwargs['id_'])
except Offer.DoesNotExist:
is_edit_allowed = False
if not is_edit_allowed:
raise Http404()
return super().dispatch(request, *args, **kwargs)
@staticmethod
@correct_slug(Offer, 'offers_edit', 'title')
def get(request, slug, id_): # pylint: disable=unused-argument
u"""Method responsible for rendering form for offer to be changed.
:param request: WSGIRequest instance
:param slug: string Offer title slugified
:param id_: int Offer database unique identifier (primary key)
"""
offer = Offer.objects.get(id=id_)
if offer.id or request.user.userprofile.is_administrator:
organizations = [offer.organization]
else:
organizations = request.user.userprofile.organizations.all()
return render(
request,
'offers/offer_form.html',
{
'offer': offer,
'offer_form': CreateOfferForm(),
'organization': offer.organization,
'organizations': organizations,
'offer_image_form': OfferImageForm(),
'images': OfferImage.objects.filter(offer=offer).all(),
'MEDIA_URL': settings.MEDIA_URL,
}
)
@staticmethod
def post(request, slug, id_): # pylint: disable=unused-argument
u"""Method responsible for saving the changed offer.
:param request: WSGIRequest instance
:param slug: string Offer title slugified
:param id_: int Offer database unique identifier (primary key)
"""
offer = Offer.objects.get(id=id_)
if request.POST.get('submit') == 'save_image' and request.FILES:
form = OfferImageForm(request.POST, request.FILES)
if form.is_valid():
offer.save_offer_image(
form.save(commit=False),
request.user.userprofile,
form.cleaned_data['is_main']
)
messages.success(request, u"Dodano zdjęcie do galerii.")
else:
messages.error(
request,
u"Problem w trakcie dodawania grafiki: {}".format(
'<br />'.join(form.errors)
)
)
return redirect(
reverse(
'offers_edit',
args=[slugify(offer.title), offer.id]
)
)
elif request.POST.get('close_offer') == 'close':
offer.close_offer()
return redirect(
reverse(
'offers_view',
args=[slugify(offer.title), offer.id]
)
)
elif request.POST.get('status_flag') == 'change_status':
if request.POST.get('status') == 'published':
offer.publish()
if request.user.userprofile.is_administrator:
return redirect('offers_reorder', offer.id)
elif request.POST.get('status') == 'rejected':
offer.reject()
return redirect('offers_list')
form = CreateOfferForm( # pylint: disable=redefined-variable-type
request.POST, instance=offer
)
if form.is_valid():
offer = form.save()
offer.unpublish()
offer.save()
save_history(request, offer, action=CHANGE)
messages.success(request, u"Oferta została zmieniona.")
else:
messages.error(
request,
u"Formularz zawiera niepoprawnie wypełnione pola: {}".format(
'<br />'.join(form.errors)
)
)
if offer.id or request.user.userprofile.is_administrator:
organizations = [offer.organization]
else:
organizations = request.user.userprofile.organizations.all()
return render(
request,
'offers/offer_form.html',
{
'offer': offer,
'form': form,
'organizations': organizations,
'offer_image_form': OfferImageForm(),
}
)
class OffersDelete(View):
""" Class view responsible for deletion of offers """
@staticmethod
def get(request, pk): # pylint: disable=invalid-name
"""Reject (delete) the selected offer.
:param request: WSGIRequest instance
:param pk: Offer id
"""
offer = get_object_or_404(Offer, pk=pk)
if (
request.user.is_authenticated() and
request.user.userprofile.is_administrator
):
offer.reject()
messages.info(request, 'Oferta została odrzucona.')
return redirect('homepage')
else:
return HttpResponseForbidden()
class OffersAccept(View):
""" Class view responsible for acceptance of offers """
@staticmethod
def get(request, pk): # pylint: disable=invalid-name
"""Accept (publish) the selected offer.
:param request: WSGIRequest instance
:param pk: Offer id
"""
offer = get_object_or_404(Offer, pk=pk)
if (
request.user.is_authenticated() and
request.user.userprofile.is_administrator
):
offer.publish()
messages.info(request, 'Oferta została zaakceptowana.')
return redirect('homepage')
else:
return HttpResponseForbidden()
class OffersView(View):
u"""Class view supporting offer preview."""
@staticmethod
@correct_slug(Offer, 'offers_view', 'title')
def get(request, slug, id_):
u"""View responsible for showing details of particular offer."""
offer = get_object_or_404(Offer, id=id_)
try:
main_image = OfferImage.objects.get(offer=offer, is_main=True)
except OfferImage.DoesNotExist:
main_image = ''
volunteers = None
users = [u.user.id for u in offer.organization.userprofiles.all()]
if (
request.user.is_authenticated() and (
request.user.userprofile.is_administrator or
request.user.userprofile.id in users
)
):
volunteers = offer.volunteers.all()
context = {
'offer': offer,
'volunteers': volunteers,
'MEDIA_URL': settings.MEDIA_URL,
'main_image': main_image,
}
return render(request, "offers/show_offer.html", context=context)
@staticmethod
def post(request, slug, id_):
u"""View responsible for submitting volunteers awarding."""
offer = get_object_or_404(Offer, id=id_)
post_data = request.POST
if post_data.get('csrfmiddlewaretoken'):
del post_data['csrfmiddlewaretoken']
if post_data.get('submit'):
del post_data['submit']
offer.votes = True
offer.save()
context = {
'offer': offer,
}
return render(request, "offers/show_offer.html", context=context)
class OffersJoin(View):
"""Class view supporting joining offer."""
@staticmethod
@correct_slug(Offer, 'offers_join', 'title')
def get(request, slug, id_): # pylint: disable=unused-argument
"""View responsible for showing join form for particular offer."""
if request.user.is_authenticated():
has_applied = Offer.objects.filter(
volunteers=request.user,
volunteers__offer=id_,
).count()
if has_applied:
messages.error(
request,
'Już wyraziłeś chęć uczestnictwa w tej ofercie.'
)
return redirect('offers_list')
offer = Offer.objects.get(id=id_)
try:
main_image = OfferImage.objects.get(offer=offer, is_main=True)
except OfferImage.DoesNotExist:
main_image = ''
context = {
'form': OfferApplyForm(),
'offer': offer,
'MEDIA_URL': settings.MEDIA_URL,
'main_image': main_image,
}
context['volunteer_user'] = UserProfile()
if request.user.is_authenticated():
context['volunteer_user'] = request.user.userprofile
return render(
request,
'offers/offer_apply.html',
context
)
@staticmethod
@correct_slug(Offer, 'offers_join', 'title')
def post(request, slug, id_): # pylint: disable=unused-argument
"""View responsible for saving join for particular offer."""
form = OfferApplyForm(request.POST)
offer = Offer.objects.get(id=id_)
if form.is_valid():
if request.user.is_authenticated():
user = request.user
else:
user = User.objects.filter(
email=request.POST.get('email')
).exists()
if user:
messages.info(
request,
'Zaloguj się, aby zapisać się do oferty.'
)
return redirect(
reverse('login') + '?next={}'.format(request.path)
)
else:
messages.info(
request,
'Zarejestruj się, aby zapisać się do oferty.'
)
return redirect('register')
has_applied = Offer.objects.filter(
volunteers=user,
volunteers__offer=id_,
).count()
if has_applied:
messages.error(
request,
u'Już wyraziłeś chęć uczestnictwa w tej ofercie.'
)
return redirect('offers_list')
offer.volunteers.add(user)
offer.save()
send_mail(
request,
'offer_application',
[
user.email,
request.POST.get('email'),
],
dict(
email=request.POST.get('email'),
phone_no=request.POST.get('phone_no'),
fullname=request.POST.get('fullname'),
comments=request.POST.get('comments'),
offer=offer,
)
)
messages.success(
request,
u'Zgłoszenie chęci uczestnictwa zostało wysłane.'
)
return redirect(
'offers_view',
slug=slugify(offer.title),
id_=offer.id,
)
else:
errors = '<br />'.join(form.errors)
messages.error(
request,
u'Formularz zawiera nieprawidłowe dane' + errors
)
volunteer_user = UserProfile()
if request.user.is_authenticated():
volunteer_user = request.user.userprofile
return render(
request,
'offers/offer_apply.html',
{
'offer': offer,
'form': form,
'volunteer_user': volunteer_user,
}
)
class OffersArchived(View):
u"""Class based view to list archived offers."""
@staticmethod
def get(request):
u"""GET request for offer archive page.
:param request: WSGIRequest instance
"""
return render(request, 'offers/archived.html', {
'offers': Offer.objects.get_archived()
})
| stxnext-csr/volontulo | apps/volontulo/views/offers.py | Python | mit | 17,750 |
"""
Tests for .objective package.
"""
| Xion/taipan | tests/test_objective/__init__.py | Python | bsd-2-clause | 38 |
"""
MythTV Backend Representation
This is very basic at the moment: it just stores the backend location and
returns the requested service.
"""
from mythtvlib.settings import settings
from mythtvlib.services import MythTVServiceAPI
class MythTVBackend(object):
_default = None
def __init__(self, hostname=None, port=None):
"""Initialise the receiver.
If no parameters are supplied, settings will be used.
If settings are not defined, use defaults."""
self.hostname = hostname
if self.hostname is None:
self.hostname = getattr(settings, "HOSTNAME", "localhost")
self.port = port
if self.port is None:
self.port = int(getattr(settings, "PORT", 6544))
self._service_apis = {}
return
@classmethod
def default(cls, hostname=None, port=None):
"Answer the default backend"
if cls._default is None:
cls._default = cls(hostname, port)
return cls._default
@classmethod
def services(cls):
return MythTVServiceAPI.services
def service_api(self, service_name):
api = self._service_apis.get(service_name)
if api is None:
api = MythTVServiceAPI(service_name, self)
self._service_apis[service_name] = api
return api
def __str__(self):
return "MythTVBackend(hostname={hostname}, port={port})".format(
hostname=self.hostname, port=self.port)
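# Minimal usage sketch (the hostname and the 'Myth' service name are illustrative
# assumptions, not values taken from this module):
# backend = MythTVBackend.default(hostname='mythbox', port=6544)
# api = backend.service_api('Myth')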
| akgrant43/mythtv_cli_extensions | mythtvlib/backend.py | Python | gpl-2.0 | 1,485 |
import subprocess
#import ipaddress
import re
import pprint
import telnetlib
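# Overall flow: telnet to the router's vty, run "show ip ospf database" to list the
# Router LSAs, then query each router's LSA to pull, per link, the designated-router
# (or point-to-point neighbour) address and the local interface address; routers that
# report the same DR address are treated as adjacent when building the topology matrix.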
def decodeTopology(output):
#var
startRow = 0
listIP = []
#regular expression
rule1 = re.compile('Router Link States')
rule2 = re.compile('Link ID')
rule3 = re.compile('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
#divide it in rows
rows = output.split('\n')
#search for "Router Link States"
for i in range(0, len(rows)):
if rule1.search(rows[i]) is None:
continue
else:
startRow = i
break
#print(startRow)
#search for "Link ID "
for i in range(startRow, len(rows)):
if rule2.search(rows[i]) is None:
continue
else:
#start at i+1 because the first usable result is in the next line
startRow = (i + 1)
break
#startRow is the row of the first ip
#print(startRow)
#we only need the first ip of each line
#search for "ip" until empty line
for i in range(startRow, len(rows)):
if(len(rows[i]) == 0):
#empty line found
break
#If it finds an IPv4 address
if rule3.search(rows[i]) != None:
#Insert it in the listIP
listIP.append(rule3.search(rows[i]).group())
#Otherwise, it means that the list of router ip addresses is finished
else:
break
#print(listIP)
return listIP
def findDr(draddress, listInfo, originalNode):
for i in range(0, len(listInfo)):
routes = listInfo[i]['nRoutes']
for j in range(0, routes):
if(listInfo[i][str(j) + '_draddress'] == draddress):
if(i != originalNode):
return i
return -1
def buildTopologyMatrix(interfaces, ip):
listInfo = []
topologyMatrix = [[0 for x in range(len(interfaces))]
for x in range(len(interfaces))]
ruleIP = re.compile('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
for i in interfaces:
listInfo.append({})
listInfo[len(listInfo) - 1]['routerId'] = i
#cmd = 'vtysh -c "show ip ospf database router ' + i + '"'
#output = subprocess.check_output(cmd, shell=True)
cmd = 'show ip ospf database router ' + i + '\n'
output = telnetRouter(ip, cmd)
rows = output.split('\n')
#pprint.pprint(rows)
#find OSPF link
counter = 0
for row in range(0, len(rows)):
#If it is a Transit Network: read the designated router and the incoming interface
if(re.search('Link connected to: a Transit Network', rows[row]) is not None):
if(re.search('\(Link ID\) Designated Router address:', rows[row + 1]) is not None):
listInfo[len(listInfo) - 1][str(counter) + '_draddress'] = ruleIP.search(rows[row + 1]).group()
if(re.search('\(Link Data\) Router Interface address:', rows[row + 2]) is not None):
listInfo[len(listInfo) - 1][str(counter) + '_ip'] = ruleIP.search(rows[row + 2]).group()
counter = counter + 1
#Otherwise checks if it is a point-to-point link
elif(re.search('Link connected to: another Router \(point-to-point\)', rows[row]) is not None):
if(re.search('\(Link ID\) Neighboring Router ID:', rows[row + 1]) is not None):
listInfo[len(listInfo) - 1][str(counter) + '_draddress'] = ruleIP.search(rows[row + 1]).group()
if(re.search('\(Link Data\) Router Interface address:', rows[row + 2]) is not None):
listInfo[len(listInfo) - 1][str(counter) + '_ip'] = ruleIP.search(rows[row + 2]).group()
counter = counter + 1
listInfo[len(listInfo) - 1]['nRoutes'] = counter
#pprint.pprint(listInfo)
#build matrix
for i in range(0, len(listInfo)):
routes = listInfo[i]['nRoutes']
for j in range(0, routes):
draddress = listInfo[i][str(j) + '_draddress']
k = findDr(draddress, listInfo, i)
if(k < 0):
print('ERROR')
topologyMatrix[i][k] = listInfo[i][str(j) + '_ip']
return listInfo, topologyMatrix
def telnetRouter(ipaddr, cmd):
tn = telnetlib.Telnet(ipaddr)
tn.write(cmd)
output = tn.read_until('>')
output = tn.read_until('>')
tn.close()
return output
def getTopology(ip):
#output of the command
#output = subprocess.check_output('vtysh -c "show ip ospf database"', shell=True)
output = telnetRouter(ip, 'show ip ospf database\n')
#return the ip list of the routers
interfaces = decodeTopology(output)
#return interface list & matrix topology
interfaceList, matrix = buildTopologyMatrix(interfaces, ip)
return interfaceList, matrix
#lista, matrice = getTopology('192.168.3.1')
#print('lista')
#pprint.pprint(lista)
#print('\nmatrice')
#pprint.pprint(matrice)
| PP90/ANAWS-project-on-Traffic-Engineering | Old_project/buildTopology.py | Python | bsd-3-clause | 4,940 |
# -*- coding: utf-8 -*-
#
# Copyright 2015-2022 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Comparing remote and local predictions
"""
import sys
from .world import world, setup_module, teardown_module, show_doc, show_method
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
from . import create_association_steps as association_create
from . import create_cluster_steps as cluster_create
from . import create_anomaly_steps as anomaly_create
from . import create_prediction_steps as prediction_create
from . import compare_predictions_steps as prediction_compare
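# Each scenario below follows the same Gherkin-style flow: upload a CSV with raw date
# fields, build a dataset and a model (anomaly detector, cluster or association), then
# check that the local in-memory prediction matches the remote one for the same input.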
class TestComparePrediction(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully comparing remote and local predictions
with raw date input for anomaly detectors
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <source_wait> secs
And I create a dataset
And I wait until the dataset is ready less than <dataset_wait> secs
And I create an anomaly detector
And I wait until the anomaly detector is ready less
than <time_3> secs
And I create a local anomaly detector
When I create an anomaly score for "<input_data>"
Then the anomaly score is "<score>"
And I create a local anomaly score for "<input_data>"
Then the local anomaly score is "<score>"
"""
headers = ["data", "source_wait", "dataset_wait", "model_wait",
"input_data", "score"]
examples = [
['data/dates2.csv', '20', '30', '60',
'{"time-1":"1910-05-08T19:10:23.106","cat-0":"cat2","target-2":0.4}',
0.52477],
['data/dates2.csv', '20', '30', '60',
'{"time-1":"1920-06-30T20:21:20.320","cat-0":"cat1","target-2":0.2}',
0.50654]]
show_doc(self.test_scenario1, examples)
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
source_create.i_upload_a_file(
self, example["data"], shared=example["data"])
source_create.the_source_is_finished(
self, example["source_wait"], shared=example["data"])
dataset_create.i_create_a_dataset(self, shared=example["data"])
dataset_create.the_dataset_is_finished_in_less_than(
self, example["dataset_wait"], shared=example["data"])
anomaly_create.i_create_an_anomaly(self)
anomaly_create.the_anomaly_is_finished_in_less_than(
self, example["model_wait"])
prediction_compare.i_create_a_local_anomaly(self)
prediction_create.i_create_an_anomaly_score(
self, example["input_data"])
prediction_create.the_anomaly_score_is(
self, example["score"])
prediction_compare.i_create_a_local_anomaly_score(
self, example["input_data"])
prediction_compare.the_local_anomaly_score_is(
self, example["score"])
def test_scenario1b(self):
"""
Scenario: Successfully comparing remote and local predictions
with raw date input for anomaly detectors
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <source_wait> secs
And I create a dataset
And I wait until the dataset is ready less than <dataset_wait> secs
And I create an anomaly detector
And I wait until the anomaly detector is ready less
than <time_3> secs
And I create a local anomaly detector
When I create an anomaly score for "<input_data>"
Then the anomaly score is "<score>"
And I create a local anomaly score for "<input_data>"
Then the local anomaly score is "<score>"
"""
headers = ["data", "source_wait", "dataset_wait", "model_wait",
"input_data", "score"]
examples = [
['data/dates2.csv', '20', '30', '60',
'{"time-1":"1932-01-30T19:24:11.440","cat-0":"cat2","target-2":0.1}',
0.54343],
['data/dates2.csv', '20', '30', '60',
'{"time-1":"1950-11-06T05:34:05.602","cat-0":"cat1" ,"target-2":0.9}',
0.5202]]
show_doc(self.test_scenario1b)
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
source_create.i_upload_a_file(
self, example["data"], shared=example["data"])
source_create.the_source_is_finished(
self, example["source_wait"], shared=example["data"])
dataset_create.i_create_a_dataset(self, shared=example["data"])
dataset_create.the_dataset_is_finished_in_less_than(
self, example["dataset_wait"], shared=example["data"])
anomaly_create.i_create_an_anomaly(self, shared=example["data"])
anomaly_create.the_anomaly_is_finished_in_less_than(
self, example["model_wait"], shared=example["data"])
prediction_compare.i_create_a_local_anomaly(self)
prediction_create.i_create_an_anomaly_score(
self, example["input_data"])
prediction_create.the_anomaly_score_is(
self, example["score"])
prediction_compare.i_create_a_local_anomaly_score(
self, example["input_data"])
prediction_compare.the_local_anomaly_score_is(
self, example["score"])
def test_scenario1b_a(self):
"""
Scenario: Successfully comparing remote and local predictions
with raw date input for anomaly detectors
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <source_wait> secs
And I create a dataset
And I wait until the dataset is ready less than <dataset_wait> secs
And I create an anomaly detector
And I wait until the anomaly detector is ready less
than <time_3> secs
And I create a local anomaly detector
When I create an anomaly score for "<input_data>"
Then the anomaly score is "<score>"
And I create a local anomaly score for "<input_data>"
Then the local anomaly score is "<score>"
"""
headers = ["data", "source_wait", "dataset_wait", "model_wait",
"input_data", "score"]
examples = [
['data/dates2.csv', '20', '30', '60',
'{"time-1":"1969-7-14 17:36","cat-0":"cat2","target-2":0.9}',
0.93639]]
show_doc(self.test_scenario1b_a)
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
source_create.i_upload_a_file(
self, example["data"], shared=example["data"])
source_create.the_source_is_finished(
self, example["source_wait"], shared=example["data"])
dataset_create.i_create_a_dataset(self, shared=example["data"])
dataset_create.the_dataset_is_finished_in_less_than(
self, example["dataset_wait"], shared=example["data"])
anomaly_create.i_create_an_anomaly(self, shared=example["data"])
anomaly_create.the_anomaly_is_finished_in_less_than(
self, example["model_wait"], shared=example["data"])
prediction_compare.i_create_a_local_anomaly(self)
prediction_create.i_create_an_anomaly_score(
self, example["input_data"])
prediction_create.the_anomaly_score_is(
self, example["score"])
prediction_compare.i_create_a_local_anomaly_score(
self, example["input_data"])
prediction_compare.the_local_anomaly_score_is(
self, example["score"])
def test_scenario1c(self):
"""
Scenario: Successfully comparing remote and local predictions
with raw date input for anomaly detectors
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <source_wait> secs
And I create a dataset
And I wait until the dataset is ready less than <dataset_wait> secs
And I create an anomaly detector
And I wait until the anomaly detector is ready less
than <time_3> secs
And I create a local anomaly detector
When I create an anomaly score for "<input_data>"
Then the anomaly score is "<score>"
And I create a local anomaly score for "<input_data>"
Then the local anomaly score is "<score>"
"""
headers = ["data", "source_wait", "dataset_wait", "model_wait",
"input_data", "score"]
examples = [
['data/dates2.csv', '20', '30', '60',
'{"time-1":"2001-01-05T23:04:04.693","cat-0":"cat2","target-2":0.01}',
0.54911],
['data/dates2.csv', '20', '30', '60',
'{"time-1":"2011-04-01T00:16:45.747","cat-0":"cat2","target-2":0.32}',
0.52477]]
show_doc(self.test_scenario1c)
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
source_create.i_upload_a_file(
self, example["data"], shared=example["data"])
source_create.the_source_is_finished(
self, example["source_wait"], shared=example["data"])
dataset_create.i_create_a_dataset(self, shared=example["data"])
dataset_create.the_dataset_is_finished_in_less_than(
self, example["dataset_wait"], shared=example["data"])
anomaly_create.i_create_an_anomaly(self, shared=example["data"])
anomaly_create.the_anomaly_is_finished_in_less_than(
self, example["model_wait"], shared=example["data"])
prediction_compare.i_create_a_local_anomaly(self)
prediction_create.i_create_an_anomaly_score(
self, example["input_data"])
prediction_create.the_anomaly_score_is(self, example["score"])
prediction_compare.i_create_a_local_anomaly_score(
self, example["input_data"])
prediction_compare.the_local_anomaly_score_is(
self, example["score"])
def test_scenario1c_a(self):
"""
Scenario: Successfully comparing remote and local predictions
with raw date input for anomaly detectors
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <source_wait> secs
And I create a dataset
And I wait until the dataset is ready less than <dataset_wait> secs
And I create an anomaly detector
And I wait until the anomaly detector is ready less
than <model_wait> secs
And I create a local anomaly detector
When I create an anomaly score for "<input_data>"
Then the anomaly score is "<score>"
And I create a local anomaly score for "<input_data>"
Then the local anomaly score is "<score>"
"""
headers = ["data", "source_wait", "dataset_wait", "model_wait",
"input_data", "score"]
examples = [
['data/dates2.csv', '20', '30', '60',
'{"time-1":"1969-W29-1T17:36:39Z","cat-0":"cat1","target-2":0.87}',
0.93678],
['data/dates2.csv', '20', '30', '60',
'{"time-1":"Mon Jul 14 17:36 +0000 1969","cat-0":"cat1","target-2":0}',
0.93717]]
show_doc(self.test_scenario1c_a)
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
source_create.i_upload_a_file(
self, example["data"], shared=example["data"])
source_create.the_source_is_finished(
self, example["source_wait"], shared=example["data"])
dataset_create.i_create_a_dataset(self, shared=example["data"])
dataset_create.the_dataset_is_finished_in_less_than(
self, example["dataset_wait"], shared=example["data"])
anomaly_create.i_create_an_anomaly(self, shared=example["data"])
anomaly_create.the_anomaly_is_finished_in_less_than(
self, example["model_wait"], shared=example["data"])
prediction_compare.i_create_a_local_anomaly(self)
prediction_create.i_create_an_anomaly_score(
self, example["input_data"])
prediction_create.the_anomaly_score_is(
self, example["score"])
prediction_compare.i_create_a_local_anomaly_score(
self, example["input_data"])
prediction_compare.the_local_anomaly_score_is(
self, example["score"])
def test_scenario2(self):
"""
Scenario: Successfully comparing remote and local predictions
with raw date input for cluster
And I wait until the source is ready less than <source_wait> secs
And I create a dataset
And I wait until the dataset is ready less than <dataset_wait> secs
And I create a cluster
And I wait until the cluster is ready less than <model_wait> secs
And I create a local cluster
When I create a centroid for "<input_data>"
Then the centroid is "<centroid>" with distance "<distance>"
And I create a local centroid for "<input_data>"
Then the local centroid is "<centroid>" with
distance "<distance>"
"""
headers = ["data", "source_wait", "dataset_wait", "model_wait",
"input_data", "centroid", "distance"]
examples = [
['data/dates2.csv', '20', '30', '60',
'{"time-1":"1910-05-08T19:10:23.106","cat-0":"cat2","target-2":0.4}',
"Cluster 2", 0.92112],
['data/dates2.csv', '20', '30', '60',
'{"time-1":"1920-06-30T20:21:20.320","cat-0":"cat1","target-2":0.2}',
"Cluster 3", 0.77389]]
show_doc(self.test_scenario2)
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
source_create.i_upload_a_file(
self, example["data"], shared=example["data"])
source_create.the_source_is_finished(
self, example["source_wait"], shared=example["data"])
dataset_create.i_create_a_dataset(self, shared=example["data"])
dataset_create.the_dataset_is_finished_in_less_than(
self, example["dataset_wait"], shared=example["data"])
cluster_create.i_create_a_cluster(self, shared=example["data"])
cluster_create.the_cluster_is_finished_in_less_than(
self, example["model_wait"], shared=example["data"])
prediction_compare.i_create_a_local_cluster(self)
prediction_create.i_create_a_centroid(
self, example["input_data"])
prediction_create.the_centroid_is_with_distance(
self, example["centroid"], example["distance"])
prediction_compare.i_create_a_local_centroid(
self, example["input_data"])
prediction_compare.the_local_centroid_is(
self, example["centroid"], example["distance"])
def test_scenario2_a(self):
"""
Scenario: Successfully comparing remote and local predictions
with raw date input for cluster
And I wait until the source is ready less than <source_wait> secs
And I create a dataset
And I wait until the dataset is ready less than <dataset_wait> secs
And I create a cluster
And I wait until the cluster is ready less than <model_wait> secs
And I create a local cluster
When I create a centroid for "<input_data>"
Then the centroid is "<centroid>" with distance "<distance>"
And I create a local centroid for "<input_data>"
Then the local centroid is "<centroid>" with
distance "<distance>"
"""
headers = ["data", "source_wait", "dataset_wait", "model_wait",
"input_data", "centroid", "distance"]
examples = [
['data/dates2.csv', '20', '30', '60',
'{"time-1":"1932-01-30T19:24:11.440","cat-0":"cat2","target-2":0.1}',
"Cluster 0", 0.87855],
['data/dates2.csv', '20', '30', '60',
'{"time-1":"1950-11-06T05:34:05.602","cat-0":"cat1" ,"target-2":0.9}',
"Cluster 6", 0.83506]]
show_doc(self.test_scenario2_a)
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
source_create.i_upload_a_file(
self, example["data"], shared=example["data"])
source_create.the_source_is_finished(
self, example["source_wait"], shared=example["data"])
dataset_create.i_create_a_dataset(self, shared=example["data"])
dataset_create.the_dataset_is_finished_in_less_than(
self, example["dataset_wait"], shared=example["data"])
cluster_create.i_create_a_cluster(self, shared=example["data"])
cluster_create.the_cluster_is_finished_in_less_than(
self, example["model_wait"])
prediction_compare.i_create_a_local_cluster(self)
prediction_create.i_create_a_centroid(
self, example["input_data"])
prediction_create.the_centroid_is_with_distance(
self, example["centroid"], example["distance"])
prediction_compare.i_create_a_local_centroid(
self, example["input_data"])
prediction_compare.the_local_centroid_is(
self, example["centroid"], example["distance"])
def test_scenario3(self):
"""
Scenario: Successfully comparing association sets:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <source_wait> secs
And I update the source with params "<source_conf>"
And I create a dataset
And I wait until the dataset is ready less than <dataset_wait> secs
And I create a model
And I wait until the association is ready less than <model_wait> secs
And I create a local association
When I create an association set for "<input_data>"
Then the association set is like the contents of
"<association_set_file>"
And I create a local association set for "<input_data>"
Then the local association set is like the contents of
"<association_set_file>"
"""
headers = ["data", "source_wait", "dataset_wait", "model_wait",
"input_data", "association_set_file"]
examples = [['data/dates2.csv', '20', '30', '80', '{"target-2": -1}',
'data/associations/association_set2.json']]
show_doc(self.test_scenario3)
for example in examples:
example = dict(zip(headers, example))
show_method(self, sys._getframe().f_code.co_name, example)
source_create.i_upload_a_file(
self, example["data"], shared=example["data"])
source_create.the_source_is_finished(
self, example["source_wait"], shared=example["data"])
dataset_create.i_create_a_dataset(self, shared=example["data"])
dataset_create.the_dataset_is_finished_in_less_than(
self, example["dataset_wait"], shared=example["data"])
association_create.i_create_an_association_from_dataset(
self, shared=example["data"])
association_create.the_association_is_finished_in_less_than(
self, example["model_wait"], shared=example["data"])
prediction_compare.i_create_a_local_association(self)
prediction_create.i_create_an_association_set(
self, example["input_data"])
prediction_compare.the_association_set_is_like_file(
self, example["association_set_file"])
prediction_compare.i_create_a_local_association_set(
self, example["input_data"])
prediction_compare.the_local_association_set_is_like_file(
self, example["association_set_file"])
| bigmlcom/python | bigml/tests/test_44_compare_predictions.py | Python | apache-2.0 | 22,664 |
import os
import pdb
import copy
import random
import xml.dom.minidom
import microdom
import types
import common
from module import Module
import svg
import entity
from entity import Entity
import generate
import share
import wx
VERSION = "7.0"
MAX_RECURSIVE_INSTANTIATION_DEPTH = 5
defaultForBuiltinType = {
"boolean": False,
"integer": 0,
}
class Model:
"""Rather then type/instance, there are really 2 levels instantiations, more along the lines of C++ template,type,instance. What I mean by this is that the object defined in SAFplusAmf.yang is really a "meta-type". Take the example of a Service Group. You first "instantiate" this in the UML editor to create the "Apache" (for example) Service Group. Next you "instantiate" the "Apache Service Group" to create a particular instance of Apache running on 2 nodes.
The user can modify the configuration after every instantiation, but also has the option to "lock" particular configuration so downstream instantiation cannot modify it.
For example, the user instantiates the Apache Service Group (for example), selects 1+1 redundancy and then "locks" it. The user also selects 3 restarts before failover but does NOT lock that. Now, when the Apache Service Group is instantiated on say node1, the user CANNOT change the redundancy model, but he can change the # of restarts (for this particular instance).
SAFplus6 SAFPlus7 SAFplus7 model.py code What I'm talking about
hardcoded .yang entityTypes Meta-types (e.g. Service Group)
config <entities> entities entities (e.g. Apache web browser)
instantiated <instances> instances instances (e.g. Apache running on 2 particular nodes)
"""
def __init__(self, modelfile=None):
self.init()
if modelfile:
self.load(modelfile)
def init(self):
"""Clear this model, forgetting everything"""
self.data = {} # empty model
self.filename = None
self.modules = {}
self.dataTypes= {}
self.entityTypes = {}
self.entities = {}
self.instances = {}
def directory(self):
"""Returns the location of this model on disk """
return os.path.dirname(self.filename)
def delete(self, items):
"""Accept a list of items in a variety of formats to be deleted"""
if type(items) is types.ListType or isinstance(items,set):
for item in items:
self.delete(item)
if type(items) is types.DictType:
for item in items.items():
self.delete(item)
if type(items) in types.StringTypes:
#if self.entityTypes.get(item):
# self.deleteEntity(self.entities[item])
if self.entities.get(items):
self.deleteEntity(self.entities[items])
if self.instances.get(items):
self.deleteInstance(self.instances[items])
if isinstance(items,entity.Instance):
if share.instancePanel:
share.instancePanel.deleteEntities([items], False)
self.deleteInstance(items)
elif isinstance(items, entity.Entity):
#self.deleteEntity(items)
entname = items.data["name"]
insToDelete = []
for name,e in self.instances.items():
if e.entity.data["name"] == entname:
insToDelete.append(e)
if share.instancePanel:
share.instancePanel.deleteEntities(insToDelete, False)
for i in insToDelete:
# delete instances and its related instances (instances have relationship with it)
self.deleteInstance(i)
self.deleteEntity(items)
def deleteEntity(self,entity):
"""Delete this instance of Entity from the model"""
entname = entity.data["name"]
for (name,e) in self.entities.items():
e.containmentArrows[:] = [ x for x in e.containmentArrows if x.contained != entity]
for k,v in e.data.items():
if (v == entity.data['name']):
e.data[k] = ''
break
del self.entities[entname]
# Also delete the entity from the microdom
#entities = self.data.getElementsByTagName("entities")
#if entities:
# entities[0].delChild(entities[0].findOneByChild("name",entname))
self.deleteEntityFromMicrodom(entname, entity.et.name)
"""Delete entity.Instance of Entity type
nameInstances = [name for (name, e) in self.instances.items() if e.entity.data["name"] == entname]
self.delete(nameInstances)
"""
def deleteEntityFromMicrodom(self, entname, enttype):
entities = self.data.getElementsByTagName("entities")
if entities:
entities[0].delChild(entities[0].findOneByChild("name",entname))
ide = self.data.getElementsByTagName("ide")
if ide:
entTypes = ide[0].getElementsByTagName(enttype)
if entTypes:
e = entTypes[0].getElementsByTagName(entname)
if e:
entTypes[0].delChild(e[0])
ideEntities = self.data.getElementsByTagName("ide_entity_info")
if ideEntities:
e = ideEntities[0].getElementsByTagName(entname)
if e:
ideEntities[0].delChild(e[0])
# delete entity in containment arrows if any
name = "containmentArrows"
caTags = ideEntities[0].getElementsByTagName(name)
if caTags:
name = "_"+entname
for caTag in caTags:
t = caTag.getElementsByTagName(name)
if t:
caTag.delChild(t[0])
def deleteWireFromMicrodom(self, containerName, containedName):
ideEntities = self.data.getElementsByTagName("ide_entity_info")
if ideEntities:
e = ideEntities[0].getElementsByTagName(containerName)
if e:
arrows = e[0].getElementsByTagName("containmentArrows")
if arrows:
name = "_" + containedName
t = arrows[0].getElementsByTagName(name)
if t:
arrows[0].delChild(t[0])
def getEntitiesAndInfos(self):
entities = self.data.getElementsByTagName("entities")
entitiesInfo = self.data.getElementsByTagName("ide_entity_info")
if entities and entitiesInfo:
return (entities[0].pretty(), entitiesInfo[0].pretty())
else: return ("", "")
def getInstanceInfomation(self):
self.updateMicrodom()
instances = self.data.getElementsByTagName("instances")
if instances:
return instances[0].pretty()
def setEntitiesAndInfos(self, data):
c = self.data.getElementsByTagName("entities")
if c:
self.data.delChild(c[0])
dom = xml.dom.minidom.parseString(data[0])
entities = microdom.LoadMiniDom(dom.childNodes[0])
self.data.addChild(entities)
c = self.data.getElementsByTagName("ide")
if c:
c1 = c[0].getElementsByTagName("ide_entity_info")
if c1:
c[0].delChild(c1[0])
dom1 = xml.dom.minidom.parseString(data[1])
entitiesInfo = microdom.LoadMiniDom(dom1.childNodes[0])
c[0].addChild(entitiesInfo)
self.entities = {}
self.loadDataInfomation()
def loadDataInfomation(self):
entities = self.data.getElementsByTagName("entities")
ideEntities = self.data.getElementsByTagName("ide_entity_info")
if ideEntities: ideEntities = ideEntities[0] # Get first item in the list
if entities:
assert(len(entities)==1)
entities = entities[0]
fileEntLst = []
for ed in entities.children(microdom.microdomFilter):
name = ed["name"].data_
entType = self.entityTypes[ed.tag_]
pos = None
size = None
if ideEntities: # Load the pos and size from the model (if it exists)
ideInfo = ideEntities.getElementsByTagName(name)
if ideInfo:
ideInfo = ideInfo[0]
pos = common.str2Tuple(ideInfo["position"].data_)
size = common.str2Tuple(ideInfo["size"].data_)
if pos is None:
pos = self.makeUpAScreenPosition()
size = entType.iconSvg.size
eo = entity.Entity(entType,pos,size,name)
eo.updateDataFields(ed)
self.entities[name] = eo
fileEntLst.append((ed,eo))
# Look for relationships. I can't do this until all the entities are created
for (ed,eo) in fileEntLst:
for et in self.entityTypes.items(): # Look through all the children for a key that corresponds to the name of an entityType (+ s), eg: "ServiceGroups"
if ed.child_.has_key(et[0] + 's'):
linkstr = ed.child_[et[0] + 's'].data_
linklst = linkstr.split(",")
for link in linklst:
contained = self.entities.get(link,None)
if contained:
# TODO: look the positions up in the GUI section of the xml file
(beginOffset, endOffset, midpoints) = self.getContainmemtArrowPos(ideEntities, eo, contained)
ca = entity.ContainmentArrow(eo,beginOffset,contained,endOffset,midpoints)
eo.containmentArrows.append(ca)
else: # target of the link is missing, so drop the link as well. This could happen if the user manually edits the XML
# TODO: create some kind of warning/audit log in share.py that we can post messages to.
pass
# Recreate all the images in case loading data would have changed them.
for (ed,eo) in fileEntLst:
eo.recreateBitmap()
# Get instance lock fields
ide = self.data.getElementsByTagName("ide")
if ide:
for (name,e) in self.entities.items():
etType = ide[0].getElementsByTagName(e.et.name)
if etType:
et = etType[0].getElementsByTagName(name)
if et:
for ed in et[0].children(microdom.microdomFilter):
e.instanceLocked[str(ed.tag_)] = ed.data_
instances = self.data.find("instances")
if instances:
for (path, obj) in instances:
fileEntLst = []
for entityType in self.entityTypes.keys():
for instance in obj.children(lambda(x): x if (type(x) is types.InstanceType and x.__class__ is microdom.MicroDom and x.tag_ == entityType) else None):
if instance.child_.has_key("%sType"%entityType):
entityTypeName = instance.child_.get("%sType"%entityType).data_
# Entity of this instance
entityParent = self.entities.get(entityTypeName)
if not entityParent:
continue
entityInstance = entity.Instance(entityParent, instance, (0,0), (10,10), instance.name.data_)
entityInstance.updateDataFields(instance)
# Copy instance locked, then bind to readonly wxwidget
entityInstance.instanceLocked = entityParent.instanceLocked.copy()
self.instances[instance.name.data_] = entityInstance
fileEntLst.append((instance,entityInstance))
for (ed,eo) in fileEntLst:
for et in self.entityTypes.items(): # Look through all the children for a key that corresponds to the name of an entityType (+ s), eg: "ServiceGroups"
child = et[0][0].lower() + et[0][1:] + 's'
for ch in ed.children(lambda(x): x if (type(x) is types.InstanceType and x.__class__ is microdom.MicroDom and x.tag_ == child) else None):
# Strip out instance-identifier if any
childName = str(ch.data_)[str(ch.data_).rfind("/")+1:]
contained = self.instances.get(childName,None)
if contained:
# TODO: look the positions up in the GUI section of the xml file
ca = entity.ContainmentArrow(eo,(0,0),contained,(0,0),[])
contained.childOf.add(eo)
eo.containmentArrows.append(ca)
else: # target of the link is missing, so drop the link as well. This could happen if the user manually edits the XML
# TODO: create some kind of warning/audit log in share.py that we can post messages to.
pass
entity.updateNamelyDict(self)
def deleteInstance(self,inst):
self.recursiveDeleteInstance(inst)
def deleteInstanceFromMicrodom(self, entname):
instances = self.data.getElementsByTagName("instances")
if instances:
instances[0].delChild(instances[0].findOneByChild("name",entname))
def recursiveDeleteInstance(self,inst):
entname = inst.data["name"]
#if len(inst.containmentArrows)==0:
# self.deleteInstanceFromMicrodom(entname)
# for (name, e) in self.instances.items():
# if name==entname:
# del self.instances[name]
# return
for ca in inst.containmentArrows:
self.recursiveDeleteInstance(ca.contained)
if len(inst.containmentArrows)>0:
del inst.containmentArrows[:]
self.deleteInstanceFromMicrodom(entname)
del self.instances[entname]
for (name,e) in self.instances.items():
e.containmentArrows = [ x for x in e.containmentArrows if x.contained != inst]
def connect(self,container, contained):
"""Connects 2 instances together. Returns the containment arrow instance"""
assert(isinstance(container,entity.Instance)) # TODO, allow this function to connect 2 entities (but not 1 instance and 1 entity)
assert(isinstance(contained,entity.Instance))
ca = entity.ContainmentArrow(container,(0,0),contained,(0,0))
container.containmentArrows.append(ca)
contained.childOf.add(container)
return ca
def isProxyOf(self,proxy, proxied):
if proxy.data['csiType']==proxied.data['proxyCSI']:
#print 'same csi for proxied [%s]'%proxied.data['name']
for ca in proxy.containmentArrows:
#print 'ca of [%s]: container [%s]. contained [%s]' %(proxy.data['name'],ca.container.data['name'],ca.contained.data['name'])
if ca.contained.data['name']==proxied.data['name']:
#print 'proxied found'
return True
#print 'no proxied found'
return False
def generateSource(self,srcDir):
#print 'eneter generateSource'
output = common.FilesystemOutput()
#comps = filter(lambda entity: entity.et.name == 'Component' and entity.data['NonSafComponents']!='', self.entities.values()) # SA_Aware comp no proxied
#proxyComps = filter(lambda entity: entity.et.name == 'Component' and len(entity.data['NonSafComponents'])>0, self.entities.values())
comps = []
proxyComps = []
for c in filter(lambda entity: entity.et.name == 'Component',self.entities.values()):
noProxied = True
#print c.data['name']
#print 'in outer loop'
for nsc in filter(lambda entity: entity.et.name == 'NonSafComponent',self.entities.values()):
#print nsc.data['name']
if self.isProxyOf(c, nsc):
proxyComps.append(c)
noProxied = False
print 'found proxied. break'
break
#print 'continue inner loop'
if noProxied:
comps.append(c)
#print 'continue outer loop'
#if e.et.name == 'Component' and e.data.has_key('NonSafComponents') and len(e.data['NonSafComponents'])==0:
# comps.append(e)
#elif e.et.name == 'Component' and e.data.has_key('NonSafComponents') and len(e.data['NonSafComponents'])>0:
# proxyComps.append(e)
#print 'generateSource: %s' %str(proxyComps)
srcDir = os.sep.join([srcDir, "src"])
files = []
# Create Makefile
files += generate.topMakefile(output, srcDir,[c.data["name"] for c in comps+proxyComps])
for c in comps:
if os.path.exists(srcDir+os.sep+c.data['name']+os.sep+'proxyMain.cxx'):
#print 'model[%d]: We will delete = %s'%(sys._getframe().f_lineno, srcDir+os.sep+c.data['name']+os.sep+'proxymain.cxx')
os.popen('rm -rf '+srcDir+os.sep+c.data['name']+os.sep+'proxyMain.cxx')
files += generate.cpp(output, srcDir, c, c.data)
proxyFiles = []
for proxy in proxyComps:
if os.path.exists(srcDir+os.sep+proxy.data['name']+os.sep+'main.cxx'):
#print 'model[%d]: We will delete = %s'%(sys._getframe().f_lineno, srcDir+os.sep+proxy.data['name']+os.sep+'main.cxx')
os.popen('rm -rf '+srcDir+os.sep+proxy.data['name']+os.sep+'main.cxx')
proxyFiles += generate.cpp(output, srcDir, proxy, proxy.data, True)
# Delete unnecessary .cxx file
for folder in os.popen('ls '+srcDir).read().split():
if folder not in [c.data['name'] for c in comps+proxyComps] and folder != 'Makefile':
#print 'model[%d]: We will delete %s folder'%(sys._getframe().f_lineno, folder)
cmd = 'rm -rf ' + srcDir + os.sep + folder
os.popen('%s'%cmd)
return files,proxyFiles
def load(self, fileOrString):
"""Load an XML representation of the model"""
if fileOrString[0] != "<": # XML must begin with opener
self.filename = common.fileResolver(fileOrString)
with open(self.filename,"r") as f:
fileOrString = f.read()
dom = xml.dom.minidom.parseString(fileOrString)
self.data = microdom.LoadMiniDom(dom.childNodes[0])
self.loadModules()
self.loadDataInfomation()
return True
def getContainmemtArrowPos(self, ideEntities, container, contained):
name = container.data["name"]
containerTags = ideEntities.getElementsByTagName(name)
if not containerTags:
return ((0,0), (0,0), [])
containerTag = containerTags[0]
name = "containmentArrows"
caTags = containerTag.getElementsByTagName(name)
if caTags:
containedEntName = "_"+contained.data["name"]
for caTag in caTags:
t = caTag.getElementsByTagName(containedEntName)
if t:
t = t[0]
beginOffset = common.str2Tuple(t["beginOffset"].data_)
endOffset = common.str2Tuple(t["endOffset"].data_)
if t["midpoints"] and t["midpoints"].data_:
midPoints = [common.str2Tuple(t["midpoints"].data_)]
else:
midPoints = None
return (beginOffset, endOffset, midPoints)
return ((0,0), (0,0), [])
def makeUpAScreenPosition(self):
return (random.randint(0,800),random.randint(0,800))
def save(self, filename=None):
"""Save XML representation of the model"""
if filename is None: filename = self.filename
with open(filename,"w") as f:
f.write(self.xmlify())
def loadModules(self):
"""Load the modules specified in the model"""
for (path, obj) in self.data.find("modules"):
for module in obj.children(microdom.microdomFilter):
filename = module.data_.strip()
#print module.tag_, ": ", filename
if not os.path.dirname(filename) or len(os.path.dirname(filename).strip()) == 0:
filename = os.sep.join([os.path.dirname(self.filename), filename])
if not self.modules.has_key(filename): # really load it since it does not exist
tmp = self.modules[filename] = Module(filename)
self.entityTypes.update(tmp.entityTypes) # make the entity types easily accessible
for (typName,data) in tmp.ytypes.items():
self.dataTypes[typName] = data
# Set the entityType's context to this model so it can resolve referenced types, etc.
for (name,e) in self.entityTypes.items():
e.context = self
def loadModuleFromFile(self, moduleFile):
"""Load the modules specified in the model"""
if not self.modules.has_key(moduleFile): # really load it since it does not exist
tmp = self.modules[moduleFile] = Module(moduleFile)
self.entityTypes.update(tmp.entityTypes) # make the entity types easily accessible
for (typName,data) in tmp.ytypes.items():
self.dataTypes[typName] = data
# Set the entityType's context to this model so it can resolve referenced types, etc.
for (name,e) in self.entityTypes.items():
e.context = self
def defaultForType(self,typ):
"""Figure out a reasonable default for the passed type"""
ret = defaultForBuiltinType.get(typ,None) # Is the type a builtin?
if ret: return ret
# TODO: look in the model's type list for this type and figure out a default
return ""
def updateMicrodom(self):
"""Write the dynamically changing information back to the loaded microdom tree.
The reason I don't create an entirely new tree is to preserve any application extensions that might have been put into the file.
"""
# First, update the model to make sure that it is internally consistent
for (name,i) in self.instances.items():
for parent in filter(lambda ent: isinstance(ent, Entity), i.childOf): # If the object has parent pointers, update them. This is pretty specific to SAFplus data types...
fieldName = parent.et.name[0].lower() + parent.et.name[1:] # uncapitalize the first letter to make it use SAFplus bumpycase
if i.data.has_key(fieldName):
i.data[fieldName] = parent.data["name"]
# Locate or create the needed sections in the XML file
# find or create the entity area in the microdom
entities = self.data.getElementsByTagName("entities")
if not entities:
entities = microdom.MicroDom({"tag_":"entities"},[],[])
self.data.addChild(entities)
else:
assert(len(entities)==1)
entities = entities[0]
# Find or create the GUI area in the microdom. The GUI area is structured like:
# ide
# ide_entity_info
# ide_instance_info
ide = self.data.getElementsByTagName("ide")
if not ide:
ide = microdom.MicroDom({"tag_":"ide"},[],[])
self.data.addChild(ide)
else:
assert(len(ide)==1)
ide = ide[0]
ideEntities = ide.getElementsByTagName("ide_entity_info")
if not ideEntities:
ideEntities = microdom.MicroDom({"tag_":"ide_entity_info"},[],[])
ide.addChild(ideEntities)
else:
assert(len(ideEntities)==1)
ideEntities = ideEntities[0]
ideInsts = ide.getElementsByTagName("ide_instance_info")
if not ideInsts:
ideInsts = microdom.MicroDom({"tag_":"ide_instance_info"},[],[])
ide.addChild(ideInsts)
else:
assert(len(ideInsts)==1)
ideInsts = ideInsts[0]
# Write out the entities
# iterate through all entities writing them to the microdom, or changing the existing microdom
for (name,e) in self.entities.items():
# Find the existing DOM nodes for the entity information, creating the node if it is missing
entity = entities.findOneByChild("name",name)
if not entity:
entity = microdom.MicroDom({"tag_":e.et.name},[],[])
entities.addChild(entity)
ideEntity = ideEntities.getElementsByTagName(name)
if ideEntity: ideEntity = ideEntity[0]
else:
ideEntity = microdom.MicroDom({"tag_":name},[],[])
ideEntities.addChild(ideEntity)
# Remove all "None", replacing with the default or ""
temp = {}
for (key,val) in e.data.items():
if val is None:
val = e.et.data[key].get("default",None)
if val is None:
val = ""
if val == "None": val = ""
temp[key] = val
# Write all the data fields into the model's microdom
entity.update(temp)
# write the IDE specific information to the IDE area of the model xml
ideEntity["position"] = str(e.pos)
ideEntity["size"] = str(e.size)
# Now write all the arrows
contains = {} # Create a dictionary to hold all linkages by type
for arrow in e.containmentArrows:
# Add the contained object to the dictionary keyed off of the object's entitytype
tmp = contains.get(arrow.contained.et.name,[])
tmp.append(arrow.contained.data["name"])
contains[arrow.contained.et.name] = tmp
# TODO: write the containment arrow IDE specific information to the IDE area of the model xml
self.writeContainmentArrow(ideEntity, arrow)
# Now erase the missing linkages from the microdom
for (key, val) in self.entityTypes.items(): # Look through all the children for a key that corresponds to the name of an entityType (+ s), eg: "ServiceGroups"
if not contains.has_key(key): # Element is an entity type but no linkages
if entity.child_.has_key(key + 's'): entity.delChild(key + 's')
# Ok now write the linkages to the microdom
for (key, val) in contains.items():
k = key + "s"
if entity.child_.has_key(k): entity.delChild(k)
entity.addChild(microdom.MicroDom({"tag_":k},[",".join(val)],"")) # TODO: do we really need to pluralize? Also validate comma separation is ok
# Building instance lock fields
etType = ide.getElementsByTagName(e.et.name)
if not etType:
etType = microdom.MicroDom({"tag_":e.et.name},[],[])
ide.addChild(etType)
else:
assert(len(etType)==1)
etType = etType[0]
et = etType.getElementsByTagName(name)
if not et:
et = microdom.MicroDom({"tag_":name},[],[])
etType.addChild(et)
else:
assert(len(et)==1)
et = et[0]
et.update(e.instanceLocked)
# Find or create the instance area in the microdom
instances = self.data.getElementsByTagName("instances")
if not instances:
instances = microdom.MicroDom({"tag_":"instances"},[],[])
self.data.addChild(instances)
else:
assert(len(instances)==1)
instances = instances[0]
# iterate through all instances writing them to the microdom, or changing the existing microdom
for (name,e) in self.instances.items():
instance = instances.findOneByChild("name",name)
if not instance:
instance = microdom.MicroDom({"tag_":e.et.name},[],[])
instances.addChild(instance)
# Remove all "None", replacing with the default or ""
temp = {}
for (key,val) in e.data.items():
if val is None:
val = e.et.data[key].get("default",None)
if val is None:
val = ""
if val == "None": val = ""
temp[key] = val
# Add module and xpath attributes
# workaround for NonSafComponent: xpath for it is same as Component
expath = e.entity.et.data["xpath"]
idx = expath.rfind('NonSafComponent')
if idx != -1:
expath = expath[:idx]+'Component'
instance.addAttribute("xpath",expath + ("[@name=\"%s\"]" % e.data["name"]))
instance.addAttribute("module",e.entity.et.data["module"])
# Write all the data fields into the model's microdom
instance.update(temp)
# Now write all the arrows
contains = {} # Create a dictionary to hold all linkages by type
for arrow in e.containmentArrows:
# Add the contained object to the dictionary keyed off of the object's entitytype
# leaf-list entity type with camelCase(s)
key = arrow.contained.et.name[0].lower() + arrow.contained.et.name[1:] + 's'
tmp = contains.get(key,[])
tmp.append(arrow.contained.data["name"])
contains[key] = tmp
# Now erase the missing linkages from the microdom
for (key, val) in self.entityTypes.items(): # Look through all the children for a key that corresponds to the name of an entityType (+ s), eg: "serviceUnits"
key = key[0].lower() + key[1:] + 's'
if not contains.has_key(key): # Element is an entity type but no linkages
if instance.child_.has_key(key): instance.delChild(key)
# Ok now write the linkages to the microdom
for (key, vals) in contains.items():
if instance.child_.has_key(key): instance.delChild(key)
for val in vals:
instance.addChild(microdom.MicroDom({"tag_":key},[val],"")) # TODO: do we really need to pluralize? Also validate comma separation is ok
# Extra parent entity name
entityParentVal = e.entity.data["name"]
entityParentKey = "%sType"%e.et.name
if instance.child_.has_key(entityParentKey): instance.delChild(entityParentKey)
instance.addChild(microdom.MicroDom({"tag_":entityParentKey},[entityParentVal],""))
def createChild(self, parent, childName):
name = childName
childTag = parent.getElementsByTagName(name)
if childTag:
childTag = childTag[0]
else:
childTag = microdom.MicroDom({"tag_":name},[],[])
parent.addChild(childTag)
return childTag
def writeContainmentArrow(self, ideEntity, arrow):
# create <containmentArrows> inside <entity>
name = "containmentArrows"
caTag = self.createChild(ideEntity, name)
# create <containedEntity> inside <containmentArrows>
name = "_"+arrow.contained.data["name"]
containedEntTag = self.createChild(caTag, name)
t = containedEntTag
# create <beginOffset> inside <containedEntity>
name = "beginOffset"
t[name] = str(arrow.beginOffset)
# create <endOffset> inside <containedEntity>
name = "endOffset"
t[name] = str(arrow.endOffset)
# create <midpoints> inside <containedEntity>
name = "midpoints"
if arrow.midpoints:
t[name] = str(tuple(arrow.midpoints[0]))
else:
t[name] = ""
def duplicate(self,entities,recursive=False, flag=False):
"""Duplicate a set of entities or instances and potentially all of their children. The last argument 'flag' indicates that
this is the copy of ServiceUnit or ServiceInstance, otherwise, Component or ComponentServiceInstance. It is the one of
the criteria that childOf attribute of the entity/instance can be duplicated or not. Suppose that you want to copy
ServiceUnit or ServiceInstance, its parents have to be duplicated, of course, however, when its children is duplicated
(Component or ComponentServiceInstance), their parents cannot be duplicated. For example, supposing there is SU1->Comp1,
SG1->SU1, Node1->SU1 (Comp1 is a child of SU1; SU1 is a child of SG1 and Node1). SU1 is copied, suppose SU11 is a copy of SU1.
As a result, SU11 should be a child of SG1 and Node1 but Comp11 (is a copy of Comp1) should only be a child of SU11, NOT SU1.
But another example, if only Comp1 is copied and Comp11 is a copy of Comp1, so, its parent should be duplicated, too.
In this case, Comp11 should be a child of SU1, too (because Comp1 is copied inside SU1)
This fix (with a fix at duplicate() function in entity.py gains this purpose"""
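# Illustrative sketch of the docstring's example (assumes entities named "SU1", "Comp1",
# "SG1" and "Node1" exist in self.entities; the names are hypothetical):
#
#   (copies, extra) = model.duplicate([model.entities["SU1"]], recursive=True)
#   # copies[0] is e.g. "SU11": it keeps SG1 and Node1 as parents (dupChildOf=True for
#   # ServiceUnits), while the recursively created copy of Comp1 becomes a child of SU11
#   # only, because the nested call passes flag=True.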
ret = []
addtl = []
for e in entities:
name=entity.NameCreator(e.data["name"]) # Let's see if the instance is already here before we recreate it.
while True:
ei = self.instances.get(name,None)
if ei: # if this instance exists, try to get another name
name=entity.NameCreator(e.data['name'], name)
else:
break
if e.et.name in ('ServiceUnit', 'ServiceInstance'):
dupChildOf = True
else:
if flag:
dupChildOf = False
else:
dupChildOf = True
newEnt = e.duplicate(name, not recursive, dupChildOf=dupChildOf) # if we don't want recursive duplication, then dup the containment arrows.
if recursive: # otherwise dup the containment arrows and the objects they point to
for ca in e.containmentArrows:
(contained,xtra) = self.duplicate([ca.contained],recursive,flag=True)
assert(len(contained)==1) # It must be 1 because we only asked to duplicate one entity
contained = contained[0]
contained.childOf.add(newEnt)
cai = copy.copy(ca)
cai.container = newEnt
cai.contained = contained
newEnt.containmentArrows.append(cai)
addtl.append(contained)
addtl.append(xtra)
ret.append(newEnt)
if isinstance(newEnt,entity.Instance):
self.instances[name] = newEnt
elif isinstance(newEnt,entity.Entity):
self.entities[name] = newEnt
else:
assert(0)
return (ret,addtl)
def getInstantiatedNodes(self):
nodes = []
for (name, i) in self.instances.items():
if i.data['entityType'] == 'Node':
print 'append [%s] to the returned list'%name
nodes.append(i)
return nodes
def needInstantiate(self, ent):
if ent.data['entityType'] == 'ServiceUnit':
nodeList = self.getInstantiatedNodes()
for node in nodeList:
for ca in node.entity.containmentArrows:
if ent == ca.contained:
print '[%s] is the child of [%s]'%(ent.data['name'],node.entity.data['name'])
return True
else:
print '[%s] is NOT the child of [%s]'%(ent.data['name'], node.entity.data['name'])
return False
return True
def recursiveInstantiation(self,ent,instances=None, depth=1):
if not instances: instances = self.instances
children = []
if 1:
name=entity.NameCreator(ent.data["name"]) # Let's see if the instance is already here before we recreate it.
ei = instances.get(name,None)
if not ei:
print 'instantiating [%s]'%name
ei = entity.Instance(ent, None,pos=None,size=None,name=name)
instances[name] = ei
depth = depth + 1
# 2 ways recursive:
# 1. SG -> SI -> CSI
# 2. Node -> SU -> Component
if ent.et.name != "ComponentServiceInstance":
if depth<=MAX_RECURSIVE_INSTANTIATION_DEPTH:
for ca in ent.containmentArrows:
if ca.container.et.name == 'Component':
print 'skip creating instance which is a child (such as NonSafComponent) of Component'
continue
if not self.needInstantiate(ca.contained):
print 'skip instantiating [%s] because its SG or Node are not instantiated yet'%ca.contained.data['name']
continue
(ch, xtra) = self.recursiveInstantiation(ca.contained,instances, depth)
ch.childOf.add(ei)
cai = copy.copy(ca)
cai.container = ei
cai.contained = ch
ei.containmentArrows.append(cai)
print 'created arrow [%s-->%s] for [%s]'% (ei.data['name'],ch.data['name'], ei.data['name'])
children.append(ch)
else:
print 'model::recursiveInstantiation: do not create recursive instance for [%s], type [%s]' % (name, ent.et.name)
return (ei, instances)
def recursiveDuplicateInst(self,inst,instances=None, depth=1):
if not instances: instances = self.instances
if 1:
name=entity.NameCreator(inst.data["name"]) # Let's see if the instance is already here before we recreate it.
print 'model::recursiveAndDuplicateInst: new dup inst name = [%s]' % name
ei = instances.get(name,None)
if not ei:
ei = inst.duplicate(name)
instances[name] = ei
# add the SG and SU relationship
self.addContainmenArrow(inst, ei)
depth = depth + 1
if depth<=MAX_RECURSIVE_INSTANTIATION_DEPTH:
for ca in inst.containmentArrows:
print 'model::recursiveAndDuplicateInst: ca = [%s]' % ca.contained.data["name"]
ch = self.recursiveDuplicateInst(ca.contained,instances, depth)
print 'model::recursiveAndDuplicateInst: ch name = [%s]' % ch.data["name"]
for parent in ch.childOf:
if parent.data['entityType'] == "ServiceUnit" and ei.et.name == "ServiceUnit":
ch.childOf = set()
ch.childOf.add(ei)
cai = copy.copy(ca)
cai.container = ei
cai.contained = ch
ei.containmentArrows.append(cai)
return ei
def addContainmenArrow(self, inst, newinst):
if inst.et.name=="ServiceUnit":
newinst.childOf = set()
for e in filter(lambda entInt: entInt.et.name=="ServiceGroup", self.instances.values()):
for ca in e.containmentArrows:
if ca.contained==inst:
newinst.childOf.add(e)
e.createContainmentArrowTo(newinst)
def xmlify(self):
"""Returns an XML string that defines the IDE Model, for saving to disk"""
self.updateMicrodom()
return self.data.pretty()
def UnitTest(m=None):
"""This unit test relies on a particular model configuration, located in testModel.xml"""
import entity
if not m:
m = Model()
m.load("testModel.xml")
appt = m.entityTypes["Application"]
app = m.entities["app"] = entity.Entity(appt,(0,0),(100,20))
sgt = m.entityTypes["ServiceGroup"]
sg = m.entities["sg"] = entity.Entity(sgt,(0,0),(100,20))
if not app.canContain(sg):
raise Exception("Test failed")
if sg.canContain(app):
raise Exception("Test failed")
if not sg.canBeContained(app):
raise Exception("Test failed")
if app.canBeContained(sg):
raise Exception("Test failed")
# Now hook the sg up and then test it again
app.containmentArrows.append(entity.ContainmentArrow(app,(0,0),sg,(0,0)))
app2 = m.entities["app2"] = entity.Entity(appt,(0,0),(100,20))
if not sg.canBeContained(app):
raise Exception("Test failed: should return true because sg is contained in app")
if sg.canBeContained(app2):
raise Exception("Test failed: should return false because sg can only be contained in one app")
m.entities["appSG"].createInstance((0,0),(100,40),"sg0")
m.instances[sg0.data["name"]] = sg0
def Test():
import pdb
m = Model()
m.load("testModel.xml")
for (path, obj) in m.data.find("modules"):
for module in obj.children(lambda(x): x if (type(x) is types.InstanceType and x.__class__ is microdom.MicroDom) else None):
print module.tag_, ": ", module.data_
print m.entityTypes.keys()
# pdb.set_trace()
#sg0 = m.entities["appSG"].createInstance((0,0),(100,40),"sg0")
(sg,instances) = m.recursiveInstantiation(m.entities["appSG"])
instances["app1"].data["instantiate"]["command"] = "./app1 param"
node = m.entities["SC"].createInstance((0,0),(100,40),False,"sc0")
su = instances["ServiceUnit11"]
m.instances.update(instances)
m.instances[node.data["name"]] = node
m.connect(node,su)
# m.instances[sg0.data["name"]] = sg0
#1. Build flatten entity instance
#2. Build relation ship between instances
# Load instances
#print instances
# UnitTest(m)
m.save("test.xml")
return m
theModel = None
def TestRender(ctx):
posx = 10
posy = 10
for et in theModel.entityTypes.items():
bmp = et[1].iconSvg.instantiate((256,128))
svg.blit(ctx,bmp,(posx,posy),(1,1))
posx += 300
if posx > 900:
posx = 10
posy += 150
def GuiTest():
global theModel
theModel = Test()
import pyGuiWrapper as gui
gui.go(lambda x,y=TestRender: gui.Panel(x,y))
| OpenClovis/SAFplus-Availability-Scalability-Platform | src/ide/model.py | Python | gpl-2.0 | 38,478 |
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
# Global settings used by all scripts in this dir.
# XXX Before any use of the tools in this dir, please make a copy of this file
# named "setting.py"
# XXX This is a template, most values should be OK, but some you’ll have to
# edit (most probably, BLENDER_EXEC and SOURCE_DIR).
import json
import os
import sys
import bpy
###############################################################################
# MISC
###############################################################################
# The languages defined in Blender.
LANGUAGES_CATEGORIES = (
# Min completeness level, UI english label.
( 0.95, "Complete"),
( 0.33, "In Progress"),
( -1.0, "Starting"),
)
LANGUAGES = (
# ID, UI english label, ISO code.
( 0, "Default (Default)", "DEFAULT"),
( 1, "English (English)", "en_US"),
( 2, "Japanese (日本語)", "ja_JP"),
( 3, "Dutch (Nederlandse taal)", "nl_NL"),
( 4, "Italian (Italiano)", "it_IT"),
( 5, "German (Deutsch)", "de_DE"),
( 6, "Finnish (Suomi)", "fi_FI"),
( 7, "Swedish (Svenska)", "sv_SE"),
( 8, "French (Français)", "fr_FR"),
( 9, "Spanish (Español)", "es"),
(10, "Catalan (Català)", "ca_AD"),
(11, "Czech (Český)", "cs_CZ"),
(12, "Portuguese (Português)", "pt_PT"),
(13, "Simplified Chinese (简体中文)", "zh_CN"),
(14, "Traditional Chinese (繁體中文)", "zh_TW"),
(15, "Russian (Русский)", "ru_RU"),
(16, "Croatian (Hrvatski)", "hr_HR"),
(17, "Serbian (Српски)", "sr_RS"),
(18, "Ukrainian (Український)", "uk_UA"),
(19, "Polish (Polski)", "pl_PL"),
(20, "Romanian (Român)", "ro_RO"),
# Using the utf8 flipped form of Arabic (العربية).
(21, "Arabic (ﺔﻴﺑﺮﻌﻟﺍ)", "ar_EG"),
(22, "Bulgarian (Български)", "bg_BG"),
(23, "Greek (Ελληνικά)", "el_GR"),
(24, "Korean (한국 언어)", "ko_KR"),
(25, "Nepali (नेपाली)", "ne_NP"),
# Using the utf8 flipped form of Persian (فارسی).
(26, "Persian (ﯽﺳﺭﺎﻓ)", "fa_IR"),
(27, "Indonesian (Bahasa indonesia)", "id_ID"),
(28, "Serbian Latin (Srpski latinica)", "sr_RS@latin"),
(29, "Kyrgyz (Кыргыз тили)", "ky_KG"),
(30, "Turkish (Türkçe)", "tr_TR"),
(31, "Hungarian (Magyar)", "hu_HU"),
(32, "Brazilian Portuguese (Português do Brasil)", "pt_BR"),
# Using the utf8 flipped form of Hebrew (עִבְרִית)).
(33, "Hebrew (תירִבְעִ)", "he_IL"),
(34, "Estonian (Eestlane)", "et_EE"),
(35, "Esperanto (Esperanto)", "eo"),
(36, "Spanish from Spain (Español de España)", "es_ES"),
(37, "Amharic (አማርኛ)", "am_ET"),
(38, "Uzbek (Oʻzbek)", "uz_UZ"),
(39, "Uzbek Cyrillic (Ўзбек)", "uz_UZ@cyrillic"),
(40, "Hindi (मानक हिन्दी)", "hi_IN"),
)
# Default context, in py!
DEFAULT_CONTEXT = bpy.app.translations.contexts.default
# Name of language file used by Blender to generate translations' menu.
LANGUAGES_FILE = "languages"
# The min level of completeness for a po file to be imported from /branches into /trunk, as a percentage.
IMPORT_MIN_LEVEL = 0.0
# Languages in /branches we do not want to import in /trunk currently...
IMPORT_LANGUAGES_SKIP = {
'am_ET', 'bg_BG', 'fi_FI', 'el_GR', 'et_EE', 'ne_NP', 'pl_PL', 'ro_RO', 'uz_UZ', 'uz_UZ@cyrillic',
}
# Languages that need RTL pre-processing.
IMPORT_LANGUAGES_RTL = {
'ar_EG', 'fa_IR', 'he_IL',
}
# The comment prefix used in generated messages.txt file.
MSG_COMMENT_PREFIX = "#~ "
# The context prefix used in generated messages.txt file.
MSG_CONTEXT_PREFIX = "MSGCTXT:"
# The default comment prefix used in po's.
PO_COMMENT_PREFIX= "# "
# The comment prefix used to mark sources of msgids, in po's.
PO_COMMENT_PREFIX_SOURCE = "#: "
# The comment prefix used to mark custom sources of msgids, in po's.
PO_COMMENT_PREFIX_SOURCE_CUSTOM = "#. :src: "
# The general "generated" comment prefix, in po's.
PO_COMMENT_PREFIX_GENERATED = "#. "
# The comment prefix used to comment entries in po's.
PO_COMMENT_PREFIX_MSG= "#~ "
# The comment prefix used to mark fuzzy msgids, in po's.
PO_COMMENT_FUZZY = "#, fuzzy"
# The prefix used to define context, in po's.
PO_MSGCTXT = "msgctxt "
# The prefix used to define msgid, in po's.
PO_MSGID = "msgid "
# The prefix used to define msgstr, in po's.
PO_MSGSTR = "msgstr "
# The 'header' key of po files.
PO_HEADER_KEY = (DEFAULT_CONTEXT, "")
PO_HEADER_MSGSTR = (
"Project-Id-Version: {blender_ver} ({blender_hash})\\n\n"
"Report-Msgid-Bugs-To: \\n\n"
"POT-Creation-Date: {time}\\n\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n\n"
"Language-Team: LANGUAGE <[email protected]>\\n\n"
"Language: {uid}\\n\n"
"MIME-Version: 1.0\\n\n"
"Content-Type: text/plain; charset=UTF-8\\n\n"
"Content-Transfer-Encoding: 8bit\n"
)
PO_HEADER_COMMENT_COPYRIGHT = (
"# Blender's translation file (po format).\n"
"# Copyright (C) {year} The Blender Foundation.\n"
"# This file is distributed under the same license as the Blender package.\n"
"#\n"
)
PO_HEADER_COMMENT = (
"# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.\n"
"#"
)
TEMPLATE_ISO_ID = "__TEMPLATE__"
# Num buttons report their label with a trailing ': '...
NUM_BUTTON_SUFFIX = ": "
# Undocumented operator placeholder string.
UNDOC_OPS_STR = "(undocumented operator)"
# The gettext domain.
DOMAIN = "blender"
# Our own "gettext" stuff.
# File type (ext) to parse.
PYGETTEXT_ALLOWED_EXTS = {".c", ".cpp", ".cxx", ".hpp", ".hxx", ".h"}
# Max number of contexts into a BLF_I18N_MSGID_MULTI_CTXT macro...
PYGETTEXT_MAX_MULTI_CTXT = 16
# Where to search contexts definitions, relative to SOURCE_DIR (defined below).
PYGETTEXT_CONTEXTS_DEFSRC = os.path.join("source", "blender", "blenfont", "BLF_translation.h")
# Regex to extract contexts defined in BLF_translation.h
# XXX Not fool-proof, but should be enough here!
PYGETTEXT_CONTEXTS = "#define\\s+(BLF_I18NCONTEXT_[A-Z_0-9]+)\\s+\"([^\"]*)\""
# Keywords' regex.
# XXX Most unfortunately, we can't use named backreferences inside character sets,
# which makes the regexes even more twisty... :/
_str_base = (
# Match void string
"(?P<{_}1>[\"'])(?P={_}1)" # Get opening quote (' or "), and closing immediately.
"|"
# Or match non-void string
"(?P<{_}2>[\"'])" # Get opening quote (' or ").
"(?{capt}(?:"
# This one is for crazy things like "hi \\\\\" folks!"...
r"(?:(?!<\\)(?:\\\\)*\\(?=(?P={_}2)))|"
# The most common case.
".(?!(?P={_}2))"
")+.)" # Don't forget the last char!
"(?P={_}2)" # And closing quote.
)
str_clean_re = _str_base.format(_="g", capt="P<clean>")
_inbetween_str_re = (
# XXX Strings may have comments between their pieces too, not only spaces!
r"(?:\s*(?:"
# A C comment
r"/\*.*(?!\*/).\*/|"
# Or a C++ one!
r"//[^\n]*\n"
# And we are done!
r")?)*"
)
# Here we have to consider two different cases (empty string and other).
_str_whole_re = (
_str_base.format(_="{_}1_", capt=":") +
# Optional loop start, this handles "split" strings...
"(?:(?<=[\"'])" + _inbetween_str_re + "(?=[\"'])(?:"
+ _str_base.format(_="{_}2_", capt=":") +
# End of loop.
"))*"
)
_ctxt_re_gen = lambda uid : r"(?P<ctxt_raw{uid}>(?:".format(uid=uid) + \
_str_whole_re.format(_="_ctxt{uid}".format(uid=uid)) + \
r")|(?:[A-Z_0-9]+))"
_ctxt_re = _ctxt_re_gen("")
_msg_re = r"(?P<msg_raw>" + _str_whole_re.format(_="_msg") + r")"
PYGETTEXT_KEYWORDS = (() +
tuple((r"{}\(\s*" + _msg_re + r"\s*\)").format(it)
for it in ("IFACE_", "TIP_", "DATA_", "N_")) +
tuple((r"{}\(\s*" + _ctxt_re + r"\s*,\s*" + _msg_re + r"\s*\)").format(it)
for it in ("CTX_IFACE_", "CTX_TIP_", "CTX_DATA_", "CTX_N_")) +
tuple(("{}\\((?:[^\"',]+,){{1,2}}\\s*" + _msg_re + r"\s*(?:\)|,)").format(it)
for it in ("BKE_report", "BKE_reportf", "BKE_reports_prepend", "BKE_reports_prependf",
"CTX_wm_operator_poll_msg_set")) +
tuple(("{}\\((?:[^\"',]+,){{3}}\\s*" + _msg_re + r"\s*\)").format(it)
for it in ("BMO_error_raise",)) +
tuple(("{}\\((?:[^\"',]+,)\\s*" + _msg_re + r"\s*(?:\)|,)").format(it)
for it in ("modifier_setError",)) +
tuple((r"{}\(\s*" + _msg_re + r"\s*,\s*(?:" +
r"\s*,\s*)?(?:".join(_ctxt_re_gen(i) for i in range(PYGETTEXT_MAX_MULTI_CTXT)) + r")?\s*\)").format(it)
for it in ("BLF_I18N_MSGID_MULTI_CTXT",))
)
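# A sketch of what the generated keyword regexes are meant to match in C/C++ sources
# (the macro names come from the tuples above; the context identifier is just an example):
#
#   IFACE_("Panel Title")                        -> msgid "Panel Title", default context
#   CTX_IFACE_(BLF_I18NCONTEXT_ID_MESH, "Add")   -> msgid "Add", context taken from the macro
#   BKE_reportf(reports, RPT_ERROR, "Cannot open file %s", path) -> msgid "Cannot open file %s"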
# Check printf mismatches between msgid and msgstr.
CHECK_PRINTF_FORMAT = (
r"(?!<%)(?:%%)*%" # Begining, with handling for crazy things like '%%%%%s'
r"[-+#0]?" # Flags (note: do not add the ' ' (space) flag here, generates too much false positives!)
r"(?:\*|[0-9]+)?" # Width
r"(?:\.(?:\*|[0-9]+))?" # Precision
r"(?:[hljztL]|hh|ll)?" # Length
r"[tldiuoxXfFeEgGaAcspn]" # Specifiers (note we have Blender-specific %t and %l ones too)
)
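# Example of the intended use of CHECK_PRINTF_FORMAT (a sketch, not used by this settings
# module itself): the po checker can compare the %-specifiers found in msgid and msgstr.
#
#   import re
#   def _printf_specs(s):
#       return re.findall(CHECK_PRINTF_FORMAT, s)
#   # _printf_specs("Frame %d of %s") == ['%d', '%s']; a msgstr yielding a different
#   # list would be reported as a printf-format mismatch.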
# Should po parser warn when finding a first letter not capitalized?
WARN_MSGID_NOT_CAPITALIZED = True
# Strings that should not raise above warning!
WARN_MSGID_NOT_CAPITALIZED_ALLOWED = {
"", # Simplifies things... :p
"ac3",
"along X",
"along Y",
"along Z",
"along %s X",
"along %s Y",
"along %s Z",
"along local Z",
"ascii",
"author", # Addons' field. :/
"bItasc",
"description", # Addons' field. :/
"dx",
"fBM",
"flac",
"fps: %.2f",
"fps: %i",
"fStop",
"gimbal",
"global",
"iScale",
"iso-8859-15",
"iTaSC",
"iTaSC parameters",
"kb",
"local",
"location", # Addons' field. :/
"locking %s X",
"locking %s Y",
"locking %s Z",
"mkv",
"mm",
"mp2",
"mp3",
"normal",
"ogg",
"p0",
"px",
"re",
"res",
"rv",
"sin(x) / x",
"sqrt(x*x+y*y+z*z)",
"sRGB",
"utf-8",
"var",
"vBVH",
"view",
"wav",
"y",
# Sub-strings.
"available with",
"can't save image while rendering",
"expected a timeline/animation area to be active",
"expected a view3d region",
"expected a view3d region & editcurve",
"expected a view3d region & editmesh",
"image file not found",
"image path can't be written to",
"in memory to enable editing!",
"unable to load movie clip",
"unable to load text",
"unable to open the file",
"unknown error reading file",
"unknown error stating file",
"unknown error writing file",
"unsupported font format",
"unsupported format",
"unsupported image format",
"unsupported movie clip format",
"verts only",
"virtual parents",
}
WARN_MSGID_NOT_CAPITALIZED_ALLOWED |= set(lng[2] for lng in LANGUAGES)
WARN_MSGID_END_POINT_ALLOWED = {
"Numpad .",
"Circle|Alt .",
"Temp. Diff.",
"Float Neg. Exp.",
" RNA Path: bpy.types.",
"Max Ext.",
}
PARSER_CACHE_HASH = 'sha1'
PARSER_TEMPLATE_ID = "__POT__"
PARSER_PY_ID = "__PY__"
PARSER_PY_MARKER_BEGIN = "\n# ##### BEGIN AUTOGENERATED I18N SECTION #####\n"
PARSER_PY_MARKER_END = "\n# ##### END AUTOGENERATED I18N SECTION #####\n"
PARSER_MAX_FILE_SIZE = 2 ** 24 # in bytes, i.e. 16 Mb.
###############################################################################
# PATHS
###############################################################################
# The Python3 executable. You’ll likely have to edit it in your user_settings.py
# if you’re under Windows.
PYTHON3_EXEC = "python3"
# The Blender executable!
# This is just an example, you’ll have to edit it in your user_settings.py!
BLENDER_EXEC = os.path.abspath(os.path.join("foo", "bar", "blender"))
# check for blender.bin
if not os.path.exists(BLENDER_EXEC):
if os.path.exists(BLENDER_EXEC + ".bin"):
BLENDER_EXEC = BLENDER_EXEC + ".bin"
# The gettext msgfmt "compiler". You’ll likely have to edit it in your user_settings.py if you’re under Windows.
GETTEXT_MSGFMT_EXECUTABLE = "msgfmt"
# The FriBidi C compiled library (.so under Linux, .dll under windows...).
# You’ll likely have to edit it in your user_settings.py if you’re under Windows., e.g. using the included one:
# FRIBIDI_LIB = os.path.join(TOOLS_DIR, "libfribidi.dll")
FRIBIDI_LIB = "libfribidi.so.0"
# The name of the (currently empty) file that must be present in a po's directory to enable rtl-preprocess.
RTL_PREPROCESS_FILE = "is_rtl"
# The Blender source root path.
# This is just an example, you’ll have to override it in your user_settings.py!
SOURCE_DIR = os.path.abspath(os.path.join("blender"))
# The bf-translation repository (you'll have to override this in your user_settings.py).
I18N_DIR = os.path.abspath(os.path.join("i18n"))
# The /branches path (relative to I18N_DIR).
REL_BRANCHES_DIR = os.path.join("branches")
# The /trunk path (relative to I18N_DIR).
REL_TRUNK_DIR = os.path.join("trunk")
# The /trunk/po path (relative to I18N_DIR).
REL_TRUNK_PO_DIR = os.path.join(REL_TRUNK_DIR, "po")
# The /trunk/mo path (relative to I18N_DIR).
REL_TRUNK_MO_DIR = os.path.join(REL_TRUNK_DIR, "locale")
# The Blender source path to check for i18n macros (relative to SOURCE_DIR).
REL_POTFILES_SOURCE_DIR = os.path.join("source")
# The template messages file (relative to I18N_DIR).
REL_FILE_NAME_POT = os.path.join(REL_BRANCHES_DIR, DOMAIN + ".pot")
# Mo root datapath.
REL_MO_PATH_ROOT = os.path.join(REL_TRUNK_DIR, "locale")
# Mo path generator for a given language.
REL_MO_PATH_TEMPLATE = os.path.join(REL_MO_PATH_ROOT, "{}", "LC_MESSAGES")
# Mo path generator for a given language (relative to any "locale" dir).
MO_PATH_ROOT_RELATIVE = os.path.join("locale")
MO_PATH_TEMPLATE_RELATIVE = os.path.join(MO_PATH_ROOT_RELATIVE, "{}", "LC_MESSAGES")
# Mo file name.
MO_FILE_NAME = DOMAIN + ".mo"
# Where to search for py files that may contain ui strings (relative to one of the 'resource_path' of Blender).
CUSTOM_PY_UI_FILES = [
os.path.join("scripts", "startup", "bl_ui"),
os.path.join("scripts", "modules", "rna_prop_ui.py"),
]
# An optional text file listing files to force include/exclude from py_xgettext process.
SRC_POTFILES = ""
# A cache storing validated msgids, to avoid re-spellchecking them.
SPELL_CACHE = os.path.join("/tmp", ".spell_cache")
# Threshold defining whether a new msgid is similar enough to an old one to reuse its translation...
SIMILAR_MSGID_THRESHOLD = 0.75
# Additional import paths to add to sys.path (';' separated)...
INTERN_PY_SYS_PATHS = ""
# Custom override settings must be one dir above i18n tools itself!
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
try:
from bl_i18n_settings_override import *
except ImportError: # If no i18n_override_settings available, it’s no error!
pass
# Override with custom user settings, if available.
try:
from settings_user import *
except ImportError: # If no user_settings available, it’s no error!
pass
for p in set(INTERN_PY_SYS_PATHS.split(";")):
if p:
sys.path.append(p)
# The settings class itself!
def _do_get(ref, path):
return os.path.normpath(os.path.join(ref, path))
def _do_set(ref, path):
path = os.path.normpath(path)
# If given path is absolute, make it relative to current ref one (else we consider it is already the case!)
if os.path.isabs(path):
# can't always find the relative path (between drive letters on windows)
try:
return os.path.relpath(path, ref)
except ValueError:
pass
return path
def _gen_get_set_path(ref, name):
def _get(self):
return _do_get(getattr(self, ref), getattr(self, name))
def _set(self, value):
setattr(self, name, _do_set(getattr(self, ref), value))
return _get, _set
def _gen_get_set_paths(ref, name):
def _get(self):
return [_do_get(getattr(self, ref), p) for p in getattr(self, name)]
def _set(self, value):
setattr(self, name, [_do_set(getattr(self, ref), p) for p in value])
return _get, _set
class I18nSettings:
"""
Class allowing persistence of our settings!
Saved in JSon format, so settings should be JSon'able objects!
"""
_settings = None
def __new__(cls, *args, **kwargs):
# Addon preferences are singleton by definition, so is this class!
if not I18nSettings._settings:
cls._settings = super(I18nSettings, cls).__new__(cls)
cls._settings.__dict__ = {uid: data for uid, data in globals().items() if not uid.startswith("_")}
return I18nSettings._settings
def from_json(self, string):
data = dict(json.loads(string))
# Special case... :/
if "INTERN_PY_SYS_PATHS" in data:
self.PY_SYS_PATHS = data["INTERN_PY_SYS_PATHS"]
self.__dict__.update(data)
def to_json(self):
# Only save the diff from default i18n_settings!
glob = globals()
export_dict = {uid: val for uid, val in self.__dict__.items() if glob.get(uid) != val}
return json.dumps(export_dict)
def load(self, fname, reset=False):
if reset:
self.__dict__ = {uid: data for uid, data in globals().items() if not uid.startswith("_")}
if isinstance(fname, str):
if not os.path.isfile(fname):
return
with open(fname) as f:
self.from_json(f.read())
# Else assume fname is already a file(like) object!
else:
self.from_json(fname.read())
def save(self, fname):
if isinstance(fname, str):
with open(fname, 'w') as f:
f.write(self.to_json())
# Else assume fname is already a file(like) object!
else:
fname.write(self.to_json())
BRANCHES_DIR = property(*(_gen_get_set_path("I18N_DIR", "REL_BRANCHES_DIR")))
TRUNK_DIR = property(*(_gen_get_set_path("I18N_DIR", "REL_TRUNK_DIR")))
TRUNK_PO_DIR = property(*(_gen_get_set_path("I18N_DIR", "REL_TRUNK_PO_DIR")))
TRUNK_MO_DIR = property(*(_gen_get_set_path("I18N_DIR", "REL_TRUNK_MO_DIR")))
POTFILES_SOURCE_DIR = property(*(_gen_get_set_path("SOURCE_DIR", "REL_POTFILES_SOURCE_DIR")))
FILE_NAME_POT = property(*(_gen_get_set_path("I18N_DIR", "REL_FILE_NAME_POT")))
MO_PATH_ROOT = property(*(_gen_get_set_path("I18N_DIR", "REL_MO_PATH_ROOT")))
MO_PATH_TEMPLATE = property(*(_gen_get_set_path("I18N_DIR", "REL_MO_PATH_TEMPLATE")))
def _get_py_sys_paths(self):
return self.INTERN_PY_SYS_PATHS
def _set_py_sys_paths(self, val):
old_paths = set(self.INTERN_PY_SYS_PATHS.split(";")) - {""}
new_paths = set(val.split(";")) - {""}
for p in old_paths - new_paths:
if p in sys.path:
sys.path.remove(p)
for p in new_paths - old_paths:
sys.path.append(p)
self.INTERN_PY_SYS_PATHS = val
PY_SYS_PATHS = property(_get_py_sys_paths, _set_py_sys_paths)
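# Typical usage of I18nSettings (a hedged sketch; the JSON file name is a placeholder):
#
#   settings = I18nSettings()                     # singleton, initialised from the module-level defaults
#   settings.load("i18n_settings.json")           # apply a previously saved JSON diff, if the file exists
#   settings.SOURCE_DIR = "/path/to/blender-src"  # REL_* paths then resolve against the new root
#   settings.save("i18n_settings.json")           # persists only the values that differ from the defaults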
| Gamebasis/3DGamebasisServer | GameData/blender-2.71-windows64/2.71/scripts/modules/bl_i18n_utils/settings.py | Python | gpl-3.0 | 20,044 |
from NativDebugging.Patterns.PE import *
from NativDebugging.Win32.MemoryReader import *
from NativDebugging.Patterns.Finder import *
from NativDebugging.Win32.InjectDll import *
import codecs
import os
import sys
import time
NtDLLCopy = os.path.abspath('.') + '\\NtCopy.dll'
targetPid = int(sys.argv[1])
print("Attaching memory reader")
m = attach(targetPid)
p = PatternFinder(m)
if not os.path.isfile(NtDLLCopy):
ntdllPath = m.getModulePath('ntdll.dll')
print("Copying %s -> %s" % (ntdllPath, NtDLLCopy))
open(NtDLLCopy, 'wb').write(open(ntdllPath, 'rb').read())
print("Injecting %s to %d" % (NtDLLCopy, targetPid))
inject(targetPid, NtDLLCopy)
# Give the module some time to load
time.sleep(1)
print("Finding NtCopy.dll")
ntCopyAddr = m.findModule('NtCopy.dll')
ntCopyRVA = m.findRVA(ntCopyAddr)
print("NtCopy.dll loaded at 0x%x" % ntCopyAddr)
print("Parsing exports of NtCopy.dll")
ntCopyImg = next(p.search(ImageDosHeader, ntCopyAddr))
exportsDir = ntCopyImg.PE.OptionalHeader.ExportDir.VirtualAddress + ntCopyAddr
exportsInfo = next(p.search(ImageExportDirectory, exportsDir))
exports = []
numProcs = exportsInfo.NumberOfFunctions
numNames = exportsInfo.NumberOfNames
names = exportsInfo.NamesAddress + ntCopyAddr
ordinals = exportsInfo.NameOrdinalsAddress + ntCopyAddr
procs = exportsInfo.FunctionsAddress + ntCopyAddr
for i in range(numProcs):
ordinal = m.readUInt16(ordinals + (i * 2))
if i < numNames:
name = m.readAddr(names + (i * m.getPointerSize()))
if 0 != name:
name = m.readString(ntCopyAddr + name)
else:
name = ""
else:
name = ""
proc = m.readAddr(procs + (ordinal * m.getPointerSize())) + ntCopyAddr
exports.append((name, ordinal, proc))
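# Each entry in 'exports' is a (name, ordinal, absolute address) tuple, e.g. (values are
# hypothetical): ('NtOpenFile', 0x123, 0x7ff801230000). fixImports() below resolves import
# names against this list.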
def fixImports(importsAddr, importsTableSize, dllAddr):
for offset in range(0, importsTableSize, 0x14):
name = m.readAddr(importsAddr + offset + dllAddr + 0xc)
dllName = m.readString(dllAddr + name)
if dllName.startswith("ntdll"):
#print("Found imports from ntdll")
namesTable = m.readAddr(importsAddr + dllAddr + offset) + dllAddr
ptrsTable = m.readAddr(importsAddr + dllAddr + offset + 0x10) + dllAddr
print("Ptrs table: 0x%x Names table: 0x%x" % (ptrsTable, namesTable))
procPtr = m.readAddr(ptrsTable)
while 0 != procPtr:
procName = m.readString(m.readAddr(namesTable) + dllAddr + 2)
if len(procName) >= 2:
newProcAddr = None
for export in exports:
if export[0] == procName:
newProcAddr = export[2]
break
if None == newProcAddr:
raise Exception("Can't find %s in ntCopy.dll" % procName)
ntdllBytes = m.readMemory(procPtr, 3)
copyBytes = m.readMemory(newProcAddr, 3)
if ntdllBytes != copyBytes:
if copyBytes[0] == '\xb8':
print("Patch %s -> %s fixing from 0x%x to 0x%x name: %s (@%x)" % (\
codecs.encode(ntdllBytes, 'hex'), \
codecs.encode(copyBytes, 'hex'), \
procPtr, \
newProcAddr, \
procName, \
ptrsTable))
m.writeAddr(ptrsTable, newProcAddr)
else:
print("Patch %s -> %s not fixing from 0x%x to 0x%x name: %s" % (codecs.encode(ntdllBytes, 'hex'), codecs.encode(copyBytes, 'hex'), procPtr, newProcAddr, procName))
namesTable += m.getPointerSize()
ptrsTable += m.getPointerSize()
procPtr = m.readAddr(ptrsTable)
for base, dllName, dllSize in m.enumModules():
if dllName.startswith('NtCopy.dll'):
continue
print("Scanning 0x%x (%s)" % (base, dllName))
img = next(p.search(ImageDosHeader, base))
importsAddr = img.PE.OptionalHeader.ImportDir.VirtualAddress
importsTableSize = img.PE.OptionalHeader.ImportDir.Size
#print("Imports table address 0x%x" % importsAddr)
#print("Imports table size 0x%x" % importsTableSize)
if 0 != (importsTableSize % 0x14):
print("Invalid import dir size %s (%d)" % (dllName, importsTableSize))
continue
fixImports(importsAddr, importsTableSize, base)
| assafnativ/NativDebugging | samples/UserlandRootkitRemover.py | Python | gpl-3.0 | 4,674 |
""" leng - высота цифр, s - число, цифры которого надо напечатать
"""
from time import sleep
leng=int(input())
s=input()
import turtle
t=turtle.Turtle()
t.shape("turtle")
t.color("darkgreen","yellow")
t.shapesize(1)
t.speed(10)
t.penup()
t.backward(300)
def digit(a,b):
"""
Drawing of each digit of the number starts
from the upper-right corner point of the digit
"""
t.penup()
t.fd(2*l)
t.pendown()
for x,k in zip(a,b):
t.left(k)
t.fd(x)
a.reverse()
b.reverse()
for x,k in zip(a,b):
t.backward(x)
t.right(k)
t.penup()
a.reverse()
b.reverse()
l=leng//2
m=l*2**(0.5)
"""
The lists define the traversal sequence for each digit
(the step length and the turn angle at each corner point)
"""
a0=[l,l,l,l,l,l]
b0=[180,90,0,90,90,0]
a1=[m,m,l,l]
b1=[225,180,225,0]
a2=[l,l,l,m,l]
b2=[180,180,270,315,135]
a3=[l,l,m,l,m]
b3=[180,180,225,135,225]
a4=[l,l,l,l,l]
b4=[270,0,180,90,270]
a5=[l,l,l,l,l]
b5=[180,90,90,270,270]
a6=[m,l,l,l,l]
b6=[225,45,90,90,90]
a7=[l,l,m,l]
b7=[180,180,225,45]
a8=[l,l,l,l,l,l,l,l]
b8=[180,90,90,270,270,270,270,90]
a9=[l,l,l,l,l,m]
b9=[180,90,90,90,180,315]
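# For example, digit(a0, b0) traces a "0": after the 2*l gap the turtle turns 180 degrees,
# walks l back along the top edge, then follows the left, bottom and right sides of an
# l x 2*l rectangle back to the start point; the reverse pass then retraces the outline so
# the turtle ends with its original heading, ready for the next digit.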
"""
The digits of the number are printed: each character of the
string is extracted, converted to an integer and then drawn
with the digit() function
"""
for i in range(len(s)):
p=int(s[i])
if p==0:
digit(a0,b0)
elif p==1:
digit(a1,b1)
elif p==2:
digit(a2,b2)
elif p==3:
digit(a3,b3)
elif p==4:
digit(a4,b4)
elif p==5:
digit(a5,b5)
elif p==6:
digit(a6,b6)
elif p==7:
digit(a7,b7)
elif p==8:
digit(a8,b8)
elif p==9:
digit(a9,b9)
t.hideturtle()
sleep(30) | zernukovag/fox2016 | task_3.py | Python | gpl-3.0 | 2,004 |
# -*- mode: python; indent-tabs-mode: nil -*-
# Part of mlat-server: a Mode S multilateration server
# Copyright (C) 2015 Oliver Jowett <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Clock normalization routines.
"""
import pygraph.classes.graph
import pygraph.algorithms.minmax
from mlat import profile
class _Predictor(object):
"""Simple object for holding prediction state"""
def __init__(self, predict, variance):
self.predict = predict
self.variance = variance
def _identity_predict(x):
return x
def _make_predictors(clocktracker, station0, station1):
"""Return a tuple of predictors (p_01, p_10) where:
p_01 will predict a station1 timestamp given a station0 timestamp
p_10 will predict a station0 timestamp given a station1 timestamp
Returns None if no suitable clock sync model is available for
this pair of stations.
"""
if station0 is station1:
return None
if station0.clock.epoch is not None and station0.clock.epoch == station1.clock.epoch:
# Assume clocks are closely synchronized to the epoch (and therefore to each other)
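        # the combined variance is the sum of the two independent jitter variances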
predictor = _Predictor(_identity_predict, station0.clock.jitter ** 2 + station1.clock.jitter ** 2)
return (predictor, predictor)
if station0 < station1:
pairing = clocktracker.clock_pairs.get((station0, station1))
if pairing is None or not pairing.valid:
return None
return (_Predictor(pairing.predict_peer, pairing.variance),
_Predictor(pairing.predict_base, pairing.variance))
else:
pairing = clocktracker.clock_pairs.get((station1, station0))
if pairing is None or not pairing.valid:
return None
return (_Predictor(pairing.predict_base, pairing.variance),
_Predictor(pairing.predict_peer, pairing.variance))
def _label_heights(g, node, heights):
"""Label each node in the tree with a root of 'node'
with its height, filling the map 'heights' which
should be initially empty. 到达最远叶子节点的距离 , 叶子节点的高度为0"""
# we use heights as a visited-map too.
heights[node] = 0
for each in g.neighbors(node):
if each not in heights:
_label_heights(g, each, heights)
mn = heights[each] + g.edge_weight((node, each))
if mn > heights[node]:
heights[node] = mn
def _tallest_branch(g, node, heights, ignore=None):
"""Find the edge in the tree rooted at 'node' that is part of
the tallest branch. If ignore is not None, ignore that neighbour.
    Returns (pathlen, node) for the longest path among the branches of 'node'."""
tallest = (0, None)
for each in g.neighbors(node):
if each is ignore:
continue
eh = heights[each] + g.edge_weight((node, each))
if eh > tallest[0]:
tallest = (eh, each)
return tallest
def _convert_timestamps(g, timestamp_map, predictor_map, node, results, conversion_chain, variance):
"""Rewrite node and all unvisited nodes reachable from node using the
chain of clocksync objects in conversion_chain, populating the results dict.
node: the root node to convert
timestamp_map: dict of node -> [(timestamp, utc), ...] to convert
results: dict of node -> (variance, [(converted timestamp, utc), ...])
conversion_chain: list of predictor tuples to apply to node, in order
variance: the total error introduced by chain: sum([p.variance for p in chain])
"""
# convert our own timestamp using the provided chain
r = []
results[node] = (variance, r) # also used as a visited-map
for ts, utc in timestamp_map[node]:
for predictor in conversion_chain:
ts = predictor.predict(ts)
r.append((ts, utc))
# convert all reachable unvisited nodes using a conversion to our timestamp
# followed by the provided chain
for neighbor in g.neighbors(node):
if neighbor not in results:
predictor = predictor_map[(neighbor, node)]
_convert_timestamps(g, timestamp_map, predictor_map,
neighbor,
results,
[predictor] + conversion_chain, variance + predictor.variance)
@profile.trackcpu
def normalize(clocktracker, timestamp_map):
"""
Given {receiver: [(timestamp, utc), ...]}
return [{receiver: (variance, [(timestamp, utc), ...])}, ...]
where timestamps are normalized to some arbitrary base timescale within each map;
one map is returned per connected subgraph."""
# Represent the stations as a weighted graph where there
# is an edge between S0 and S1 with weight W if we have a
# sufficiently recent clock correlation between S0 and S1 with
# estimated variance W.
#
# This graph may have multiple disconnected components. Treat
# each separately and do this:
#
# Find the minimal spanning tree of the component. This will
# give us the edges to use to convert between timestamps with
# the lowest total error.
#
    # Pick a central node of the MST to use as the timestamp
# basis, where a central node is a node that minimizes the maximum
# path cost from the central node to any other node in the spanning
# tree.
#
# Finally, convert all timestamps in the tree to the basis of the
# central node.
# populate initial graph
g = pygraph.classes.graph.graph()
g.add_nodes(timestamp_map.keys())
# build a weighted graph where edges represent usable clock
# synchronization paths, and the weight of each edge represents
    # the estimated variance introduced by converting a timestamp
# across that clock synchronization.
# also build a map of predictor objects corresponding to the
# edges for later use
predictor_map = {}
for si in timestamp_map.keys():
for sj in timestamp_map.keys():
if si < sj:
predictors = _make_predictors(clocktracker, si, sj)
if predictors:
predictor_map[(si, sj)] = predictors[0]
predictor_map[(sj, si)] = predictors[1]
g.add_edge((si, sj), wt=predictors[0].variance)
# find a minimal spanning tree for each component of the graph
mst_forest = pygraph.algorithms.minmax.minimal_spanning_tree(g)
# rebuild the graph with only the spanning edges, retaining weights
# also note the roots of each tree as we go
g = pygraph.classes.graph.graph()
g.add_nodes(mst_forest.keys())
roots = []
for edge in mst_forest.items():
if edge[1] is None:
roots.append(edge[0])
else:
g.add_edge(edge, wt=predictor_map[edge].variance)
# for each spanning tree, find a central node and convert timestamps
components = []
for root in roots:
# label heights of nodes, where the height of a node is
# the length of the most expensive path to a child of the node
heights = {}
_label_heights(g, root, heights)
# Find the longest path in the spanning tree; we want to
# resolve starting at the center of this path, as this minimizes
# the maximum path length to any node
# find the two tallest branches leading from the root
tall1 = _tallest_branch(g, root, heights)
tall2 = _tallest_branch(g, root, heights, ignore=tall1[1])
# Longest path is TALL1 - ROOT - TALL2
# We want to move along the path into TALL1 until the distances to the two
# tips of the path are equal length. This is the same as finding a node on
# the path within TALL1 with a height of about half the longest path.
target = (tall1[0] + tall2[0]) / 2
central = root
step = tall1[1]
while step and abs(heights[central] - target) > abs(heights[step] - target):
central = step
_, step = _tallest_branch(g, central, heights, ignore=central)
# Convert timestamps so they are using the clock units of "central"
# by walking the spanning tree edges. Then finally convert to wallclock
# times as the last step by dividing by the final clock's frequency
results = {}
conversion_chain = [_Predictor(lambda x: x/central.clock.freq, central.clock.jitter**2)]
_convert_timestamps(g, timestamp_map, predictor_map, central, results,
conversion_chain, central.clock.jitter**2)
components.append(results)
return components
| bmeyang/mlat-server | mlat/server/clocknorm.py | Python | agpl-3.0 | 9,256 |
from changes.testutils import APITestCase, SAMPLE_DIFF
from changes.api.project_source_details import ProjectSourceDetailsAPIView
class ProjectSourceDetailsTest(APITestCase):
def test_simple(self):
project = self.create_project()
source = self.create_source(project)
path = '/api/0/projects/{0}/sources/{1}/'.format(
project.id.hex, source.id.hex)
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert data['id'] == source.id.hex
def test_filter_coverage_for_added_lines(self):
view = ProjectSourceDetailsAPIView()
coverage = ['N'] * 150
coverage[52] = 'C'
coverage[53] = 'C'
coverage[54] = 'C'
coverage_dict = {'ci/run_with_retries.py': coverage}
result = view._filter_coverage_for_added_lines(SAMPLE_DIFF, coverage_dict)
assert len(result) == 24 # 24 additions
assert result == (['N'] * 2) + (['C'] * 3) + (['N'] * 19)
| dropbox/changes | tests/changes/api/test_project_source_details.py | Python | apache-2.0 | 1,018 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This file verifies if the camera poses are correct by looking at the epipolar geometry.
Given a pair of images and their relative pose, we sample a bunch of discriminative points in the first image,
we draw its corresponding epipolar line in the other image. If the camera pose is correct,
the epipolar line should pass the ground truth correspondence location.
'''
import cv2
import numpy as np
def skew(x):
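    # skew-symmetric cross-product matrix [x]_x, so that [x]_x @ v == np.cross(x, v)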
return np.array([[0, -x[2], x[1]],
[x[2], 0, -x[0]],
[-x[1], x[0], 0]])
def two_view_geometry(intrinsics1, extrinsics1, intrinsics2, extrinsics2):
'''
:param intrinsics1: 4 by 4 matrix
:param extrinsics1: 4 by 4 W2C matrix
:param intrinsics2: 4 by 4 matrix
:param extrinsics2: 4 by 4 W2C matrix
:return:
'''
relative_pose = extrinsics2.dot(np.linalg.inv(extrinsics1))
R = relative_pose[:3, :3]
T = relative_pose[:3, 3]
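    # essential matrix E = [t]_x R, fundamental matrix F = K2^-T E K1^-1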
tx = skew(T)
E = np.dot(tx, R)
F = np.linalg.inv(intrinsics2[:3, :3]).T.dot(E).dot(np.linalg.inv(intrinsics1[:3, :3]))
return E, F, relative_pose
def drawpointslines(img1, img2, lines1, pts2, color):
'''
draw corresponding epilines on img1 for the points in img2
'''
r, c = img1.shape
img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
for r, pt2, cl in zip(lines1, pts2, color):
x0, y0 = map(int, [0, -r[2]/r[1]])
x1, y1 = map(int, [c, -(r[2]+r[0]*c)/r[1]])
cl = tuple(cl.tolist())
img1 = cv2.line(img1, (x0,y0), (x1,y1), cl, 1)
img2 = cv2.circle(img2, tuple(pt2), 5, cl, -1)
return img1, img2
def epipolar(coord1, F, img1, img2):
    # sample the query points in the first image and give each one a random colour
pts1 = coord1.astype(int).T
color = np.random.randint(0, high=255, size=(len(pts1), 3))
# Find epilines corresponding to points in left image (first image) and
# drawing its lines on right image
lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F)
if lines2 is None:
return None
lines2 = lines2.reshape(-1,3)
img3, img4 = drawpointslines(img2,img1,lines2,pts1,color)
## print(img3.shape)
## print(np.concatenate((img4, img3)).shape)
## cv2.imwrite('vis.png', np.concatenate((img4, img3), axis=1))
h_max = max(img3.shape[0], img4.shape[0])
w_max = max(img3.shape[1], img4.shape[1])
out = np.ones((h_max, w_max*2, 3))
out[:img4.shape[0], :img4.shape[1], :] = img4
out[:img3.shape[0], w_max:w_max+img3.shape[1], :] = img3
# return np.concatenate((img4, img3), axis=1)
return out
def verify_data(img1, img2, intrinsics1, extrinsics1, intrinsics2, extrinsics2):
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
E, F, relative_pose = two_view_geometry(intrinsics1, extrinsics1,
intrinsics2, extrinsics2)
# sift = cv2.xfeatures2d.SIFT_create(nfeatures=20)
# kp1 = sift.detect(img1, mask=None)
# coord1 = np.array([[kp.pt[0], kp.pt[1]] for kp in kp1]).T
# Initiate ORB detector
orb = cv2.ORB_create()
# find the keypoints with ORB
kp1 = orb.detect(img1, None)
coord1 = np.array([[kp.pt[0], kp.pt[1]] for kp in kp1[:20]]).T
return epipolar(coord1, F, img1, img2)
def calc_angles(c2w_1, c2w_2):
c1 = c2w_1[:3, 3:4]
c2 = c2w_2[:3, 3:4]
c1 = c1 / np.linalg.norm(c1)
c2 = c2 / np.linalg.norm(c2)
return np.rad2deg(np.arccos(np.dot(c1.T, c2)))
if __name__ == '__main__':
import sys
sys.path.append('../../')
import os
from ibrnet.data_loaders.google_scanned_objects import GoogleScannedDataset
from config import config_parser
parser = config_parser()
args = parser.parse_args()
dataset = GoogleScannedDataset(args, mode='train')
out_dir = 'data_verify'
print('saving output to {}...'.format(out_dir))
os.makedirs(out_dir, exist_ok=True)
for k, data in enumerate(dataset):
rgb = data['rgb'].cpu().numpy()
camera = data['camera'].cpu().numpy()
src_rgbs = data['src_rgbs'].cpu().numpy()
src_cameras = data['src_cameras'].cpu().numpy()
i = np.random.randint(low=0, high=len(src_rgbs))
rgb_i = src_rgbs[i]
cameras_i = src_cameras[i]
intrinsics1 = camera[2:18].reshape(4, 4)
intrinsics2 = cameras_i[2:18].reshape(4, 4)
extrinsics1 = np.linalg.inv(camera[-16:].reshape(4, 4))
extrinsics2 = np.linalg.inv(cameras_i[-16:].reshape(4, 4))
im = verify_data(np.uint8(rgb*255.), np.uint8(rgb_i*255.),
intrinsics1, extrinsics1,
intrinsics2, extrinsics2)
if im is not None:
cv2.imwrite(os.path.join(out_dir, '{:03d}.png'.format(k)), im)
| googleinterns/IBRNet | ibrnet/data_loaders/data_verifier.py | Python | apache-2.0 | 5,377 |
__author__ = 'magic'
| elenay/python_training | generator/__init__.py | Python | apache-2.0 | 21 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.21 on 2019-12-20 18:03
from __future__ import unicode_literals
import uuid
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("studies", "0054_new-analytics-permissions")]
operations = [
migrations.AddField(
model_name="study",
name="salt",
field=models.UUIDField(default=uuid.uuid4, null=True),
)
]
| CenterForOpenScience/lookit-api | studies/migrations/0055_study_salt.py | Python | apache-2.0 | 467 |
from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from website.files.models.osfstorage import OsfStorageFile
from website.settings import API_DOMAIN
from tests.base import ApiTestCase
from tests.factories import (AuthUserFactory, ProjectFactory, RegistrationFactory,
CommentFactory, NodeWikiFactory, CollectionFactory, PrivateLinkFactory)
class TestGuidRedirect(ApiTestCase):
def setUp(self):
super(TestGuidRedirect, self).setUp()
self.user = AuthUserFactory()
def _add_private_link(self, project, anonymous=False):
view_only_link = PrivateLinkFactory(anonymous=anonymous)
view_only_link.nodes.append(project)
view_only_link.save()
return view_only_link
def test_redirect_to_node_view(self):
project = ProjectFactory()
url = '/{}guids/{}/'.format(API_BASE, project._id)
res = self.app.get(url, auth=self.user.auth)
redirect_url = '{}{}nodes/{}/'.format(API_DOMAIN, API_BASE, project._id)
assert_equal(res.status_code, 302)
assert_equal(res.location, redirect_url)
def test_redirect_to_registration_view(self):
registration = RegistrationFactory()
url = '/{}guids/{}/'.format(API_BASE, registration._id)
res = self.app.get(url, auth=self.user.auth)
redirect_url = '{}{}registrations/{}/'.format(API_DOMAIN, API_BASE, registration._id)
assert_equal(res.status_code, 302)
assert_equal(res.location, redirect_url)
def test_redirect_to_collections_view(self):
collection = CollectionFactory()
url = '/{}guids/{}/'.format(API_BASE, collection._id)
res = self.app.get(url, auth=self.user.auth)
redirect_url = '{}{}collections/{}/'.format(API_DOMAIN, API_BASE, collection._id)
assert_equal(res.status_code, 302)
assert_equal(res.location, redirect_url)
def test_redirect_to_file_view(self):
test_file = OsfStorageFile.create(
is_file=True,
node=ProjectFactory(),
path='/test',
name='test',
materialized_path='/test',
)
test_file.save()
guid = test_file.get_guid(create=True)
url = '/{}guids/{}/'.format(API_BASE, guid._id)
res = self.app.get(url, auth=self.user.auth)
redirect_url = '{}{}files/{}/'.format(API_DOMAIN, API_BASE, test_file._id)
assert_equal(res.status_code, 302)
assert_equal(res.location, redirect_url)
def test_redirect_to_comment_view(self):
comment = CommentFactory()
url = '/{}guids/{}/'.format(API_BASE, comment._id)
res = self.app.get(url, auth=self.user.auth)
redirect_url = '{}{}comments/{}/'.format(API_DOMAIN, API_BASE, comment._id)
assert_equal(res.status_code, 302)
assert_equal(res.location, redirect_url)
def test_redirect_throws_404_for_invalid_guids(self):
url = '/{}guids/{}/'.format(API_BASE, 'fakeguid')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_redirect_when_viewing_private_project_through_view_only_link(self):
project = ProjectFactory()
view_only_link = self._add_private_link(project)
url = '/{}guids/{}/?view_only={}'.format(API_BASE, project._id, view_only_link.key)
res = self.app.get(url, auth=AuthUserFactory().auth)
redirect_url = '{}{}nodes/{}/?view_only={}'.format(API_DOMAIN, API_BASE, project._id, view_only_link.key)
assert_equal(res.status_code, 302)
assert_equal(res.location, redirect_url)
def test_redirect_when_viewing_private_project_file_through_view_only_link(self):
project = ProjectFactory()
test_file = OsfStorageFile.create(
is_file=True,
node=project,
path='/test',
name='test',
materialized_path='/test',
)
test_file.save()
guid = test_file.get_guid(create=True)
view_only_link = self._add_private_link(project)
url = '/{}guids/{}/?view_only={}'.format(API_BASE, guid._id, view_only_link.key)
res = self.app.get(url, auth=AuthUserFactory().auth)
redirect_url = '{}{}files/{}/?view_only={}'.format(API_DOMAIN, API_BASE, test_file._id, view_only_link.key)
assert_equal(res.status_code, 302)
assert_equal(res.location, redirect_url)
def test_redirect_when_viewing_private_project_comment_through_view_only_link(self):
project = ProjectFactory()
view_only_link = self._add_private_link(project)
comment = CommentFactory(node=project)
url = '/{}guids/{}/?view_only={}'.format(API_BASE, comment._id, view_only_link.key)
res = self.app.get(url, auth=AuthUserFactory().auth)
redirect_url = '{}{}comments/{}/?view_only={}'.format(API_DOMAIN, API_BASE, comment._id, view_only_link.key)
assert_equal(res.status_code, 302)
assert_equal(res.location, redirect_url)
| zamattiac/osf.io | api_tests/guids/views/test_guid_redirect.py | Python | apache-2.0 | 5,068 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def foo():
return ['xyz', 100000, -98.6]
def bar():
return 'abc', [42, 'python'], "Guido"
if __name__ == '__main__':
print(foo())
print(bar())
x, y, z = bar()
print(x, y, z)
print(type(foo()))
| Furzoom/learnpython | pythonCore/ch11/return_type.py | Python | mit | 273 |
#!/usr/bin/env python
import sys
for line in open(sys.argv[1], "r"):
print line.strip().split(' ')[-2]
| keyboardsage/codeeval | easy/penultimateword.py | Python | gpl-3.0 | 109 |
from hubcheck.pageobjects.basepagewidget import BasePageWidget
from hubcheck.pageobjects.basepageelement import Link
from hubcheck.pageobjects.basepageelement import TextReadOnly
class GroupsMenu1(BasePageWidget):
def __init__(self, owner, locatordict={}):
super(GroupsMenu1,self).__init__(owner,locatordict)
# load hub's classes
GroupsMenu_Locators = self.load_class('GroupsMenu_Locators')
# update this object's locator
self.locators.update(GroupsMenu_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.overview = Link(self,{'base':'overview'})
self.members = Link(self,{'base':'members'})
self.nmembers = TextReadOnly(self,{'base':'nmembers'})
self.wiki = Link(self,{'base':'wiki'})
self.nwikis = TextReadOnly(self,{'base':'nwikis'})
self.resources = Link(self,{'base':'resources'})
self.nresources = TextReadOnly(self,{'base':'nresources'})
self.messages = Link(self,{'base':'messages'})
self.discussion = Link(self,{'base':'discussion'})
self.ndiscussions = TextReadOnly(self,{'base':'ndiscussions'})
self.blog = Link(self,{'base':'blog'})
self.wishlist = Link(self,{'base':'wishlist'})
self.calendar = Link(self,{'base':'calendar'})
self._menu_items = ['overview','members','wiki',
'resources','messages','discussion',
'blog','wishlist','calendar']
# update the component's locators with this objects overrides
self._updateLocators()
def get_menu_items(self):
"""return the menu link names"""
return self._menu_items
def goto_menu_item(self,menuitem):
"""click on a menu item"""
if not menuitem in self._menu_items:
raise ValueError("invalid menu item: '%s'" % menuitem)
w = getattr(self,menuitem)
w.click()
def is_menu_item_protected(self,menuitem):
"""check to see if the menu item is accessible by the user"""
if not menuitem in self._menu_items:
raise ValueError("invalid menu item: '%s'" % menuitem)
w = getattr(self,menuitem)
return 'protected' in w.get_attribute('class')
class GroupsMenu1_Locators_Base(object):
"""locators for GroupsMenu object"""
locators = {
'base' : "css=#page_menu",
'overview' : "css=.group-overview-tab",
'members' : "css=.group-members-tab",
'nmembers' : "css=.group-members-tab .count",
'wiki' : "css=.group-wiki-tab",
'nwikis' : "css=.group-wiki-tab .count",
'resources' : "css=.group-resources-tab",
'nresources' : "css=.group-resources-tab .count",
'messages' : "css=.group-messages-tab",
'discussion' : "css=.group-forum-tab",
'ndiscussions' : "css=.group-forum-tab .count",
'blog' : "css=.group-blog-tab",
'wishlist' : "css=.group-wishlist-tab",
'calendar' : "css=.group-calendar-tab",
}
class GroupsMenu2(BasePageWidget):
"""
Groups Menu for nees.org
Adds datasharing and announcements links
"""
def __init__(self, owner, locatordict={}):
super(GroupsMenu2,self).__init__(owner,locatordict)
# load hub's classes
GroupsMenu_Locators = self.load_class('GroupsMenu_Locators')
# update this object's locator
self.locators.update(GroupsMenu_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.overview = Link(self,{'base':'overview'})
self.members = Link(self,{'base':'members'})
self.nmembers = TextReadOnly(self,{'base':'nmembers'})
self.wiki = Link(self,{'base':'wiki'})
self.nwikis = TextReadOnly(self,{'base':'nwikis'})
self.resources = Link(self,{'base':'resources'})
self.nresources = TextReadOnly(self,{'base':'nresources'})
self.messages = Link(self,{'base':'messages'})
self.discussion = Link(self,{'base':'discussion'})
self.ndiscussions = TextReadOnly(self,{'base':'ndiscussions'})
self.blog = Link(self,{'base':'blog'})
self.wishlist = Link(self,{'base':'wishlist'})
self.datasharing = Link(self,{'base':'datasharing'})
self.calendar = Link(self,{'base':'calendar'})
self.announcements = Link(self,{'base':'announcements'})
self._menu_items = ['overview','members','wiki',
'resources','messages','discussion',
'blog','wishlist','calendar',
'datasharing','announcements']
# update the component's locators with this objects overrides
self._updateLocators()
def get_menu_items(self):
"""return the menu link names"""
return self._menu_items
def goto_menu_item(self,menuitem):
"""click on a menu item"""
if not menuitem in self._menu_items:
raise ValueError("invalid menu item: '%s'" % menuitem)
w = getattr(self,menuitem)
w.click()
def is_menu_item_protected(self,menuitem):
"""check to see if the menu item is accessible by the user"""
if not menuitem in self._menu_items:
raise ValueError("invalid menu item: '%s'" % menuitem)
w = getattr(self,menuitem)
return 'protected' in w.get_attribute('class')
class GroupsMenu2_Locators_Base(object):
"""locators for GroupsMenu object"""
locators = {
'base' : "css=#page_menu",
'overview' : "css=.group-overview-tab",
'members' : "css=.group-members-tab",
'nmembers' : "css=.group-members-tab .count",
'wiki' : "css=.group-wiki-tab",
'nwikis' : "css=.group-wiki-tab .count",
'resources' : "css=.group-resources-tab",
'nresources' : "css=.group-resources-tab .count",
'messages' : "css=.group-messages-tab",
'discussion' : "css=.group-forum-tab",
'ndiscussions' : "css=.group-forum-tab .count",
'blog' : "css=.group-blog-tab",
'wishlist' : "css=.group-wishlist-tab",
'datasharing' : "css=.group-datasharing-tab",
'calendar' : "css=.group-calendar-tab",
'announcements' : "css=.group-announcements-tab",
}
class GroupsMenu3(BasePageWidget):
"""
Groups Menu for hub version 1.1.5
Adds projects, announcements, collections
"""
def __init__(self, owner, locatordict={}):
super(GroupsMenu3,self).__init__(owner,locatordict)
# load hub's classes
GroupsMenu_Locators = self.load_class('GroupsMenu_Locators')
# update this object's locator
self.locators.update(GroupsMenu_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.overview = Link(self,{'base':'overview'})
self.members = Link(self,{'base':'members'})
self.nmembers = TextReadOnly(self,{'base':'nmembers'})
self.wiki = Link(self,{'base':'wiki'})
self.nwikis = TextReadOnly(self,{'base':'nwikis'})
self.resources = Link(self,{'base':'resources'})
self.nresources = TextReadOnly(self,{'base':'nresources'})
self.discussion = Link(self,{'base':'discussion'})
self.ndiscussions = TextReadOnly(self,{'base':'ndiscussions'})
self.blog = Link(self,{'base':'blog'})
self.nblogs = TextReadOnly(self,{'base':'nblogs'})
self.wishlist = Link(self,{'base':'wishlist'})
self.usage = Link(self,{'base':'usage'})
self.projects = Link(self,{'base':'projects'})
self.nprojects = TextReadOnly(self,{'base':'nprojects'})
self.calendar = Link(self,{'base':'calendar'})
self.ncalendars = TextReadOnly(self,{'base':'ncalendars'})
self.announcements = Link(self,{'base':'announcements'})
self.collections = Link(self,{'base':'collections'})
self._menu_items = ['overview','members',
'wiki','resources','discussion',
'blog','wishlist','usage','projects',
'calendar','announcements','collections']
# update the component's locators with this objects overrides
self._updateLocators()
def get_menu_items(self):
"""return the menu link names"""
return self._menu_items
def goto_menu_item(self,menuitem):
"""click on a menu item"""
if not menuitem in self._menu_items:
raise ValueError("invalid menu item: '%s'" % menuitem)
w = getattr(self,menuitem)
w.click()
def is_menu_item_protected(self,menuitem):
"""check to see if the menu item is accessible by the user"""
if not menuitem in self._menu_items:
raise ValueError("invalid menu item: '%s'" % menuitem)
w = getattr(self,menuitem)
return 'protected' in w.get_attribute('class')
class GroupsMenu3_Locators_Base(object):
"""locators for GroupsMenu object"""
locators = {
'base' : "css=#page_menu",
'overview' : "css=.group-overview-tab",
'members' : "css=.group-members-tab",
'nmembers' : "css=.group-members-tab .count",
'wiki' : "css=.group-wiki-tab",
'nwikis' : "css=.group-wiki-tab .count",
'resources' : "css=.group-resources-tab",
'nresources' : "css=.group-resources-tab .count",
'discussion' : "css=.group-forum-tab",
'ndiscussions' : "css=.group-forum-tab .count",
'blog' : "css=.group-blog-tab",
'nblogs' : "css=.group-blog-tab .count",
'wishlist' : "css=.group-wishlist-tab",
'usage' : "css=.group-usage-tab",
'projects' : "css=.group-projects-tab",
'nprojects' : "css=.group-projects-tab .count",
'calendar' : "css=.group-calendar-tab",
'ncalendars' : "css=.group-calendar-tab .count",
'announcements' : "css=.group-announcements-tab",
'collections' : "css=.group-collections-tab",
}
| codedsk/hubcheck | hubcheck/pageobjects/widgets/groups_menu.py | Python | mit | 11,007 |
#!/usr/bin/env python
#
# x17c.c
# The C demo doesn't work yet, and even if it did it probably could
# not be converted to pure Python since it seems to require the Tcl/Tk
# driver.
| pemryan/DAKOTA | packages/plplot/examples/python/x17.py | Python | lgpl-2.1 | 183 |
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
# NOTE: assumed import - 'wait' and 'exceptions' are expected to come from the
# local wpt webdriver client package used by these tests
from webdriver import exceptions, wait
class AlertsQuitTest(base_test.WebDriverBaseTest):
def setUp(self):
self.wait = wait.WebDriverWait(self.driver, 5, ignored_exceptions=[exceptions.NoSuchAlertException])
self.driver.get(self.webserver.where_is('modal/res/alerts.html'))
def test_can_quit_when_an_alert_is_present(self):
self.driver.find_element_by_id('alert').click()
alert = self.wait.until(lambda x: x.switch_to_alert())
self.driver.quit()
with self.assertRaises(Exception):
alert.accept()
AlertsQuitTest.driver = None
if __name__ == '__main__':
unittest.main()
| cr/fxos-certsuite | web-platform-tests/tests/webdriver/modal/alerts_quit_test.py | Python | mpl-2.0 | 738 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
url(r'^$', 'aadrest.views.index', name='index'),
url(r'^step1/$', 'aadrest.views.step1', name='step1'),
url(r'^step2/$', 'aadrest.views.step2', name='step2'),
url(r'^step3/$', 'aadrest.views.step3', name='step3'),
url(r'^step4/$', 'aadrest.views.step4', name='step4'),
url(r'^step1_live/$', 'aadrest.views.step1_live', name='step1_live'),
url(r'^step2_live/$', 'aadrest.views.step2_live', name='step2_live'),
url(r'^step3_live/$', 'aadrest.views.step3_live', name='step3_live'),
url(r'^step4_live/$', 'aadrest.views.step4_live', name='step4_live'),
) | sebastus/azure-python-authenticate | djangoSite/aadrest/urls.py | Python | apache-2.0 | 675 |
import os.path
from .sqlite import * # noqa
SLUMBER_USERNAME = 'test'
SLUMBER_PASSWORD = 'test'
SLUMBER_API_HOST = 'http://localhost:8000'
# A bunch of our tests check this value in a returned URL/Domain
PRODUCTION_DOMAIN = 'readthedocs.org'
GROK_API_HOST = 'http://localhost:8888'
try:
from local_settings import * # noqa
except ImportError:
pass
| sils1297/readthedocs.org | readthedocs/settings/test.py | Python | mit | 361 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.test import TestCase
from physical.models import Volume
from physical.tests import factory as physical_factory
from logical.tests import factory as logical_factory
from ..tasks_disk_resize import update_disk
from .factory import TaskHistoryFactory
UPDATE_USED_SIZE_SUCCESS = '---> Used disk size updated. NFS: {}'
UPDATE_USED_SIZE_WRONG_HOST = '---> {} not found for: {}'
UPDATE_USED_SIZE_WITHOUT_VOLUME = \
'---> Could not update disk size used: Instance {} do not have disk'
class DiskResizeTestCase(TestCase):
def setUp(self):
self.task = TaskHistoryFactory()
self.instance = physical_factory.InstanceFactory()
self.databaseinfra = self.instance.databaseinfra
self.database = logical_factory.DatabaseFactory()
self.database.databaseinfra = self.databaseinfra
self.database.save()
def test_can_update_disk_kb(self):
volume = physical_factory.VolumeFactory()
volume.host = self.instance.hostname
volume.save()
old_size = volume.total_size_kb
old_used_size = volume.used_size_kb
self.assertIsNone(self.task.details)
is_updated = update_disk(
database=self.database, task=self.task,
address=self.instance.address, used_size=400, total_size=1000
)
self.assertTrue(is_updated)
expected_message = UPDATE_USED_SIZE_SUCCESS.format(volume.identifier)
self.assertEqual(expected_message, self.task.details)
volume = Volume.objects.get(pk=volume.pk)
self.assertNotEqual(volume.total_size_kb, old_size)
self.assertNotEqual(volume.used_size_kb, old_used_size)
self.assertEqual(volume.total_size_kb, 1000)
self.assertEqual(volume.used_size_kb, 400)
def test_cannot_update_disk_kb_without_volume(self):
is_updated = update_disk(
database=self.database, task=self.task,
address=self.instance.address, used_size=300, total_size=100
)
self.assertFalse(is_updated)
expected_message = UPDATE_USED_SIZE_WITHOUT_VOLUME.format(
self.instance.address
)
self.assertEqual(expected_message, self.task.details)
def test_cannot_update_disk_kb_wrong_host(self):
is_updated = update_disk(
database=self.database, task=self.task,
address=self.instance.address[::-1], used_size=300, total_size=100
)
self.assertFalse(is_updated)
expected_message = UPDATE_USED_SIZE_WRONG_HOST.format(
self.instance.address[::-1], self.database.name
)
self.assertEqual(expected_message, self.task.details)
| globocom/database-as-a-service | dbaas/notification/tests/test_disk_resize.py | Python | bsd-3-clause | 2,733 |
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
import sys
import gflags
import matplotlib.pyplot as plt
import modules.tools.common.proto_utils as proto_utils
import modules.map.proto.map_pb2 as map_pb2
import modules.routing.proto.topo_graph_pb2 as topo_graph_pb2
import modules.routing.proto.routing_pb2 as routing_pb2
FLAGS = gflags.FLAGS
gflags.DEFINE_string('map_dir', 'modules/map/data/demo', 'map directory')
def get_map_dir(argv):
sys.argv.insert(1, '--undefok')
flagfile = os.path.normpath(
os.path.join(
os.path.dirname(__file__), '../../common/data',
'global_flagfile.txt'))
sys.argv.insert(2, '--flagfile=' + flagfile)
argv = FLAGS(sys.argv)
mapdir = os.path.normpath(
os.path.join(os.path.dirname(__file__), '../../../', FLAGS.map_dir))
print("Map dir: %s " % FLAGS.map_dir)
return mapdir
def get_mapdata(map_dir):
print('Please wait for loading map data...')
map_data_path = os.path.join(map_dir, 'base_map.bin')
print('File: %s' % map_data_path)
return proto_utils.get_pb_from_bin_file(map_data_path, map_pb2.Map())
def get_topodata(map_dir):
print('Please wait for loading routing topo data...')
topo_data_path = os.path.join(map_dir, 'routing_map.bin')
print("File: %s" % topo_data_path)
return proto_utils.get_pb_from_bin_file(topo_data_path, topo_graph_pb2.Graph())
def get_routingdata():
print('Please wait for loading route response data...')
log_dir = os.path.normpath(
os.path.join(os.path.dirname(__file__), '../../../data/log'))
route_data_path = os.path.join(log_dir, 'passage_region_debug.bin')
print("File: %s" % route_data_path)
return proto_utils.get_pb_from_text_file(route_data_path, routing_pb2.RoutingResponse())
def onclick(event):
"""Event function when mouse left button is clicked"""
print('\nClick captured! x=%f\ty=%f' % (event.xdata, event.ydata))
print('cmd>')
def downsample_array(array, step=5):
"""Down sample given array"""
result = array[::step]
result.append(array[-1])
return result
def draw_boundary(ax, line_segment):
"""
:param line_segment:
:return:
"""
px = [float(p.x) for p in line_segment.point]
py = [float(p.y) for p in line_segment.point]
px = downsample_array(px)
py = downsample_array(py)
ax.plot(px, py, 'k', lw=0.4)
def draw_map(ax, mapfile):
"""Draw map from mapfile"""
for lane in mapfile.lane:
for curve in lane.left_boundary.curve.segment:
if curve.HasField('line_segment'):
draw_boundary(ax, curve.line_segment)
for curve in lane.right_boundary.curve.segment:
if curve.HasField('line_segment'):
draw_boundary(ax, curve.line_segment)
plt.draw()
| ApolloAuto/apollo | modules/tools/routing/util.py | Python | apache-2.0 | 3,562 |
# Authors: Denis A. Engemann <[email protected]>
# Teon Brooks <[email protected]>
#
# simplified BSD-3 license
import datetime
import time
import numpy as np
from .egimff import _read_raw_egi_mff
from .events import _combine_triggers
from ..base import BaseRaw
from ..utils import _read_segments_file, _create_chs
from ..meas_info import _empty_info
from ..constants import FIFF
from ...utils import verbose, logger, warn
def _read_header(fid):
"""Read EGI binary header."""
version = np.fromfile(fid, np.int32, 1)[0]
if version > 6 & ~np.bitwise_and(version, 6):
version = version.byteswap().astype(np.uint32)
else:
raise ValueError('Watchout. This does not seem to be a simple '
'binary EGI file.')
def my_fread(*x, **y):
return np.fromfile(*x, **y)[0]
info = dict(
version=version,
year=my_fread(fid, '>i2', 1),
month=my_fread(fid, '>i2', 1),
day=my_fread(fid, '>i2', 1),
hour=my_fread(fid, '>i2', 1),
minute=my_fread(fid, '>i2', 1),
second=my_fread(fid, '>i2', 1),
millisecond=my_fread(fid, '>i4', 1),
samp_rate=my_fread(fid, '>i2', 1),
n_channels=my_fread(fid, '>i2', 1),
gain=my_fread(fid, '>i2', 1),
bits=my_fread(fid, '>i2', 1),
value_range=my_fread(fid, '>i2', 1)
)
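    # bit 0 of the version word flags segmented files; bits 1-2 encode the sample precision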
unsegmented = 1 if np.bitwise_and(version, 1) == 0 else 0
precision = np.bitwise_and(version, 6)
if precision == 0:
raise RuntimeError('Floating point precision is undefined.')
if unsegmented:
info.update(dict(n_categories=0,
n_segments=1,
n_samples=np.fromfile(fid, '>i4', 1)[0],
n_events=np.fromfile(fid, '>i2', 1)[0],
event_codes=[],
category_names=[],
category_lengths=[],
pre_baseline=0))
for event in range(info['n_events']):
event_codes = ''.join(np.fromfile(fid, 'S1', 4).astype('U1'))
info['event_codes'].append(event_codes)
else:
raise NotImplementedError('Only continuous files are supported')
info['unsegmented'] = unsegmented
info['dtype'], info['orig_format'] = {2: ('>i2', 'short'),
4: ('>f4', 'float'),
6: ('>f8', 'double')}[precision]
info['dtype'] = np.dtype(info['dtype'])
return info
def _read_events(fid, info):
"""Read events."""
events = np.zeros([info['n_events'],
info['n_segments'] * info['n_samples']])
fid.seek(36 + info['n_events'] * 4, 0) # skip header
for si in range(info['n_samples']):
# skip data channels
fid.seek(info['n_channels'] * info['dtype'].itemsize, 1)
# read event channels
events[:, si] = np.fromfile(fid, info['dtype'], info['n_events'])
return events
@verbose
def read_raw_egi(input_fname, eog=None, misc=None,
include=None, exclude=None, preload=False,
channel_naming='E%d', verbose=None):
"""Read EGI simple binary as raw object.
Parameters
----------
input_fname : str
Path to the raw file. Files with an extension .mff are automatically
considered to be EGI's native MFF format files.
eog : list or tuple
Names of channels or list of indices that should be designated
EOG channels. Default is None.
misc : list or tuple
Names of channels or list of indices that should be designated
MISC channels. Default is None.
include : None | list
The event channels to be ignored when creating the synthetic
trigger. Defaults to None.
Note. Overrides ``exclude`` parameter.
exclude : None | list
The event channels to be ignored when creating the synthetic
trigger. Defaults to None. If None, channels that have more than
one event and the ``sync`` and ``TREV`` channels will be
ignored.
%(preload)s
.. versionadded:: 0.11
channel_naming : str
Channel naming convention for the data channels. Defaults to 'E%%d'
(resulting in channel names 'E1', 'E2', 'E3'...). The effective default
prior to 0.14.0 was 'EEG %%03d'.
.. versionadded:: 0.14.0
%(verbose)s
Returns
-------
raw : instance of RawEGI
A Raw object containing EGI data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
Notes
-----
The trigger channel names are based on the arbitrary user dependent event
codes used. However this function will attempt to generate a synthetic
trigger channel named ``STI 014`` in accordance with the general
Neuromag / MNE naming pattern.
The event_id assignment equals ``np.arange(n_events) + 1``. The resulting
    ``event_id`` mapping is stored as an attribute of the resulting raw object but
will be ignored when saving to a fiff. Note. The trigger channel is
artificially constructed based on timestamps received by the Netstation.
As a consequence, triggers have only short durations.
This step will fail if events are not mutually exclusive.
"""
if input_fname.endswith('.mff'):
return _read_raw_egi_mff(input_fname, eog, misc, include,
exclude, preload, channel_naming, verbose)
return RawEGI(input_fname, eog, misc, include, exclude, preload,
channel_naming, verbose)
class RawEGI(BaseRaw):
"""Raw object from EGI simple binary file."""
@verbose
def __init__(self, input_fname, eog=None, misc=None,
include=None, exclude=None, preload=False,
channel_naming='E%d', verbose=None): # noqa: D102
if eog is None:
eog = []
if misc is None:
misc = []
with open(input_fname, 'rb') as fid: # 'rb' important for py3k
logger.info('Reading EGI header from %s...' % input_fname)
egi_info = _read_header(fid)
logger.info(' Reading events ...')
egi_events = _read_events(fid, egi_info) # update info + jump
if egi_info['value_range'] != 0 and egi_info['bits'] != 0:
cal = egi_info['value_range'] / 2. ** egi_info['bits']
else:
cal = 1e-6
logger.info(' Assembling measurement info ...')
event_codes = []
if egi_info['n_events'] > 0:
event_codes = list(egi_info['event_codes'])
if include is None:
exclude_list = ['sync', 'TREV'] if exclude is None else exclude
exclude_inds = [i for i, k in enumerate(event_codes) if k in
exclude_list]
more_excludes = []
if exclude is None:
for ii, event in enumerate(egi_events):
if event.sum() <= 1 and event_codes[ii]:
more_excludes.append(ii)
if len(exclude_inds) + len(more_excludes) == len(event_codes):
warn('Did not find any event code with more than one '
'event.', RuntimeWarning)
else:
exclude_inds.extend(more_excludes)
exclude_inds.sort()
include_ = [i for i in np.arange(egi_info['n_events']) if
i not in exclude_inds]
include_names = [k for i, k in enumerate(event_codes)
if i in include_]
else:
include_ = [i for i, k in enumerate(event_codes)
if k in include]
include_names = include
for kk, v in [('include', include_names), ('exclude', exclude)]:
if isinstance(v, list):
for k in v:
if k not in event_codes:
                        raise ValueError('Could not find event named "%s"' % k)
elif v is not None:
raise ValueError('`%s` must be None or of type list' % kk)
event_ids = np.arange(len(include_)) + 1
logger.info(' Synthesizing trigger channel "STI 014" ...')
logger.info(' Excluding events {%s} ...' %
", ".join([k for i, k in enumerate(event_codes)
if i not in include_]))
egi_info['new_trigger'] = _combine_triggers(
egi_events[include_], remapping=event_ids)
self.event_id = dict(zip([e for e in event_codes if e in
include_names], event_ids))
else:
# No events
self.event_id = None
egi_info['new_trigger'] = None
info = _empty_info(egi_info['samp_rate'])
my_time = datetime.datetime(
egi_info['year'], egi_info['month'], egi_info['day'],
egi_info['hour'], egi_info['minute'], egi_info['second'])
my_timestamp = time.mktime(my_time.timetuple())
info['meas_date'] = (my_timestamp, 0)
ch_names = [channel_naming % (i + 1) for i in
range(egi_info['n_channels'])]
ch_names.extend(list(egi_info['event_codes']))
if egi_info['new_trigger'] is not None:
ch_names.append('STI 014') # our new_trigger
nchan = len(ch_names)
cals = np.repeat(cal, nchan)
ch_coil = FIFF.FIFFV_COIL_EEG
ch_kind = FIFF.FIFFV_EEG_CH
chs = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, (), (), misc)
sti_ch_idx = [i for i, name in enumerate(ch_names) if
name.startswith('STI') or name in event_codes]
for idx in sti_ch_idx:
chs[idx].update({'unit_mul': FIFF.FIFF_UNITM_NONE, 'cal': 1.,
'kind': FIFF.FIFFV_STIM_CH,
'coil_type': FIFF.FIFFV_COIL_NONE,
'unit': FIFF.FIFF_UNIT_NONE})
info['chs'] = chs
info._update_redundant()
super(RawEGI, self).__init__(
info, preload, orig_format=egi_info['orig_format'],
filenames=[input_fname], last_samps=[egi_info['n_samples'] - 1],
raw_extras=[egi_info], verbose=verbose)
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a segment of data from a file."""
egi_info = self._raw_extras[fi]
dtype = egi_info['dtype']
n_chan_read = egi_info['n_channels'] + egi_info['n_events']
offset = 36 + egi_info['n_events'] * 4
trigger_ch = egi_info['new_trigger']
_read_segments_file(self, data, idx, fi, start, stop, cals, mult,
dtype=dtype, n_channels=n_chan_read, offset=offset,
trigger_ch=trigger_ch)
| Teekuningas/mne-python | mne/io/egi/egi.py | Python | bsd-3-clause | 11,044 |
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from functools import partial
from PyQt4.Qt import (
Qt, QMenu, QPoint, QIcon, QDialog, QGridLayout, QLabel, QLineEdit, QComboBox,
QDialogButtonBox, QSize, QVBoxLayout, QListWidget, QStringList, QRadioButton)
from calibre.gui2 import error_dialog, question_dialog
from calibre.gui2.widgets import ComboBoxWithHelp
from calibre.utils.config_base import tweaks
from calibre.utils.icu import sort_key
from calibre.utils.search_query_parser import ParseException
from calibre.utils.search_query_parser import saved_searches
class SelectNames(QDialog): # {{{
def __init__(self, names, txt, parent=None):
QDialog.__init__(self, parent)
self.l = l = QVBoxLayout(self)
self.setLayout(l)
self.la = la = QLabel(_('Create a Virtual Library based on %s') % txt)
l.addWidget(la)
self._names = QListWidget(self)
self._names.addItems(QStringList(sorted(names, key=sort_key)))
self._names.setSelectionMode(self._names.ExtendedSelection)
l.addWidget(self._names)
self._or = QRadioButton(_('Match any of the selected %s names')%txt)
self._and = QRadioButton(_('Match all of the selected %s names')%txt)
self._or.setChecked(True)
l.addWidget(self._or)
l.addWidget(self._and)
self.bb = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
self.bb.accepted.connect(self.accept)
self.bb.rejected.connect(self.reject)
l.addWidget(self.bb)
self.resize(self.sizeHint())
@property
def names(self):
for item in self._names.selectedItems():
yield unicode(item.data(Qt.DisplayRole).toString())
@property
def match_type(self):
return ' and ' if self._and.isChecked() else ' or '
# }}}
MAX_VIRTUAL_LIBRARY_NAME_LENGTH = 40
def _build_full_search_string(gui):
search_templates = (
'',
'{cl}',
'{cr}',
'(({cl}) and ({cr}))',
'{sb}',
'(({cl}) and ({sb}))',
'(({cr}) and ({sb}))',
'(({cl}) and ({cr}) and ({sb}))'
)
sb = gui.search.current_text
db = gui.current_db
cr = db.data.get_search_restriction()
cl = db.data.get_base_restriction()
dex = 0
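    # dex is a bitmask index into search_templates:
    # bit 0 = base restriction (cl), bit 1 = search restriction (cr), bit 2 = search box (sb)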
if sb:
dex += 4
if cr:
dex += 2
if cl:
dex += 1
template = search_templates[dex]
return template.format(cl=cl, cr=cr, sb=sb).strip()
class CreateVirtualLibrary(QDialog): # {{{
def __init__(self, gui, existing_names, editing=None):
QDialog.__init__(self, gui)
self.gui = gui
self.existing_names = existing_names
if editing:
self.setWindowTitle(_('Edit virtual library'))
else:
self.setWindowTitle(_('Create virtual library'))
self.setWindowIcon(QIcon(I('lt.png')))
gl = QGridLayout()
self.setLayout(gl)
self.la1 = la1 = QLabel(_('Virtual library &name:'))
gl.addWidget(la1, 0, 0)
self.vl_name = QComboBox()
self.vl_name.setEditable(True)
self.vl_name.lineEdit().setMaxLength(MAX_VIRTUAL_LIBRARY_NAME_LENGTH)
la1.setBuddy(self.vl_name)
gl.addWidget(self.vl_name, 0, 1)
self.editing = editing
self.saved_searches_label = QLabel('')
self.saved_searches_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
gl.addWidget(self.saved_searches_label, 2, 0, 1, 2)
self.la2 = la2 = QLabel(_('&Search expression:'))
gl.addWidget(la2, 1, 0)
self.vl_text = QLineEdit()
self.vl_text.textChanged.connect(self.search_text_changed)
la2.setBuddy(self.vl_text)
gl.addWidget(self.vl_text, 1, 1)
self.vl_text.setText(_build_full_search_string(self.gui))
self.sl = sl = QLabel('<p>'+_('Create a virtual library based on: ')+
('<a href="author.{0}">{0}</a>, '
'<a href="tag.{1}">{1}</a>, '
'<a href="publisher.{2}">{2}</a>, '
'<a href="series.{3}">{3}</a>, '
'<a href="search.{4}">{4}</a>.').format(_('Authors'), _('Tags'),
_('Publishers'), _('Series'), _('Saved Searches')))
sl.setWordWrap(True)
sl.setTextInteractionFlags(Qt.LinksAccessibleByMouse)
sl.linkActivated.connect(self.link_activated)
gl.addWidget(sl, 3, 0, 1, 2)
gl.setRowStretch(3,10)
self.hl = hl = QLabel(_('''
<h2>Virtual Libraries</h2>
<p>Using <i>virtual libraries</i> you can restrict calibre to only show
you books that match a search. When a virtual library is in effect, calibre
behaves as though the library contains only the matched books. The Tag Browser
    displays only the tags/authors/series/etc. that belong to the matched books and any searches
you do will only search within the books in the virtual library. This
is a good way to partition your large library into smaller and easier to work with subsets.</p>
<p>For example you can use a Virtual Library to only show you books with the Tag <i>"Unread"</i>
or only books by <i>"My Favorite Author"</i> or only books in a particular series.</p>
<p>More information and examples are available in the
<a href="http://manual.calibre-ebook.com/virtual_libraries.html">User Manual</a>.</p>
'''))
hl.setWordWrap(True)
hl.setOpenExternalLinks(True)
hl.setFrameStyle(hl.StyledPanel)
gl.addWidget(hl, 0, 3, 4, 1)
bb = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
bb.accepted.connect(self.accept)
bb.rejected.connect(self.reject)
gl.addWidget(bb, 4, 0, 1, 0)
if editing:
db = self.gui.current_db
virt_libs = db.prefs.get('virtual_libraries', {})
for dex,vl in enumerate(sorted(virt_libs.keys(), key=sort_key)):
self.vl_name.addItem(vl, virt_libs.get(vl, ''))
if vl == editing:
self.vl_name.setCurrentIndex(dex)
self.original_index = dex
self.original_search = virt_libs.get(editing, '')
self.vl_text.setText(self.original_search)
self.new_name = editing
self.vl_name.currentIndexChanged[int].connect(self.name_index_changed)
self.vl_name.lineEdit().textEdited.connect(self.name_text_edited)
self.resize(self.sizeHint()+QSize(150, 25))
def search_text_changed(self, txt):
searches = [_('Saved searches recognized in the expression:')]
txt = unicode(txt)
while txt:
p = txt.partition('search:')
if p[1]: # found 'search:'
possible_search = p[2]
if possible_search: # something follows the 'search:'
if possible_search[0] == '"': # strip any quotes
possible_search = possible_search[1:].partition('"')
else: # find end of the search name. Is EOL, space, rparen
sp = possible_search.find(' ')
pp = possible_search.find(')')
if pp < 0 or (sp > 0 and sp <= pp):
# space in string before rparen, or neither found
possible_search = possible_search.partition(' ')
else:
# rparen in string before space
possible_search = possible_search.partition(')')
txt = possible_search[2] # grab remainder of the string
search_name = possible_search[0]
if search_name.startswith('='):
search_name = search_name[1:]
if search_name in saved_searches().names():
searches.append(search_name + '=' +
saved_searches().lookup(search_name))
else:
txt = ''
else:
txt = ''
if len(searches) > 1:
self.saved_searches_label.setText('\n'.join(searches))
else:
self.saved_searches_label.setText('')
def name_text_edited(self, new_name):
self.new_name = unicode(new_name)
def name_index_changed(self, dex):
if self.editing and (self.vl_text.text() != self.original_search or
self.new_name != self.editing):
if not question_dialog(self.gui, _('Search text changed'),
_('The virtual library name or the search text has changed. '
'Do you want to discard these changes?'),
default_yes=False):
self.vl_name.blockSignals(True)
self.vl_name.setCurrentIndex(self.original_index)
self.vl_name.lineEdit().setText(self.new_name)
self.vl_name.blockSignals(False)
return
self.new_name = self.editing = self.vl_name.currentText()
self.original_index = dex
self.original_search = unicode(self.vl_name.itemData(dex).toString())
self.vl_text.setText(self.original_search)
def link_activated(self, url):
db = self.gui.current_db
f, txt = unicode(url).partition('.')[0::2]
if f == 'search':
names = saved_searches().names()
else:
names = getattr(db, 'all_%s_names'%f)()
d = SelectNames(names, txt, parent=self)
if d.exec_() == d.Accepted:
prefix = f+'s' if f in {'tag', 'author'} else f
if f == 'search':
search = ['(%s)'%(saved_searches().lookup(x)) for x in d.names]
else:
search = ['%s:"=%s"'%(prefix, x.replace('"', '\\"')) for x in d.names]
if search:
if not self.editing:
self.vl_name.lineEdit().setText(d.names.next())
self.vl_name.lineEdit().setCursorPosition(0)
self.vl_text.setText(d.match_type.join(search))
self.vl_text.setCursorPosition(0)
def accept(self):
n = unicode(self.vl_name.currentText()).strip()
if not n:
error_dialog(self.gui, _('No name'),
_('You must provide a name for the new virtual library'),
show=True)
return
if n.startswith('*'):
error_dialog(self.gui, _('Invalid name'),
_('A virtual library name cannot begin with "*"'),
show=True)
return
if n in self.existing_names and n != self.editing:
if not question_dialog(self.gui, _('Name already in use'),
_('That name is already in use. Do you want to replace it '
'with the new search?'),
default_yes=False):
return
v = unicode(self.vl_text.text()).strip()
if not v:
error_dialog(self.gui, _('No search string'),
_('You must provide a search to define the new virtual library'),
show=True)
return
try:
db = self.gui.library_view.model().db
recs = db.data.search_getting_ids('', v, use_virtual_library=False)
except ParseException as e:
error_dialog(self.gui, _('Invalid search'),
_('The search in the search box is not valid'),
det_msg=e.msg, show=True)
return
if not recs and not question_dialog(
self.gui, _('Search found no books'),
_('The search found no books, so the virtual library '
'will be empty. Do you really want to use that search?'),
default_yes=False):
return
self.library_name = n
self.library_search = v
QDialog.accept(self)
# }}}
class SearchRestrictionMixin(object):
no_restriction = _('<None>')
def __init__(self):
self.checked = QIcon(I('ok.png'))
self.empty = QIcon(I('blank.png'))
self.search_based_vl_name = None
self.search_based_vl = None
self.virtual_library_menu = QMenu()
self.virtual_library.clicked.connect(self.virtual_library_clicked)
self.virtual_library_tooltip = \
_('Use a "virtual library" to show only a subset of the books present in this library')
self.virtual_library.setToolTip(self.virtual_library_tooltip)
self.search_restriction = ComboBoxWithHelp(self)
self.search_restriction.setVisible(False)
self.search_count.setText(_("(all books)"))
self.ar_menu = QMenu(_('Additional restriction'))
self.edit_menu = QMenu(_('Edit Virtual Library'))
self.rm_menu = QMenu(_('Remove Virtual Library'))
def add_virtual_library(self, db, name, search):
virt_libs = db.prefs.get('virtual_libraries', {})
virt_libs[name] = search
db.prefs.set('virtual_libraries', virt_libs)
def do_create_edit(self, name=None):
db = self.library_view.model().db
virt_libs = db.prefs.get('virtual_libraries', {})
cd = CreateVirtualLibrary(self, virt_libs.keys(), editing=name)
if cd.exec_() == cd.Accepted:
if name:
self._remove_vl(name, reapply=False)
self.add_virtual_library(db, cd.library_name, cd.library_search)
if not name or name == db.data.get_base_restriction_name():
self.apply_virtual_library(cd.library_name)
def virtual_library_clicked(self):
m = self.virtual_library_menu
m.clear()
a = m.addAction(_('Create Virtual Library'))
a.triggered.connect(partial(self.do_create_edit, name=None))
a = self.edit_menu
self.build_virtual_library_list(a, self.do_create_edit)
m.addMenu(a)
a = self.rm_menu
self.build_virtual_library_list(a, self.remove_vl_triggered)
m.addMenu(a)
m.addSeparator()
db = self.library_view.model().db
a = self.ar_menu
a.clear()
a.setIcon(self.checked if db.data.get_search_restriction_name() else self.empty)
self.build_search_restriction_list()
m.addMenu(a)
m.addSeparator()
current_lib = db.data.get_base_restriction_name()
if current_lib == '':
a = m.addAction(self.checked, self.no_restriction)
else:
a = m.addAction(self.empty, self.no_restriction)
a.triggered.connect(partial(self.apply_virtual_library, library=''))
a = m.addAction(self.empty, _('*current search'))
a.triggered.connect(partial(self.apply_virtual_library, library='*'))
if self.search_based_vl_name:
a = m.addAction(
self.checked if db.data.get_base_restriction_name().startswith('*')
else self.empty,
self.search_based_vl_name)
a.triggered.connect(partial(self.apply_virtual_library,
library=self.search_based_vl_name))
m.addSeparator()
virt_libs = db.prefs.get('virtual_libraries', {})
for vl in sorted(virt_libs.keys(), key=sort_key):
a = m.addAction(self.checked if vl == current_lib else self.empty, vl)
a.triggered.connect(partial(self.apply_virtual_library, library=vl))
p = QPoint(0, self.virtual_library.height())
self.virtual_library_menu.popup(self.virtual_library.mapToGlobal(p))
def apply_virtual_library(self, library=None):
db = self.library_view.model().db
virt_libs = db.prefs.get('virtual_libraries', {})
if not library:
db.data.set_base_restriction('')
db.data.set_base_restriction_name('')
elif library == '*':
if not self.search.current_text:
error_dialog(self, _('No search'),
_('There is no current search to use'), show=True)
return
txt = _build_full_search_string(self)
try:
db.data.search_getting_ids('', txt, use_virtual_library=False)
except ParseException as e:
error_dialog(self, _('Invalid search'),
_('The search in the search box is not valid'),
det_msg=e.msg, show=True)
return
self.search_based_vl = txt
db.data.set_base_restriction(txt)
self.search_based_vl_name = self._trim_restriction_name('*' + txt)
db.data.set_base_restriction_name(self.search_based_vl_name)
elif library == self.search_based_vl_name:
db.data.set_base_restriction(self.search_based_vl)
db.data.set_base_restriction_name(self.search_based_vl_name)
elif library in virt_libs:
db.data.set_base_restriction(virt_libs[library])
db.data.set_base_restriction_name(library)
self.virtual_library.setToolTip(self.virtual_library_tooltip + '\n' +
db.data.get_base_restriction())
self._apply_search_restriction(db.data.get_search_restriction(),
db.data.get_search_restriction_name())
def build_virtual_library_list(self, menu, handler):
db = self.library_view.model().db
virt_libs = db.prefs.get('virtual_libraries', {})
menu.clear()
menu.setIcon(self.empty)
def add_action(name, search):
a = menu.addAction(name)
a.triggered.connect(partial(handler, name=name))
a.setIcon(self.empty)
libs = sorted(virt_libs.keys(), key=sort_key)
if libs:
menu.setEnabled(True)
for n in libs:
add_action(n, virt_libs[n])
else:
menu.setEnabled(False)
def remove_vl_triggered(self, name=None):
if not question_dialog(self, _('Are you sure?'),
_('Are you sure you want to remove '
'the virtual library {0}').format(name),
default_yes=False):
return
self._remove_vl(name, reapply=True)
def _remove_vl(self, name, reapply=True):
db = self.library_view.model().db
virt_libs = db.prefs.get('virtual_libraries', {})
virt_libs.pop(name, None)
db.prefs.set('virtual_libraries', virt_libs)
if reapply and db.data.get_base_restriction_name() == name:
self.apply_virtual_library('')
def _trim_restriction_name(self, name):
return name[0:MAX_VIRTUAL_LIBRARY_NAME_LENGTH].strip()
def build_search_restriction_list(self):
m = self.ar_menu
m.clear()
current_restriction_text = None
if self.search_restriction.count() > 1:
txt = unicode(self.search_restriction.itemText(2))
if txt.startswith('*'):
current_restriction_text = txt
self.search_restriction.clear()
current_restriction = self.library_view.model().db.data.get_search_restriction_name()
m.setIcon(self.checked if current_restriction else self.empty)
def add_action(txt, index):
self.search_restriction.addItem(txt)
txt = self._trim_restriction_name(txt)
if txt == current_restriction:
a = m.addAction(self.checked, txt if txt else self.no_restriction)
else:
a = m.addAction(self.empty, txt if txt else self.no_restriction)
a.triggered.connect(partial(self.search_restriction_triggered,
action=a, index=index))
add_action('', 0)
add_action(_('*current search'), 1)
dex = 2
if current_restriction_text:
add_action(current_restriction_text, 2)
dex += 1
for n in sorted(saved_searches().names(), key=sort_key):
add_action(n, dex)
dex += 1
def search_restriction_triggered(self, action=None, index=None):
self.search_restriction.setCurrentIndex(index)
self.apply_search_restriction(index)
def apply_named_search_restriction(self, name):
if not name:
r = 0
else:
r = self.search_restriction.findText(name)
if r < 0:
r = 0
self.search_restriction.setCurrentIndex(r)
self.apply_search_restriction(r)
def apply_text_search_restriction(self, search):
search = unicode(search)
if not search:
self.search_restriction.setCurrentIndex(0)
self._apply_search_restriction('', '')
else:
s = '*' + search
if self.search_restriction.count() > 1:
txt = unicode(self.search_restriction.itemText(2))
if txt.startswith('*'):
self.search_restriction.setItemText(2, s)
else:
self.search_restriction.insertItem(2, s)
else:
self.search_restriction.insertItem(2, s)
self.search_restriction.setCurrentIndex(2)
self._apply_search_restriction(search, self._trim_restriction_name(s))
def apply_search_restriction(self, i):
if i == 1:
self.apply_text_search_restriction(unicode(self.search.currentText()))
elif i == 2 and unicode(self.search_restriction.currentText()).startswith('*'):
self.apply_text_search_restriction(
unicode(self.search_restriction.currentText())[1:])
else:
r = unicode(self.search_restriction.currentText())
if r is not None and r != '':
restriction = 'search:"%s"'%(r)
else:
restriction = ''
self._apply_search_restriction(restriction, r)
def clear_additional_restriction(self):
self._apply_search_restriction('', '')
def _apply_search_restriction(self, restriction, name):
self.saved_search.clear()
# The order below is important. Set the restriction, force a '' search
# to apply it, reset the tag browser to take it into account, then set
# the book count.
self.library_view.model().db.data.set_search_restriction(restriction)
self.library_view.model().db.data.set_search_restriction_name(name)
self.search.clear(emit_search=True)
self.tags_view.recount()
self.set_number_of_books_shown()
self.current_view().setFocus(Qt.OtherFocusReason)
self.set_window_title()
v = self.current_view()
if not v.currentIndex().isValid():
v.set_current_row()
v.refresh_book_details()
def set_number_of_books_shown(self):
db = self.library_view.model().db
if self.current_view() == self.library_view and db is not None and \
db.data.search_restriction_applied():
restrictions = [x for x in (db.data.get_base_restriction_name(),
db.data.get_search_restriction_name()) if x]
t = ' :: '.join(restrictions)
if len(t) > 20:
t = t[:19] + u'…'
self.search_count.setStyleSheet(
'QLabel { border-radius: 6px; background-color: %s }' %
tweaks['highlight_virtual_library'])
else: # No restriction or not library view
t = ''
self.search_count.setStyleSheet(
'QLabel { background-color: transparent; }')
self.search_count.setText(t)
if __name__ == '__main__':
from calibre.gui2 import Application
from calibre.gui2.preferences import init_gui
app = Application([])
app
gui = init_gui()
d = CreateVirtualLibrary(gui, [])
d.exec_()
| kobolabs/calibre | src/calibre/gui2/search_restriction_mixin.py | Python | gpl-3.0 | 24,447 |
"""
This module contains the default values for all settings used by Scrapy.
For more information about these settings you can read the settings
documentation in docs/topics/settings.rst
Scrapy developers, if you add a setting here remember to:
* add it in alphabetical order
* group similar settings without leaving blank lines
* add its documentation to the available settings documentation
(docs/topics/settings.rst)
"""
import os
import sys
from importlib import import_module
from os.path import join, abspath, dirname
AJAXCRAWL_ENABLED = False
BOT_NAME = 'scrapybot'
CLOSESPIDER_TIMEOUT = 0
CLOSESPIDER_PAGECOUNT = 0
CLOSESPIDER_ITEMCOUNT = 0
CLOSESPIDER_ERRORCOUNT = 0
COMMANDS_MODULE = ''
COMPRESSION_ENABLED = True
CONCURRENT_ITEMS = 100
CONCURRENT_REQUESTS = 16
CONCURRENT_REQUESTS_PER_DOMAIN = 8
CONCURRENT_REQUESTS_PER_IP = 0
COOKIES_ENABLED = True
COOKIES_DEBUG = False
DEFAULT_ITEM_CLASS = 'scrapy.item.Item'
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
}
DEPTH_LIMIT = 0
DEPTH_STATS = True
DEPTH_PRIORITY = 0
DNSCACHE_ENABLED = True
DOWNLOAD_DELAY = 0
DOWNLOAD_HANDLERS = {}
DOWNLOAD_HANDLERS_BASE = {
'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler',
'http': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',
'https': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',
's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler',
'ftp': 'scrapy.core.downloader.handlers.ftp.FTPDownloadHandler',
}
DOWNLOAD_TIMEOUT = 180 # 3mins
DOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory'
DOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.contextfactory.ScrapyClientContextFactory'
DOWNLOADER_MIDDLEWARES = {}
DOWNLOADER_MIDDLEWARES_BASE = {
# Engine side
'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300,
'scrapy.contrib.downloadermiddleware.downloadtimeout.DownloadTimeoutMiddleware': 350,
'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400,
'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500,
'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550,
'scrapy.contrib.downloadermiddleware.ajaxcrawl.AjaxCrawlMiddleware': 560,
'scrapy.contrib.downloadermiddleware.redirect.MetaRefreshMiddleware': 580,
'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 590,
'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600,
'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700,
'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750,
'scrapy.contrib.downloadermiddleware.chunked.ChunkedTransferMiddleware': 830,
'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850,
'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900,
# Downloader side
}
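# A minimal illustration (editor's sketch, not part of the shipped defaults):
# projects normally tune this chain through their own DOWNLOADER_MIDDLEWARES
# setting, which Scrapy merges with the *_BASE dict above by priority; mapping
# a component to None disables it. The project module path below is
# hypothetical, and the lowercase variable name keeps this example out of the
# real settings (only uppercase names are collected).
_example_downloader_middlewares_override = {
    'myproject.middlewares.CustomProxyMiddleware': 543,  # hypothetical component
    'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': None,  # disable retries
}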
DOWNLOADER_STATS = True
DUPEFILTER_CLASS = 'scrapy.dupefilter.RFPDupeFilter'
try:
EDITOR = os.environ['EDITOR']
except KeyError:
if sys.platform == 'win32':
EDITOR = '%s -m idlelib.idle'
else:
EDITOR = 'vi'
EXTENSIONS = {}
EXTENSIONS_BASE = {
'scrapy.contrib.corestats.CoreStats': 0,
'scrapy.webservice.WebService': 0,
'scrapy.telnet.TelnetConsole': 0,
'scrapy.contrib.memusage.MemoryUsage': 0,
'scrapy.contrib.memdebug.MemoryDebugger': 0,
'scrapy.contrib.closespider.CloseSpider': 0,
'scrapy.contrib.feedexport.FeedExporter': 0,
'scrapy.contrib.logstats.LogStats': 0,
'scrapy.contrib.spiderstate.SpiderState': 0,
'scrapy.contrib.throttle.AutoThrottle': 0,
}
FEED_URI = None
FEED_URI_PARAMS = None # a function to extend uri arguments
FEED_FORMAT = 'jsonlines'
FEED_STORE_EMPTY = False
FEED_STORAGES = {}
FEED_STORAGES_BASE = {
'': 'scrapy.contrib.feedexport.FileFeedStorage',
'file': 'scrapy.contrib.feedexport.FileFeedStorage',
'stdout': 'scrapy.contrib.feedexport.StdoutFeedStorage',
's3': 'scrapy.contrib.feedexport.S3FeedStorage',
'ftp': 'scrapy.contrib.feedexport.FTPFeedStorage',
}
FEED_EXPORTERS = {}
FEED_EXPORTERS_BASE = {
'json': 'scrapy.contrib.exporter.JsonItemExporter',
'jsonlines': 'scrapy.contrib.exporter.JsonLinesItemExporter',
'csv': 'scrapy.contrib.exporter.CsvItemExporter',
'xml': 'scrapy.contrib.exporter.XmlItemExporter',
'marshal': 'scrapy.contrib.exporter.MarshalItemExporter',
'pickle': 'scrapy.contrib.exporter.PickleItemExporter',
}
HTTPCACHE_ENABLED = False
HTTPCACHE_DIR = 'httpcache'
HTTPCACHE_IGNORE_MISSING = False
HTTPCACHE_STORAGE = 'scrapy.contrib.httpcache.FilesystemCacheStorage'
HTTPCACHE_EXPIRATION_SECS = 0
HTTPCACHE_IGNORE_HTTP_CODES = []
HTTPCACHE_IGNORE_SCHEMES = ['file']
HTTPCACHE_DBM_MODULE = 'anydbm'
HTTPCACHE_POLICY = 'scrapy.contrib.httpcache.DummyPolicy'
ITEM_PROCESSOR = 'scrapy.contrib.pipeline.ItemPipelineManager'
ITEM_PIPELINES = {}
ITEM_PIPELINES_BASE = {}
LOG_ENABLED = True
LOG_ENCODING = 'utf-8'
LOG_FORMATTER = 'scrapy.logformatter.LogFormatter'
LOG_STDOUT = False
LOG_LEVEL = 'DEBUG'
LOG_FILE = None
LOG_UNSERIALIZABLE_REQUESTS = False
LOGSTATS_INTERVAL = 60.0
MAIL_HOST = 'localhost'
MAIL_PORT = 25
MAIL_FROM = 'scrapy@localhost'
MAIL_PASS = None
MAIL_USER = None
MEMDEBUG_ENABLED = False # enable memory debugging
MEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdown
MEMUSAGE_ENABLED = False
MEMUSAGE_LIMIT_MB = 0
MEMUSAGE_NOTIFY_MAIL = []
MEMUSAGE_REPORT = False
MEMUSAGE_WARNING_MB = 0
METAREFRESH_ENABLED = True
METAREFRESH_MAXDELAY = 100
NEWSPIDER_MODULE = ''
RANDOMIZE_DOWNLOAD_DELAY = True
REDIRECT_ENABLED = True
REDIRECT_MAX_TIMES = 20 # uses Firefox default setting
REDIRECT_PRIORITY_ADJUST = +2
REFERER_ENABLED = True
RETRY_ENABLED = True
RETRY_TIMES = 2 # initial response + 2 retries = 3 requests
RETRY_HTTP_CODES = [500, 502, 503, 504, 400, 408]
RETRY_PRIORITY_ADJUST = -1
ROBOTSTXT_OBEY = False
SCHEDULER = 'scrapy.core.scheduler.Scheduler'
SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleLifoDiskQueue'
SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.LifoMemoryQueue'
SPIDER_MANAGER_CLASS = 'scrapy.spidermanager.SpiderManager'
SPIDER_MIDDLEWARES = {}
SPIDER_MIDDLEWARES_BASE = {
# Engine side
'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': 50,
'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500,
'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': 700,
'scrapy.contrib.spidermiddleware.urllength.UrlLengthMiddleware': 800,
'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': 900,
# Spider side
}
SPIDER_MODULES = []
STATS_CLASS = 'scrapy.statscol.MemoryStatsCollector'
STATS_DUMP = True
STATSMAILER_RCPTS = []
TEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates'))
URLLENGTH_LIMIT = 2083
USER_AGENT = 'Scrapy/%s (+http://scrapy.org)' % import_module('scrapy').__version__
TELNETCONSOLE_ENABLED = 1
TELNETCONSOLE_PORT = [6023, 6073]
TELNETCONSOLE_HOST = '0.0.0.0'
WEBSERVICE_ENABLED = True
WEBSERVICE_LOGFILE = None
WEBSERVICE_PORT = [6080, 7030]
WEBSERVICE_HOST = '0.0.0.0'
WEBSERVICE_RESOURCES = {}
WEBSERVICE_RESOURCES_BASE = {
'scrapy.contrib.webservice.crawler.CrawlerResource': 1,
'scrapy.contrib.webservice.enginestatus.EngineStatusResource': 1,
'scrapy.contrib.webservice.stats.StatsResource': 1,
}
SPIDER_CONTRACTS = {}
SPIDER_CONTRACTS_BASE = {
'scrapy.contracts.default.UrlContract': 1,
'scrapy.contracts.default.ReturnsContract': 2,
'scrapy.contracts.default.ScrapesContract': 3,
}
| joshlk/scrapy | scrapy/settings/default_settings.py | Python | bsd-3-clause | 7,772 |
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""Shared utils for Federated Reconstruction training and evaluation."""
from typing import Callable, Optional, Tuple
import tensorflow as tf
from tensorflow_federated.python.learning import model_utils
from tensorflow_federated.python.learning.reconstruction import model as model_lib
# Type alias for a function that takes in a TF dataset and produces two TF
# datasets. This is consumed by training and evaluation computation builders.
# The first is iterated over during reconstruction and the second is iterated
# over post-reconstruction, for both training and evaluation. This can be useful
# for e.g. splitting the dataset into disjoint halves for each stage, doing
# multiple local epochs of reconstruction/training, skipping reconstruction
# entirely, etc. See `build_dataset_split_fn` for a builder, although users can
# also specify their own `DatasetSplitFn`s (see `simple_dataset_split_fn` for an
# example).
DatasetSplitFn = Callable[[tf.data.Dataset, tf.Tensor], Tuple[tf.data.Dataset,
tf.data.Dataset]]
def simple_dataset_split_fn(
client_dataset: tf.data.Dataset) -> Tuple[tf.data.Dataset, tf.data.Dataset]:
"""An example of a `DatasetSplitFn` that returns the original client data.
Both the reconstruction data and post-reconstruction data will result from
iterating over the same tf.data.Dataset. Note that depending on any
preprocessing steps applied to client tf.data.Datasets, this may not produce
exactly the same data in the same order for both reconstruction and
post-reconstruction. For example, if
`client_dataset.shuffle(reshuffle_each_iteration=True)` was applied,
post-reconstruction data will be in a different order than reconstruction
data.
Args:
client_dataset: `tf.data.Dataset` representing client data.
Returns:
A tuple of two `tf.data.Datasets`, the first to be used for reconstruction,
the second to be used post-reconstruction.
"""
return client_dataset, client_dataset
def build_dataset_split_fn(recon_epochs: int = 1,
recon_steps_max: Optional[int] = None,
post_recon_epochs: int = 1,
post_recon_steps_max: Optional[int] = None,
split_dataset: bool = False) -> DatasetSplitFn:
"""Builds a `DatasetSplitFn` for Federated Reconstruction training/evaluation.
Returned `DatasetSplitFn` parameterizes training and evaluation computations
and enables reconstruction for multiple local epochs, multiple epochs of
post-reconstruction training, limiting the number of steps for both stages,
and splitting client datasets into disjoint halves for each stage.
Note that the returned function is used during both training and evaluation:
during training, "post-reconstruction" refers to training of global variables,
and during evaluation, it refers to calculation of metrics using reconstructed
local variables and fixed global variables.
Args:
recon_epochs: The integer number of iterations over the dataset to make
during reconstruction.
recon_steps_max: If not None, the integer maximum number of steps (batches)
to iterate through during reconstruction. This maximum number of steps is
across all reconstruction iterations, i.e. it is applied after
`recon_epochs`. If None, this has no effect.
post_recon_epochs: The integer constant number of iterations to make over
client data after reconstruction.
post_recon_steps_max: If not None, the integer maximum number of steps
(batches) to iterate through after reconstruction. This maximum number of
steps is across all post-reconstruction iterations, i.e. it is applied
after `post_recon_epochs`. If None, this has no effect.
split_dataset: If True, splits `client_dataset` in half for each user, using
even-indexed entries in reconstruction and odd-indexed entries after
reconstruction. If False, `client_dataset` is used for both reconstruction
and post-reconstruction, with the above arguments applied. If True,
      splitting requires that multiple iterations through the dataset yield the
same ordering. For example if
`client_dataset.shuffle(reshuffle_each_iteration=True)` has been called,
then the split datasets may have overlap. If True, note that the dataset
should have more than one batch for reasonable results, since the
splitting does not occur within batches.
Returns:
    A `DatasetSplitFn`.
"""
# Functions for splitting dataset if needed.
recon_condition = lambda i, entry: tf.equal(tf.math.floormod(i, 2), 0)
post_recon_condition = lambda i, entry: tf.greater(tf.math.floormod(i, 2), 0)
get_entry = lambda i, entry: entry
def dataset_split_fn(
client_dataset: tf.data.Dataset
) -> Tuple[tf.data.Dataset, tf.data.Dataset]:
"""A `DatasetSplitFn` built with the given arguments.
Args:
client_dataset: `tf.data.Dataset` representing client data.
Returns:
A tuple of two `tf.data.Datasets`, the first to be used for
reconstruction, the second to be used post-reconstruction.
"""
# Split dataset if needed. This assumes the dataset has a consistent
# order across iterations.
if split_dataset:
recon_dataset = client_dataset.enumerate().filter(recon_condition).map(
get_entry)
post_recon_dataset = client_dataset.enumerate().filter(
post_recon_condition).map(get_entry)
else:
recon_dataset = client_dataset
post_recon_dataset = client_dataset
# Apply `recon_epochs` before limiting to a maximum number of batches
# if needed.
recon_dataset = recon_dataset.repeat(recon_epochs)
if recon_steps_max is not None:
recon_dataset = recon_dataset.take(recon_steps_max)
# Do the same for post-reconstruction.
post_recon_dataset = post_recon_dataset.repeat(post_recon_epochs)
if post_recon_steps_max is not None:
post_recon_dataset = post_recon_dataset.take(post_recon_steps_max)
return recon_dataset, post_recon_dataset
return dataset_split_fn
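# A minimal usage sketch (editor's addition; the helper name is hypothetical
# and not part of the library API). It shows how a `DatasetSplitFn` returned by
# `build_dataset_split_fn` behaves on a toy client dataset: with
# `split_dataset=True`, even-indexed batches feed reconstruction and
# odd-indexed batches feed the post-reconstruction stage.
def _example_dataset_split_usage():
  """Hypothetical demo; returns the two split datasets for inspection."""
  toy_client_data = tf.data.Dataset.range(6).batch(1)
  split_fn = build_dataset_split_fn(
      recon_epochs=2, recon_steps_max=4, post_recon_epochs=1,
      split_dataset=True)
  recon_data, post_recon_data = split_fn(toy_client_data)
  return recon_data, post_recon_data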
def get_global_variables(model: model_lib.Model) -> model_utils.ModelWeights:
"""Gets global variables from a `Model` as `ModelWeights`."""
return model_utils.ModelWeights(
trainable=model.global_trainable_variables,
non_trainable=model.global_non_trainable_variables)
def get_local_variables(model: model_lib.Model) -> model_utils.ModelWeights:
"""Gets local variables from a `Model` as `ModelWeights`."""
return model_utils.ModelWeights(
trainable=model.local_trainable_variables,
non_trainable=model.local_non_trainable_variables)
def has_only_global_variables(model: model_lib.Model) -> bool:
"""Returns `True` if the model has no local variables."""
local_variables_list = (
list(model.local_trainable_variables) +
list(model.local_non_trainable_variables))
if local_variables_list:
return False
return True
| tensorflow/federated | tensorflow_federated/python/learning/reconstruction/reconstruction_utils.py | Python | apache-2.0 | 7,804 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import string, xbmc, xbmcgui, xbmcplugin, xbmcaddon, os, sys, urllib, urllib2, cookielib, time, codecs, datetime, re
import socket
import sqlite3 as db
from BeautifulSoup import BeautifulSoup, BeautifulStoneSoup
import calendar
socket.setdefaulttimeout(35000)
PLUGIN_NAME = 'YaTv'
siteUrl = 'm.tv.yandex.ru'
httpSiteUrl = 'http://' + siteUrl
sid_file = os.path.join(xbmc.translatePath('special://temp/'), 'script.module.YaTv.cookies.sid')
addon = xbmcaddon.Addon(id='script.module.YaTv')
handle = addon.getAddonInfo('id')
__settings__ = xbmcaddon.Addon(id='script.module.YaTv')
thumb = os.path.join( addon.getAddonInfo('path'), "icon.png" )
fanart = os.path.join( addon.getAddonInfo('path'), "fanart.jpg" )
icon = os.path.join( addon.getAddonInfo('path'), 'icon.png')
db_name = os.path.join( addon.getAddonInfo('path'), "move_info.db" )
c = db.connect(database=db_name, check_same_thread=False)
cu = c.cursor()
def ru(x):return unicode(x,'utf8', 'ignore')
def xt(x):return xbmc.translatePath(x)
def showMessage(heading, message, times = 3000):
xbmc.executebuiltin('XBMC.Notification("%s", "%s", %s, "%s")'%(heading, message, times, icon))
def GET(target, referer, post=None,cookie=None):
try:
req = urllib2.Request(url = target, data = post)
req.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C)')
try:req.add_header('Cookie', cookie)
except Exception, e:
pass
resp = urllib2.urlopen(req, timeout=50000)
http1 = resp.read()
        try:
            http = http1.replace('<br />',' ').replace('<br>',' ').replace('&nbsp;','')
        except Exception, e:
            http = http1
        return http
except Exception, e:
print 'HTTP ERROR '+ target
def GetCookie(target, post=None):
try:
req = urllib2.Request(url = target, data = post)
req.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C)')
resp = urllib2.urlopen(req)
#print resp.read()
cookie = resp.headers['Set-Cookie'].split(";")[0]
return cookie
except Exception, e:
print e
tzd = {"0":"2", "-1":"3", "-2":"1", "-3":"6", "-4":"9", "-5":"10", "-6":"12", "-7":"16", "-8":"17", "-9":"18", "-10":"19", "-11":"20", "-12":"21"}
def GetCh():
zone = str(time.timezone / 3600)
try:
tz = tzd[zone]
tdelta = 0
#print "YATV timezone="+str(time.timezone)
#print "YATV tz="+str(tz)
except:
tz = "9"
tdelta = (4*3600 + time.altzone)
#print "YATV Except: timezone="+str(time.timezone)
#print "YATV Except: tz="+str(tz)
channels = ["997","998","994","996","993","734","758","328","853","314","21","916","1016","688","328","1013","140","1016","949","950","951","947","899","934","991","767","557","558","539","906","752","863","1003","900","1000","1005","970","550","548","571","926","543","547","546","509","405","564","862","730","909","542","645","924","556","863","538","625","74","809","240","901","549","965","529","967","702","942","943","751","98","917","788","339","598","392","670","904","763","887","842","536","796","910","797","911","889","845","572","402","695","748","777","145","727","761","513","790","759","481","448","773","139","348","843","696","666","663","143","664","754","455","296","382","272","457","532","111","726","737","557","504","505","692","799","353","716"]
data = urllib.urlencode({'timezone' : tz})
cookie = GetCookie('http://www.vsetv.com/rewrite_url.php', data)
for ch in channels:
dt=datetime.datetime.strftime(datetime.datetime.now() ,"%Y-%m-%d")
s_time = ""
f_time = ""
title_pr = ""
timezone = urllib.urlencode({
'timezone' : tz,
'submit.x' : '13',
'submit.y' : '9',
'selected_channel' : 'channel_'+ch+'',
'selected_date' : 'day_'+dt+''
})
Url = 'http://www.vsetv.com/rewrite_url.php'
http = GET(Url, Url, timezone, cookie)
beautifulSoup = BeautifulSoup(http)
el = beautifulSoup.findAll('div', attrs={'id': 'schedule_container'})
d=[]
prog=[]
i=0
if tdelta == 0:
for gr in el:
for g in gr:
if not g['class'].find('pasttime')>-1 and not g['class'].find('pastprname2')>-1 and not g['class'].find('pastdesc')>-1:
if g['class'].find('time')>-1 or g['class'].find('onair')>-1:
i+=1
if i==1: k=g.string.encode('utf-8')
f_time = g.string.encode('utf-8')
try:
if not s_time == "" and not title_pr == "":
if datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6])).timetuple().tm_hour < datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour:
start_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
elif datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6])).timetuple().tm_hour >= datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour and datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour<5:
start_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
else:
start_t_d = datetime.datetime.date(datetime.datetime.today())
start_t_t = datetime.datetime.time((datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6]))))
start_t1 = datetime.datetime.combine(start_t_d, start_t_t)
start_t = time.mktime(start_t1.timetuple())
if datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6])).timetuple().tm_hour < datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour:
finish_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
elif datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6])).timetuple().tm_hour >= datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour and datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour<5:
finish_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
else:
finish_t_d = datetime.datetime.date(datetime.datetime.today())
finish_t_t = datetime.datetime.time((datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6]))))
finish_t1 = datetime.datetime.combine(finish_t_d, finish_t_t)
finish_t = time.mktime(finish_t1.timetuple())
prog.append({"start":start_t, "finish":finish_t, "program":{"title":title_pr.replace("\\/","//"), "description": description.replace("\\/","//")}})
except Exception, e:
print e
s_time = g.string.encode('utf-8')
elif g['class'].find('prname2')>-1:
try:
try:
try:
title_pr = g.string.encode('utf-8')
description = ""
except:
title_pr = g.contents[0].encode('utf-8')
description = ""
except Exception, e:
title_pr = g.contents[1].encode('utf-8')
description = ""
except:
try:
m=g.findAll('a')
for p in m:
title_pr = p.string.encode('utf-8')
desc = p['href']
Url = 'http://www.vsetv.com/'+str(desc)
http = GET(Url, Url)
beautifulSoup = BeautifulSoup(http)
el = beautifulSoup.findAll('span', attrs={'class': 'big'})
description = ""
for e in el:
description = description + e.string.encode('utf-8')
except Exception, e:
description = ""
print 'ERROR description: '+str(title_pr)+' '+str(ch)+'---'+str(e)
else:
for gr in el:
for g in gr:
if not g['class'].find('pastdesc')>-1:
if g['class'].find('time')>-1 or g['class'].find('onair')>-1 or g['class'].find('pasttime')>-1:
i+=1
if i==1: k=g.string.encode('utf-8')
f_time = g.string.encode('utf-8')
try:
if not s_time == "" and not title_pr == "":
#print s_time
#print f_time
#print title_pr
if datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6])).timetuple().tm_hour < datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour:
start_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
elif datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6])).timetuple().tm_hour >= datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour and datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour<5:
start_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
else:
start_t_d = datetime.datetime.date(datetime.datetime.today())
start_t_t = datetime.datetime.time((datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6]))))
start_t1 = datetime.datetime.combine(start_t_d, start_t_t)
start_t = time.mktime(start_t1.timetuple())-tdelta
if datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6])).timetuple().tm_hour < datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour:
finish_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
elif datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6])).timetuple().tm_hour >= datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour and datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour<5:
finish_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
else:
finish_t_d = datetime.datetime.date(datetime.datetime.today())
finish_t_t = datetime.datetime.time((datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6]))))
finish_t1 = datetime.datetime.combine(finish_t_d, finish_t_t)
finish_t = time.mktime(finish_t1.timetuple())-tdelta
prog.append({"start":start_t, "finish":finish_t, "program":{"title":title_pr.replace("\\/","//")}})
except Exception, e:
print e
s_time = g.string.encode('utf-8')
elif g['class'].find('prname2')>-1 or g['class'].find('pastprname2')>-1:
try:
try:
try:
title_pr = g.string.encode('utf-8')
except:
title_pr = g.contents[0].encode('utf-8')
except Exception, e:
title_pr = g.contents[1].encode('utf-8')
except:
m=g.findAll('a')
for p in m:
title_pr = p.string.encode('utf-8')
dt=datetime.datetime.strftime(datetime.datetime.now() + datetime.timedelta(days=1) ,"%Y-%m-%d")
timezone = urllib.urlencode({
'timezone' : tz,
'submit.x' : '13',
'submit.y' : '9',
'selected_channel' : 'channel_'+ch+'',
'selected_date' : 'day_'+dt+''
})
Url = 'http://www.vsetv.com/rewrite_url.php'
http = GET(Url, Url, timezone, cookie)
beautifulSoup = BeautifulSoup(http)
el = beautifulSoup.findAll('div', attrs={'id': 'schedule_container'})
#d=[]
#prog=[]
i=0
for gr in el:
for g in gr:
if not g['class'].find('pastdesc')>-1:
if g['class'].find('time')>-1 or g['class'].find('onair')>-1 or g['class'].find('pasttime')>-1:
i+=1
if i==1: k=g.string.encode('utf-8')
f_time = g.string.encode('utf-8')
try:
if not s_time == "" and not title_pr == "":
if datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6])).timetuple().tm_hour < datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour and i!=1:
start_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=2)
elif datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6])).timetuple().tm_hour >= datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour and datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour<5 and i!=1:
start_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=2)
else:
start_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
start_t_t = datetime.datetime.time((datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6]))))
start_t1 = datetime.datetime.combine(start_t_d, start_t_t)
start_t = time.mktime(start_t1.timetuple())-tdelta
if datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6])).timetuple().tm_hour < datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour and i!=1:
finish_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=2)
elif datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6])).timetuple().tm_hour >= datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour and datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour<5 and i!=1:
finish_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=2)
else:
finish_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
finish_t_t = datetime.datetime.time((datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6]))))
finish_t1 = datetime.datetime.combine(finish_t_d, finish_t_t)
finish_t = time.mktime(finish_t1.timetuple())-tdelta
prog.append({"start":start_t, "finish":finish_t, "program":{"title":title_pr.replace("\\/","//")}})
except Exception, e:
print e
s_time = g.string.encode('utf-8')
elif g['class'].find('prname2')>-1 or g['class'].find('pastprname2')>-1:
try:
try:
try:
title_pr = g.string.encode('utf-8')
except:
title_pr = g.contents[0].encode('utf-8')
except Exception, e:
title_pr = g.contents[1].encode('utf-8')
except:
m=g.findAll('a')
for p in m:
title_pr = p.string.encode('utf-8')
d.append({"events":prog})
add_to_db_New(str(ch)+"vsetv", d[0])
def GetChCache():
zone = str(time.timezone / 3600)
try:
tz = tzd[zone]
tdelta = 0
#print "YATV timezone="+str(time.timezone)
#print "YATV tz="+str(tz)
except:
tz = "9"
tdelta = (4*3600 + time.altzone)
#print "YATV Except: timezone="+str(time.timezone)
#print "YATV Except: tz="+str(tz)
channels = ["997","998","994","996","993","734","758","328","853","314","21","916","1016","688","328","1013","140","1016","949","950","951","947","899","934","991","767","557","558","539","906","752","863","1003","900","1000","1005","970","550","548","571","926","543","547","546","509","405","564","862","730","909","542","645","924","556","863","538","625","74","809","240","901","549","965","529","967","702","942","943","751","98","917","788","339","598","392","670","904","763","887","842","536","796","910","797","911","889","845","572","402","695","748","777","145","727","761","513","790","759","481","448","773","139","348","843","696","666","663","143","664","754","455","296","382","272","457","532","111","726","737","557","504","505","692","799","353","716"]
data = urllib.urlencode({'timezone' : tz})
cookie = GetCookie('http://www.vsetv.com/rewrite_url.php', data)
for ch in channels:
dt=datetime.datetime.strftime(datetime.datetime.now() ,"%Y-%m-%d")
s_time = ""
f_time = ""
title_pr = ""
timezone = urllib.urlencode({
'timezone' : tz,
'submit.x' : '13',
'submit.y' : '9',
'selected_channel' : 'channel_'+ch+'',
'selected_date' : 'day_'+dt+''
})
Url = 'http://www.vsetv.com/rewrite_url.php'
http = GET(Url, Url, timezone, cookie)
#if ch == "1016":
#print http
beautifulSoup = BeautifulSoup(http)
el = beautifulSoup.findAll('div', attrs={'id': 'schedule_container'})
#beautifulSoup = BeautifulSoup(http)
tzsite = beautifulSoup.findAll('select', attrs={'name': 'timezone'})
print "tzsite= "+str(tzsite)
d=[]
prog=[]
i=0
if tdelta == 0:
for gr in el:
for g in gr:
if not g['class'].find('pasttime')>-1 and not g['class'].find('pastprname2')>-1 and not g['class'].find('pastdesc')>-1:
if g['class'].find('time')>-1 or g['class'].find('onair')>-1:
i+=1
if i==1: k=g.string.encode('utf-8')
f_time = g.string.encode('utf-8')
try:
if not s_time == "" and not title_pr == "":
#if ch == "818":
#print s_time
#print f_time
#print title_pr
if datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6])).timetuple().tm_hour < datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour:
start_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
elif datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6])).timetuple().tm_hour >= datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour and datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour<5:
start_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
else:
start_t_d = datetime.datetime.date(datetime.datetime.today())
start_t_t = datetime.datetime.time((datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6]))))
start_t1 = datetime.datetime.combine(start_t_d, start_t_t)
start_t = time.mktime(start_t1.timetuple())
if datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6])).timetuple().tm_hour < datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour:
finish_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
elif datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6])).timetuple().tm_hour >= datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour and datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour<5:
finish_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
else:
finish_t_d = datetime.datetime.date(datetime.datetime.today())
finish_t_t = datetime.datetime.time((datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6]))))
finish_t1 = datetime.datetime.combine(finish_t_d, finish_t_t)
finish_t = time.mktime(finish_t1.timetuple())
prog.append({"start":start_t, "finish":finish_t, "program":{"title":title_pr.replace("\\/","//")}})
except Exception, e:
print e
s_time = g.string.encode('utf-8')
elif g['class'].find('prname2')>-1:
try:
try:
try:
title_pr = g.string.encode('utf-8')
except:
title_pr = g.contents[0].encode('utf-8')
except Exception, e:
title_pr = g.contents[1].encode('utf-8')
except:
m=g.findAll('a')
for p in m:
title_pr = p.string.encode('utf-8')
else:
for gr in el:
for g in gr:
if not g['class'].find('pastdesc')>-1:
if g['class'].find('time')>-1 or g['class'].find('onair')>-1 or g['class'].find('pasttime')>-1:
i+=1
if i==1: k=g.string.encode('utf-8')
f_time = g.string.encode('utf-8')
try:
if not s_time == "" and not title_pr == "":
if datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6])).timetuple().tm_hour < datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour:
start_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
elif datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6])).timetuple().tm_hour >= datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour and datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour<5:
start_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
else:
start_t_d = datetime.datetime.date(datetime.datetime.today())
start_t_t = datetime.datetime.time((datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6]))))
start_t1 = datetime.datetime.combine(start_t_d, start_t_t)
start_t = time.mktime(start_t1.timetuple())-tdelta
if datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6])).timetuple().tm_hour < datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour:
finish_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
elif datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6])).timetuple().tm_hour >= datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour and datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour<5:
finish_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
else:
finish_t_d = datetime.datetime.date(datetime.datetime.today())
finish_t_t = datetime.datetime.time((datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6]))))
finish_t1 = datetime.datetime.combine(finish_t_d, finish_t_t)
finish_t = time.mktime(finish_t1.timetuple())-tdelta
#if ch == "1016":
#print "title= "+str(title_pr)
#print "f_time= "+str(f_time)
#print "finish_t= "+str(finish_t)
#print "timetuple= "+str(finish_t1.timetuple())
#print "time.mktime= "+str(time.mktime(finish_t1.timetuple()))
prog.append({"start":start_t, "finish":finish_t, "program":{"title":title_pr.replace("\\/","//")}})
except Exception, e:
print e
s_time = g.string.encode('utf-8')
elif g['class'].find('prname2')>-1 or g['class'].find('pastprname2')>-1:
try:
try:
try:
title_pr = g.string.encode('utf-8')
except:
title_pr = g.contents[0].encode('utf-8')
except Exception, e:
title_pr = g.contents[1].encode('utf-8')
except:
m=g.findAll('a')
for p in m:
title_pr = p.string.encode('utf-8')
dt=datetime.datetime.strftime(datetime.datetime.now() + datetime.timedelta(days=1) ,"%Y-%m-%d")
timezone = urllib.urlencode({
'timezone' : tz,
'submit.x' : '13',
'submit.y' : '9',
'selected_channel' : 'channel_'+ch+'',
'selected_date' : 'day_'+dt+''
})
Url = 'http://www.vsetv.com/rewrite_url.php'
http = GET(Url, Url, timezone, cookie)
beautifulSoup = BeautifulSoup(http)
el = beautifulSoup.findAll('div', attrs={'id': 'schedule_container'})
#d=[]
#prog=[]
i=0
for gr in el:
for g in gr:
if not g['class'].find('pastdesc')>-1:
if g['class'].find('time')>-1 or g['class'].find('onair')>-1 or g['class'].find('pasttime')>-1:
i+=1
if i==1: k=g.string.encode('utf-8')
f_time = g.string.encode('utf-8')
try:
if not s_time == "" and not title_pr == "":
if datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6])).timetuple().tm_hour < datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour and i!=1:
start_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=2)
elif datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6])).timetuple().tm_hour >= datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour and datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour<5 and i!=1:
start_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=2)
else:
start_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
start_t_t = datetime.datetime.time((datetime.datetime(*(time.strptime(s_time, "%H:%M")[0:6]))))
start_t1 = datetime.datetime.combine(start_t_d, start_t_t)
start_t = time.mktime(start_t1.timetuple())-tdelta
if datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6])).timetuple().tm_hour < datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour and i!=1:
finish_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=2)
elif datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6])).timetuple().tm_hour >= datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour and datetime.datetime(*(time.strptime(k, "%H:%M")[0:6])).timetuple().tm_hour<5 and i!=1:
finish_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=2)
else:
finish_t_d = datetime.datetime.date(datetime.datetime.today())+ datetime.timedelta(days=1)
finish_t_t = datetime.datetime.time((datetime.datetime(*(time.strptime(f_time, "%H:%M")[0:6]))))
finish_t1 = datetime.datetime.combine(finish_t_d, finish_t_t)
finish_t = time.mktime(finish_t1.timetuple())-tdelta
prog.append({"start":start_t, "finish":finish_t, "program":{"title":title_pr.replace("\\/","//")}})
except Exception, e:
print e
s_time = g.string.encode('utf-8')
elif g['class'].find('prname2')>-1 or g['class'].find('pastprname2')>-1:
try:
try:
try:
title_pr = g.string.encode('utf-8')
except:
title_pr = g.contents[0].encode('utf-8')
except Exception, e:
title_pr = g.contents[1].encode('utf-8')
except:
m=g.findAll('a')
for p in m:
title_pr = p.string.encode('utf-8')
d.append({"events":prog})
save_cache(d[0], str(ch)+"vsetv")
def GetChannels(Url):
http = GET(Url, Url)
ht={}
if http == None:
showMessage('YaTV:', 'Сервер не отвечает', 1000)
http = GET(Url, Url)
if http == None:
http = GET(Url, Url)
if http == None:
return None
else:
http = http.replace(':false',':0').replace(':true',':1')
http=eval(http)
for channel in http["schedules"]:
if xbmc.abortRequested:
break
else:
save_cache(channel, channel["channel"]["id"])
else:
http = http.replace(':false',':0').replace(':true',':1')
http=eval(http)
for channel in http["schedules"]:
if xbmc.abortRequested:
break
else:
save_cache(channel, channel["channel"]["id"])
else:
http = http.replace(':false',':0').replace(':true',':1')
http=eval(http)
for channel in http["schedules"]:
if xbmc.abortRequested:
break
else:
save_cache(channel, channel["channel"]["id"])
def UpdCache():
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[146,711,649,162,187,515,353,304,18,79],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[427,405,511,698,291,740,323,557,898,150],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[421,655,335,161,334,916,917,918,919,921],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[922,924,925,926,927,928,929,932,933,934],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[935,710,579,658,365,516,463,601,495,325],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[409,437,60,23,850,288,661,429,575,608],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[102,567,55,127,267,309,589,213,521,277],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[346,454,669,66,923,834,273,123,798,462],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[22,71,542,618,675,518,12,485,783,617],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[566,638,743,53,406,663,447,181,173,163],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[794,716,180,779,686,61,16,502,410,659],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[615,810,520,352,19,494,598,646,51,138],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[741,15,801,145,82,765,223,328,31,644],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[37,434,384,648,313,119,125,789,547,156],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[455,333,604,376,769,705,21,626,637,477],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1008,918,852,1039,1033,1436],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[275,776,555,308,332,849,388,897,425,774],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[258,389,680,723,154,367,505,595,6,737],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[481,726,423,113,713,111,662,201,681,322],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[377,499,134,664,183,697,358,563,311,217],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[24,799,821,614,153,415,250,8,401,306],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[557,1003,1021,747,987,988,1035,1032,1329],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[214,851,923,920,931,930,911,912,983,984],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[990,989,986,987,988,756,828,355,312,715],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[777,284,278,797,319,831,757,393,461,631],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[59,315,442,804,533,25,642,141,552,247],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[132,39,591,331,731,491,91,554,531,473],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[412,430,431,11,121,807,363,685,509,464],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[151,730,560,178,35,382,576,349,270,237],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[852,165,257,249,777,984,412,382,178,655],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[119,994,1037,1377,916,161,579,1371,1372,463],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[929,495,355,1330,393,1394,1026,801,921,1359],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[716,589,1012,1013,1011,613,124,996,1036,1392],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1331,897,1332,1000,638,1322,933,789,922],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1396,1562,59,925],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[620,583,586,680,937,281,709,228,430,167],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[280,76,627,939,677,808,453,632,788,128],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[422,140,507,85,773,940,143,181,670,650],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[479,326,90,666,753,702,315,649,18,391],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[458,1009,1555,1478,1525,1487,1492,1557,1488,1528],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1511,1500,1537,1499,1477,1508,1552,1466,1464,1476],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1548,1495,1550,1507,1534,1538,1470,1533,1530,1535],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1473,1481,1522,1521,1513,1483,1512,1510,1553,1547],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1503,1516,1480,1468,1502,1489,1514,1465,1515,1505],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1506,1469,1462,1485,1498,1484,1559,1493,1467,1471],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannels(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1526,1456,1505,1463,1472,1531],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannels(Url)
GetChCache()
def GetChannelsFull(Url):
http = GET(Url, Url)
if http == None:
showMessage('YaTV:', 'Сервер не отвечает', 1000)
http = GET(Url, Url)
if http == None:
http = GET(Url, Url)
if http == None:
return None
else:
http = http.replace(':false',':0').replace(':true',':1')
http = eval(http.replace("\\/","//"))
for channel in http["schedules"]:
if xbmc.abortRequested:
break
else:
add_to_db_New(channel["channel"]["id"], channel)
xbmc.sleep(250)
else:
http = http.replace(':false',':0').replace(':true',':1')
http = eval(http.replace("\\/","//"))
for channel in http["schedules"]:
if xbmc.abortRequested:
break
else:
add_to_db_New(channel["channel"]["id"], channel)
xbmc.sleep(250)
else:
http = http.replace(':false',':0').replace(':true',':1')
http = eval(http.replace("\\/","//"))
for channel in http["schedules"]:
if xbmc.abortRequested:
break
else:
add_to_db_New(channel["channel"]["id"], channel)
xbmc.sleep(250)
def UpdFull():
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[146,711,649,162,187,515,353,304,18,79],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[427,405,511,698,291,740,323,557,898,150],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[421,655,335,161,334,916,917,918,919,921],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[922,924,925,926,927,928,929,932,933,934],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[935,710,579,658,365,516,463,601,495,325],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[409,437,60,23,850,288,661,429,575,608],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[102,567,55,127,267,309,589,213,521,277],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[346,454,669,66,923,834,273,123,798,462],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[22,71,542,618,675,518,12,485,783,617],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[566,638,743,53,406,663,447,181,173,163],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[794,716,180,779,686,61,16,502,410,659],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[615,810,520,352,19,494,598,646,51,138],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[741,15,801,145,82,765,223,328,31,644],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[37,434,384,648,313,119,125,789,547,156],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[455,333,604,376,769,705,21,626,637,477],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1008,918,852,1039,1033,1436],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[275,776,555,308,332,849,388,897,425,774],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[258,389,680,723,154,367,505,595,6,737],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[481,726,423,113,713,111,662,201,681,322],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[377,499,134,664,183,697,358,563,311,217],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[24,799,821,614,153,415,250,8,401,306],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[557,1003,1021,747,987,988,1035,1032,1329],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[214,851,923,920,931,930,911,912,983,984],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[990,989,986,987,988,756,828,355,312,715],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[777,284,278,797,319,831,757,393,461,631],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[59,315,442,804,533,25,642,141,552,247],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[132,39,591,331,731,491,91,554,531,473],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[412,430,431,11,121,807,363,685,509,464],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[151,730,560,178,35,382,576,349,270,237],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[852,165,257,249,777,984,412,382,178,655],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[119,994,1037,1377,916,161,579,1371,1372,463],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[929,495,355,1330,393,1394,1026,801,921,1359],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[716,589,1012,1013,1011,613,124,996,1036,1392],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1331,897,1332,1000,638,1322,933,789,922],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1396,1562,59,925],"duration":43200,"lang":"ru"}&userRegion=213&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[620,583,586,680,937,281,709,228,430,167],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[280,76,627,939,677,808,453,632,788,128],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[422,140,507,85,773,940,143,181,670,650],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[479,326,90,666,753,702,315,649,18,391],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[458,1009,1555,1478,1525,1487,1492,1557,1488,1528],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1511,1500,1537,1499,1477,1508,1552,1466,1464,1476],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1548,1495,1550,1507,1534,1538,1470,1533,1530,1535],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1473,1481,1522,1521,1513,1483,1512,1510,1553,1547],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1503,1516,1480,1468,1502,1489,1514,1465,1515,1505],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1506,1469,1462,1485,1498,1484,1559,1493,1467,1471],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannelsFull(Url)
Url = 'http://tv.yandex.ru/ajax/i-tv-region/get?params={"channelIds":[1526,1456,1505,1463,1472,1531],"duration":43200,"lang":"ru"}&userRegion=187&resource=schedule'
GetChannelsFull(Url)
GetCh()
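# Build the programme-guide info for a single channel id: loads the cached
# schedule from the DB (falling back to the file cache), formats the current
# and upcoming shows with Kodi colour tags, and returns a dict with keys such
# as plot, img, ico, genre, year, mpaa, strt, p_list, prog1, plttime, pltprog.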
def GetPr(id2,format=None):
#print "запрос"
try:
#print id2
#GetChCache()
try:import torr2xbmc
except:pass
try: d=eval(get_inf_db_New(id2)[0][0].replace("#z","\\").replace("#y",'"'))
except Exception, e:
#print e
try: d=get_cache(id2)
except Exception, e:
#print "ERROR CACHE " + str(e)
return {}
except Exception, e:
print e
return {}
rez={}
#title = d["channel"]["title"]
#print d
#idch = d["channel"]["id"]
try:logo = d["channel"]["logo"]["src"]
except:logo=""
prlist= d["events"]
#print prlist
plot = []
k=0
plt=""
prog1=[]
p_list=[]
type1=""
img=""
st=""
year=""
age=""
plt_time=""
plt_prog=""
tdlocal = (datetime.datetime.now().timetuple().tm_hour - datetime.datetime.utcnow().timetuple().tm_hour)*3600
for j in prlist:
#print j["program"]["title"]
try:
td=datetime.timedelta(hours=int(j["finish"][-4]))
start_t = time.mktime((datetime.datetime(*(time.strptime(j["start"][:j["start"].count(j["start"])-7], "%Y-%m-%dT%H:%M:%S")[0:6]))-td).timetuple())+tdlocal#-time.timezone + time.localtime().tm_isdst*3600
finish_t = time.mktime((datetime.datetime(*(time.strptime(j["finish"][:j["finish"].count(j["finish"])-7], "%Y-%m-%dT%H:%M:%S")[0:6]))-td).timetuple())+tdlocal#-time.timezone + time.localtime().tm_isdst*3600
except Exception, e:
#print e
try:
start_t = j["start"]
finish_t = j["finish"]
except Exception, e:
print e
#print 'finish_t'+str(finish_t)
if finish_t > time.time():
#print 'finish_t'+str(finish_t)
try:
start = time.localtime(float(start_t))
finish = time.localtime(float(finish_t))
if finish.tm_min<10: f_time_m = ":0"+str(finish.tm_min)
else: f_time_m = ":"+str(finish.tm_min)
if finish.tm_hour<10: f_time_h = "0"+str(finish.tm_hour)
else: f_time_h = str(finish.tm_hour)
if start.tm_min<10: s_time_m = ":0"+str(start.tm_min)
else: s_time_m = ":"+str(start.tm_min)
if start.tm_hour<10: s_time_h = "0"+str(start.tm_hour)
else: s_time_h = str(start.tm_hour)
f_time=f_time_h+f_time_m
s_time=s_time_h+s_time_m
except Exception, e:
print 'ERROR TIME '+str(e)
try:
title_pr = j["program"]["title"]
#print title_pr
except Exception, e:
print e
try:type2= j["program"]["type"]
except: type2=""
try:age = j["program"]["age"]
except: age="0"
k+=1
if k==1:
st = start_t
year = finish.tm_year
try:type1=j["program"]["type"]["name"]
except: type1=""
try:
img=j["program"]["images"][0]["sizes"]["200"]["src"].replace("normal","orig").replace("//","http://")
except:img=""
try:
if torr2xbmc.__addon__.getSetting('description') == 'false':
try:
if j["program"]["description"] == "":description = ""
else:description = j["program"]["description"] + chr(10)
except:description = ""
else:description = ""
except:description = ""
plt = plt +"[B][COLOR FF0084FF]"+ s_time+"[/COLOR] [COLOR FFFFFFFF]"+"[B][COLOR FF0084FF]"+"-"+"[/COLOR] [COLOR FFFFFFFF]"+"[B][COLOR FF0084FF]"+ f_time+"[/COLOR][/B][COLOR FF111111] [B][COLOR FFFFFFFF]"+title_pr+"[/COLOR][/B][COLOR FF999999]"+chr(10)+description+"[/COLOR]"
try:
prog_color = torr2xbmc.__addon__.getSetting('prog_color')
prog_b = torr2xbmc.__addon__.getSetting('prog_b')
time_color = torr2xbmc.__addon__.getSetting('time_color')
prog_i = torr2xbmc.__addon__.getSetting('prog_i')
if prog_b == "true":
if prog_i == "true":
plt_time = "[I][B][COLOR FF"+time_color+"]" + s_time+"-" + f_time+"[/COLOR]"
plt_prog = "[I][B][COLOR FF"+prog_color+"]"+title_pr+"[/COLOR][/B][/I]"
else:
plt_time = "[B][COLOR FF"+time_color+"]" + s_time+"-" + f_time+"[/COLOR]"
plt_prog = "[B][COLOR FF"+prog_color+"]"+title_pr+"[/COLOR][/B]"
else:
if prog_i == "true":
plt_time = "[I][COLOR FF"+time_color+"]" + s_time+"-" + f_time+"[/COLOR]"
plt_prog = "[I][COLOR FF"+prog_color+"]"+title_pr+"[/COLOR][/I]"
else:
plt_time = "[COLOR FF"+time_color+"]" + s_time+"-" + f_time+"[/COLOR]"
plt_prog = "[COLOR FF"+prog_color+"]"+title_pr+"[/COLOR]"
except:
plt_time = "[B][COLOR FF0084FF]" + s_time+"-" + f_time+"[/COLOR]"
plt_prog = "[B][COLOR FFFFFFFF]"+title_pr+"[/COLOR][/B]"
elif k<=4:
#print k
prog1.append({"time":"[B][COLOR FF0084FF]"+ s_time+"[/COLOR] [COLOR FFFFFFFF]"+"[B][COLOR FF0084FF]"+"-"+"[/COLOR] [COLOR FFFFFFFF]"+"[B][COLOR FF0084FF]"+ f_time+"[/COLOR][/B]", "title":"[B][COLOR FFFFFFFF]"+ title_pr +"[/COLOR][/B]"})
plt = plt +"[B][COLOR FF0084FF]"+ s_time+"[/COLOR] [COLOR FFFFFFFF]"+"[B][COLOR FF0084FF]"+"-"+"[/COLOR] [COLOR FFFFFFFF]"+"[B][COLOR FF0084FF]"+ f_time+"[/COLOR] [COLOR FFFFFFFF]"+ title_pr +"[/COLOR][/B]"+chr(10)
#print 'prog1---'+str(prog1)
else:
#print k
plt = plt +"[B][COLOR FF0084FF]"+ s_time+"[/COLOR] [COLOR FFFFFFFF]"+"[B][COLOR FF0084FF]"+"-"+"[/COLOR] [COLOR FFFFFFFF]"+"[B][COLOR FF0084FF]"+ f_time+"[/COLOR] [COLOR FFFFFFFF]"+ title_pr +"[/COLOR][/B]"+chr(10)
p_list.append({"start": start_t, "finish": finish_t, "s_time": s_time, "f_time": f_time, "title_pr": title_pr, "type":type2, "age":age})
#print "plt---"+str(plt)
try:
rez={"plot":plt, "img":img, "ico":logo, "genre": type1, "year":year, "mpaa":str(age)+"+", "strt":st, "p_list": p_list, "prog1":prog1, "plttime":plt_time, "pltprog":plt_prog}
except Exception, e:
print e
pass
#print 'rez'+str(rez)
return rez
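# Return the datetime of the last schedule update recorded in the settings
# table, or None on a fresh database; on a parse error a timestamp roughly
# 23 hours old is returned so that a refresh is forced soon.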
def GetLastUpdate():
c = db.connect(database=db_name)
cu = c.cursor()
cu.execute("CREATE TABLE IF NOT EXISTS settings (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL UNIQUE, lastupdate datetime, success integer, dbver INTEGER NOT NULL DEFAULT '0');")
c.commit()
cu.execute('SELECT lastupdate FROM settings WHERE id = 1')
res = cu.fetchone()
if res == None:
c.close()
return None
else:
try:
dt = datetime.datetime.fromtimestamp(time.mktime(time.strptime(res[0], '%Y-%m-%d %H:%M:%S.%f')))
c.close()
return dt
except Exception, e:
print e
dt = datetime.datetime.now() - datetime.timedelta(hours = 23)
c.close()
return dt
return None
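# Decide whether the programme DB needs refreshing: on first run the settings
# row is created and a full update is performed; otherwise the DB file is
# deleted and rebuilt when the last update is older than 12 hours or the
# schema version is not 1, and an interrupted update (success flag unset) is
# simply re-run.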
def GetUpdateProg():
try:
throwaway = datetime.datetime.strptime('20110101','%Y%m%d')
except:
time.sleep(250)
throwaway = datetime.datetime.strptime('20110101','%Y%m%d')
lupd = GetLastUpdate()
global c
global cu
if lupd == None:
try:
cu.execute('INSERT INTO settings (id, lastupdate, dbver) VALUES (1, "%s", 1);' % datetime.datetime.now())
c.commit()
UpdCache()
UpdFull()
cu.execute('UPDATE settings SET success = 1 WHERE id = 1;')
c.commit()
c.close()
except Exception, e:
print e
else:
nupd = lupd + datetime.timedelta(hours = 12)
ver = ""
try:
c = db.connect(database=db_name, check_same_thread=False)
cu = c.cursor()
cu.execute('SELECT dbver FROM settings WHERE id = 1')
ver = cu.fetchone()[0]
except Exception, e:
ver=0
if nupd < datetime.datetime.now() or ver <> 1:
print 'remove db'
try:
try:
c.close()
os.remove(db_name)
except:
try:
c.close()
xbmc.sleep(250)
os.remove(db_name)
except Exception, e:
print 'Не удалось удалить старую БД программы: '+ str(e)
return
c = db.connect(database=db_name)
cu = c.cursor()
cu.execute("CREATE TABLE IF NOT EXISTS settings (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL UNIQUE, lastupdate datetime, success integer, dbver INTEGER NOT NULL DEFAULT '0');")
c.commit()
cu.execute('INSERT INTO settings (id, lastupdate, dbver) VALUES (1, "%s", 1);' % datetime.datetime.now())
c.commit()
UpdCache()
UpdFull()
cu.execute('UPDATE settings SET success = 1 WHERE id = 1;')
c.commit()
c.close()
except Exception, e:
print 'Ошибка: ' + str(e)
return
elif nupd > datetime.datetime.now():
cu.execute('SELECT success FROM settings WHERE id = 1;')
sc = cu.fetchone()
if not sc[0]==1:
UpdCache()
UpdFull()
cu.execute('UPDATE settings SET success = 1 WHERE id = 1;')
c.commit()
c.close()
else:
if lupd.timetuple().tm_mday != datetime.datetime.now().timetuple().tm_mday:
GetCh()
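# File-based fallback cache: the schedule dict is written out as an importable
# Python module (lib/cache<id>.py) so that get_cache() can simply import it
# back through the YAcache attribute.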
def save_cache(item, id):
s="YAcache="+str(item)
path=ru(os.path.join(addon.getAddonInfo('path'),"lib"))
fl = open(os.path.join( path ,"cache"+str(id)+".py"), "w")
fl.write("# -*- coding: utf-8 -*-"+chr(10))
fl.write(s)
fl.close()
def get_cache(id):
cache = __import__ ("cache"+str(id))
YAcache = getattr(cache, "YAcache")
return YAcache
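# Store the serialized schedule dict in a per-channel table: the table is
# created on first use, then the row is inserted or updated, with backslashes
# and double quotes escaped as #z / #y (GetPr reverses the substitution).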
def add_to_db_New(id, item):
err=0
item=str(item)
try:
cu.execute('CREATE TABLE "%s" (db_item VARCHAR(255));' %id)
c.commit()
except Exception, e:
err=1
if err==0:
cu.execute('INSERT INTO "%s" (db_item) VALUES ("%s");' %(id, item.replace("\\","#z").replace('"',"#y")))
c.commit()
elif err==1:
cu.execute('UPDATE "%s" SET db_item = "%s";' %(id, item.replace("\\","#z").replace('"',"#y")))
c.commit()
def get_inf_db_New(id):
#tor_id="n"+n
cu.execute(str('SELECT db_item FROM "%s";' %id))
c.commit()
info = cu.fetchall()
#c.close()
	return info
| sshnaidm/ru | script.module.YaTv/lib/YaTv.py | Python | gpl-2.0 | 65,597 |
from django.conf.urls import url, include
urlpatterns = [
url(r'^bookmarks/', include('bookmarks.api_urls', namespace='bookmarks')),
url(r'^tags/', include('wptags.api_urls', namespace='tags')),
]
| hellsgate1001/waypoints | waypoints/waypoints/api_urls.py | Python | mit | 206 |
# In Python 3.2 JUMP_ABSOLUTE's (which can
# turn into COME_FROM's) are not optimized as
# they are in later Python's.
#
# So an if statement can jump to the end of a for loop
# which in turn jump's back to the beginning of that loop.
#
# Should handle in Python 3.2
#
# 98 JUMP_BACK '16' statement after: names.append(name) to loop head
# 101_0 COME_FROM '50' statement: if name == ...to fictional "end if"
# 101 JUMP_BACK '16' jump as before to loop head
# RUNNABLE!
def _slotnames(cls):
names = []
for c in cls.__mro__:
if "__slots__" in c.__dict__:
slots = c.__dict__['__slots__']
for name in slots:
if name == "__dict__":
continue
else:
names.append(name) # 3.2 bug here jumping to outer for
# From 3.2.6 pdb.py
# There is no "come_from" after the "if" since the
# if jumps back to the loop. For this we use
# grammar rule "ifstmtl"
def lasti2lineno(linestarts, a):
for i in linestarts:
if a:
return a
return -1
assert lasti2lineno([], True) == -1
assert lasti2lineno([], False) == -1
assert lasti2lineno([1], False) == -1
assert lasti2lineno([1], True) == 1
# From 3.7 test_builtin.py
# Bug was allowing if condition jump back to the
# "for" loop as an acceptable "ifstmtl" rule.
# RUNNABLE!
def test_pow(m, b, c):
for a in m:
if a or \
b or \
c:
c = 1
return c
assert test_pow([], 2, 3) == 3
assert test_pow([1], 0, 5) == 1
assert test_pow([1], 4, 2) == 1
assert test_pow([0], 0, 0) == 0
| rocky/python-uncompyle6 | test/simple_source/looping/10_for_if_loopback.py | Python | gpl-3.0 | 1,639 |
"""
Testing for (Mean) Average Precision metric.
"""
from . import helpers
import itertools
import numpy as np
import pyltr
class TestAP(helpers.TestMetric):
def get_metric(self):
return pyltr.metrics.AP(k=4)
def get_queries_with_values(self):
yield [], 0.0
yield [0], 0.0
yield [1], 1.0
yield [1, 0], 1.0
yield [0, 1], 0.5
yield [1, 0, 1, 0], 5.0 / 6
yield [0, 1, 1, 1], 23.0 / 36
yield [1, 0, 1, 0, 1], 5.0 / 9
yield [1, 0, 1, 0, 0], 5.0 / 6
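        # For instance, with k=4 only the first four ranks contribute to the
        # precision sum: [1, 0, 1, 0, 1] scores (1/1 + 2/3) / 3 = 5/9 because
        # its third relevant document sits at rank 5, while [1, 0, 1, 0, 0]
        # has only two relevant documents and scores (1/1 + 2/3) / 2 = 5/6.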
def get_queries(self):
for i in range(0, 7):
for tup in itertools.product(*([(0, 1)] * i)):
yield np.array(tup)
| jma127/pyltr | pyltr/metrics/tests/test_ap.py | Python | bsd-3-clause | 691 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR dataset input module.
"""
import tensorflow as tf
def build_input(dataset, data_path, batch_size, mode):
"""Build CIFAR image and labels.
Args:
dataset: Either 'cifar10' or 'cifar100'.
data_path: Filename for data.
batch_size: Input batch size.
mode: Either 'train' or 'eval'.
Returns:
images: Batches of images. [batch_size, image_size, image_size, 3]
labels: Batches of labels. [batch_size, num_classes]
Raises:
ValueError: when the specified dataset is not supported.
"""
image_size = 32
if dataset == 'cifar10':
label_bytes = 1
label_offset = 0
num_classes = 10
elif dataset == 'cifar100':
label_bytes = 1
label_offset = 1
num_classes = 100
else:
raise ValueError('Not supported dataset %s', dataset)
depth = 3
image_bytes = image_size * image_size * depth
record_bytes = label_bytes + label_offset + image_bytes
data_files = tf.gfile.Glob(data_path)
file_queue = tf.train.string_input_producer(data_files, shuffle=True)
# Read examples from files in the filename queue.
reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
_, value = reader.read(file_queue)
# Convert these examples to dense labels and processed images.
record = tf.reshape(tf.decode_raw(value, tf.uint8), [record_bytes])
label = tf.cast(tf.slice(record, [label_offset], [label_bytes]), tf.int32)
# Convert from string to [depth * height * width] to [depth, height, width].
depth_major = tf.reshape(tf.slice(record, [label_bytes], [image_bytes]),
[depth, image_size, image_size])
# Convert from [depth, height, width] to [height, width, depth].
image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
if mode == 'train':
image = tf.image.resize_image_with_crop_or_pad(
image, image_size+4, image_size+4)
image = tf.random_crop(image, [image_size, image_size, 3])
image = tf.image.random_flip_left_right(image)
    # Brightness/saturation/contrast provide small gains of .2%~.5% on cifar.
# image = tf.image.random_brightness(image, max_delta=63. / 255.)
# image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
# image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
image = tf.image.per_image_standardization(image)
example_queue = tf.RandomShuffleQueue(
capacity=16 * batch_size,
min_after_dequeue=8 * batch_size,
dtypes=[tf.float32, tf.int32],
shapes=[[image_size, image_size, depth], [1]])
num_threads = 16
else:
image = tf.image.resize_image_with_crop_or_pad(
image, image_size, image_size)
image = tf.image.per_image_standardization(image)
example_queue = tf.FIFOQueue(
3 * batch_size,
dtypes=[tf.float32, tf.int32],
shapes=[[image_size, image_size, depth], [1]])
num_threads = 1
example_enqueue_op = example_queue.enqueue([image, label])
tf.train.add_queue_runner(tf.train.queue_runner.QueueRunner(
example_queue, [example_enqueue_op] * num_threads))
# Read 'batch' labels + images from the example queue.
images, labels = example_queue.dequeue_many(batch_size)
labels = tf.reshape(labels, [batch_size, 1])
indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])
labels = tf.sparse_to_dense(
tf.concat(values=[indices, labels], axis=1),
[batch_size, num_classes], 1.0, 0.0)
assert len(images.get_shape()) == 4
assert images.get_shape()[0] == batch_size
assert images.get_shape()[-1] == 3
assert len(labels.get_shape()) == 2
assert labels.get_shape()[0] == batch_size
assert labels.get_shape()[1] == num_classes
# Display the training images in the visualizer.
tf.summary.image('images', images)
return images, labels
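# Illustrative call (the file pattern and batch size below are made-up
# examples, not part of this module):
#   images, labels = build_input('cifar10', '/tmp/cifar10/data_batch_*', 128, 'train')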
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/models/resnet/cifar_input.py | Python | bsd-2-clause | 4,463 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayHttpListener(SubResource):
"""Http listener of an application gateway.
:param id: Resource ID.
:type id: str
:param frontend_ip_configuration: Frontend IP configuration resource of an
application gateway.
:type frontend_ip_configuration:
~azure.mgmt.network.v2017_09_01.models.SubResource
:param frontend_port: Frontend port resource of an application gateway.
:type frontend_port: ~azure.mgmt.network.v2017_09_01.models.SubResource
:param protocol: Protocol. Possible values include: 'Http', 'Https'
:type protocol: str or
~azure.mgmt.network.v2017_09_01.models.ApplicationGatewayProtocol
:param host_name: Host name of HTTP listener.
:type host_name: str
:param ssl_certificate: SSL certificate resource of an application
gateway.
:type ssl_certificate: ~azure.mgmt.network.v2017_09_01.models.SubResource
:param require_server_name_indication: Applicable only if protocol is
https. Enables SNI for multi-hosting.
:type require_server_name_indication: bool
:param provisioning_state: Provisioning state of the HTTP listener
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
'frontend_port': {'key': 'properties.frontendPort', 'type': 'SubResource'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'host_name': {'key': 'properties.hostName', 'type': 'str'},
'ssl_certificate': {'key': 'properties.sslCertificate', 'type': 'SubResource'},
'require_server_name_indication': {'key': 'properties.requireServerNameIndication', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, id: str=None, frontend_ip_configuration=None, frontend_port=None, protocol=None, host_name: str=None, ssl_certificate=None, require_server_name_indication: bool=None, provisioning_state: str=None, name: str=None, etag: str=None, type: str=None, **kwargs) -> None:
super(ApplicationGatewayHttpListener, self).__init__(id=id, **kwargs)
self.frontend_ip_configuration = frontend_ip_configuration
self.frontend_port = frontend_port
self.protocol = protocol
self.host_name = host_name
self.ssl_certificate = ssl_certificate
self.require_server_name_indication = require_server_name_indication
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
self.type = type
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/application_gateway_http_listener_py3.py | Python | mit | 3,718 |
#/###################/#
# Import modules
#
#ImportModules
import ShareYourSystem as SYS
#/###################/#
# Build the model
#
#set
BrianingDebugVariable=25.
#set
AgentUnitsInt=1000
#Define
MyPredicter=SYS.PredicterClass(
).mapSet(
{
'BrianingStepTimeFloat':0.01,
'-Populations':[
('|Sensor',{
'RecordingLabelVariable':[0,1],
#'BrianingDebugVariable':BrianingDebugVariable,
'-Interactions':{
'|Encod':{
'BrianingDebugVariable':BrianingDebugVariable
}
}
}),
('|Agent',{
'RecordingLabelVariable':[0,1],
#'BrianingDebugVariable':BrianingDebugVariable,
'-Interactions':{
'|Fast':{
'BrianingDebugVariable':BrianingDebugVariable
}
},
#'LeakingNoiseStdVariable':0.01
}),
('|Decoder',{
'RecordingLabelVariable':[0,1],
#'BrianingDebugVariable':BrianingDebugVariable
'-Interactions':{
'|Slow':{
#'BrianingDebugVariable':BrianingDebugVariable,
#'LeakingWeigthVariable':0.
}
}
})
]
}
).predict(
_AgentUnitsInt = AgentUnitsInt,
_CommandVariable = (
'#custom:#clock:25*ms',
[
"2.*(1.*mV+0.5*mV*(int(t==25*ms)+int(t==50*ms)))",
"2.*(1.*mV-0.5*mV*(int(t==25*ms)+int(t==50*ms)))"
]
),
_DecoderVariable = "#array",
_DecoderStdFloat = 1./SYS.numpy.sqrt(AgentUnitsInt),
_DecoderMeanFloat = AgentUnitsInt * 0.5, #need to make an individual PSP around 1 mV
_AgentResetVariable = -70., #big cost to reset neurons and make the noise then decide who is going to spike next
_AgentNoiseVariable = 1., #noise to make neurons not spiking at the same timestep
#_AgentThresholdVariable = -53., #increase the threshold in order to have a linear cost
_AgentRefractoryVariable=0.5,
_InteractionStr = "Spike"
).simulate(
100.
)
#/###################/#
# View
#
MyPredicter.mapSet(
{
'PyplotingFigureVariable':{
'figsize':(10,8)
},
'PyplotingGridVariable':(30,30),
'-Panels':[
(
'|Run',
[
(
'-Charts',
[
(
'|Sensor_I_Command',
{
'PyplotingLegendDict':{
'fontsize':10,
'ncol':2
}
}
),
(
'|Sensor_U',
{
'PyplotingLegendDict':{
'fontsize':10,
'ncol':2
}
}
),
(
'|Agent_U',
{
'PyplotingLegendDict':{
'fontsize':10,
'ncol':1
}
}
),
(
'|Agent_Default_Events',{}
),
(
'|Decoder_U',
{
'PyplotingLegendDict':{
'fontsize':10,
'ncol':1
}
}
)
]
)
]
)
]
}
).view(
).pyplot(
).show(
)
#/###################/#
# Print
#
#Definition the AttestedStr
print('MyPredicter is ')
SYS._print(MyPredicter)
| Ledoux/ShareYourSystem | Pythonlogy/ShareYourSystem/Specials/Predicters/Predicter/tests/02_tests_auto_spike/07_test_spike_multiagents_homogeneous_twosensors_ExampleCell.py | Python | mit | 2,921 |
#!/usr/bin/env python
strings=['hey','guys','i','am','a','string']
parameter_list=[[strings]]
def features_string_char (strings):
from shogun import StringCharFeatures, RAWBYTE
from numpy import array
#create string features
f=StringCharFeatures(strings, RAWBYTE)
#and output several stats
#print("max string length", f.get_max_vector_length())
#print("number of strings", f.get_num_vectors())
#print("length of first string", f.get_vector_length(0))
#print("string[5]", ''.join(f.get_feature_vector(5)))
#print("strings", f.get_features())
#replace string 0
f.set_feature_vector(array(['t','e','s','t']), 0)
#print("strings", f.get_features())
return f.get_string_list(), f
if __name__=='__main__':
print('StringCharFeatures')
features_string_char(*parameter_list[0])
| lambday/shogun | examples/undocumented/python/features_string_char.py | Python | bsd-3-clause | 792 |
"""
Created by Manuel Peuster <[email protected]>
This is a stupid MANO plugin used for testing.
"""
import logging
import json
import time
import sys
import os
import yaml
import uuid
sys.path.append("../son-mano-base")
from sonmanobase.plugin import ManoBasePlugin
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger("plugin:test-cleaner")
LOG.setLevel(logging.DEBUG)
class DemoPlugin1(ManoBasePlugin):
"""
This is a very simple example plugin to demonstrate
some APIs.
It does the following:
1. registers itself to the plugin manager
    2. sends a service remove request to the IA
    3. waits for the response and parses it
4. de-registers itself
"""
def __init__(self):
self.correlation_id = str(uuid.uuid4())
# call super class to do all the messaging and registration overhead
super(self.__class__, self).__init__(version="0.1-dev")
def __del__(self):
super(self.__class__, self).__del__()
def declare_subscriptions(self):
"""
Declare topics to listen.
"""
# We have to call our super class here
super(self.__class__, self).declare_subscriptions()
# Examples to demonstrate how a plugin can listen to certain events:
self.manoconn.register_async_endpoint(self._on_example_request, "example.plugin.*.request")
self.manoconn.register_notification_endpoint(self._on_example_notification,"example.plugin.*.notification")
def run(self):
"""
Plugin logic. Does nothing in our example.
"""
LOG.info('corr_id:'+self.correlation_id)
time.sleep(120)
LOG.info('Timeout')
self.deregister()
os._exit(1)
def on_registration_ok(self):
"""
Event that is triggered after a successful registration process.
"""
LOG.info("Registration OK.")
def on_lifecycle_start(self, ch, method, properties, message):
super(self.__class__, self).on_lifecycle_start(ch, method, properties, message)
id = os.environ.get("instance_uuid", 'aaaaa-aaaaa-aaaaa-aaaaa')
LOG.info('Found instance_uuid in ENV: '+ id)
self.removeService(id)
def on_infrastructure_adaptor_reply(self, ch, method, properties, message):
LOG.debug(json.loads(message))
msg = json.loads(message)
if 'status' in msg.keys() and (properties.correlation_id == self.correlation_id):
if msg['status'] == 'SUCCESS':
                LOG.info('instance removed successfully')
self.deregister()
os._exit(0)
else:
LOG.error('error during service remove')
self.deregister()
os._exit(1)
def removeService(self,id):
LOG.info('removing the deployed service: %r' % id)
vim_message = json.dumps({'instance_uuid':id})
LOG.debug('sending message:'+vim_message)
self.manoconn.call_async(self.on_infrastructure_adaptor_reply, 'infrastructure.service.remove',vim_message,correlation_id=self.correlation_id)
time.sleep(30)
def callback_print(self, ch, method, properties, message):
LOG.info('correlation_id: ' + str(properties.correlation_id))
LOG.info('message: ' + str(message))
def _on_example_request(self, ch, method, properties, message):
"""
Only used for the examples.
"""
LOG.info("Example message: %r " % message)
return json.dumps({"content" : "my response"})
def _on_example_request_response(self, ch, method, properties, message):
"""
Only used for the examples.
"""
LOG.info("Example message: %r " % message)
def _on_example_notification(self, ch, method, properties, message):
"""
Only used for the examples.
"""
LOG.info("Example message: %r " % message)
def on_list(self, ch, method, properties, message):
LOG.info(properties)
def main():
# reduce log level to have a nice output for demonstration
# reduce messaging log level to have a nicer output for this plugin
logging.getLogger("son-mano-base:messaging").setLevel(logging.INFO)
logging.getLogger("son-mano-base:plugin").setLevel(logging.INFO)
DemoPlugin1()
if __name__ == '__main__':
main()
| sonata-nfv/son-tests | int-slm-infrabstractV1/test-cleaner/test-cleaner/exampleplugin.py | Python | apache-2.0 | 4,340 |
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.db import exception as db_exc
import sqlalchemy as sa
from neutron.common import exceptions as q_exc
from neutron.common import log
from neutron.common import utils
from neutron.db import model_base
from neutron.extensions import dvr as ext_dvr
from neutron import manager
from neutron.openstack.common import log as logging
from oslo.config import cfg
from sqlalchemy.orm import exc
LOG = logging.getLogger(__name__)
dvr_mac_address_opts = [
cfg.StrOpt('dvr_base_mac',
default="fa:16:3f:00:00:00",
help=_('The base mac address used for unique '
'DVR instances by Neutron')),
]
cfg.CONF.register_opts(dvr_mac_address_opts)
class DistributedVirtualRouterMacAddress(model_base.BASEV2):
"""Represents a v2 neutron distributed virtual router mac address."""
__tablename__ = 'dvr_host_macs'
host = sa.Column(sa.String(255), primary_key=True, nullable=False)
mac_address = sa.Column(sa.String(32), nullable=False, unique=True)
class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase):
"""Mixin class to add dvr mac address to db_plugin_base_v2."""
@property
def plugin(self):
try:
if self._plugin is not None:
return self._plugin
except AttributeError:
pass
self._plugin = manager.NeutronManager.get_plugin()
return self._plugin
def _get_dvr_mac_address_by_host(self, context, host):
try:
query = context.session.query(DistributedVirtualRouterMacAddress)
dvrma = query.filter(
DistributedVirtualRouterMacAddress.host == host).one()
except exc.NoResultFound:
raise ext_dvr.DVRMacAddressNotFound(host=host)
return dvrma
def _create_dvr_mac_address(self, context, host):
"""Create DVR mac address for a given host."""
base_mac = cfg.CONF.dvr_base_mac.split(':')
max_retries = cfg.CONF.mac_generation_retries
for attempt in reversed(range(max_retries)):
try:
with context.session.begin(subtransactions=True):
mac_address = utils.get_random_mac(base_mac)
dvr_mac_binding = DistributedVirtualRouterMacAddress(
host=host, mac_address=mac_address)
context.session.add(dvr_mac_binding)
LOG.debug("Generated DVR mac for host %(host)s "
"is %(mac_address)s",
{'host': host, 'mac_address': mac_address})
dvr_macs = self.get_dvr_mac_address_list(context)
# TODO(vivek): improve scalability of this fanout by
# sending a single mac address rather than the entire set
self.notifier.dvr_mac_address_update(context, dvr_macs)
return self._make_dvr_mac_address_dict(dvr_mac_binding)
except db_exc.DBDuplicateEntry:
LOG.debug("Generated DVR mac %(mac)s exists."
" Remaining attempts %(attempts_left)s.",
{'mac': mac_address, 'attempts_left': attempt})
LOG.error(_("MAC generation error after %s attempts"), max_retries)
raise ext_dvr.MacAddressGenerationFailure(host=host)
def delete_dvr_mac_address(self, context, host):
query = context.session.query(DistributedVirtualRouterMacAddress)
(query.
filter(DistributedVirtualRouterMacAddress.host == host).
delete(synchronize_session=False))
def get_dvr_mac_address_list(self, context):
with context.session.begin(subtransactions=True):
return (context.session.
query(DistributedVirtualRouterMacAddress).all())
def get_dvr_mac_address_by_host(self, context, host):
"""Determine the MAC for the DVR port associated to host."""
if not host:
return
try:
return self._get_dvr_mac_address_by_host(context, host)
except ext_dvr.DVRMacAddressNotFound:
return self._create_dvr_mac_address(context, host)
def _make_dvr_mac_address_dict(self, dvr_mac_entry, fields=None):
return {'host': dvr_mac_entry['host'],
'mac_address': dvr_mac_entry['mac_address']}
@log.log
def get_compute_ports_on_host_by_subnet(self, context, host, subnet):
# FIXME(vivek, salv-orlando): improve this query by adding the
# capability of filtering by binding:host_id
vm_ports_by_host = []
filter = {'fixed_ips': {'subnet_id': [subnet]}}
ports = self.plugin.get_ports(context, filters=filter)
LOG.debug("List of Ports on subnet %(subnet)s received as %(ports)s",
{'subnet': subnet, 'ports': ports})
for port in ports:
if 'compute:' in port['device_owner']:
if port['binding:host_id'] == host:
port_dict = self.plugin._make_port_dict(
port, process_extensions=False)
vm_ports_by_host.append(port_dict)
LOG.debug("Returning list of VM Ports on host %(host)s for subnet "
"%(subnet)s ports %(ports)s",
{'host': host, 'subnet': subnet, 'ports': vm_ports_by_host})
return vm_ports_by_host
@log.log
def get_subnet_for_dvr(self, context, subnet):
try:
subnet_info = self.plugin.get_subnet(context, subnet)
except q_exc.SubnetNotFound:
return {}
else:
# retrieve the gateway port on this subnet
filter = {'fixed_ips': {'subnet_id': [subnet],
'ip_address': [subnet_info['gateway_ip']]}}
internal_gateway_ports = self.plugin.get_ports(
context, filters=filter)
if not internal_gateway_ports:
LOG.error(_("Could not retrieve gateway port "
"for subnet %s"), subnet_info)
return {}
internal_port = internal_gateway_ports[0]
subnet_info['gateway_mac'] = internal_port['mac_address']
return subnet_info
| virtualopensystems/neutron | neutron/db/dvr_mac_db.py | Python | apache-2.0 | 6,855 |
#!/usr/bin/env python
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
PACKAGE_NAME = "azure-identity"
PACKAGE_PPRINT_NAME = "Identity"
package_folder_path = PACKAGE_NAME.replace("-", "/")
namespace_name = PACKAGE_NAME.replace("-", ".")
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__ # type: ignore
raise Exception(
"This package is incompatible with azure=={}. ".format(ver) + 'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
with open(os.path.join(package_folder_path, "_version.py"), "r") as fd:
VERSION = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) # type: ignore
if not VERSION:
raise RuntimeError("Cannot find version information")
with open("README.md", encoding="utf-8") as f:
README = f.read()
with open("CHANGELOG.md", encoding="utf-8") as f:
CHANGELOG = f.read()
setup(
name=PACKAGE_NAME,
version=VERSION,
include_package_data=True,
description="Microsoft Azure {} Library for Python".format(PACKAGE_PPRINT_NAME),
long_description=README + "\n\n" + CHANGELOG,
long_description_content_type="text/markdown",
license="MIT License",
author="Microsoft Corporation",
author_email="[email protected]",
url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"License :: OSI Approved :: MIT License",
],
zip_safe=False,
packages=find_packages(
exclude=[
"tests",
# Exclude packages that will be covered by PEP420 or nspkg
"azure",
]
),
python_requires=">=3.6",
install_requires=[
"azure-core<2.0.0,>=1.11.0",
"cryptography>=2.5",
"msal<2.0.0,>=1.12.0",
"msal-extensions~=0.3.0",
"six>=1.12.0",
],
)
| Azure/azure-sdk-for-python | sdk/identity/azure-identity/setup.py | Python | mit | 2,651 |
"""
The algorithm finds the pattern in the given text using the bad-character rule.
The bad-character rule considers the mismatched character in the text.
The next occurrence of that character to the left in the pattern is found.
If the mismatched character occurs to the left in the pattern,
a shift is proposed that aligns the text block and the pattern.
If the mismatched character does not occur to the left in the pattern,
a shift is proposed that moves the entirety of the pattern past
the point of mismatch in the text.
If there is no mismatch, the pattern matches the text block.
Best-case time complexity: O(n/m)
    n = length of main string
    m = length of pattern string
"""
from __future__ import annotations
class BoyerMooreSearch:
def __init__(self, text: str, pattern: str):
self.text, self.pattern = text, pattern
self.textLen, self.patLen = len(text), len(pattern)
def match_in_pattern(self, char: str) -> int:
"""finds the index of char in pattern in reverse order
Parameters :
char (chr): character to be searched
Returns :
i (int): index of char from last in pattern
-1 (int): if char is not found in pattern
"""
for i in range(self.patLen - 1, -1, -1):
if char == self.pattern[i]:
return i
return -1
def mismatch_in_text(self, currentPos: int) -> int:
"""
find the index of mis-matched character in text when compared with pattern
from last
Parameters :
currentPos (int): current index position of text
Returns :
i (int): index of mismatched char from last in text
-1 (int): if there is no mismatch between pattern and text block
"""
for i in range(self.patLen - 1, -1, -1):
if self.pattern[i] != self.text[currentPos + i]:
return currentPos + i
return -1
def bad_character_heuristic(self) -> list[int]:
# searches pattern in text and returns index positions
positions = []
for i in range(self.textLen - self.patLen + 1):
mismatch_index = self.mismatch_in_text(i)
if mismatch_index == -1:
positions.append(i)
else:
match_index = self.match_in_pattern(self.text[mismatch_index])
i = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
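                # Note: rebinding the loop variable has no effect on a Python
                # for-loop, which reassigns i on the next iteration, so every
                # starting position is still examined.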
return positions
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
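# For text "ABAABA" and pattern "AB" the script prints:
#   Pattern found in following positions:
#   [0, 3]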
| TheAlgorithms/Python | strings/boyer_moore_search.py | Python | mit | 2,738 |
# Have the function SimpleSymbols(str) take the str parameter being passed and
# determine if it is an acceptable sequence by either returning the string true
# or false. The str parameter will be composed of + and = symbols with several
# letters between them (ie. ++d+===+c++==a) and for the string to be true each
# letter must be surrounded by a + symbol. So the string to the left would be
# false. The string will not be empty and will have at least one letter.
def SimpleSymbols(str):
prev = ""
in_cmp = False
for ch in str:
if ch.isalpha():
if prev != "+":
return 'false'
in_cmp = True
elif prev.isalpha():
if ch != '+':
return 'false'
in_cmp = False
prev = ch
return 'false' if in_cmp else 'true'
# Same return expression written with and/or logic
#return in_cmp and 'false' or 'true'
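# A couple of hand-worked cases (not from the original challenge tests):
#   SimpleSymbols("+d+=3=+s+") -> "true"   (every letter is wrapped in '+')
#   SimpleSymbols("f++d+")     -> "false"  ('f' is not preceded by '+')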
# keep this function call here
# to see how to enter arguments in Python scroll down
print SimpleSymbols(raw_input())
| serdardalgic/coderbyte-challenges | easy-07-simple-symbols.py | Python | gpl-2.0 | 1,044 |
# -*- coding: utf-8 -*-
import base64
from django.conf.urls import url
from tastypie.authentication import ApiKeyAuthentication
from tastypie.authorization import DjangoAuthorization
from tastypie.exceptions import ImmediateHttpResponse
from tastypie.http import HttpBadRequest
from tastypie.resources import ModelResource
from tastypie.utils import trailing_slash, dict_strip_unicode_keys
from employee_title.models import EmployeeTitle
from djgap.corsresource import CorsResourceBase
class EmployeeTitleResource(CorsResourceBase, ModelResource):
class Meta:
queryset = EmployeeTitle.objects.all()
allowed_method = ['get']
resource_name = 'employee_title'
authorization = DjangoAuthorization()
authentication = ApiKeyAuthentication()
def prepend_urls(self):
return [
url(r"^(?P<resource_name>%s)/add_titles%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('add_titles'), name="api_add_titles"),
url(r"^(?P<resource_name>%s)/get_titles%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_titles'), name="api_get_titles"),
]
def get_object_list(self, request):
return EmployeeTitle.objects.none()
def add_titles(self, request, **kwargs):
self.method_check(request, allowed=['post'])
self.is_authenticated(request)
if not request.user.is_superuser:
raise ImmediateHttpResponse(HttpBadRequest('Not a superuser'))
deserialized = self.deserialize(request, request.body,
format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_detail_data(request, deserialized)
bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
file_data = bundle.data.get('file')
if isinstance(file_data, basestring) and file_data.startswith('data:text'):
_, str = file_data.split(';base64,')
file_data = base64.b64decode(str)
else:
raise ImmediateHttpResponse(HttpBadRequest('该文件无法被阅读'))
rows = file_data.split("\n")
rows = [x.rstrip() for x in rows if x != ""]
for row in rows:
if EmployeeTitle.objects.filter(username=row, title=EmployeeTitle.TITLE_TEACHER).count() == 0:
EmployeeTitle(username=row, title=EmployeeTitle.TITLE_TEACHER).save()
object_list = {
'objects': len(rows),
}
return self.create_response(request, object_list)
def get_titles(self, request, **kwargs):
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
if not request.user.is_superuser:
raise ImmediateHttpResponse(HttpBadRequest('Not a superuser'))
data = [e.username for e in EmployeeTitle.objects.filter(title=EmployeeTitle.TITLE_TEACHER).all()]
data.sort()
object_list = {
'objects': data
}
return self.create_response(request, object_list)
| NealSun96/High5App | src/employee_title/api.py | Python | gpl-2.0 | 3,099 |
from __future__ import annotations
import logging
import sys
import traceback
from typing import Any, Mapping, TYPE_CHECKING
from ai.backend.common.events import AgentErrorEvent
from ai.backend.common.logging import BraceStyleAdapter
from ai.backend.common.types import (
AgentId,
LogSeverity,
)
from ai.backend.common.plugin.monitor import AbstractErrorReporterPlugin
from ..models import error_logs
if TYPE_CHECKING:
from ai.backend.manager.api.context import RootContext
log = BraceStyleAdapter(logging.getLogger(__name__))
class ErrorMonitor(AbstractErrorReporterPlugin):
async def init(self, context: Any = None) -> None:
if context is None:
log.warning(
"manager.plugin.error_monitor is initialized without the root context. "
"The plugin is disabled.",
)
self.enabled = False
return
else:
self.enabled = True
root_ctx: RootContext = context['_root.context'] # type: ignore
self.event_dispatcher = root_ctx.event_dispatcher
self._evh = self.event_dispatcher.consume(AgentErrorEvent, None, self.handle_agent_error)
self.db = root_ctx.db
async def cleanup(self) -> None:
if self.enabled:
self.event_dispatcher.unconsume(self._evh)
async def update_plugin_config(self, plugin_config: Mapping[str, Any]) -> None:
pass
async def capture_message(self, message: str) -> None:
pass
async def capture_exception(
self,
exc_instance: Exception = None,
context: Mapping[str, Any] = None,
) -> None:
if not self.enabled:
return
if exc_instance:
tb = exc_instance.__traceback__
else:
_, sys_exc_instance, tb = sys.exc_info()
if (
isinstance(sys_exc_instance, BaseException)
and not isinstance(sys_exc_instance, Exception)
):
# bypass BaseException as they are used for controlling the process/coroutine lifecycles
# instead of indicating actual errors
return
exc_instance = sys_exc_instance
exc_type: Any = type(exc_instance)
if context is None or 'severity' not in context:
severity = LogSeverity.ERROR
else:
severity = context['severity']
if context is None or 'user' not in context:
user = None
else:
user = context['user']
message = ''.join(traceback.format_exception_only(exc_type, exc_instance)).strip()
async with self.db.begin() as conn:
query = error_logs.insert().values({
'severity': severity,
'source': 'manager',
'user': user,
'message': message,
'context_lang': 'python',
'context_env': context,
'traceback': ''.join(traceback.format_tb(tb)).strip(),
})
await conn.execute(query)
log.debug(
"collected an error log [{}] \"{}\" from manager",
severity.name, message,
)
async def handle_agent_error(
self,
context: None,
source: AgentId,
event: AgentErrorEvent,
) -> None:
if not self.enabled:
return
async with self.db.begin() as conn:
query = error_logs.insert().values({
'severity': event.severity,
'source': source,
'user': event.user,
'message': event.message,
'context_lang': 'python',
'context_env': event.context_env,
'traceback': event.traceback,
})
await conn.execute(query)
log.debug(
"collected an error log [{}] \"{}\" from agent:{}",
event.severity.name, event.message, source,
)
| lablup/backend.ai-manager | src/ai/backend/manager/plugin/error_monitor.py | Python | lgpl-3.0 | 3,974 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61968.PaymentMetering.Shift import Shift
class CashierShift(Shift):
"""The operating shift for a cashier, during which he may transact against the CashierShift, subject to VendorShift being open.
"""
def __init__(self, cashFloat=0.0, Receipts=None, Transactions=None, Cashier=None, PointOfSale=None, *args, **kw_args):
"""Initialises a new 'CashierShift' instance.
@param cashFloat: The amount of cash that the cashier brings with him to start his shift and that he will take away at the end of his shift; i.e. the cash float does not get banked.
@param Receipts: All Receipts recorded for this Shift.
@param Transactions:
@param Cashier: Cashier operating this shift.
@param PointOfSale: Point of sale that is in operation during this shift.
"""
#: The amount of cash that the cashier brings with him to start his shift and that he will take away at the end of his shift; i.e. the cash float does not get banked.
self.cashFloat = cashFloat
self._Receipts = []
self.Receipts = [] if Receipts is None else Receipts
self._Transactions = []
self.Transactions = [] if Transactions is None else Transactions
self._Cashier = None
self.Cashier = Cashier
self._PointOfSale = None
self.PointOfSale = PointOfSale
super(CashierShift, self).__init__(*args, **kw_args)
_attrs = ["cashFloat"]
_attr_types = {"cashFloat": float}
_defaults = {"cashFloat": 0.0}
_enums = {}
_refs = ["Receipts", "Transactions", "Cashier", "PointOfSale"]
_many_refs = ["Receipts", "Transactions"]
def getReceipts(self):
"""All Receipts recorded for this Shift.
"""
return self._Receipts
def setReceipts(self, value):
for x in self._Receipts:
x.CashierShift = None
for y in value:
y._CashierShift = self
self._Receipts = value
Receipts = property(getReceipts, setReceipts)
def addReceipts(self, *Receipts):
for obj in Receipts:
obj.CashierShift = self
def removeReceipts(self, *Receipts):
for obj in Receipts:
obj.CashierShift = None
def getTransactions(self):
return self._Transactions
def setTransactions(self, value):
for x in self._Transactions:
x.CashierShift = None
for y in value:
y._CashierShift = self
self._Transactions = value
Transactions = property(getTransactions, setTransactions)
def addTransactions(self, *Transactions):
for obj in Transactions:
obj.CashierShift = self
def removeTransactions(self, *Transactions):
for obj in Transactions:
obj.CashierShift = None
def getCashier(self):
"""Cashier operating this shift.
"""
return self._Cashier
def setCashier(self, value):
if self._Cashier is not None:
filtered = [x for x in self.Cashier.CashierShifts if x != self]
self._Cashier._CashierShifts = filtered
self._Cashier = value
if self._Cashier is not None:
if self not in self._Cashier._CashierShifts:
self._Cashier._CashierShifts.append(self)
Cashier = property(getCashier, setCashier)
def getPointOfSale(self):
"""Point of sale that is in operation during this shift.
"""
return self._PointOfSale
def setPointOfSale(self, value):
if self._PointOfSale is not None:
filtered = [x for x in self.PointOfSale.CashierShifts if x != self]
self._PointOfSale._CashierShifts = filtered
self._PointOfSale = value
if self._PointOfSale is not None:
if self not in self._PointOfSale._CashierShifts:
self._PointOfSale._CashierShifts.append(self)
PointOfSale = property(getPointOfSale, setPointOfSale)
| rwl/PyCIM | CIM14/IEC61968/PaymentMetering/CashierShift.py | Python | mit | 5,075 |
import pymongo
import datetime
def shareFeedback():
client = pymongo.MongoClient('mongodb://54.86.201.60:27000/')
database = client['music']
collection = database['feedback']
post = {
"authorName" : "Jeremy Offer",
"authorMessage" : "Supper dooppeeer",
"mobileDevice" : "iOS",
"sharingTime" : datetime.datetime.utcnow()
}
feedback = database.feedback
post_id = feedback.insert_one(post).inserted_id
print post_id
shareFeedback()
| groschovskiy/lerigos_music | Server/API/module/feedback.py | Python | apache-2.0 | 496 |
import os
COMPUTER_NAME = os.environ['COMPUTERNAME']
print("Computer: ", COMPUTER_NAME)
WORKER_POOL_SIZE = 8
TARGET_VOXEL_MM = 1.00
MEAN_PIXEL_VALUE_NODULE = 41
LUNA_SUBSET_START_INDEX = 0
SEGMENTER_IMG_SIZE = 320
BASE_DIR_SSD = "C:/werkdata/kaggle/ndsb3/"
BASE_DIR = "D:/werkdata/kaggle/ndsb3/"
EXTRA_DATA_DIR = "resources/"
NDSB3_RAW_SRC_DIR = BASE_DIR + "ndsb_raw/stage12/"
LUNA16_RAW_SRC_DIR = BASE_DIR + "luna_raw/"
NDSB3_EXTRACTED_IMAGE_DIR = BASE_DIR_SSD + "ndsb3_extracted_images/"
LUNA16_EXTRACTED_IMAGE_DIR = BASE_DIR_SSD + "luna16_extracted_images/"
NDSB3_NODULE_DETECTION_DIR = BASE_DIR_SSD + "ndsb3_nodule_predictions/"
| juliandewit/kaggle_ndsb2017 | settings.py | Python | mit | 659 |
from xml.etree import ElementTree
from .models import Product, Program
def _simple_fixture_generator(user, name, fields, data_fn, last_sync=None):
project = user.project
if not project or not project.commtrack_enabled:
return []
# expand this here to prevent two separate couch calls
data = data_fn()
if not should_sync(data, last_sync):
return []
name_plural = "{}s".format(name)
root = ElementTree.Element('fixture',
attrib={
'id': 'commtrack:{}'.format(name_plural),
'user_id': user.user_id
})
list_elem = ElementTree.Element(name_plural)
root.append(list_elem)
for data_item in data:
item_elem = ElementTree.Element(name, {'id': data_item.get_id})
list_elem.append(item_elem)
for field_name in fields:
field_elem = ElementTree.Element(field_name)
val = getattr(data_item, field_name, None)
if isinstance(val, dict):
if val:
for k, v in val.items():
sub_el = ElementTree.Element(k)
sub_el.text = unicode(v if v is not None else '')
field_elem.append(sub_el)
item_elem.append(field_elem)
else:
field_elem.text = unicode(val if val is not None else '')
item_elem.append(field_elem)
return [root]
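# The generated XML has roughly this shape for products (values illustrative):
#   <fixture id="commtrack:products" user_id="...">
#     <products>
#       <product id="...">
#         <name>...</name><unit>...</unit><code>...</code>...
#       </product>
#     </products>
#   </fixture>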
def should_sync(data, last_sync):
"""
Determine if a data collection needs to be synced.
"""
# definitely sync if we haven't synced before
if not last_sync or not last_sync.date:
return True
# check if any items have been modified since last sync
for data_item in data:
# >= used because if they are the same second, who knows
# which actually happened first
if not data_item.last_modified or data_item.last_modified >= last_sync.date:
return True
return False
def product_fixture_generator(user, version, last_sync):
fields = [
'name',
'unit',
'code',
'description',
'category',
'program_id',
'cost',
'product_data'
]
data_fn = lambda: Product.by_domain(user.domain)
return _simple_fixture_generator(user, "product", fields, data_fn, last_sync)
def program_fixture_generator(user, version, last_sync):
fields = [
'name',
'code'
]
data_fn = lambda: Program.by_domain(user.domain)
return _simple_fixture_generator(user, "program", fields, data_fn, last_sync)
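# For reference, the generated fixture XML has roughly this shape (illustrative):
#
# <fixture id="commtrack:products" user_id="...">
#   <products>
#     <product id="...">
#       <name>...</name>
#       <unit>...</unit>
#       ...
#     </product>
#   </products>
# </fixture>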
| SEL-Columbia/commcare-hq | corehq/apps/commtrack/fixtures.py | Python | bsd-3-clause | 2,684 |
"""
A simple PyUSBTMC-based interface for the Rigol DS1102E Oscilloscope.
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='Rigol_USBTMC',
version='0.0.2',
description='A simple PyUSBTMC-based interface for the Rigol DS1102E Oscilloscope.',
author='Kevin D. Nielson, Boddmg',
author_email='',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
packages=find_packages(),
keywords='usbtmc rigol',
install_requires=[
'python-usbtmc',
'numpy',
'pyusb'
],
)
| AIMAtlanta/Rigol_USBTMC | setup.py | Python | mit | 1,044 |
# -*- coding: utf-8 -*-
# This source file is part of mc4p,
# the Minecraft Portable Protocol-Parsing Proxy.
#
# Copyright (C) 2011 Matthew J. McGill, Simon Marti
#
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://sam.zoy.org/wtfpl/COPYING for more details
import socket
import sys
from struct import pack, unpack
from optparse import OptionParser
from contextlib import closing
from random import choice
from zlib import compress, decompress
import encryption
from messages import protocol
from parsing import parse_unsigned_byte
from authentication import Authenticator
from proxy import UnsupportedPacketException
class MockTerminal(object):
class ColorTag(str):
def __call__(self, string):
return string
def __str__(self):
return ""
def __getattr__(self, name):
return self.ColorTag()
output_width = 79
if sys.platform == "win32":
is_terminal = False
t = MockTerminal()
else:
from blessings import Terminal
t = Terminal()
is_terminal = t.is_a_tty
if is_terminal:
output_width = max(t.width, output_width)
def parse_args():
"""Return options, or print usage and exit."""
usage = "Usage: %prog [options] [port]"
desc = ("Create a Minecraft server listening for a client connection.")
parser = OptionParser(usage=usage, description=desc)
parser.add_option("-c", "--send-chunks", dest="send_chunks",
action="store_true", default=False,
help="send some more packets after the encrypted " +
"connection was established")
parser.add_option("-s", "--stay-connected", dest="stay_connected",
action="store_true", default=False,
help="don't disconnect after a successfull handshake")
(opts, args) = parser.parse_args()
if not 0 <= len(args) <= 1:
parser.error("Incorrect number of arguments.")
port = 25565
if len(args) > 0:
try:
port = int(args[0])
except ValueError:
parser.error("Invalid port %s" % args[0])
return (port, opts)
class LoggingSocketStream(object):
def __init__(self, sock):
self.sock = sock
def __enter__(self):
sys.stdout.write(t.bold("Receiving raw bytes: "))
def read(self, length):
data = ""
for i in range(length):
data += self._read_byte()
sys.stdout.write(" ")
return data
def _read_byte(self):
byte = self.sock.recv(1)
if byte == '':
raise EOFException()
sys.stdout.write("%02x " % ord(byte))
return byte
def __exit__(self, exception_type, exception_value, traceback):
sys.stdout.write("\n")
class LoggingSocketCipherStream(LoggingSocketStream):
def __init__(self, sock, cipher):
self.sock = sock
self.cipher = cipher
self.data = []
self.pos = 21
def __enter__(self):
if is_terminal:
sys.stdout.write(t.bold("Receiving raw bytes: ") + t.move_down)
sys.stdout.write(t.bold("Decrypted bytes: ") + t.move_up)
else:
sys.stdout.write("Receiving raw bytes: ")
def read(self, length):
data = super(LoggingSocketCipherStream, self).read(length)
self.pos += 1
self.data.append(data)
return data
def _read_byte(self):
byte = super(LoggingSocketCipherStream, self)._read_byte()
decrypted = self.cipher.decrypt(byte)
if is_terminal:
sys.stdout.write(''.join((t.move_down, t.move_right * self.pos,
"%02x " % ord(decrypted), t.move_up)))
self.pos += 3
return decrypted
def __exit__(self, exception_type, exception_value, traceback):
if is_terminal:
sys.stdout.write(t.move_down * 2)
else:
sys.stdout.write("\n")
sys.stdout.write("Decrypted bytes: ")
print " ".join(
" ".join("%02x" % ord(c) for c in field) for field in self.data
)
class PacketFormatter(object):
IGNORE = ('msgtype', 'raw_bytes')
@classmethod
def print_packet(cls, packet):
formatter = "_format_packet_%02x" % packet['msgtype']
substitutes = {}
lengths = [len(field) for field in packet if field not in cls.IGNORE]
if not lengths:
return
maxlen = max(lengths)
if hasattr(cls, formatter):
substitutes = getattr(cls, formatter)(packet, maxlen + 4)
print t.bold("Packet content:")
for field in packet:
if field in cls.IGNORE:
continue
if field in substitutes:
value = substitutes[field]
if value is None:
continue
if isinstance(value, tuple) or isinstance(value, list):
value = cls._multi_line(value, maxlen + 4)
else:
value = packet[field]
print " %s:%s %s" % (field, " " * (maxlen - len(field)), value)
@classmethod
def bytes(cls, bytes, prefix="", prefix_format=None):
prefix_length = len(prefix)
if prefix_format is not None:
prefix = prefix_format(prefix)
return cls._multi_line(cls._bytes(bytes, prefix, 0, prefix_length), 0)
@classmethod
def _format_packet_fd(cls, packet, prelen):
key = encryption.decode_public_key(packet['public_key'])
modulus = cls._split_lines("%x" % key.key.n, "modulus: 0x", prelen)
token = ' '.join("%02x" % ord(c) for c in packet['challenge_token'])
raw = cls._bytes(packet['public_key'], "raw: ", prelen)
return {'challenge_token': token,
'public_key': ["exponent: 0x%x" % key.key.e] + modulus + raw}
@classmethod
def _format_packet_38(cls, packet, prelen):
data = cls._bytes(packet['chunks']['data'], "data: ", prelen)
meta = cls._table(packet['chunks']['metadata'], "meta: ", prelen)
return {'chunks': data + meta}
@classmethod
def _format_packet_fc(cls, packet, prelen):
token = cls._bytes(packet['challenge_token'], prelen=prelen)
secret = cls._bytes(packet['shared_secret'], prelen=prelen)
return {'challenge_token': token,
'shared_secret': secret}
@staticmethod
def _table(items, prefix, prelen=0):
if not items:
return [prefix + "Empty"]
titles = items[0].keys()
maxlen = [len(title) for title in titles]
for i in range(len(titles)):
title = titles[i]
for item in items:
if len(str(item[title])) > maxlen[i]:
maxlen[i] = len(str(item[title]))
def row(values, title=False):
if title:
line = prefix
else:
line = " " * len(prefix)
for i in range(len(values)):
value = values[i]
l = maxlen[i]
if isinstance(value, str):
line += value + " " * (l - len(value) + 1)
else:
line += " " * (l - len(str(value))) + str(value) + " "
return line
def separator():
return " " * len(prefix) + "-".join("-" * l for l in maxlen)
lines = [row(titles, title=True), separator()]
for item in items:
lines.append(row([item[title] for title in titles]))
return lines
@classmethod
def _bytes(cls, bytes, prefix="", prelen=0, prefix_length=None):
return cls._split_lines(" ".join("%02x" % ord(c) for c in bytes),
prefix=prefix, prelen=prelen,
prefix_length=prefix_length, partlen=3)
@staticmethod
def _split_lines(text, prefix="", prelen=0, prefix_length=None, partlen=1):
lines = []
prefix_length = prefix_length or len(prefix)
length = output_width - prelen - prefix_length
length = length - length % partlen
for i in range(0, len(text), length):
line = prefix if i == 0 else " " * prefix_length
line += text[i:min(i + length, len(text))]
lines.append(line)
return lines
@staticmethod
def _multi_line(lines, offset):
return ("\n" + " " * offset).join(lines)
class Server(object):
def __init__(self, port, send_chunks=False, stay_connected=False):
self.port = port
self.send_chunks = send_chunks
self.stay_connected = stay_connected
def start(self):
with closing(socket.socket(socket.AF_INET,
socket.SOCK_STREAM)) as srvsock:
srvsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvsock.bind(("", self.port))
srvsock.listen(1)
print t.bold("Listening on port %s" % self.port)
while True:
try:
self._handle_client(srvsock.accept())
except UnexpectedPacketException, e:
print t.bold_red("\nError:"),
print "Received unexpected 0x%02x packet" % (e.id_)
if e.encrypted_id is not None:
print t.bold("\nExpected message id (encrypted):"),
print "%02x" % e.encrypted_id
except UnsupportedPacketException, e:
print t.bold_red("\nError:"),
print "Received unsupported 0x%02x packet" % (e.id_)
except EOFException:
print t.bold_red("\nError:"),
print "Socket closed by client"
print t.bold("\nConnection closed")
def _handle_client(self, connection):
with closing(connection[0]) as sock:
clt_spec, srv_spec = protocol[0]
print t.bold("\nConnected to %s:%s" % connection[1])
print t.bold_cyan("\nExpecting Server Ping (0xfe) " +
"or Handshake (0x02) packet")
packet = parse_packet(sock, clt_spec)
if packet['msgtype'] == 0xfe:
motd = '\x00'.join((u'§1', # magic
str(max(protocol.keys())), # version
'latest', # version string
'mc4p debugger', # motd
'0',
'1'))
send_packet(sock, srv_spec, {'msgtype': 0xff,
'reason': motd})
return
elif packet['msgtype'] != 0x02:
raise UnexpectedPacketException(packet['msgtype'])
if packet['proto_version'] < 38:
print t.bold_red("Error:"),
print "Unsupported protocol version"
return
username = packet['username']
clt_spec, srv_spec = protocol[packet['proto_version']]
print t.bold("\nGenerating RSA key pair")
key = encryption.generate_key_pair()
challenge = encryption.generate_challenge_token()
server_id = encryption.generate_server_id()
packet = {'msgtype': 0xfd,
'server_id': server_id,
'public_key': encryption.encode_public_key(key),
'challenge_token': challenge}
send_packet(sock, srv_spec, packet)
packet = parse_packet(sock, clt_spec, 0xfc)
try:
decrypted_token = encryption.decrypt_shared_secret(
packet['challenge_token'], key
)
except:
decrypted_token = None
if decrypted_token is None:
try:
decrypted_token = key.decrypt(packet['challenge_token'])
except:
pass
if decrypted_token == challenge:
print t.bold_red("\nError:"),
print ("The challenge token was not padded " +
"correctly. See ftp://ftp.rsasecurity.com/pub/" +
"pkcs/pkcs-1/pkcs-1v2-1.pdf section 7.2.1 if " +
"your library does not support PKCS#1 padding.")
else:
print t.bold_red("\nError:"),
print "The challenge token is not encrypted correctly.\n"
print PacketFormatter.bytes(decrypted_token,
"Decrypted bytes: ", t.bold)
return
elif decrypted_token != challenge:
print t.bold_red("\nError:"),
print "Received challenge token does not",
print "match the expected value.\n"
print PacketFormatter.bytes(decrypted_token,
"Received bytes: ", t.bold)
print
print PacketFormatter.bytes(challenge,
"Expected bytes: ", t.bold)
return
secret = encryption.decrypt_shared_secret(packet['shared_secret'],
key)
if secret is None:
print t.bold_red("\nError:"),
print ("The shared secret was not padded" +
"correctly. See ftp://ftp.rsasecurity.com/pub/" +
"pkcs/pkcs-1/pkcs-1v2-1.pdf section 7.2.1 if " +
"your library does not support PKCS#1 padding.")
return
print PacketFormatter.bytes(secret, "Shared secret: ", t.bold)
if len(secret) != 16:
print t.bold_red("\nError:"),
print "The shared secret must be 16 bytes long",
print "(received length is %s)" % len(secret)
return
print t.bold_cyan("\nAuthentication")
print PacketFormatter.bytes(server_id, "Server ID: ", t.bold)
print PacketFormatter.bytes(secret, "Shared secret: ", t.bold)
print PacketFormatter.bytes(encryption.encode_public_key(key),
"Public key: ", t.bold)
print t.bold("Login hash: "),
print Authenticator.login_hash(server_id, secret, key)
if Authenticator.check_player(username, server_id, secret, key):
print t.bold_green("Success:"), "You are authenticated"
else:
print t.bold_yellow("Warning:"), "You are not authenticated"
send_packet(sock, srv_spec, {'msgtype': 0xfc,
'challenge_token': '',
'shared_secret': ''})
print t.bold("\nStarting AES encryption")
clt_cipher = encryption.AES128CFB8(secret)
srv_cipher = encryption.AES128CFB8(secret)
backup_cipher = encryption.AES128CFB8(secret)
parse_packet(sock, clt_spec, 0xcd, clt_cipher, backup_cipher)
send_packet(sock, srv_spec, {'msgtype': 0x01,
'eid': 1337,
'level_type': 'flat',
'server_mode': 0,
'dimension': 0,
'difficulty': 2,
'unused': 0,
'max_players': 20}, srv_cipher)
if self.send_chunks:
while True:
print
packet = parse_packet(sock, clt_spec, cipher=clt_cipher)
if packet['msgtype'] == 0x0d:
break
x, y, z = 5, 9, 5
send_packet(sock, srv_spec, {'msgtype': 0x06,
'x': x,
'y': y,
'z': z}, srv_cipher)
send_packet(sock, srv_spec, {'msgtype': 0xca,
'abilities': 0b0100,
'walking_speed': 25,
'flying_speed': 12}, srv_cipher)
send_packet(sock, srv_spec, {'msgtype': 0x04,
'time': 0}, srv_cipher)
send_packet(sock, srv_spec, multi_chunk_packet(), srv_cipher)
send_packet(sock, srv_spec, {'msgtype': 0x0d,
'x': x,
'y': y,
'stance': y + 1.5,
'z': z,
'yaw': 0,
'pitch': 0,
'on_ground': False}, srv_cipher)
buffer = StringSocket()
send_packet(buffer, srv_spec,
{'msgtype': 0x03,
'chat_msg': 'First message'},
srv_cipher)
send_packet(buffer, srv_spec,
{'msgtype': 0x03,
'chat_msg': 'Second message'}, srv_cipher)
sock.sendall(buffer.data)
if self.stay_connected:
while True:
packet = parse_packet(sock, clt_spec, cipher=clt_cipher,
title=True)
if packet['msgtype'] == 0xff:
break
elif packet['msgtype'] == 0x00:
                        send_packet(sock, srv_spec, {'msgtype': 0x00,
                                                     'id': 0}, srv_cipher)
break
else:
send_packet(sock, srv_spec,
{'msgtype': 0xff,
'reason': "Successfully logged in"}, srv_cipher)
def parse_packet(sock, msg_spec, expecting=None,
cipher=None, backup_cipher=None, title=False):
if expecting is not None:
packet = msg_spec[expecting]
print t.bold_cyan("\nExpecting %s (0x%02x) packet" % (packet.name,
expecting))
if title and is_terminal:
sys.stdout.write(t.move_down * 2)
elif title:
print
if cipher is None:
stream = LoggingSocketStream(sock)
else:
stream = LoggingSocketCipherStream(sock, cipher)
with stream:
msgtype = parse_unsigned_byte(stream)
if expecting is not None and msgtype != expecting:
if backup_cipher is None:
raise UnexpectedPacketException(msgtype)
else:
raise UnexpectedPacketException(
msgtype, ord(backup_cipher.encrypt(chr(expecting)))
)
if not msg_spec[msgtype]:
raise UnsupportedPacketException(msgtype)
msg_parser = msg_spec[msgtype]
msg = msg_parser.parse(stream)
if title:
if is_terminal:
sys.stdout.write(t.move_up * 3)
sys.stdout.write(t.bold_cyan("Received %s (0x%02x) packet" %
(msg_parser.name, msgtype)))
if is_terminal:
sys.stdout.write(t.move_down * 3)
else:
print
PacketFormatter.print_packet(msg)
if backup_cipher is not None:
backup_cipher.encrypt(msg_parser.emit(msg))
return msg
def send_packet(sock, msg_spec, msg, cipher=None):
packet = msg_spec[msg['msgtype']]
msgbytes = packet.emit(msg)
print t.bold_cyan("\nSending %s (0x%02x) packet" % (packet.name,
msg['msgtype']))
if cipher is None:
print t.bold("Raw bytes:"), ''.join("%02x " % ord(c) for c in msgbytes)
else:
print t.bold("Raw bytes: "),
print ''.join("%02x " % ord(c) for c in msgbytes)
msgbytes = cipher.encrypt(msgbytes)
print t.bold("Encrypted bytes:"),
print ''.join("%02x " % ord(c) for c in msgbytes)
PacketFormatter.print_packet(msg)
sock.sendall(msgbytes)
def multi_chunk_packet(radius=4):
d = 1 + 2 * radius
data = compress("".join(random_chunk() for i in range(d ** 2)))
meta = [{'x': i / d - radius,
'z': i % d - radius,
'bitmap': 1,
'add_bitmap': 0} for i in range(d ** 2)]
return {'msgtype': 0x38,
'chunks': {'data': data, 'metadata': meta}}
def random_chunk():
block_ids = [chr(x) for x in range(1, 6)]
blocks = "".join(choice(block_ids) for i in range(16 * 16 * 8))
blocks += '\x00' * 8 * 16 * 16
meta = '\x00' * 16 * 16 * 8
light = '\x00' * 12 * 16 * 16 + '\xff' * 4 * 16 * 16
biome = '\x01' * 16 * 16
return blocks + meta + light + biome
def flat_chunk():
blocks = '\x07' * 16 * 16
blocks += '\x03' * 2 * 16 * 16
blocks += '\x02' * 16 * 16
blocks += '\x00' * 12 * 16 * 16
meta = '\x00' * 16 * 16 * 8
light = '\x00' * 10 * 16 * 16 + '\xff' * 6 * 16 * 16
biome = '\x01' * 16 * 16
return blocks + meta + light + biome
class UnexpectedPacketException(Exception):
def __init__(self, id_, encrypted_id=None):
self.id_ = id_
self.encrypted_id = encrypted_id
class EOFException(Exception):
pass
class StringSocket(object):
def __init__(self):
self.data = ""
def send(self, data):
self.data += data
def sendall(self, data):
self.send(data)
if __name__ == "__main__":
raise NotImplementedError
(port, opts) = parse_args()
try:
Server(port, opts.send_chunks, opts.stay_connected).start()
except KeyboardInterrupt:
pass
| sadimusi/mc4p | mc4p/debug.py | Python | gpl-2.0 | 22,431 |
# Lint as: python2, python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python TFLite metrics helper."""
from typing import Optional, Text
from tensorflow.python.eager import monitoring
class TFLiteMetrics(object):
"""TFLite metrics helper for prod (borg) environment.
Attributes:
md5: A string containing the MD5 hash of the model binary.
model_path: A string containing the path of the model for debugging
purposes.
"""
_counter_interpreter_creation = monitoring.Counter(
'/tensorflow/lite/interpreter/created',
'Counter for number of interpreter created in Python.', 'python')
def __init__(self,
md5: Optional[Text] = None,
model_path: Optional[Text] = None) -> None:
del self # Temporarily removing self until parameter logic is implemented.
if md5 and not model_path or not md5 and model_path:
raise ValueError('Both model metadata(md5, model_path) should be given '
'at the same time.')
if md5:
# TODO(b/180400857): Create stub once the service is implemented.
pass
def increase_counter_interpreter_creation(self):
self._counter_interpreter_creation.get_cell('python').increase_by(1)
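# Example usage (illustrative):
#   metrics = TFLiteMetrics()
#   metrics.increase_counter_interpreter_creation()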
| annarev/tensorflow | tensorflow/lite/python/metrics_nonportable.py | Python | apache-2.0 | 1,871 |
import logging
from lml.plugin import PluginInfo
from moban import constants
LOG = logging.getLogger(__name__)
class ContentProcessor(PluginInfo):
"""
@ContentProcessor('strip', 'Stripping', 'Stripped'):
def strip(template_file: str) -> str:
ret = template_file.strip()
return ret
"""
def __init__(self, action, action_continuing_tense, action_past_tense):
super(ContentProcessor, self).__init__(
constants.TEMPLATE_ENGINE_EXTENSION, tags=[action]
)
self.action = action
self.action_continuing_tense = action_continuing_tense
        self.action_past_tense = action_past_tense
def __call__(self, a_content_processor_function):
continuing_tense = self.action_continuing_tense
past_tense = self.action_past_tense
class CustomEngine(object):
ACTION_IN_PRESENT_CONTINUOUS_TENSE = continuing_tense
ACTION_IN_PAST_TENSE = past_tense
def __init__(self, template_fs, options=None):
self.template_fs = template_fs
self.options = options
def get_template(self, template_file):
content = self.template_fs.readbytes(template_file)
return content
def get_template_from_string(self, a_string):
return a_string
def apply_template(self, template, *_):
ret = a_content_processor_function(template, self.options)
return ret
super(ContentProcessor, self).__call__(CustomEngine)
return a_content_processor_function
| chfw/moban | moban/core/content_processor.py | Python | mit | 1,622 |
# https://oj.leetcode.com/problems/remove-element/
# 12:38 - 12:44
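# Approach: when A[i] equals elem, overwrite it with the current last element and
# shrink the effective length instead of shifting the rest of the array, giving
# O(n) time and O(1) extra space.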
class Solution:
# @param A a list of integers
# @param elem an integer, value need to be removed
# @return an integer
def removeElement(self, A, elem):
aLen = len(A)
if aLen > 0:
i = 0
while i < aLen:
if A[i] == elem:
A[i] = A[aLen-1]
aLen -= 1
continue
i += 1
return aLen
s = Solution()
print s.removeElement([], 2)
print s.removeElement([2,2,2], 2)
print s.removeElement([1,2,3,2,4,2], 2)
| yaoxuanw007/forfun | leetcode/python/removeElement.py | Python | mit | 555 |
from bdnyc_app import app_bdnyc
import os
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
    app_bdnyc.run(host='0.0.0.0', port=port, debug=False)
| dr-rodriguez/AstrodbWeb | runapp.py | Python | mit | 172
#!/usr/bin/env python3
# Copyright 2022 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Rebuilds PNG files intended for use as icon images on the Mac. It is
# reasonably robust, but is not designed to be robust against adversarially-
# constructed files.
#
# This is an opinionated script and makes assumptions about the desired
# characteristics of a PNG file for use as a Mac icon. All users of this script
# must verify that those are the correct assumptions for their use case before
# using it.
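# Example invocation (hypothetical paths); the listed files are rewritten in place:
#   python3 png_fix.py icon_16.png icon_32.png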
import argparse
import binascii
import os
import struct
import sys
_PNG_MAGIC = b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a'
_CHUNK_HEADER_STRUCT = struct.Struct('>I4s')
_CHUNK_CRC_STRUCT = struct.Struct('>I')
class FormatError(Exception):
pass
def _process_path(path):
with open(path, 'r+b') as file:
return _process_file(file)
def _process_file(file):
magic = file.read(len(_PNG_MAGIC))
if magic != _PNG_MAGIC:
raise FormatError(file, file.tell(), 'bad magic', magic, _PNG_MAGIC)
chunks = {}
while True:
chunk_header = file.read(_CHUNK_HEADER_STRUCT.size)
(chunk_length, chunk_type) = _CHUNK_HEADER_STRUCT.unpack(chunk_header)
chunk = chunk_header + file.read(chunk_length + _CHUNK_CRC_STRUCT.size)
if chunk_type in chunks:
raise FormatError(file, file.tell(), 'duplicate chunk', chunk_type)
chunks[chunk_type] = chunk
if chunk_type == b'IEND':
break
eof = file.read(1)
if len(eof) != 0:
raise FormatError(file, '\'IEND\' chunk not at end')
ihdr = chunks[b'IHDR'][_CHUNK_HEADER_STRUCT.size:-_CHUNK_CRC_STRUCT.size]
(ihdr_width, ihdr_height, ihdr_bit_depth, ihdr_color_type,
ihdr_compression_method, ihdr_filter_method,
ihdr_interlace_method) = struct.unpack('>2I5b', ihdr)
# The only two color types that have transparency and can be used for icons
# are types 3 (indexed) and 6 (direct RGBA).
if ihdr_color_type not in (3, 6):
    raise FormatError(file, 'disallowed color type', ihdr_color_type)
if ihdr_color_type == 3 and b'PLTE' not in chunks:
raise FormatError(file, 'indexed color requires \'PLTE\' chunk')
if ihdr_color_type == 3 and b'tRNS' not in chunks:
raise FormatError(file, 'indexed color requires \'tRNS\' chunk')
if b'IDAT' not in chunks:
raise FormatError(file, 'missing \'IDAT\' chunk')
if b'iCCP' in chunks:
raise FormatError(file, 'disallowed color profile; sRGB only')
if b'sRGB' not in chunks:
# Note that a value of 0 is a perceptual rendering intent (e.g.
# photographs) while a value of 1 is a relative colorimetric rendering
# intent (e.g. icons). Every macOS icon that has an 'sRGB' chunk uses 0
# so that is what is used here. Please forgive us, UX.
#
# Reference:
# http://www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.sRGB
srgb_chunk_type = struct.pack('>4s', b'sRGB')
srgb_chunk_data = struct.pack('>b', 0) # Perceptual
srgb_chunk_length = struct.pack('>I', len(srgb_chunk_data))
srgb_chunk_crc = struct.pack(
'>I', binascii.crc32(srgb_chunk_type + srgb_chunk_data))
chunks[b'sRGB'] = (
srgb_chunk_length + srgb_chunk_type + srgb_chunk_data +
srgb_chunk_crc)
file.seek(len(_PNG_MAGIC), os.SEEK_SET)
file.write(chunks[b'IHDR'])
file.write(chunks[b'sRGB'])
if ihdr_color_type == 3:
file.write(chunks[b'PLTE'])
file.write(chunks[b'tRNS'])
file.write(chunks[b'IDAT'])
file.write(chunks[b'IEND'])
file.truncate()
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('paths', nargs='+', metavar='path')
parsed = parser.parse_args(args)
for path in parsed.paths:
print(path)
_process_path(path)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| chromium/chromium | tools/mac/icons/png_fix.py | Python | bsd-3-clause | 4,042 |
"""
Exercise the NotifySegment task to ensure the methods operate
as expected.
"""
# pylint: disable=C0301,W0622
import unittest
if __name__ == "__main__" and __package__ is None:
__package__ = "apiary.tests"
from apiary.tasks.bot.notify_segment import NotifySegment
class TestNotifySegment(unittest.TestCase):
"""Run some tests."""
    def test_get_websites_to_notify(self):
"""This gets websites from the segment that WikiApiary is in."""
curr_day = 4
curr_hour = 18
task = NotifySegment()
sites = task.get_notify_sites(curr_day, curr_hour)
if 'WikiApiary' not in sites:
raise Exception('WikiApiary not in list of sites returned.')
if __name__ == '__main__':
unittest.main()
| apexkid/Wikiapiary | apiary/tests/bot/notify_segment_tests.py | Python | gpl-2.0 | 746 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 DeneroTeam. (<http://www.deneroteam.com>)
# Copyright (C) 2011 Didotech Inc. (<http://www.didotech.com>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| dhp-denero/LibrERP | product_catalog_extend_livelucky/__init__.py | Python | agpl-3.0 | 1,117 |
#coding=utf-8
# Copyright (C) 2014 by Víctor Romero Blanco <info at playcircular dot com>.
# http://playcircular.com/
# It's licensed under the AFFERO GENERAL PUBLIC LICENSE unless stated otherwise.
# You can get copies of the licenses here: http://www.affero.org/oagpl.html
# AFFERO GENERAL PUBLIC LICENSE is also included in the file called "LICENSE".
from django.contrib import admin
from django.conf import settings
from settings import MEDIA_ROOT, STATIC_ROOT
from django.utils.translation import ugettext as _
from django.contrib.contenttypes import generic
from django.conf.urls import patterns, include, url
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from sorl.thumbnail import default
from sorl.thumbnail.admin import AdminImageMixin
from paginas.models import *
from paginas.forms import *
from mptt.admin import MPTTModelAdmin
# If DjangoMpttAdmin is used, the queryset() override below does not work
from django_mptt_admin.admin import DjangoMpttAdmin
from tinymce import models as tinymce_models
from tinymce.models import HTMLField
from django.db.models import Q
from django.db.utils import DatabaseError,IntegrityError
ADMIN_THUMBS_SIZE = '80x30'
############################################################################################################################
class Fotos_Entrada_Inline(admin.TabularInline):
model = Fotos_entrada
extra = 2
max_num = 14
verbose_name = _(u'foto')
############################################################################################################################
class Fotos_Pagina_Inline(admin.TabularInline):
model = Fotos_pagina
extra = 2
max_num = 14
verbose_name = _(u'foto')
############################################################################################################################
class MetatagInline(generic.GenericStackedInline):
model = Metatag
extra = 1
max_num = 1
verbose_name = "SEO"
############################################################################################################################
class Idiomas_Categoria_Entrada_Inline(admin.StackedInline):
model = Idiomas_categoria_entrada
formset = Idioma_requerido_formset
prepopulated_fields = {"slug": ("nombre",)}
extra = 1
max_num = 5
def get_extra(self, request, obj=None, **kwargs):
extra = 1
if obj:
extra = 0
return extra
return extra
verbose_name = _(u'idioma de categoria')
verbose_name_plural = _(u'idiomas de categorias')
############################################################################################################################
class Idiomas_Pagina_Inline(admin.StackedInline):
model = Idiomas_pagina
formset = Idioma_requerido_formset
prepopulated_fields = {"slug": ("titulo",)}
extra = 1
max_num = 5
def get_extra(self, request, obj=None, **kwargs):
extra = 1
if obj:
extra = 0
return extra
return extra
verbose_name = _(u'idioma de la pagina')
verbose_name_plural = _(u'idiomas')
############################################################################################################################
class Idiomas_Entrada_Inline(admin.StackedInline):
model = Idiomas_entrada
formset = Idioma_requerido_formset
prepopulated_fields = {"slug": ("titulo",)}
extra = 1
max_num = 5
def get_extra(self, request, obj=None, **kwargs):
extra = 1
if obj:
extra = 0
return extra
return extra
verbose_name = _(u'idioma de la entrada')
verbose_name_plural = _(u'idiomas')
############################################################################################################################
class Categoria_Entrada_Admin(admin.ModelAdmin):
list_display = ('nombre_de_categoria','usuario','grupos','idiomas','creada','creada_por','modificada','modificada_por')
search_fields = ['nombre']
form = Form_Categoria_Entrada_Admin
inlines = [
Idiomas_Categoria_Entrada_Inline,
]
    # This is needed for Form_Categoria_Entrada_Admin to work: it passes the request to the form
def get_form(self, request, obj=None, **kwargs):
self.exclude = []
if not request.user.is_superuser:
self.exclude.append('superadmin')
AdminForm = super(Categoria_Entrada_Admin, self).get_form(request, obj, **kwargs)
class ModelFormMetaClass(AdminForm):
def __new__(cls, *args, **kwargs):
kwargs['request'] = request
kwargs['user'] = request.user
return AdminForm(*args, **kwargs)
return ModelFormMetaClass
def queryset(self, request):
qs = super(Categoria_Entrada_Admin, self).queryset(request)
        # A superuser sees everything
if request.user.is_superuser:
return qs
else:
            # If not a superuser but a group administrator, they see everything belonging to their groups.
grupos_administrados = Miembro.objects.filter(usuario=request.user,nivel=u'Administrador').values_list('grupo', flat=True).order_by('grupo')
if len(grupos_administrados) > 0:
return qs.filter(grupo__in=grupos_administrados,superadmin=False)
else:
                # Otherwise they only see their own
return qs.filter(usuario=request.user)
def save_model(self, request, obj, form, change):
try:
c = Categoria_Entrada.objects.get(pk=obj.pk)
except Categoria_Entrada.DoesNotExist:
obj.usuario = request.user
obj.creada_por = request.user
obj.modificada_por = request.user
obj.save()
############################################################################################################################
class Entrada_Admin(admin.ModelAdmin):
list_display = ('usuario','_titulo','tipo','grupos','idiomas','visibilidad','estado','comentarios','creada','creada_por','modificada','modificada_por')
list_filter = ('visibilidad','estado')
search_fields = ['usuario','Idiomas_entrada__titulo']
filter_horizontal = ['categoria','entradas_relacionadas']
form = Form_Entrada_Admin
change_form_template = 'admin/paginas/entrada/change_form.html'
inlines = [
Idiomas_Entrada_Inline,
Fotos_Entrada_Inline,
]
    # This is needed for Form_Entrada_Admin to work: it passes the request to the form
def get_form(self, request, obj=None, **kwargs):
self.exclude = []
if not request.user.is_superuser:
self.exclude.append('superadmin')
AdminForm = super(Entrada_Admin, self).get_form(request, obj, **kwargs)
class ModelFormMetaClass(AdminForm):
def __new__(cls, *args, **kwargs):
kwargs['request'] = request
kwargs['user'] = request.user
return AdminForm(*args, **kwargs)
return ModelFormMetaClass
def queryset(self, request):
qs = super(Entrada_Admin, self).queryset(request)
        # A superuser sees everything
if request.user.is_superuser:
return qs
else:
            # If not a superuser but a group administrator, they see everything belonging to their groups.
grupos_administrados = Miembro.objects.filter(usuario=request.user,nivel=u'Administrador').values_list('grupo', flat=True).order_by('grupo')
if len(grupos_administrados) > 0:
return qs.filter(grupo__in=grupos_administrados,superadmin=False)
else:
                # Otherwise they only see their own
return qs.filter(usuario=request.user)
def save_model(self, request, obj, form, change):
try:
c = Entrada.objects.get(pk=obj.pk)
except Entrada.DoesNotExist:
#obj.usuario = request.user
obj.creada_por = request.user
obj.modificada_por = request.user
obj.save()
############################################################################################################################
class Pagina_Admin(DjangoMpttAdmin):
list_display = ('_titulo','parent','usuario','grupos','idiomas','tipo','visibilidad','en_menu','estado','comentarios','creada','creada_por','modificada','modificada_por')
form = Form_Pagina_Admin
change_form_template = 'admin/paginas/pagina/change_form.html'
list_filter = ('tipo','estado')
search_fields = ['usuario']
inlines = [
Idiomas_Pagina_Inline,
Fotos_Pagina_Inline,
]
    # This is needed for Form_Pagina_Admin to work: it passes the request to the form
def get_form(self, request, obj=None, **kwargs):
self.exclude = []
if not request.user.is_superuser:
self.exclude.append('tipo')
self.exclude.append('superadmin')
AdminForm = super(Pagina_Admin, self).get_form(request, obj, **kwargs)
class ModelFormMetaClass(AdminForm):
def __new__(cls, *args, **kwargs):
kwargs['request'] = request
kwargs['user'] = request.user
return AdminForm(*args, **kwargs)
return ModelFormMetaClass
def queryset(self, request):
qs = super(Pagina_Admin, self).queryset(request)
        # A superuser sees everything
if request.user.is_superuser:
return qs
else:
            # If not a superuser but a group administrator, they see everything belonging to their groups.
grupos_administrados = Miembro.objects.filter(usuario=request.user,nivel=u'Administrador').values_list('grupo', flat=True).order_by('grupo')
if len(grupos_administrados) > 0:
return qs.filter(grupo__in=grupos_administrados,superadmin=False)
else:
                # A normal user does not see pages
return qs.none()
def save_model(self, request, obj, form, change):
try:
c = Pagina.objects.get(pk=obj.pk)
except Pagina.DoesNotExist:
obj.usuario = request.user
obj.creada_por = request.user
obj.modificada_por = request.user
obj.save()
############################################################################################################################
class Banner_Admin(admin.ModelAdmin):
list_display = ('thumb_banner','titulo','grupos','url','tipo','dimensiones','visibilidad','activo','orden','creado','creado_por','modificado','modificado_por')
form = Form_Banner_Admin
list_filter = ('tipo','visibilidad')
def thumb_banner(self, obj):
thumb = default.backend.get_thumbnail(obj.banner.file, ADMIN_THUMBS_SIZE)
return u'<img width="%s" src="%s" />' % (thumb.width, thumb.url)
thumb_banner.short_description = 'Banner'
thumb_banner.allow_tags = True
    # This is needed for Form_Banner_Admin to work: it passes the request to the form
def get_form(self, request, obj=None, **kwargs):
self.exclude = []
if not request.user.is_superuser:
self.exclude.append('superadmin')
AdminForm = super(Banner_Admin, self).get_form(request, obj, **kwargs)
class ModelFormMetaClass(AdminForm):
def __new__(cls, *args, **kwargs):
kwargs['request'] = request
kwargs['user'] = request.user
return AdminForm(*args, **kwargs)
return ModelFormMetaClass
def queryset(self, request):
qs = super(Banner_Admin, self).queryset(request)
        # A superuser sees everything
if request.user.is_superuser:
return qs
else:
            # If not a superuser but a group administrator, they see everything belonging to their groups.
grupos_administrados = Miembro.objects.filter(usuario=request.user,nivel=u'Administrador').values_list('grupo', flat=True).order_by('grupo')
if len(grupos_administrados) > 0:
return qs.filter(grupo__in=grupos_administrados,superadmin=False)
else:
                # A normal user does not see banners
return qs.none()
def save_model(self, request, obj, form, change):
try:
b = Banner.objects.get(pk=obj.pk)
except Banner.DoesNotExist:
obj.usuario = request.user
obj.creado_por = request.user
obj.modificado_por = request.user
obj.save()
############################################################################################################################
class Comentarios_Admin(admin.ModelAdmin):
list_display = ('perfil','grupos','content_type','creado','creado_por','modificado','modificado_por')
search_fields = ['perfil']
readonly_fields = ('perfil','grupo','content_type','parent','object_id')
def queryset(self, request):
qs = super(Comentarios_Admin, self).queryset(request)
        # A superuser sees everything
if request.user.is_superuser:
return qs
else:
            # If not a superuser but a group administrator, they see everything belonging to their groups.
grupos_administrados = Miembro.objects.filter(usuario=request.user,nivel=u'Administrador').values_list('grupo', flat=True).order_by('grupo')
if len(grupos_administrados) > 0:
#miembros_administrados = Miembro.objects.filter(grupo__in=grupos_administrados).values_list('usuario', flat=True).order_by('usuario')
return qs.filter(grupo__in=grupos_administrados)
else:
                # Otherwise they only see their own
return qs.filter(pagina__usuario=request.user)
############################################################################################################################
admin.site.register(Categoria_Entrada,Categoria_Entrada_Admin)
admin.site.register(Entrada,Entrada_Admin)
admin.site.register(Pagina,Pagina_Admin)
admin.site.register(Comentario,Comentarios_Admin)
admin.site.register(Banner,Banner_Admin)
| PlayCircular/play_circular | apps/paginas/admin.py | Python | agpl-3.0 | 12,654 |
# Copyright 2008 Felix Marczinowski <[email protected]>
#
# This file is part of PySXM.
#
# PySXM is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySXM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySXM. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
import scipy
import Data
def __parline(name,value):
"""Format name and value as a parfile line
Returns a string"""
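    # e.g. __parline('Scan Angle', '0.0 ;[Degree]') yields the name left-justified
    # to 32 characters, followed by ': 0.0 ;[Degree]' and a trailing newline.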
return name.ljust(32) + ': ' + value + '\n'
def write(filename,pages):
"""Write the topography and current images in 'pages' to Omikron-style files"""
root, ext = os.path.splitext(filename)
path, base = os.path.split(root)
print 'opening ' + root + '.par' + ' for writing'
parfile = open(root + '.par','w')
for firstimage in pages:
if firstimage['type'] == 0:
break
parfile.write(';\n; Omicron SPM Control.\n; Parameter file for SPM data.\n;\n\n')
parfile.write('Format : 1\n\n')
parfile.write('Version : V2.2\n')
parfile.write('System : SCALA\n\n')
parfile.write('\n;\n; User Information.\n;\n\n')
date = str.split(firstimage['strings']['date'],'/')
parfile.write(__parline('Date',str.join('.',(date[1],date[0],date[2])) + ' ' + firstimage['strings']['time']))
parfile.write(__parline('User','spm'))
parfile.write(__parline('Comment',firstimage['strings']['user_text']))
parfile.write('\n;\n; Scanner Description.\n;\n\n')
parfile.write(__parline('Name','Converted from SM3-file'))
parfile.write(__parline('VGAP Contact','SAMPLE'))
parfile.write('\n;\n; Scanner Area Description.\n;\n\n')
parfile.write(__parline('Field X Size in nm',str(firstimage['x_size']*firstimage['x_scale']*1000000000.0) + ' ;[nm]'))
parfile.write(__parline('Field Y Size in nm',str(firstimage['y_size']*firstimage['y_scale']*1000000000.0) + ' ;[nm]'))
parfile.write(__parline('Image Size in X',str(firstimage['x_size'])))
parfile.write(__parline('Image Size in Y',str(firstimage['y_size'])))
parfile.write(__parline('Increment X',str(firstimage['x_scale']*1000000000.0) + ' ;[nm]'))
parfile.write(__parline('Increment Y',str(firstimage['y_scale']*1000000000.0) + ' ;[nm]'))
parfile.write(__parline('Scan Angle',str(firstimage['angle']) + ' ;[Degree]'))
parfile.write(__parline('X Offset',str(firstimage['x_offset']*1000000000.0) + ' ;[nm]'))
parfile.write(__parline('Y Offset',str(firstimage['y_offset']*1000000000.0) + ' ;[nm]'))
parfile.write('\n;\n; Topographic Channels.\n;\n\n')
for page in pages:
if page['type'] == 0: # image
if page['page_type'] == 1: # topography
displayname = 'Z'
physunit = 'nm'
extnum = '0'
if page['page_type'] == 2: # current
displayname = 'Ext 1'
physunit = 'V'
extnum = '1'
# RHK(right,down) -> Omikron(forward)
if (page['scan'] == 0) or (page['scan'] == 2):
direction = 'Forward'
extletter = 'f'
forwardpage = page
else:
direction = 'Backward'
extletter = 'b'
backwardpage = page
pagebasename = base + '.t' + extletter + extnum
page['basename'] = pagebasename
print 'opening ' + os.path.join(path,pagebasename) + ' for writing'
datafile = open(os.path.join(path,pagebasename),'wb')
data = page['page_data'].astype(scipy.UInt16)
data = data[::-1]
data = data[:, ::-1]
# data = transpose(data)
data.byteswap()
datafile.write(data.astype(scipy.UInt16))
datafile.close()
parfile.write('Topographic Channel'.ljust(32) + ': ' + displayname + ' ;Channel\n')
parfile.write((' ' * 34) + direction + '\n')
parfile.write((' ' * 34) + '-32767 ;Minimum raw value\n')
parfile.write((' ' * 34) + '32766 ;Maximum raw value\n')
parfile.write((' ' * 34) + str(page['z_scale']*1000000000.0*(-32767.0)) + ' ;Minimum value in physical unit\n')
parfile.write((' ' * 34) + str(page['z_scale']*1000000000.0*32766.0) + ' ;Maximum value in physical unit\n')
parfile.write((' ' * 34) + str(page['z_scale']*1000000000.0) + ' ;Resolution\n')
parfile.write((' ' * 34) + physunit + ' ;Physical unit\n')
parfile.write((' ' * 34) + pagebasename + ' ;Filename\n')
parfile.write((' ' * 34) + displayname.upper() + ' ;Display name\n')
parfile.write('\n')
parfile.write('\n;\n; Measurement parameters.\n;\n\n')
parfile.write(__parline('SPM Method',': STM'))
parfile.write(__parline('Dual mode','On'))
parfile.write(__parline('Delay','30000 ;[us] (Forward)'))
parfile.write((' ' * 34) + '30000 ;[us] (Backward)\n')
parfile.write(__parline('Gap Voltage', str(forwardpage['bias']) + ' ;[V] (Forward)'))
    parfile.write((' ' * 34) + str(backwardpage['bias']) + ' ;[V] (Backward)\n')
parfile.write(__parline('Feedback Set', str(forwardpage['current']*1000000000.0) + ' ;[nA] (Forward)'))
parfile.write((' ' * 34) + str(backwardpage['current']*1000000000.0) + ' ;[nA] (Backward)\n')
    parfile.write(__parline('Loop Gain','1.50000 ;[%] (Forward)'))
parfile.write((' ' * 34) + '1.50000 ;[%] (Backward)\n')
parfile.write('X Resolution : 0 ;Currently not used.\n')
parfile.write('Y Resolution : 0 ;Currently not used.\n')
parfile.write(__parline('Scan Speed', str(forwardpage['x_scale']*1000000000.0/forwardpage['period']) + ' ;[nm/s]'))
parfile.write('X Drift : 0.00000 ;[nm/s]\n')
parfile.write('Y Drift : 0.00000 ;[nm/s]\n')
parfile.write('Scan Mode : Frame\n')
parfile.write('Topography Time per Point : 0\n')
parfile.write('Spectroscopy Grid Value in X : 1\n')
parfile.write('Spectroscopy Grid Value in Y : 1\n')
parfile.write('Spectroscopy Points in X : 400\n')
parfile.write('Spectroscopy Lines in Y : 0\n')
parfile.write('\n')
parfile.write('\n;\n; Z Control Parameters.\n;\n\n')
parfile.write('Z Speed : 1000.00 ;[nm/s]\n')
parfile.write('Z Output Gain : 0.0100000\n')
    parfile.write('Automatic Z zero : On\n')
parfile.write('Z Input Gain : 1.0000\n')
parfile.close()
def readparfile(fname):
parfile = open(fname,'r')
common = {}
channels = []
re_emptyline = re.compile(r'^\s*$')
re_channelstart = re.compile(r'^Topographic Channel')
re_comment = re.compile(r'^Comment')
lineno = 0
line = parfile.readline()
while len(line) > 0:
line = line.split(';')[0] # use only the part before the first ';'
if not (re_emptyline.match(line)):
if re_channelstart.match(line):
channel = {}
channel['channel'] = line.split(':')[1].strip()
channel['direction'] = parfile.readline().split(';')[0].strip()
channel['minraw'] = parfile.readline().split(';')[0].strip()
channel['maxraw'] = parfile.readline().split(';')[0].strip()
channel['minphys'] = parfile.readline().split(';')[0].strip()
channel['maxphys'] = parfile.readline().split(';')[0].strip()
channel['res'] = parfile.readline().split(';')[0].strip()
channel['unit'] = parfile.readline().split(';')[0].strip()
channel['file'] = parfile.readline().split(';')[0].strip()
channel['name'] = parfile.readline().split(';')[0].strip()
channels.append(channel)
else:
if re_comment.match(line):
comment = line.split(':')[1].strip() + '\n'
line = parfile.readline().split(';')[0]
while len(line) > 0:
comment += line + '\n'
line = parfile.readline().split(';')[0].strip()
else:
(name, value) = line.split(':',1)
name = name.strip()
value = value.strip()
common[name] = value
if name in ('Delay','Gap Voltage','Feedback Set','Loop Gain'):
if common.has_key('Dual mode') and (common['Dual mode'] == 'On'):
line = parfile.readline().split(';')[0].strip()
common[name] = (common[name], line)
line = parfile.readline()
return (common,channels)
def pickValueForMode(com, chan, field):
if com['Dual mode'] == 'On':
if chan['direction'] == 'Forward':
return com[field][0]
else:
return com[field][1]
else:
return com[field]
def readdatafile(fname):
root, ext = os.path.splitext(fname)
path, base = os.path.split(root)
datafilename = base + ext
parfile = root + '.par'
(common, channels) = readparfile(parfile)
channelindex = [n for n,d in enumerate(channels) if d['file'] == datafilename][0]
thechannel = channels[channelindex]
i = Data.Image()
i.Name = datafilename
i.ImageType = 'Topo'
i.XPos = float(common['X Offset'])
i.YPos = float(common['Y Offset'])
i.XSize = float(common['Field X Size in nm'])
i.YSize = float(common['Field Y Size in nm'])
i.XRes = int(common['Image Size in X'])
i.YRes = int(common['Image Size in Y'])
i.ZScale = (float(thechannel['maxphys']) - float(thechannel['minphys'])) / \
(float(thechannel['maxraw']) - float(thechannel['minraw']))
i.UBias = float(pickValueForMode(common,thechannel,'Gap Voltage'))
i.ISet = float(pickValueForMode(common,thechannel,'Feedback Set'))
i.ScanSpeed = float(common['Scan Speed'])
i.d = scipy.fromfile(file=fname,dtype=scipy.int16)
i.d = i.d.byteswap()
i.d.shape = i.XRes, i.YRes
i.updateDataRange()
return i
| fmarczin/pysxm | SXM/Omikron.py | Python | gpl-3.0 | 10,263 |
import sys
import numpy as np
#from ase import Atoms
from ase.units import Bohr
from ase.dft.kpoints import kpoint_convert
from gpaw import debug
from gpaw.mpi import world, distribute_cpus
from gpaw.utilities import gcd
#from gpaw.utilities.tools import md5_array
from gpaw.utilities.gauss import gaussian_wave
from gpaw.band_descriptor import BandDescriptor
from gpaw.grid_descriptor import GridDescriptor
from gpaw.kpt_descriptor import KPointDescriptorOld as KPointDescriptor
#from gpaw.test.ut_common import create_parsize_maxbands
from gpaw.paw import kpts2ndarray
#from gpaw.brillouin import reduce_kpoints
from gpaw.parameters import InputParameters
#from gpaw.xc import XC
from gpaw.setup import Setups
from gpaw.utilities.gauss import gaussian_wave
from gpaw.fd_operators import Laplace
# -------------------------------------------------------------------
from gpaw.test.ut_common import ase_svnversion, shapeopt, TestCase, \
TextTestRunner, CustomTextTestRunner, defaultTestLoader, \
initialTestLoader, create_random_atoms, create_parsize_minbands
# -------------------------------------------------------------------
class UTDomainParallelSetup(TestCase):
"""
Setup a simple domain parallel calculation."""
# Number of bands
nbands = 1
# Spin-paired
nspins = 1
# Mean spacing and number of grid points per axis
h = 0.2 / Bohr
# Generic lattice constant for unit cell
a = 5.0 / Bohr
# Type of boundary conditions employed
boundaries = None
# Type of unit cell employed
celltype = None
def setUp(self):
for virtvar in ['boundaries', 'celltype']:
assert getattr(self,virtvar) is not None, 'Virtual "%s"!' % virtvar
# Basic unit cell information:
pbc_c = {'zero' : (False,False,False), \
'periodic': (True,True,True), \
'mixed' : (True, False, True)}[self.boundaries]
a, b = self.a, 2**0.5*self.a
cell_cv = {'general' : np.array([[0,a,a],[a/2,0,a/2],[a/2,a/2,0]]),
'rotated' : np.array([[0,0,b],[b/2,0,0],[0,b/2,0]]),
'inverted' : np.array([[0,0,b],[0,b/2,0],[b/2,0,0]]),
'orthogonal': np.diag([b, b/2, b/2])}[self.celltype]
cell_cv = np.array([(4-3*pbc)*c_v for pbc,c_v in zip(pbc_c, cell_cv)])
# Decide how many kpoints to sample from the 1st Brillouin Zone
kpts_c = np.ceil((10/Bohr)/np.sum(cell_cv**2,axis=1)**0.5).astype(int)
kpts_c = tuple(kpts_c*pbc_c + 1-pbc_c)
bzk_kc = kpts2ndarray(kpts_c)
self.gamma = len(bzk_kc) == 1 and not bzk_kc[0].any()
#p = InputParameters()
#Z_a = self.atoms.get_atomic_numbers()
#xcfunc = XC(p.xc)
#setups = Setups(Z_a, p.setups, p.basis, p.lmax, xcfunc)
#symmetry, weight_k, self.ibzk_kc = reduce_kpoints(self.atoms, bzk_kc,
# setups, p.usesymm)
self.ibzk_kc = bzk_kc.copy() # don't use symmetry reduction of kpoints
self.nibzkpts = len(self.ibzk_kc)
self.ibzk_kv = kpoint_convert(cell_cv, skpts_kc=self.ibzk_kc)
# Parse parallelization parameters and create suitable communicators.
#parsize_domain, parsize_bands = create_parsize_minbands(self.nbands, world.size)
parsize_domain, parsize_bands = world.size//gcd(world.size, self.nibzkpts), 1
assert self.nbands % np.prod(parsize_bands) == 0
domain_comm, kpt_comm, band_comm = distribute_cpus(parsize_domain,
parsize_bands, self.nspins, self.nibzkpts)
# Set up band descriptor:
self.bd = BandDescriptor(self.nbands, band_comm)
# Set up grid descriptor:
N_c = np.round(np.sum(cell_cv**2, axis=1)**0.5 / self.h)
N_c += 4-N_c % 4 # makes domain decomposition easier
self.gd = GridDescriptor(N_c, cell_cv, pbc_c, domain_comm, parsize_domain)
self.assertEqual(self.gamma, np.all(~self.gd.pbc_c))
# What to do about kpoints?
self.kpt_comm = kpt_comm
if debug and world.rank == 0:
comm_sizes = tuple([comm.size for comm in [world, self.bd.comm, \
self.gd.comm, self.kpt_comm]])
print '%d world, %d band, %d domain, %d kpt' % comm_sizes
def tearDown(self):
del self.ibzk_kc, self.ibzk_kv, self.bd, self.gd, self.kpt_comm
# =================================
def verify_comm_sizes(self):
if world.size == 1:
return
comm_sizes = tuple([comm.size for comm in [world, self.bd.comm, \
self.gd.comm, self.kpt_comm]])
self._parinfo = '%d world, %d band, %d domain, %d kpt' % comm_sizes
self.assertEqual(self.nbands % self.bd.comm.size, 0)
self.assertEqual((self.nspins*self.nibzkpts) % self.kpt_comm.size, 0)
def verify_grid_volume(self):
gdvol = np.prod(self.gd.get_size_of_global_array())*self.gd.dv
self.assertAlmostEqual(self.gd.integrate(1+self.gd.zeros()), gdvol, 10)
def verify_grid_point(self):
# Volume integral of cartesian coordinates of all available grid points
gdvol = np.prod(self.gd.get_size_of_global_array())*self.gd.dv
cmr_v = self.gd.integrate(self.gd.get_grid_point_coordinates()) / gdvol
# Theoretical center of cell based on all available grid data
cm0_v = np.dot((0.5*(self.gd.get_size_of_global_array()-1.0) \
+ 1.0-self.gd.pbc_c) / self.gd.N_c, self.gd.cell_cv)
self.assertAlmostEqual(np.abs(cmr_v-cm0_v).max(), 0, 10)
def verify_non_pbc_spacing(self):
atoms = create_random_atoms(self.gd, 1000, 'NH3', self.a/2)
pos_ac = atoms.get_positions()
cellvol = np.linalg.det(self.gd.cell_cv)
if debug: print 'cell volume:', np.abs(cellvol)*Bohr**3, 'Ang^3', cellvol>0 and '(right handed)' or '(left handed)'
# Loop over non-periodic axes and check minimum distance requirement
for c in np.argwhere(~self.gd.pbc_c).ravel():
a_v = self.gd.cell_cv[(c+1)%3]
b_v = self.gd.cell_cv[(c+2)%3]
c_v = np.cross(a_v, b_v)
for d in range(2):
# Inwards unit normal vector of d'th cell face of c'th axis
# and point intersected by this plane (d=0,1 / bottom,top).
n_v = np.sign(cellvol) * (1-2*d) * c_v / np.linalg.norm(c_v)
if debug: print {0:'x',1:'y',2:'z'}[c]+'-'+{0:'bottom',1:'top'}[d]+':', n_v, 'Bohr'
if debug: print 'gd.xxxiucell_cv[%d]~' % c, self.gd.xxxiucell_cv[c] / np.linalg.norm(self.gd.xxxiucell_cv[c]), 'Bohr'
origin_v = np.dot(d * np.eye(3)[c], self.gd.cell_cv)
d_a = np.dot(pos_ac/Bohr - origin_v[np.newaxis,:], n_v)
if debug: print 'a:', self.a/2*Bohr, 'min:', np.min(d_a)*Bohr, 'max:', np.max(d_a)*Bohr
self.assertAlmostEqual(d_a.min(), self.a/2, 0) #XXX digits!
class UTDomainParallelSetup_GUC(UTDomainParallelSetup):
__doc__ = UTDomainParallelSetup.__doc__
boundaries = 'mixed'
celltype = 'general'
class UTDomainParallelSetup_Rot(UTDomainParallelSetup):
__doc__ = UTDomainParallelSetup.__doc__
boundaries = 'mixed'
celltype = 'rotated'
class UTDomainParallelSetup_Inv(UTDomainParallelSetup):
__doc__ = UTDomainParallelSetup.__doc__
boundaries = 'mixed'
celltype = 'inverted'
class UTDomainParallelSetup_Ortho(UTDomainParallelSetup):
__doc__ = UTDomainParallelSetup.__doc__
boundaries = 'mixed'
celltype = 'orthogonal'
# -------------------------------------------------------------------
class UTGaussianWavefunctionSetup(UTDomainParallelSetup):
__doc__ = UTDomainParallelSetup.__doc__ + """
The pseudo wavefunctions are moving gaussians centered around each atom."""
allocated = False
dtype = None
def setUp(self):
UTDomainParallelSetup.setUp(self)
for virtvar in ['dtype']:
assert getattr(self,virtvar) is not None, 'Virtual "%s"!' % virtvar
# Set up kpoint descriptor:
self.kd = KPointDescriptor(self.nspins, self.nibzkpts, self.kpt_comm, \
self.gamma, self.dtype)
# Choose a sufficiently small width of gaussian test functions
cell_c = np.sum(self.gd.cell_cv**2, axis=1)**0.5
self.sigma = np.min((0.1+0.4*self.gd.pbc_c)*cell_c)
if debug and world.rank == 0:
print 'sigma=%8.5f Ang' % (self.sigma*Bohr), 'cell_c:', cell_c*Bohr, 'Ang', 'N_c:', self.gd.N_c
self.atoms = create_random_atoms(self.gd, 4, 'H', 4*self.sigma)
self.r_vG = None
self.wf_uG = None
self.laplace0_uG = None
self.allocate()
def tearDown(self):
UTDomainParallelSetup.tearDown(self)
del self.phase_ucd, self.atoms, self.r_vG, self.wf_uG, self.laplace0_uG
self.allocated = False
def allocate(self):
if self.allocated:
raise RuntimeError('Already allocated!')
# Calculate complex phase factors:
self.phase_ucd = np.ones((self.kd.mynks, 3, 2), complex)
if not self.gamma:
for myu, phase_cd in enumerate(self.phase_ucd):
u = self.kd.global_index(myu)
s, k = self.kd.what_is(u)
phase_cd[:] = np.exp(2j * np.pi * self.gd.sdisp_cd * \
self.ibzk_kc[k,:,np.newaxis])
assert self.dtype == complex, 'Complex wavefunctions are required.'
self.r_vG = self.gd.get_grid_point_coordinates()
self.wf_uG = self.gd.zeros(self.kd.mynks, dtype=self.dtype)
self.laplace0_uG = self.gd.zeros(self.kd.mynks, dtype=self.dtype)
buf_G = self.gd.empty(dtype=self.dtype)
sdisp_Ac = []
for a,spos_c in enumerate(self.atoms.get_scaled_positions() % 1.0):
for sdisp_x in range(-1*self.gd.pbc_c[0],self.gd.pbc_c[0]+1):
for sdisp_y in range(-1*self.gd.pbc_c[1],self.gd.pbc_c[1]+1):
for sdisp_z in range(-1*self.gd.pbc_c[2],self.gd.pbc_c[2]+1):
sdisp_c = np.array([sdisp_x, sdisp_y, sdisp_z])
if debug and world.rank == 0:
print 'a=%d, spos=%s, sdisp_c=%s' % (a,spos_c,sdisp_c)
sdisp_Ac.append((a,spos_c,sdisp_c))
for a,spos_c,sdisp_c in sdisp_Ac:
if debug and world.rank == 0:
print 'Adding gaussian at a=%d, spos=%s, sigma=%8.5f Ang' % (a,spos_c+sdisp_c,self.sigma*Bohr)
r0_v = np.dot(spos_c+sdisp_c, self.gd.cell_cv)
for myu in range(self.kd.mynks):
u = self.kd.global_index(myu)
s, k = self.kd.what_is(u)
ibzk_v = self.ibzk_kv[k]
# f(r) = sum_a A exp(-|r-R^a|^2 / 2sigma^2) exp(i k.r)
gaussian_wave(self.r_vG, r0_v, self.sigma, ibzk_v, A=1.0,
dtype=self.dtype, out_G=buf_G)
self.wf_uG[myu] += buf_G
# d^2/dx^2 exp(ikx-(x-x0)^2/2sigma^2)
# ((ik-(x-x0)/sigma^2)^2 - 1/sigma^2) exp(ikx-(x-x0)^2/2sigma^2)
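# Summing (i*k_c - (r_c - r0_c)/sigma^2)^2 over the three Cartesian components
# and subtracting 3/sigma^2 (done below) gives the full 3D Laplacian prefactor.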
dr2_G = np.sum((1j*ibzk_v[:,np.newaxis,np.newaxis,np.newaxis] \
- (self.r_vG-r0_v[:,np.newaxis,np.newaxis,np.newaxis]) \
/ self.sigma**2)**2, axis=0)
self.laplace0_uG[myu] += (dr2_G - 3/self.sigma**2) * buf_G
self.allocated = True
# =================================
def test_something(self):
laplace_uG = np.empty_like(self.laplace0_uG)
op = Laplace(self.gd, dtype=self.dtype)
for myu, laplace_G in enumerate(laplace_uG):
phase_cd = {float:None, complex:self.phase_ucd[myu]}[self.dtype]
op.apply(self.wf_uG[myu], laplace_G, phase_cd)
print 'myu:', myu, 'diff:', np.std(laplace_G-self.laplace0_uG[myu]), '/', np.abs(laplace_G-self.laplace0_uG[myu]).max()
# -------------------------------------------------------------------
def UTGaussianWavefunctionFactory(boundaries, celltype, dtype):
sep = '_'
classname = 'UTGaussianWavefunctionSetup' \
+ sep + {'zero':'Zero', 'periodic':'Periodic', 'mixed':'Mixed'}[boundaries] \
+ sep + {'general':'GUC', 'rotated':'Rot', 'inverted':'Inv',
'orthogonal':'Ortho'}[celltype] \
+ sep + {float:'Float', complex:'Complex'}[dtype]
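# For example, boundaries='mixed', celltype='general' and dtype=complex yields
# the class name 'UTGaussianWavefunctionSetup_Mixed_GUC_Complex'.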
class MetaPrototype(UTGaussianWavefunctionSetup, object):
__doc__ = UTGaussianWavefunctionSetup.__doc__
boundaries = boundaries
celltype = celltype
dtype = dtype
MetaPrototype.__name__ = classname
return MetaPrototype
# -------------------------------------------------------------------
if __name__ in ['__main__', '__builtin__']:
# We may have been imported by test.py, if so we should redirect to logfile
if __name__ == '__builtin__':
testrunner = CustomTextTestRunner('ut_gucops.log', verbosity=2)
else:
from gpaw.utilities import devnull
stream = (world.rank == 0) and sys.stdout or devnull
testrunner = TextTestRunner(stream=stream, verbosity=2)
parinfo = []
for test in [UTDomainParallelSetup_GUC, UTDomainParallelSetup_Rot, \
UTDomainParallelSetup_Inv, UTDomainParallelSetup_Ortho]:
info = ['', test.__name__, test.__doc__.strip('\n'), '']
testsuite = initialTestLoader.loadTestsFromTestCase(test)
map(testrunner.stream.writeln, info)
testresult = testrunner.run(testsuite)
assert testresult.wasSuccessful(), 'Initial verification failed!'
parinfo.extend([' Parallelization options: %s' % tci._parinfo for \
tci in testsuite._tests if hasattr(tci, '_parinfo')])
parinfo = np.unique(np.sort(parinfo)).tolist()
testcases = []
for boundaries in ['zero', 'periodic', 'mixed']:
for celltype in ['general', 'rotated', 'inverted', 'orthogonal']:
for dtype in (boundaries=='zero' and [float, complex] or [complex]):
testcases.append(UTGaussianWavefunctionFactory(boundaries, \
celltype, dtype))
for test in testcases:
info = ['', test.__name__, test.__doc__.strip('\n')] + parinfo + ['']
testsuite = defaultTestLoader.loadTestsFromTestCase(test)
map(testrunner.stream.writeln, info)
testresult = testrunner.run(testsuite)
# Provide feedback on failed tests if imported by test.py
if __name__ == '__builtin__' and not testresult.wasSuccessful():
raise SystemExit('Test failed. Check ut_gucops.log for details.')
| robwarm/gpaw-symm | gpaw/test/parallel/ut_gucops.py | Python | gpl-3.0 | 14,751 |
#! /usr/bin/env python
"""
Author: Jeremy M. Stober
Program: FIND_DUPLICATES.PY
Date: Thursday, November 1 2012
Description: Find duplicate images.
"""
import numpy as np
duplicates = []
imgs = np.load("observations.npy")
#imgs = np.load("imgs.npy")
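# Pairwise comparison: q iterates over the tail imgs[i:], so its index in the full
# array is j + i; the i != j + i check skips comparing an image with itself.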
for (i,p) in enumerate(imgs):
for (j,q) in enumerate(imgs[i:]):
if i != j+i and np.allclose(p,q):
print i,j+i
duplicates.append((i,j+i))
import cPickle as pickle
pickle.dump(duplicates, open("o_duplicates.pck","w"), pickle.HIGHEST_PROTOCOL)
| stober/lspi | bin/find_duplicates.py | Python | bsd-2-clause | 533 |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import test_framework.loginit
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript
from io import BytesIO
import time
# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
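# For instance, a typical canonical signature with 32-byte R and S values and a
# SIGHASH_ALL hashtype is laid out as:
# 0x30 0x44 0x02 0x20 <R: 32 bytes> 0x02 0x20 <S: 32 bytes> 0x01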
def unDERify(tx):
'''
Make the signature in vin 0 of a tx non-DER-compliant,
by adding padding after the S-value.
'''
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
newscript.append(i[0:-1] + b'\0' + i[-1:])
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
'''
This test is meant to exercise BIP66 (DER SIG).
Connect to a single node.
Mine 2 (version 2) blocks (save the coinbases for later).
Generate 298 more version 2 blocks, verify the node accepts.
Mine 749 version 3 blocks, verify the node accepts.
Check that the new DERSIG rules are not enforced on the 750th version 3 block.
Mine 199 new version blocks.
Mine 1 old-version block.
Mine 1 new version block.
Check that the new DERSIG rules are enforced on the 951st version 3 block.
Mine 1 old version block, see that the node rejects.
'''
class BIP66Test(ComparisonTestFramework):
def __init__(self):
self.num_nodes = 1
def setup_network(self):
# Must set the blockversion for this test
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-use-thinblocks=0', '-debug', '-whitelist=127.0.0.1', '-blockversion=2']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
return CTransaction().deserialize(signresult['hex'])
def get_tests(self):
self.coinbase_blocks = self.nodes[0].generate(2)
height = 3 # height of the next block to build
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
''' 298 more version 2 blocks '''
test_blocks = []
for i in range(298):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=False)
''' Mine 749 version 3 blocks '''
test_blocks = []
for i in range(749):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=True)
'''
Check that the new DERSIG rules are not enforced in the 750th
version 3 block.
'''
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
unDERify(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
''' Mine 199 new version blocks on last valid tip '''
test_blocks = []
for i in range(199):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=False)
''' Mine 1 old version block '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
''' Mine 1 new version block '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
'''
Check that the new DERSIG rules are enforced in the 951st version 3
block.
'''
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
unDERify(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
''' Mine 1 old version block, should be invalid '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
if __name__ == '__main__':
BIP66Test().main()
| BitcoinUnlimited/BitcoinUnlimited | qa/rpc-tests/bipdersig-p2p.py | Python | mit | 6,817 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2015 Elico Corp (<http://www.elico-corp.com>)
# Authors: Siyuan Gu
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Adds several more product sale prices.",
"version": "8.0.1.0.1",
'license': 'AGPL-3',
"author": "Mentis do.o.o.,Elico Corp",
'website': 'https://www.elico-corp.com',
'support': '[email protected]',
"category": "Sales Management",
"depends": ['product'],
"data": [
'views/multiprice.xml',
'wizard/multiprice_copy_price.xml',
'wizard/multiprice_multiply_price.xml',
],
"application": False,
"installable": True,
}
| Elico-Corp/odoo-addons | product_multiprices/__openerp__.py | Python | agpl-3.0 | 1,514 |
# coding: utf-8
import os
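# Normalizes the header of every pymatgen module: any existing "from __future__"
# imports are collected, merged with unicode_literals, and the file is rewritten
# to start with, for example:
#
# # coding: utf-8
#
# from __future__ import division, unicode_literals
#
# ("division" here only illustrates an import that may already be present in a file).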
for parent, subdir, files in os.walk("../pymatgen"):
for fn in files:
if fn.endswith(".py") and fn != "__init__.py":
fp = os.path.join(parent, fn)
with open(fp) as f:
contents = f.read()
if "unicode_literals" not in contents:
contents = "from __future__ import unicode_literals\n" + contents
lines = contents.split("\n")
future = []
clean = []
while len(lines) > 0:
l = lines.pop(0)
if l.strip().startswith("from __future__"):
future.append(l.strip().split("import")[-1].strip())
elif not l.strip().startswith("# coding"):
clean.append(l)
clean = (
"# coding: utf-8\n\nfrom __future__ import "
+ ", ".join(future)
+ "\n\n"
+ "\n".join(clean).strip()
+ "\n"
)
with open(fp, "w") as f:
f.write(clean)
| gmatteo/pymatgen | dev_scripts/force_unicode.py | Python | mit | 1,075 |
# -*- coding: utf-8 -*-
""" Commonly used band frequencies
For your convenience we have predefined some widely adopted brain rhythms.
You can access them with
.. code-block:: python
:linenos:
from dyconnmap.bands import *
print(bands['alpha'])
============= ================== =================
brainwave frequency (Hz) variable/index
============= ================== =================
δ [1.0, 4.0] bands['delta']
θ [4.0, 8.0] bands['theta']
α1 [7.0, 10.0] bands['alpha1']
α2 [10.0, 13.0] bands['alpha2']
α [7.0, 13.0] bands['alpha']
μ [8.0, 13.0] bands['mu']
β [13.0, 25.0] bands['beta']
γ [25.0, 40.0] bands['gamma']
============= ================== =================
"""
# Author: Avraam Marimpis <[email protected]>
bands = {
'delta': [1.0, 4.0],
'theta': [4.0, 8.0],
'mu': [8.0, 13.0],
'alpha': [7.0, 13.0], 'alpha1': [7.0, 10.0], 'alpha2': [10.0, 13.0],
'beta': [13.0, 25.0],
'gamma': [25.0, 40.0]
}
| makism/dyfunconn | dyconnmap/bands.py | Python | bsd-3-clause | 1,162 |
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
vertices = (
(1, -1, -1),
(1, 1, -1),
(-1, 1, -1),
(-1, -1, -1),
(1, -1, 1),
(1, 1, 1),
(-1, -1, 1),
(-1, 1, 1)
)
edges = (
(0, 1),
(0, 3),
(0, 4),
(2, 1),
(2, 3),
(2, 7),
(6, 3),
(6, 4),
(6, 7),
(5, 1),
(5, 4),
(5, 7)
)
colors = (
(1, 0, 0),
(0, 1, 0),
(0, 0, 1),
(0, 1, 0),
)
surfaces = (
(0, 1, 2, 3),
(3, 2, 7, 6),
(6, 7, 5, 4),
(4, 5, 1, 0),
(1, 5, 7, 2),
(4, 0, 3, 6)
)
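# Each entry in `surfaces` lists four indices into `vertices`; Cube() below draws
# the six faces as colored quads and then the twelve `edges` as plain lines.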
def Cube():
glBegin(GL_QUADS)
for surface in surfaces:
for i, vertex in enumerate(surface):
glColor3fv(colors[i])
glVertex3fv(vertices[vertex])
glEnd()
glBegin(GL_LINES)
for edge in edges:
for vertex in edge:
glVertex3fv(vertices[vertex])
glEnd()
def main():
pygame.init()
display = (800, 600)
pygame.display.set_mode(display, DOUBLEBUF|OPENGL)
gluPerspective(45, (display[0]/display[1]), 0.1, 50.0)
glTranslatef(0.0, 0.0, -5)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
glTranslatef(-0.5, 0, 0)
if event.key == pygame.K_RIGHT:
glTranslatef(0.5, 0, 0)
if event.key == pygame.K_UP:
glTranslatef(0, 1, 0)
if event.key == pygame.K_DOWN:
glTranslatef(0, -1, 0)
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 4:
glTranslatef(0, 0, 1.0)
if event.button == 5:
glTranslatef(0, 0, -1.0)
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
Cube()
pygame.display.flip()
pygame.time.wait(10)
if __name__ == "__main__":
main()
| quanhua92/learning-notes | libs/pyopengl/pygame/01_navigate.py | Python | apache-2.0 | 2,078 |
# -*- encoding: utf-8 -*-
"""
Usage::
hammer capsule [OPTIONS] SUBCOMMAND [ARG] ...
Parameters::
SUBCOMMAND subcommand
[ARG] ... subcommand arguments
Subcommands::
content Manage the capsule content
create Create a capsule
delete Delete a capsule
import-classes Import puppet classes from puppet Capsule.
info Show a capsule
list List all capsules
refresh-features Refresh capsule features
update Update a capsule
"""
from robottelo.cli.base import Base
class Capsule(Base):
"""
Manipulates Foreman's capsule.
"""
command_base = 'capsule'
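    # Each helper below only sets `command_sub` and delegates to Base.execute(), so
    # e.g. content_synchronize(options) ends up invoking
    # `hammer capsule content synchronize` with CLI options built from the options dict.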
@classmethod
def content_add_lifecycle_environment(cls, options):
"""Add lifecycle environments to the capsule."""
cls.command_sub = 'content add-lifecycle-environment'
result = cls.execute(
cls._construct_command(options), output_format='csv')
return result
@classmethod
def content_available_lifecycle_environments(cls, options):
"""List the lifecycle environments not attached to the capsule."""
cls.command_sub = 'content available-lifecycle-environments'
result = cls.execute(
cls._construct_command(options), output_format='csv')
return result
@classmethod
def content_info(cls, options):
"""Get current capsule synchronization status."""
cls.command_sub = 'content info'
result = cls.execute(
cls._construct_command(options), output_format='json')
return result
@classmethod
def content_lifecycle_environments(cls, options):
"""List the lifecycle environments attached to the capsule."""
cls.command_sub = 'content lifecycle-environments'
result = cls.execute(
cls._construct_command(options), output_format='csv')
return result
@classmethod
def content_remove_lifecycle_environment(cls, options):
"""Remove lifecycle environments from the capsule."""
cls.command_sub = 'content remove-lifecycle-environment'
result = cls.execute(
cls._construct_command(options), output_format='csv')
return result
@classmethod
def content_synchronization_status(cls, options):
"""Get current capsule synchronization status."""
cls.command_sub = 'content synchronization-status'
result = cls.execute(
cls._construct_command(options), output_format='csv')
return result
@classmethod
def content_synchronize(cls, options):
"""Synchronize the content to the capsule."""
cls.command_sub = 'content synchronize'
result = cls.execute(
cls._construct_command(options), output_format='csv')
return result
@classmethod
def import_classes(cls, options):
"""Import puppet classes from puppet Capsule."""
cls.command_sub = 'import-classes'
result = cls.execute(
cls._construct_command(options), output_format='csv')
return result
@classmethod
def refresh_features(cls, options):
"""Refresh capsule features."""
cls.command_sub = 'refresh-features'
result = cls.execute(
cls._construct_command(options), output_format='csv')
return result
| ares/robottelo | robottelo/cli/capsule.py | Python | gpl-3.0 | 3,537 |
# encoding: UTF-8
import json
import shelve
import sys
import traceback
import time  # time module
import datetime
import pandas as pd
import uuid
from vnpy.event import Event
from vnpy.DAO import *
from vnpy.trader.vtConstant import (EMPTY_INT, EMPTY_FLOAT,
DIRECTION_LONG, DIRECTION_SHORT,
OFFSET_OPEN, OFFSET_CLOSE, OFFSET_CLOSETODAY,
PRICETYPE_LIMITPRICE)
from vnpy.trader.vtEvent import (EVENT_TICK, EVENT_TRADE, EVENT_POSITION,
EVENT_TIMER, EVENT_ORDER)
from vnpy.trader.vtFunction import getJsonPath, getTempPath
from vnpy.trader.vtObject import (VtSubscribeReq, VtOrderReq,
VtCancelOrderReq, VtLogData)
from vnpy.trader.app.spreadTrading.stAlgo import SniperAlgo
from vnpy.trader.app.spreadTrading.stBase import (StLeg, StSpread, EVENT_SPREADTRADING_TICK,
EVENT_SPREADTRADING_POS, EVENT_SPREADTRADING_LOG,
EVENT_SPREADTRADING_ALGO, EVENT_SPREADTRADING_ALGOLOG)
sys.path.append("..")
########################################################################
class StDataEngine(object):
"""价差数据计算引擎"""
settingFileName = 'ST_setting.json'
settingFilePath = getJsonPath(settingFileName, __file__)
# ----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine):
"""Constructor"""
self.mainEngine = mainEngine
self.eventEngine = eventEngine
# Dictionaries for legs and spreads
self.legDict = {} # vtSymbol:StLeg
self.spreadDict = {} # name:StSpread
self.vtSymbolSpreadDict = {} # vtSymbol:StSpread
self.registerEvent()
# ----------------------------------------------------------------------
def loadSetting(self):
"""加载配置"""
try:
with open(self.settingFilePath) as f:
l = json.load(f)
for setting in l:
result, msg = self.createSpread(setting)
self.writeLog(msg)
self.writeLog(u'价差配置加载完成')
except:
content = u'价差配置加载出错,原因:' + traceback.format_exc()
self.writeLog(content)
# ----------------------------------------------------------------------
def saveSetting(self):
"""保存配置"""
with open(self.settingFilePath) as f:
pass
# ----------------------------------------------------------------------
def createSpread(self, setting):
"""创建价差"""
result = False
msg = ''
# Check for a duplicate spread name
if setting['name'] in self.spreadDict:
msg = u'%s价差存在重名' % setting['name']
return result, msg
# Check whether any leg is already in use
l = []
l.append(setting['activeLeg']['vtSymbol'])
for d in setting['passiveLegs']:
l.append(d['vtSymbol'])
for vtSymbol in l:
if vtSymbol in self.vtSymbolSpreadDict:
existingSpread = self.vtSymbolSpreadDict[vtSymbol]
msg = u'%s合约已经存在于%s价差中' % (vtSymbol, existingSpread.name)
return result, msg
# Create the spread
spread = StSpread()
spread.name = setting['name']
self.spreadDict[spread.name] = spread
# Create the active leg
activeSetting = setting['activeLeg']
activeLeg = StLeg()
activeLeg.vtSymbol = str(activeSetting['vtSymbol'])
activeLeg.ratio = float(activeSetting['ratio'])
activeLeg.multiplier = float(activeSetting['multiplier'])
activeLeg.payup = int(activeSetting['payup'])
spread.addActiveLeg(activeLeg)
self.legDict[activeLeg.vtSymbol] = activeLeg
self.vtSymbolSpreadDict[activeLeg.vtSymbol] = spread
self.subscribeMarketData(activeLeg.vtSymbol)
# Create the passive legs
passiveSettingList = setting['passiveLegs']
passiveLegList = []
for d in passiveSettingList:
passiveLeg = StLeg()
passiveLeg.vtSymbol = str(d['vtSymbol'])
passiveLeg.ratio = float(d['ratio'])
passiveLeg.multiplier = float(d['multiplier'])
passiveLeg.payup = int(d['payup'])
spread.addPassiveLeg(passiveLeg)
self.legDict[passiveLeg.vtSymbol] = passiveLeg
self.vtSymbolSpreadDict[passiveLeg.vtSymbol] = spread
self.subscribeMarketData(passiveLeg.vtSymbol)
# Initialize the spread
spread.initSpread()
self.putSpreadTickEvent(spread)
self.putSpreadPosEvent(spread)
# Return the result
result = True
msg = u'%s价差创建成功' % spread.name
return result, msg
# ----------------------------------------------------------------------
def processTickEvent(self, event):
"""处理行情推送"""
# 检查行情是否需要处理
tick = event.dict_['data']
if tick.vtSymbol not in self.legDict:
return
# Update leg prices
leg = self.legDict[tick.vtSymbol]
leg.bidPrice = tick.bidPrice1
leg.askPrice = tick.askPrice1
leg.bidVolume = tick.bidVolume1
leg.askVolume = tick.askVolume1
# Update the spread price
spread = self.vtSymbolSpreadDict[tick.vtSymbol]
spread.calculatePrice()
# Emit the event
self.putSpreadTickEvent(spread)
# ----------------------------------------------------------------------
def putSpreadTickEvent(self, spread):
"""发出价差行情更新事件"""
event1 = Event(EVENT_SPREADTRADING_TICK + spread.name)
event1.dict_['data'] = spread
self.eventEngine.put(event1)
event2 = Event(EVENT_SPREADTRADING_TICK)
event2.dict_['data'] = spread
self.eventEngine.put(event2)
# ----------------------------------------------------------------------
def processTradeEvent(self, event):
"""处理成交推送"""
# 检查成交是否需要处理
trade = event.dict_['data']
if trade.vtSymbol not in self.legDict:
return
# Update leg positions
leg = self.legDict[trade.vtSymbol]
direction = trade.direction
offset = trade.offset
if direction == DIRECTION_LONG:
if offset == OFFSET_OPEN:
leg.longPos += trade.volume
else:
leg.shortPos -= trade.volume
else:
if offset == OFFSET_OPEN:
leg.shortPos += trade.volume
else:
leg.longPos -= trade.volume
leg.netPos = leg.longPos - leg.shortPos
# Update the spread position
spread = self.vtSymbolSpreadDict[trade.vtSymbol]
spread.calculatePos()
# Push the spread position update
event1 = Event(EVENT_SPREADTRADING_POS + spread.name)
event1.dict_['data'] = spread
self.eventEngine.put(event1)
event2 = Event(EVENT_SPREADTRADING_POS)
event2.dict_['data'] = spread
self.eventEngine.put(event2)
# ----------------------------------------------------------------------
def processPosEvent(self, event):
"""处理持仓推送"""
# 检查持仓是否需要处理
pos = event.dict_['data']
if pos.vtSymbol not in self.legDict:
return
# Update leg positions
leg = self.legDict[pos.vtSymbol]
direction = pos.direction
if direction == DIRECTION_LONG:
leg.longPos = pos.position
else:
leg.shortPos = pos.position
leg.netPos = leg.longPos - leg.shortPos
# Update the spread position
spread = self.vtSymbolSpreadDict[pos.vtSymbol]
spread.calculatePos()
# Push the spread position update
self.putSpreadPosEvent(spread)
# ----------------------------------------------------------------------
def putSpreadPosEvent(self, spread):
"""发出价差持仓事件"""
event1 = Event(EVENT_SPREADTRADING_POS + spread.name)
event1.dict_['data'] = spread
self.eventEngine.put(event1)
event2 = Event(EVENT_SPREADTRADING_POS)
event2.dict_['data'] = spread
self.eventEngine.put(event2)
# ----------------------------------------------------------------------
def registerEvent(self):
""""""
self.eventEngine.register(EVENT_TICK, self.processTickEvent)
self.eventEngine.register(EVENT_TRADE, self.processTradeEvent)
self.eventEngine.register(EVENT_POSITION, self.processPosEvent)
# ----------------------------------------------------------------------
def subscribeMarketData(self, vtSymbol):
"""订阅行情"""
contract = self.mainEngine.getContract(vtSymbol)
if not contract:
self.writeLog(u'订阅行情失败,找不到该合约%s' % vtSymbol)
return
req = VtSubscribeReq()
req.symbol = contract.symbol
req.exchange = contract.exchange
self.mainEngine.subscribe(req, contract.gatewayName)
# ----------------------------------------------------------------------
def writeLog(self, content):
"""发出日志"""
log = VtLogData()
log.logContent = content
event = Event(EVENT_SPREADTRADING_LOG)
event.dict_['data'] = log
self.eventEngine.put(event)
# ----------------------------------------------------------------------
def getAllSpreads(self):
"""获取所有的价差"""
return self.spreadDict.values()
########################################################################
class StAlgoEngine(object):
"""价差算法交易引擎"""
algoFileName = 'SpreadTradingAlgo.vt'
algoFilePath = getTempPath(algoFileName)
# ----------------------------------------------------------------------
def __init__(self, dataEngine, mainEngine, eventEngine):
"""Constructor"""
self.dataEngine = dataEngine
self.mainEngine = mainEngine
self.eventEngine = eventEngine
self.algoDict = {} # spreadName:algo
self.vtSymbolAlgoDict = {} # vtSymbol:algo
self.registerEvent()
# ----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
self.eventEngine.register(EVENT_SPREADTRADING_TICK, self.processSpreadTickEvent)
self.eventEngine.register(EVENT_SPREADTRADING_POS, self.processSpreadPosEvent)
self.eventEngine.register(EVENT_TRADE, self.processTradeEvent)
self.eventEngine.register(EVENT_ORDER, self.processOrderEvent)
self.eventEngine.register(EVENT_TIMER, self.processTimerEvent)
# ----------------------------------------------------------------------
# Position handling after a trade confirmation
def updatePosition(self, trade):
if trade.vtSymbol not in self.vtSymbolAlgoDict:
return
sniperAlgo = self.vtSymbolAlgoDict[trade.vtSymbol]
# Get the list of order IDs for this contract
if len(sniperAlgo.legOrderDict) != 0 :
orderIdList = sniperAlgo.legOrderDict[trade.vtSymbol]
else:
return
# The trade belongs to an order sent by this algorithm
if trade.vtOrderID in orderIdList:
spreadName = sniperAlgo.spreadName
# Fetch position info from the database by client, strategy and contract
qrySql = 'select LONG_POSITION, TODAY_LONG, LONG_OPEN_AVG_PRICE, SHORT_POSITION, TODAY_SHORT, SHORT_OPEN_AVG_PRICE' \
'from defer_real_hold where BROKER_ID = %s and EXCH_ID = %s and SYMBOL = %s and STRATAGE = %s' % (
trade.brokerID,
trade.exchange, trade.symbol, spreadName)
retData = getDataBySQL('vnpy', qrySql)
posData = retData.irow(0)  # the query above is expected to return exactly one position record
# No existing position record: first trade
if not posData:
posData.longPosition = EMPTY_INT
posData.longToday = EMPTY_INT
posData.longOpenAverPrice = EMPTY_FLOAT
posData.shortPosition = EMPTY_INT
posData.shortToday = EMPTY_INT
posData.shortOpenAverPrice = EMPTY_INT
if trade.direction == DIRECTION_LONG:
# Long open: increase the long position and today's long position
if trade.offset == OFFSET_OPEN:
posData.longOpenAverPrice = (
posData.longOpenAverPrice * posData.longPosition + trade.volume * trade.price) / (
posData.longPosition + trade.volume)
posData.longPosition += trade.volume
posData.longToday += trade.volume
elif trade.offset == OFFSET_CLOSETODAY:  # first-open-first-close handling to be considered later
# Average-price formula
posData.shortOpenAverPrice = (
posData.shortOpenAverPrice * posData.shortPosition - trade.volume * trade.price) / (
posData.shortPosition - trade.volume)
posData.shortPosition -= trade.volume
posData.shortToday -= trade.volume
else:
# Short open: increase the short position and today's short position
if trade.offset == OFFSET_OPEN:
posData.shortOpenAverPrice = (
posData.shortOpenAverPrice * posData.shortPosition + trade.volume * trade.price) / (
posData.shortPosition + trade.volume)
posData.shortPosition += trade.volume
posData.shortToday += trade.volume
elif trade.offset == OFFSET_CLOSETODAY:
posData.longOpenAverPrice = (
posData.longOpenAverPrice * posData.longPosition - trade.volume * trade.price) / (
posData.longPosition - trade.volume)
posData.longPosition -= trade.volume
posData.longToday -= trade.volume
# Update the defer_real_hold table
updateSql = 'update defer_real_hold set LONG_POSITION = %s, TODAY_LONG = %s, LONG_OPEN_AVG_PRICE = %s,' \
'SHORT_POSITION = %s, TODAY_SHORT = %s, SHORT_OPEN_AVG_PRICE = %s where BROKER_ID = %s and EXCH_ID = %s and SYMBOL = %s and STRATAGE = %s' % (
posData.longPosition, posData.longToday, posData.longOpenAverPrice, posData.shortPosition,
posData.shortToday, posData.shortOpenAverPrice,
trade.brokerID, trade.exchange, trade.symbol, spreadName)
try:
updateData('vnpy', updateSql)
except Exception as e:
self.writeLog(u"更新客户持仓信息出错,错误信息:%s" % str(e.message))
# Persist the trade confirmation to the database
def handleTradeData(self, trade):
print trade
if trade.vtSymbol not in self.vtSymbolAlgoDict:
return
sniperAlgo = self.vtSymbolAlgoDict[trade.vtSymbol]
# Get the list of order IDs for this contract
if len(sniperAlgo.legOrderDict) != 0 :
orderIdList = sniperAlgo.legOrderDict[trade.vtSymbol]
else:
return
# The trade belongs to an order sent by this algorithm
if trade.vtOrderID in orderIdList:
spreadName = sniperAlgo.spreadName
DEFER_DONE_COLUMNS = ['VT_TRADE_ID', 'VT_ORDER_ID', 'TRADE_DATE', 'TRADE_TIME', 'USER_ID',
'BROKER_ID', 'OPER_CODE', 'SYMBOL', 'EXCH_ID', 'TRADE_PRICE', 'DONE_QTY',
'BS_FLAG', 'EO_FLAG', 'STRATAGE']
# 0: open; 1: close
if trade.offset == OFFSET_OPEN:
offsettmp = '0'
elif trade.offset == OFFSET_CLOSE:
offsettmp = '1'
else:
print("不支持的offset")
# L: long; S: short
if trade.direction == DIRECTION_LONG:
directiontmp = 'L'
elif trade.direction == DIRECTION_SHORT:
directiontmp = 'S'
else:
print("不支持的DIRECTION")
# str(uuid.uuid1()).replace('-', '') generates a unique 32-character serial number; the insert time is taken from the system clock
i = datetime.datetime.now()
orderDate = str(i.year) + str(i.month) + str(i.day)
orderTime = str(i.hour) + str(i.minute) + str(i.second)
tradedata = [trade.vtTradeID, trade.vtOrderID, '', trade.tradeTime, '', '',#trade.brokerID,
'', trade.symbol, trade.exchange, trade.price, trade.volume,
directiontmp, offsettmp, spreadName]
d = pd.DataFrame([tradedata], columns=DEFER_DONE_COLUMNS)
print("开始写入DEFER_DONE")
try:
writeData('vnpy', 'defer_done', d)
print("写入DEFER_DONE结束了")
except Exception as e:
self.writeLog(u"增量写入数据时发生了错误,错误信息:%s" % str(e.message))
print("写入DEFER_DONE报错%s"% str(e.message))
# Persist the order to the database
def handleOrder(self, vtSymbol, orderReq, vtOrderID):
#orderReq = VtOrderReq()
print (u'handleOrder:orderReq=%s,vtSymbol=%s' % (orderReq,vtSymbol))
print vtSymbol
spreadName = self.vtSymbolAlgoDict[vtSymbol].spreadName
# Primary key: 'VT_ORDER_ID', 'ENTRUST_DATE', 'EXCH_ID'
DEFER_ENTRUST_COLUMNS = ['VT_ORDER_ID', 'ENTRUST_DATE', 'ENTRUST_TIME', 'USER_ID',
'BROKER_ID', 'OPER_CODE', 'SYMBOL', 'EXCH_ID', 'ENTRUST_PRICE',
'ENTRUST_QTY', 'PRODUCT_CLASS', 'CURRENCY_CODE', 'PRICE_TYPE', 'BS_FLAG',
'EO_FLAG', 'ENTRUST_STATUS', 'STRATAGE']
# 0: open; 1: close
if orderReq.offset == OFFSET_OPEN:
offsettmp = '0'
elif orderReq.offset == OFFSET_CLOSE:
offsettmp = '1'
else:
print("不支持的offset")
# L: long; S: short
if orderReq.direction == DIRECTION_LONG:
directiontmp = 'L'
elif orderReq.direction == DIRECTION_SHORT:
directiontmp = 'S'
else:
print("不支持的DIRECTION")
# str(uuid.uuid1()).replace('-', '') generates a unique 32-character serial number; the insert time is taken from the system clock
i = datetime.datetime.now()
orderDate = str(i.year) + str(i.month) + str(i.day )
orderTime = str(i.hour) + str(i.minute) + str(i.second )
orderData = [vtOrderID, orderDate,orderTime, '', '', '', orderReq.symbol, orderReq.exchange, orderReq.price, orderReq.volume,
'', '', '', directiontmp, offsettmp, '', spreadName]
d = pd.DataFrame([orderData], columns=DEFER_ENTRUST_COLUMNS)
print("开始写入DEFER_ENTRUST中")
try:
writeData('vnpy', 'defer_entrust', d)
# common.logger.info(u"写入数据%s" % (d.max))
print("写入DEFER_ENTRUST结束了")
except Exception as e:
self.writeLog(u"增量写入数据时发生了错误,错误信息:%s" % str(e.message))
print("写入DEFER_ENTRUST报错%s"% str(e.message))
# Persist the order update push to the database
def handleOrderBack(self, order,event):
print (u'handleOrder:order=%s,event=%s' % (order,event))
if order.vtSymbol not in self.vtSymbolAlgoDict:
return
print order
# event.dict_['data'] is a VtOrderData, the same object as order
spread = event.dict_['data']
#print spread
print (u'spread=%s' % (spread))
sniperAlgo = self.vtSymbolAlgoDict[order.vtSymbol]
# Get the list of order IDs for this contract
#print ("委托推送入库处理sniperAlgo %s,sniperAlgo.legOrderDict[order.vtSymbol]:%s"% str(sniperAlgo),str(sniperAlgo.legOrderDict[order.vtSymbol]))
if len(sniperAlgo.legOrderDict) != 0 :
orderIdList = sniperAlgo.legOrderDict[order.vtSymbol]
else:
return
# 0: open; 1: close
if order.offset == OFFSET_OPEN:
offsettmp = '0'
elif order.offset == OFFSET_CLOSE:
offsettmp = '1'
else:
print("不支持的offset")
# L: long; S: short
if order.direction == DIRECTION_LONG:
directiontmp = 'L'
elif order.direction == DIRECTION_SHORT:
directiontmp = 'S'
else:
print("不支持的DIRECTION")
# str(uuid.uuid1()).replace('-', '') generates a unique 32-character serial number; the insert time is taken from the system clock
i = datetime.datetime.now()
orderDate = str(i.year) + str(i.month) + str(i.day )
orderTime = str(i.hour) + str(i.minute) + str(i.second )
# The order update belongs to an order sent by this algorithm
if order.vtOrderID in orderIdList:
DEFER_ENTRUST_RTN_COLUMNS = ['VT_ORDER_ID', 'ENTRUST_DATE', 'ENTRUST_TIME', 'CANCEL_TIME',
'USER_ID', 'BROKER_ID', 'OPER_CODE', 'SYMBOL', 'EXCH_ID', 'ENTRUST_PRICE',
'ENTRUST_QTY','PRODUCT_CLASS', 'CURRENCY_CODE', 'PRICE_TYPE', 'BS_FLAG',
'EO_FLAG', 'ENTRUST_STATUS', 'STRATAGE']
ordertn = [order.vtOrderID,orderDate, orderTime, '', '',#order.brokerID,
'','', order.symbol, order.exchange, order.price, order.totalVolume,
'', '', '', directiontmp, offsettmp, '', sniperAlgo.spreadName]
d = pd.DataFrame([ordertn], columns=DEFER_ENTRUST_RTN_COLUMNS)
print("开始写入DEFER_ENTRUST_RTN中")
try:
writeData('vnpy', 'defer_entrust_rtn', d)
# common.logger.info(u"写入数据%s" % (d.max))
print("写入DEFER_ENTRUST_RTN结束了")
except Exception as e:
self.writeLog(u"增量写入数据时发生了错误,错误信息:%s" % str(e.message))
print("写入DEFER_ENTRUST_RTN报错%s"% str(e.message))
# ----------------------------------------------------------------------
def processSpreadTickEvent(self, event):
"""处理价差行情事件"""
spread = event.dict_['data']
algo = self.algoDict.get(spread.name, None)
if algo:
algo.updateSpreadTick(spread)
# ----------------------------------------------------------------------
def processSpreadPosEvent(self, event):
"""处理价差持仓事件"""
spread = event.dict_['data']
algo = self.algoDict.get(spread.name, None)
if algo:
algo.updateSpreadPos(spread)
# ----------------------------------------------------------------------
def processTradeEvent(self, event):
"""处理成交事件"""
trade = event.dict_['data']
# Update positions and persist the trade record
self.updatePosition(trade)
self.handleTradeData(trade)
algo = self.vtSymbolAlgoDict.get(trade.vtSymbol, None)
if algo:
algo.updateTrade(trade)
# Update the spread position (wzhua: added floating P&L calculation in the spread engine)
spread = self.dataEngine.vtSymbolSpreadDict[trade.vtSymbol]
spread.calculatePos()
# Push the spread position update
self.dataEngine.putSpreadPosEvent(spread)
# ----------------------------------------------------------------------
def processOrderEvent(self, event):
"""处理委托事件"""
order = event.dict_['data']
#spread = event.dict_['data']
#algo = self.algoDict.get(spread.name, None)
self.handleOrderBack(order,event)
algo = self.vtSymbolAlgoDict.get(order.vtSymbol, None)
if algo:
algo.updateOrder(order)
# ----------------------------------------------------------------------
def processTimerEvent(self, event):
""""""
for algo in self.algoDict.values():
algo.updateTimer()
# ----------------------------------------------------------------------
def sendOrder(self, vtSymbol, direction, offset, price, volume, payup=0):
"""发单"""
contract = self.mainEngine.getContract(vtSymbol)
if not contract:
return ''
req = VtOrderReq()
req.symbol = contract.symbol
req.exchange = contract.exchange
req.direction = direction
req.offset = offset
req.volume = int(volume)
req.priceType = PRICETYPE_LIMITPRICE
if direction == DIRECTION_LONG:
req.price = price + payup * contract.priceTick
else:
req.price = price - payup * contract.priceTick
vtOrderID = self.mainEngine.sendOrder(req, contract.gatewayName)
# Persist the order to the database
self.handleOrder(vtSymbol, req, vtOrderID)
return vtOrderID
# ----------------------------------------------------------------------
def cancelOrder(self, vtOrderID):
"""撤单"""
order = self.mainEngine.getOrder(vtOrderID)
if not order:
return
req = VtCancelOrderReq()
req.symbol = order.symbol
req.exchange = order.exchange
req.frontID = order.frontID
req.sessionID = order.sessionID
req.orderID = order.orderID
self.mainEngine.cancelOrder(req, order.gatewayName)
# ----------------------------------------------------------------------
def buy(self, vtSymbol, price, volume, payup=0):
"""买入"""
vtOrderID = self.sendOrder(vtSymbol, DIRECTION_LONG, OFFSET_OPEN, price, volume, payup)
l = []
if vtOrderID:
l.append(vtOrderID)
return l
# ----------------------------------------------------------------------
def sell(self, vtSymbol, price, volume, payup=0):
"""卖出"""
vtOrderID = self.sendOrder(vtSymbol, DIRECTION_SHORT, OFFSET_CLOSE, price, volume, payup)
l = []
if vtOrderID:
l.append(vtOrderID)
return l
# ----------------------------------------------------------------------
def short(self, vtSymbol, price, volume, payup=0):
"""卖空"""
vtOrderID = self.sendOrder(vtSymbol, DIRECTION_SHORT, OFFSET_OPEN, price, volume, payup)
l = []
if vtOrderID:
l.append(vtOrderID)
return l
# ----------------------------------------------------------------------
def cover(self, vtSymbol, price, volume, payup=0):
"""平空"""
vtOrderID = self.sendOrder(vtSymbol, DIRECTION_LONG, OFFSET_CLOSE, price, volume, payup)
l = []
if vtOrderID:
l.append(vtOrderID)
return l
# ----------------------------------------------------------------------
def putAlgoEvent(self, algo):
"""发出算法状态更新事件"""
event = Event(EVENT_SPREADTRADING_ALGO + algo.name)
self.eventEngine.put(event)
# ----------------------------------------------------------------------
def writeLog(self, content):
"""输出日志"""
log = VtLogData()
log.logContent = content
event = Event(EVENT_SPREADTRADING_ALGOLOG)
event.dict_['data'] = log
self.eventEngine.put(event)
# ----------------------------------------------------------------------
def saveSetting(self):
"""保存算法配置"""
setting = {}
for algo in self.algoDict.values():
setting[algo.spreadName] = algo.getAlgoParams()
f = shelve.open(self.algoFilePath)
f['setting'] = setting
f.close()
# ----------------------------------------------------------------------
def loadSetting(self):
"""加载算法配置"""
# 创建算法对象
l = self.dataEngine.getAllSpreads()
for spread in l:
algo = SniperAlgo(self, spread)
self.algoDict[spread.name] = algo
# Save the mapping from leg symbol to algorithm object
for leg in spread.allLegs:
self.vtSymbolAlgoDict[leg.vtSymbol] = algo
# Load the settings
f = shelve.open(self.algoFilePath)
setting = f.get('setting', None)
f.close()
if not setting:
return
for algo in self.algoDict.values():
if algo.spreadName in setting:
d = setting[algo.spreadName]
algo.setAlgoParams(d)
# ----------------------------------------------------------------------
def stopAll(self):
"""停止全部算法"""
for algo in self.algoDict.values():
algo.stop()
# ----------------------------------------------------------------------
def startAlgo(self, spreadName):
"""启动算法"""
algo = self.algoDict[spreadName]
algoActive = algo.start()
return algoActive
# ----------------------------------------------------------------------
def stopAlgo(self, spreadName):
"""停止算法"""
algo = self.algoDict[spreadName]
algoActive = algo.stop()
return algoActive
# ----------------------------------------------------------------------
def getAllAlgoParams(self):
"""获取所有算法的参数"""
return [algo.getAlgoParams() for algo in self.algoDict.values()]
# ----------------------------------------------------------------------
def setAlgoBuyPrice(self, spreadName, buyPrice):
"""设置算法买开价格"""
algo = self.algoDict[spreadName]
algo.setBuyPrice(buyPrice)
# ----------------------------------------------------------------------
def setAlgoSellPrice(self, spreadName, sellPrice):
"""设置算法卖平价格"""
algo = self.algoDict[spreadName]
algo.setSellPrice(sellPrice)
# ----------------------------------------------------------------------
def setAlgoShortPrice(self, spreadName, shortPrice):
"""设置算法卖开价格"""
algo = self.algoDict[spreadName]
algo.setShortPrice(shortPrice)
# ----------------------------------------------------------------------
def setAlgoCoverPrice(self, spreadName, coverPrice):
"""设置算法买平价格"""
algo = self.algoDict[spreadName]
algo.setCoverPrice(coverPrice)
# ----------------------------------------------------------------------
def setAlgoMode(self, spreadName, mode):
"""设置算法工作模式"""
algo = self.algoDict[spreadName]
algo.setMode(mode)
# ----------------------------------------------------------------------
def setAlgoMaxOrderSize(self, spreadName, maxOrderSize):
"""设置算法单笔委托限制"""
algo = self.algoDict[spreadName]
algo.setMaxOrderSize(maxOrderSize)
# ----------------------------------------------------------------------
def setAlgoMaxPosSize(self, spreadName, maxPosSize):
"""设置算法持仓限制"""
algo = self.algoDict[spreadName]
algo.setMaxPosSize(maxPosSize)
########################################################################
class StEngine(object):
"""价差引擎"""
# ----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine):
"""Constructor"""
self.mainEngine = mainEngine
self.eventEngine = eventEngine
self.dataEngine = StDataEngine(mainEngine, eventEngine)
self.algoEngine = StAlgoEngine(self.dataEngine, mainEngine, eventEngine)
# ----------------------------------------------------------------------
def init(self):
"""初始化"""
self.dataEngine.loadSetting()
self.algoEngine.loadSetting()
# ----------------------------------------------------------------------
def stop(self):
"""停止"""
self.dataEngine.saveSetting()
self.algoEngine.stopAll()
self.algoEngine.saveSetting()
| cmbclh/vnpy1.7 | vnpy/trader/app/spreadTrading/stEngine.py | Python | mit | 32,762 |
# Copyright 2018-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
from binascii import unhexlify
from io import BytesIO
from typing import Optional
from unittest.mock import Mock
from urllib import parse
import attr
from parameterized import parameterized, parameterized_class
from PIL import Image as Image
from twisted.internet import defer
from twisted.internet.defer import Deferred
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.logging.context import make_deferred_yieldable
from synapse.rest import admin
from synapse.rest.client import login
from synapse.rest.media.v1._base import FileInfo
from synapse.rest.media.v1.filepath import MediaFilePaths
from synapse.rest.media.v1.media_storage import MediaStorage
from synapse.rest.media.v1.storage_provider import FileStorageProviderBackend
from tests import unittest
from tests.server import FakeSite, make_request
from tests.test_utils import SMALL_PNG
from tests.utils import default_config
class MediaStorageTests(unittest.HomeserverTestCase):
needs_threadpool = True
def prepare(self, reactor, clock, hs):
self.test_dir = tempfile.mkdtemp(prefix="synapse-tests-")
self.addCleanup(shutil.rmtree, self.test_dir)
self.primary_base_path = os.path.join(self.test_dir, "primary")
self.secondary_base_path = os.path.join(self.test_dir, "secondary")
hs.config.media.media_store_path = self.primary_base_path
storage_providers = [FileStorageProviderBackend(hs, self.secondary_base_path)]
self.filepaths = MediaFilePaths(self.primary_base_path)
self.media_storage = MediaStorage(
hs, self.primary_base_path, self.filepaths, storage_providers
)
def test_ensure_media_is_in_local_cache(self):
media_id = "some_media_id"
test_body = "Test\n"
# First we create a file that is in a storage provider but not in the
# local primary media store
rel_path = self.filepaths.local_media_filepath_rel(media_id)
secondary_path = os.path.join(self.secondary_base_path, rel_path)
os.makedirs(os.path.dirname(secondary_path))
with open(secondary_path, "w") as f:
f.write(test_body)
# Now we run ensure_media_is_in_local_cache, which should copy the file
# to the local cache.
file_info = FileInfo(None, media_id)
# This uses a real blocking threadpool so we have to wait for it to be
# actually done :/
x = defer.ensureDeferred(
self.media_storage.ensure_media_is_in_local_cache(file_info)
)
# Hotloop until the threadpool does its job...
self.wait_on_thread(x)
local_path = self.get_success(x)
self.assertTrue(os.path.exists(local_path))
# Asserts the file is under the expected local cache directory
self.assertEqual(
os.path.commonprefix([self.primary_base_path, local_path]),
self.primary_base_path,
)
with open(local_path) as f:
body = f.read()
self.assertEqual(test_body, body)
@attr.s(slots=True, frozen=True)
class _TestImage:
"""An image for testing thumbnailing with the expected results
Attributes:
data: The raw image to thumbnail
content_type: The type of the image as a content type, e.g. "image/png"
extension: The extension associated with the format, e.g. ".png"
expected_cropped: The expected bytes from cropped thumbnailing, or None if
test should just check for success.
expected_scaled: The expected bytes from scaled thumbnailing, or None if
test should just check for a valid image returned.
expected_found: True if the file should exist on the server, or False if
a 404 is expected.
"""
data = attr.ib(type=bytes)
content_type = attr.ib(type=bytes)
extension = attr.ib(type=bytes)
expected_cropped = attr.ib(type=Optional[bytes], default=None)
expected_scaled = attr.ib(type=Optional[bytes], default=None)
expected_found = attr.ib(default=True, type=bool)
@parameterized_class(
("test_image",),
[
# smoll png
(
_TestImage(
SMALL_PNG,
b"image/png",
b".png",
unhexlify(
b"89504e470d0a1a0a0000000d4948445200000020000000200806"
b"000000737a7af40000001a49444154789cedc101010000008220"
b"ffaf6e484001000000ef0610200001194334ee0000000049454e"
b"44ae426082"
),
unhexlify(
b"89504e470d0a1a0a0000000d4948445200000001000000010806"
b"0000001f15c4890000000d49444154789c636060606000000005"
b"0001a5f645400000000049454e44ae426082"
),
),
),
# small png with transparency.
(
_TestImage(
unhexlify(
b"89504e470d0a1a0a0000000d49484452000000010000000101000"
b"00000376ef9240000000274524e5300010194fdae0000000a4944"
b"4154789c636800000082008177cd72b60000000049454e44ae426"
b"082"
),
b"image/png",
b".png",
# Note that we don't check the output since it varies across
# different versions of Pillow.
),
),
# small lossless webp
(
_TestImage(
unhexlify(
b"524946461a000000574542505650384c0d0000002f0000001007"
b"1011118888fe0700"
),
b"image/webp",
b".webp",
),
),
# an empty file
(
_TestImage(
b"",
b"image/gif",
b".gif",
expected_found=False,
),
),
],
)
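# parameterized_class instantiates MediaRepoTests once per tuple above, with
# `test_image` set to the corresponding _TestImage, so every test method runs
# against each of these images (including the empty file, which should 404).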
class MediaRepoTests(unittest.HomeserverTestCase):
hijack_auth = True
user_id = "@test:user"
def make_homeserver(self, reactor, clock):
self.fetches = []
def get_file(destination, path, output_stream, args=None, max_size=None):
"""
Returns tuple[int,dict,str,int] of file length, response headers,
absolute URI, and response code.
"""
def write_to(r):
data, response = r
output_stream.write(data)
return response
d = Deferred()
d.addCallback(write_to)
self.fetches.append((d, destination, path, args))
return make_deferred_yieldable(d)
client = Mock()
client.get_file = get_file
self.storage_path = self.mktemp()
self.media_store_path = self.mktemp()
os.mkdir(self.storage_path)
os.mkdir(self.media_store_path)
config = self.default_config()
config["media_store_path"] = self.media_store_path
config["max_image_pixels"] = 2000000
provider_config = {
"module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend",
"store_local": True,
"store_synchronous": False,
"store_remote": True,
"config": {"directory": self.storage_path},
}
config["media_storage_providers"] = [provider_config]
hs = self.setup_test_homeserver(config=config, federation_http_client=client)
return hs
def prepare(self, reactor, clock, hs):
media_resource = hs.get_media_repository_resource()
self.download_resource = media_resource.children[b"download"]
self.thumbnail_resource = media_resource.children[b"thumbnail"]
self.store = hs.get_datastores().main
self.media_repo = hs.get_media_repository()
self.media_id = "example.com/12345"
def _req(self, content_disposition, include_content_type=True):
channel = make_request(
self.reactor,
FakeSite(self.download_resource, self.reactor),
"GET",
self.media_id,
shorthand=False,
await_result=False,
)
self.pump()
# We've made one fetch, to example.com, using the media URL, and asking
# the other server not to do a remote fetch
self.assertEqual(len(self.fetches), 1)
self.assertEqual(self.fetches[0][1], "example.com")
self.assertEqual(
self.fetches[0][2], "/_matrix/media/r0/download/" + self.media_id
)
self.assertEqual(self.fetches[0][3], {"allow_remote": "false"})
headers = {
b"Content-Length": [b"%d" % (len(self.test_image.data))],
}
if include_content_type:
headers[b"Content-Type"] = [self.test_image.content_type]
if content_disposition:
headers[b"Content-Disposition"] = [content_disposition]
self.fetches[0][0].callback(
(self.test_image.data, (len(self.test_image.data), headers))
)
self.pump()
self.assertEqual(channel.code, 200)
return channel
def test_handle_missing_content_type(self):
channel = self._req(
b"inline; filename=out" + self.test_image.extension,
include_content_type=False,
)
headers = channel.headers
self.assertEqual(channel.code, 200)
self.assertEqual(
headers.getRawHeaders(b"Content-Type"), [b"application/octet-stream"]
)
def test_disposition_filename_ascii(self):
"""
If the filename is filename=<ascii> then Synapse will decode it as an
ASCII string, and use filename= in the response.
"""
channel = self._req(b"inline; filename=out" + self.test_image.extension)
headers = channel.headers
self.assertEqual(
headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type]
)
self.assertEqual(
headers.getRawHeaders(b"Content-Disposition"),
[b"inline; filename=out" + self.test_image.extension],
)
def test_disposition_filenamestar_utf8escaped(self):
"""
If the filename is filename=*utf8''<utf8 escaped> then Synapse will
correctly decode it as the UTF-8 string, and use filename* in the
response.
"""
filename = parse.quote("\u2603".encode()).encode("ascii")
channel = self._req(
b"inline; filename*=utf-8''" + filename + self.test_image.extension
)
headers = channel.headers
self.assertEqual(
headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type]
)
self.assertEqual(
headers.getRawHeaders(b"Content-Disposition"),
[b"inline; filename*=utf-8''" + filename + self.test_image.extension],
)
def test_disposition_none(self):
"""
If there is no filename, one isn't passed on in the Content-Disposition
of the request.
"""
channel = self._req(None)
headers = channel.headers
self.assertEqual(
headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type]
)
self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), None)
def test_thumbnail_crop(self):
"""Test that a cropped remote thumbnail is available."""
self._test_thumbnail(
"crop", self.test_image.expected_cropped, self.test_image.expected_found
)
def test_thumbnail_scale(self):
"""Test that a scaled remote thumbnail is available."""
self._test_thumbnail(
"scale", self.test_image.expected_scaled, self.test_image.expected_found
)
def test_invalid_type(self):
"""An invalid thumbnail type is never available."""
self._test_thumbnail("invalid", None, False)
@unittest.override_config(
{"thumbnail_sizes": [{"width": 32, "height": 32, "method": "scale"}]}
)
def test_no_thumbnail_crop(self):
"""
Override the config to generate only scaled thumbnails, but request a cropped one.
"""
self._test_thumbnail("crop", None, False)
@unittest.override_config(
{"thumbnail_sizes": [{"width": 32, "height": 32, "method": "crop"}]}
)
def test_no_thumbnail_scale(self):
"""
Override the config to generate only cropped thumbnails, but request a scaled one.
"""
self._test_thumbnail("scale", None, False)
def test_thumbnail_repeated_thumbnail(self):
"""Test that fetching the same thumbnail works, and deleting the on disk
thumbnail regenerates it.
"""
self._test_thumbnail(
"scale", self.test_image.expected_scaled, self.test_image.expected_found
)
if not self.test_image.expected_found:
return
# Fetching again should work, without re-requesting the image from the
# remote.
params = "?width=32&height=32&method=scale"
channel = make_request(
self.reactor,
FakeSite(self.thumbnail_resource, self.reactor),
"GET",
self.media_id + params,
shorthand=False,
await_result=False,
)
self.pump()
self.assertEqual(channel.code, 200)
if self.test_image.expected_scaled:
self.assertEqual(
channel.result["body"],
self.test_image.expected_scaled,
channel.result["body"],
)
# Deleting the thumbnail on disk then re-requesting it should work as
# Synapse should regenerate missing thumbnails.
origin, media_id = self.media_id.split("/")
info = self.get_success(self.store.get_cached_remote_media(origin, media_id))
file_id = info["filesystem_id"]
thumbnail_dir = self.media_repo.filepaths.remote_media_thumbnail_dir(
origin, file_id
)
shutil.rmtree(thumbnail_dir, ignore_errors=True)
channel = make_request(
self.reactor,
FakeSite(self.thumbnail_resource, self.reactor),
"GET",
self.media_id + params,
shorthand=False,
await_result=False,
)
self.pump()
self.assertEqual(channel.code, 200)
if self.test_image.expected_scaled:
self.assertEqual(
channel.result["body"],
self.test_image.expected_scaled,
channel.result["body"],
)
def _test_thumbnail(self, method, expected_body, expected_found):
params = "?width=32&height=32&method=" + method
channel = make_request(
self.reactor,
FakeSite(self.thumbnail_resource, self.reactor),
"GET",
self.media_id + params,
shorthand=False,
await_result=False,
)
self.pump()
headers = {
b"Content-Length": [b"%d" % (len(self.test_image.data))],
b"Content-Type": [self.test_image.content_type],
}
self.fetches[0][0].callback(
(self.test_image.data, (len(self.test_image.data), headers))
)
self.pump()
if expected_found:
self.assertEqual(channel.code, 200)
if expected_body is not None:
self.assertEqual(
channel.result["body"], expected_body, channel.result["body"]
)
else:
# ensure that the result is at least some valid image
Image.open(BytesIO(channel.result["body"]))
else:
# A 404 with a JSON body.
self.assertEqual(channel.code, 404)
self.assertEqual(
channel.json_body,
{
"errcode": "M_NOT_FOUND",
"error": "Not found [b'example.com', b'12345']",
},
)
@parameterized.expand([("crop", 16), ("crop", 64), ("scale", 16), ("scale", 64)])
def test_same_quality(self, method, desired_size):
"""Test that choosing between thumbnails with the same quality rating succeeds.
We are not particular about which thumbnail is chosen."""
self.assertIsNotNone(
self.thumbnail_resource._select_thumbnail(
desired_width=desired_size,
desired_height=desired_size,
desired_method=method,
desired_type=self.test_image.content_type,
# Provide two identical thumbnails which are guaranteed to have the same
# quality rating.
thumbnail_infos=[
{
"thumbnail_width": 32,
"thumbnail_height": 32,
"thumbnail_method": method,
"thumbnail_type": self.test_image.content_type,
"thumbnail_length": 256,
"filesystem_id": f"thumbnail1{self.test_image.extension}",
},
{
"thumbnail_width": 32,
"thumbnail_height": 32,
"thumbnail_method": method,
"thumbnail_type": self.test_image.content_type,
"thumbnail_length": 256,
"filesystem_id": f"thumbnail2{self.test_image.extension}",
},
],
file_id=f"image{self.test_image.extension}",
url_cache=None,
server_name=None,
)
)
def test_x_robots_tag_header(self):
"""
Tests that the `X-Robots-Tag` header is present, which informs web crawlers
to not index, archive, or follow links in media.
"""
channel = self._req(b"inline; filename=out" + self.test_image.extension)
headers = channel.headers
self.assertEqual(
headers.getRawHeaders(b"X-Robots-Tag"),
[b"noindex, nofollow, noarchive, noimageindex"],
)
class TestSpamChecker:
"""A spam checker module that rejects all media that includes the bytes
`evil`.
"""
def __init__(self, config, api):
self.config = config
self.api = api
def parse_config(config):
return config
async def check_event_for_spam(self, foo):
return False # allow all events
async def user_may_invite(self, inviter_userid, invitee_userid, room_id):
return True # allow all invites
async def user_may_create_room(self, userid):
return True # allow all room creations
async def user_may_create_room_alias(self, userid, room_alias):
return True # allow all room aliases
async def user_may_publish_room(self, userid, room_id):
return True # allow publishing of all rooms
async def check_media_file_for_spam(self, file_wrapper, file_info) -> bool:
buf = BytesIO()
await file_wrapper.write_chunks_to(buf.write)
return b"evil" in buf.getvalue()
class SpamCheckerTestCase(unittest.HomeserverTestCase):
servlets = [
login.register_servlets,
admin.register_servlets,
]
def prepare(self, reactor, clock, hs):
self.user = self.register_user("user", "pass")
self.tok = self.login("user", "pass")
# Allow for uploading and downloading to/from the media repo
self.media_repo = hs.get_media_repository_resource()
self.download_resource = self.media_repo.children[b"download"]
self.upload_resource = self.media_repo.children[b"upload"]
load_legacy_spam_checkers(hs)
def default_config(self):
config = default_config("test")
config.update(
{
"spam_checker": [
{
"module": TestSpamChecker.__module__ + ".TestSpamChecker",
"config": {},
}
]
}
)
return config
def test_upload_innocent(self):
"""Attempt to upload some innocent data that should be allowed."""
self.helper.upload_media(
self.upload_resource, SMALL_PNG, tok=self.tok, expect_code=200
)
def test_upload_ban(self):
"""Attempt to upload some data that includes bytes "evil", which should
get rejected by the spam checker.
"""
data = b"Some evil data"
self.helper.upload_media(
self.upload_resource, data, tok=self.tok, expect_code=400
)
| matrix-org/synapse | tests/rest/media/v1/test_media_storage.py | Python | apache-2.0 | 21,400 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
def IsTimelineInteractionRecord(event_name):
return event_name.startswith('Interaction.')
class TimelineInteractionRecord(object):
"""Represents an interaction that took place during a timeline recording.
As a page runs, typically a number of different (simulated) user interactions
take place. For instance, a user might click a button in a mail app causing a
popup to animate in. Then they might press another button that sends data to a
server and simultaneously closes the popup without an animation. These are two
interactions.
From the point of view of the page, each interaction might have a different
logical name: ClickComposeButton and SendEmail, for instance. From the point
of view of the benchmarking harness, the names aren't so interesting as what
the performance expectations are for that interaction: was it loading
resources from the network? was there an animation?
Determining these things is hard to do, simply by observing the state given to
a page from javascript. There are hints, for instance if network requests are
sent, or if a CSS animation is pending. But this is by no means a complete
story.
Instead, we expect pages to mark up the timeline what they are doing, with
logical names, and flags indicating the semantics of that interaction. This
is currently done by pushing markers into the console.time/timeEnd API: this
for instance can be issued in JS:
var str = 'Interaction.SendEmail/is_smooth,is_loading_resources';
console.time(str);
setTimeout(function() {
console.timeEnd(str);
}, 1000);
When run with perf.measurements.timeline_based_measurement running, this will
then cause a TimelineInteractionRecord to be created for this range and both
smoothness and network metrics to be reported for the marked up 1000ms
time-range.
"""
def __init__(self, logical_name, start, end):
assert logical_name
self.logical_name = logical_name
self.start = start
self.end = end
self.is_smooth = False
self.is_loading_resources = False
@staticmethod
def FromEvent(event):
m = re.match('Interaction\.(.+)\/(.+)', event.name)
if m:
logical_name = m.group(1)
if m.group(1) != '':
flags = m.group(2).split(',')
else:
flags = []
else:
m = re.match('Interaction\.(.+)', event.name)
assert m
logical_name = m.group(1)
flags = []
record = TimelineInteractionRecord(logical_name, event.start, event.end)
for f in flags:
if not f in ('is_smooth', 'is_loading_resources'):
raise Exception(
'Unrecognized flag in timeline Interaction record: %s' % f)
record.is_smooth = 'is_smooth' in flags
record.is_loading_resources = 'is_loading_resources' in flags
return record
def GetResultNameFor(self, result_name):
return "%s-%s" % (self.logical_name, result_name)
| patrickm/chromium.src | tools/perf/metrics/timeline_interaction_record.py | Python | bsd-3-clause | 3,083 |
import boto3
import json
import decimal  # needed by DecimalEncoder below
import time
import dateutil.parser
import datetime
import calendar
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
if o % 1 > 0:
return float(o)
else:
return int(o)
return super(DecimalEncoder, self).default(o)
def lambda_handler(event, context):
# print("Received event: " + json.dumps(event, indent=2))
processed = False
DYNAMODB_TABLE = "DEVOPS_SES_DELIVERIES"
DDBtable = boto3.resource('dynamodb').Table(DYNAMODB_TABLE)
# Generic SNS headers
SnsMessageId = event['Records'][0]['Sns']['MessageId']
SnsPublishTime = event['Records'][0]['Sns']['Timestamp']
SnsTopicArn = event['Records'][0]['Sns']['TopicArn']
SnsMessage = event['Records'][0]['Sns']['Message']
print("Read SNS Message with ID " + SnsMessageId + " published at " + SnsPublishTime)
now = time.strftime("%c")
LambdaReceiveTime = now
# SES specific fields
SESjson = json.loads(SnsMessage)
sesNotificationType = SESjson['notificationType']
if 'mail' in SESjson:
sesMessageId = SESjson['mail']['messageId']
sesTimestamp = SESjson['mail']['timestamp']
sender = SESjson['mail']['source']
print("Processing an SES " + sesNotificationType + " with mID " + sesMessageId)
if sesNotificationType == "Delivery":
print("Processing SES delivery message")
reportingMTA = SESjson['delivery']['reportingMTA']
deliveryRecipients = SESjson['delivery']['recipients']
smtpResponse = SESjson['delivery']['smtpResponse']
deliveryTimestamp = SESjson['delivery']['timestamp']
processingTime = SESjson['delivery']['processingTimeMillis']
            # There can be multiple recipients, but the SMTP response is the same for each
for recipient in deliveryRecipients:
recipientEmailAddress = recipient
print("Delivery recipient: " + recipientEmailAddress)
sesTimestamp_parsed = dateutil.parser.parse(sesTimestamp)
sesTimestamp_seconds = sesTimestamp_parsed.strftime('%s')
deliveryTimestamp_parsed = dateutil.parser.parse(deliveryTimestamp)
deliveryTimestamp_seconds = deliveryTimestamp_parsed.strftime('%s')
# Set an expiry time for this record so we can use Dynamo TTLs to remove
# 4 months but easy to change
future = datetime.datetime.utcnow() + datetime.timedelta(days=120)
expiry_ttl = calendar.timegm(future.timetuple())
# Add entry to DB for this recipient
Item={
'recipientAddress': recipientEmailAddress,
'sesMessageId': sesMessageId,
'sesTimestamp': int(sesTimestamp_seconds),
'deliveryTimestamp': int(deliveryTimestamp_seconds),
'processingTime': int(processingTime),
'reportingMTA': reportingMTA,
'smtpResponse': smtpResponse,
'sender': sender.lower(),
'expiry': int(expiry_ttl)
}
response = DDBtable.put_item(Item=Item)
print("PutItem succeeded:")
print(json.dumps(response, indent=4, cls=DecimalEncoder))
processed = True
else:
print("Unhandled notification type: " + sesNotificationType)
else:
print("Incoming event is not a mail event")
print("Received event was: " + json.dumps(event, indent=2))
processed = True
return processed
| Signiant/aws-ses-recorder | lambda-functions/storeSESDelivery/src/storeSESDelivery_lambda.py | Python | mit | 3,903 |
"""
auth.py
author: Akash Gaonkar
Methods to help with user authentication.
Some useful methods:
create_user: add a new user to the users document
check_password: check a username and password
create_auth_token: create an authorization token
@require_auth_header: ensure an auth token is sent with a
        request. This will also set request.user to be the
        logged-in user if successful.
"""
import bcrypt
import re
from base64 import b64encode, b64decode
from flask import request
from functools import wraps
import globalopts
from util import create_error
AUTH_HEADER = 'Authorization'
REGEX_EMAIL = r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)'
class AuthError(Exception):
def __bool__(self):
return False
def require_auth_header(func):
""" A decorator that ensures a route has a valid authorization header.
If authorized, the function will sets request.user to hold the
username of the logged in user.
Args:
func: The function to decorate
Returns:
A function that validates authorization and then performs func.
Ex:
```
@app.route('/someroute/')
@require_auth_header
def createUser():
# ...
pass
```
"""
@wraps(func)
def wrapper(*args, **kwargs):
if AUTH_HEADER not in request.headers:
return create_error(401, "no authorization header")
if not check_auth_token(request.headers[AUTH_HEADER]):
return create_error(401, "invalid authorization")
auth_type, key = get_auth_details(request.headers[AUTH_HEADER])
user, passw = get_credentials(key)
request.user = user
return func(*args, **kwargs)
return wrapper
def create_user(username, email, password):
""" Creates a user with a given name, email, and password.
Args:
username: the string username
email: the string email of the user
password: the string password for the user
Returns:
True if the user was created, an error (which will evaluate
to false) otherwise.
"""
if not re.fullmatch(globalopts.REGEX_USERNAME, username):
return AuthError("invalid username")
if username in globalopts.users:
return AuthError("duplicate username")
if not re.fullmatch(REGEX_EMAIL, email):
return AuthError("invalid email")
if len(password) < 8:
return AuthError("invalid password")
password = bytes(password, 'utf-8')
hashed = bcrypt.hashpw(password, bcrypt.gensalt())
globalopts.users[username] = {
'name': username,
'email': email,
'password': hashed.decode('utf-8')
}
return True
def create_auth_token(username, password):
""" Creates an authorization token from a username and a password.
Args:
username: a string username
password: a string password
Returns:
A string auth token.
"""
return "Basic %s" % (b64encode(
(username + ':' + password).encode('utf-8')
).decode('utf-8'))
def check_auth_token(header, user=None):
""" Returns whether an auth token is valid for an existing user.
Args:
token: a token to validate
Returns:
True if the token is valid, False otherwise.
"""
try:
auth_type, key = get_auth_details(header)
if not auth_type == 'Basic':
return False
user, passw = get_credentials(key)
return check_password(str(user), str(passw))
except:
return False
def get_auth_details(auth_header):
""" Returns the authorization type and key from an auth header.
Args:
auth_header: the string authorization header
Returns:
A tuple of the string auth type and string auth key.
"""
return auth_header.split(' ', maxsplit=1)
def get_credentials(auth_key):
""" Returns the username and password from an auth key.
NOTE: Could throw an error if the token is poorly formatted.
Args:
auth_key: the string authorization key
Returns:
A tuple of the username and password
"""
return b64decode(auth_key).decode().split(':')
def check_password(username, password):
""" Checks to make sure a user with a given username has a given password.
Args:
username: a string username
password: a string password
Returns:
True if the password matches the username, False otherwise.
"""
user = globalopts.users.get(username, None)
if not user:
return False
passw = bytes(password, 'utf-8')
hashed = user['password'].encode('utf-8')
return user and hashed == bcrypt.hashpw(passw, hashed)
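# --- Illustrative usage sketch (editorial addition, not part of the original
# module). It only exercises the pure token helpers above, so it needs no
# Flask request context; the username and password values are made up, and
# running it assumes the module's imports (bcrypt, flask, globalopts) resolve.
if __name__ == '__main__':
    token = create_auth_token('alice', 'correct horse battery staple')
    auth_type, key = get_auth_details(token)
    assert auth_type == 'Basic'
    assert get_credentials(key) == ['alice', 'correct horse battery staple']
    print('Round-tripped credentials from token:', token)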
| ByronBecker/rmp | focus_server/auth.py | Python | mit | 4,912 |
from __future__ import print_function
import argparse
import sys
import traceback
from subprocess import CalledProcessError
from bloom.git import ensure_git_root
from bloom.git import get_root
from bloom.logging import error
from bloom.util import add_global_arguments
from bloom.util import handle_global_arguments
from bloom.util import print_exc
from bloom.commands.git.patch import export_cmd
from bloom.commands.git.patch import import_cmd
from bloom.commands.git.patch import remove_cmd
from bloom.commands.git.patch import rebase_cmd
from bloom.commands.git.patch import trim_cmd
def get_argument_parser():
parser = argparse.ArgumentParser(
description="Configures the bloom repository with information in groups called tracks.")
metavar = "[export|import|remove|rebase|trim]"
subparsers = parser.add_subparsers(
title="Commands",
metavar=metavar,
description="Call `git-bloom-patch {0} -h` for additional help information on each command.".format(metavar))
export_cmd.add_parser(subparsers)
import_cmd.add_parser(subparsers)
remove_cmd.add_parser(subparsers)
rebase_cmd.add_parser(subparsers)
trim_cmd.add_parser(subparsers)
return parser
def main(sysargs=None):
parser = get_argument_parser()
add_global_arguments(parser)
args = parser.parse_args(sysargs)
handle_global_arguments(args)
retcode = "command not run"
if get_root() is None:
parser.print_help()
error("This command must be run in a valid git repository.", exit=True)
ensure_git_root()
try:
retcode = args.func(args) or 0
except CalledProcessError as err:
# Problem calling out to git probably
print_exc(traceback.format_exc())
error(str(err))
retcode = 2
except Exception as err:
# Unhandled exception, print traceback
print_exc(traceback.format_exc())
error(str(err))
retcode = 3
sys.exit(retcode)
| vrabaud/bloom | bloom/commands/git/patch/patch_main.py | Python | bsd-3-clause | 1,979 |
from emit import Router
from emit.multilang import ShellNode
router = Router()
@router.node(('n',))
class PythonShellNode(ShellNode):
command = 'python test.py'
@router.node(('n',))
class RubyShellNode(ShellNode):
command = 'bundle exec ruby test.rb'
| BrianHicks/emit | examples/multilang/graph.py | Python | mit | 264 |
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from essential.config import cfg
CONF = cfg.CONF
opt = cfg.StrOpt('foo')
CONF.register_opt(opt, group='fbar')
| gaolichuang/py-essential | tests/testmods/fbar_foo_opt.py | Python | apache-2.0 | 719 |
from js9 import j
import tarantool
import pystache
import os
JSBASE = j.application.jsbase_get_class()
class TarantoolDB(JSBASE):
def __init__(self, name="test", path="$DATADIR/tarantool/$NAME", adminsecret="admin007", port=3301):
JSBASE.__init__(self)
self.path = j.dirs.replace_txt_dir_vars(path).replace("$NAME", name).strip()
j.sal.fs.createDir(self.path)
self.name = name
self.login = "root"
self.adminsecret = adminsecret
self.addr = "localhost"
self.port = port
self.configTemplatePath = None
@property
def _path(self):
return j.sal.fs.getDirName(os.path.abspath(__file__)).rstrip("/")
def _setConfig(self):
"""
if path None then will use template which is in this dir
"""
path = self.configTemplatePath
if path is None:
path = "%s/config_template.lua" % self._path
C = j.sal.fs.readFile(path)
data = {}
data["PORT"] = self.port
data["NAME"] = self.name
data["DBDIR"] = self.path
data["SECRET"] = self.adminsecret
C2 = pystache.render(C, **data)
# add path to systemscripts
systempath = "%s/systemscripts" % self._path
C3 = "\npackage.path = '$path/?.lua;' .. package.path\n"
C3 = C3.replace("$path", systempath)
for path0 in j.sal.fs.listFilesInDir(systempath, recursive=False, filter="*.lua"):
bname = j.sal.fs.getBaseName(path0)[:-4]
C3 += "require('%s')\n" % bname
C2 += C3
j.sal.fs.writeFile(j.clients.tarantool.cfgdir + "/%s.lua" % self.name, C2)
def start_connect(self):
"""
will start a local tarantool in console
"""
self.start()
j.sal.process.executeInteractive("tarantoolctl enter %s" % self.name)
# FOR TEST PURPOSES (DEBUG ON CONSOLE)
# rm 000*;rm -rf /Users/kristofdespiegeleer1/opt/var/data/tarantool/test;tarantoolctl start test; cat /Users/kristofdespiegeleer1/opt/var/data/tarantool/test/instance.log
def start(self):
# j.tools.prefab.local.db.tarantool.start()
self._setConfig()
cpath = j.clients.tarantool.cfgdir + "/%s.lua" % self.name
j.tools.tmux.execute("tarantool -i %s" % cpath, window="tarantool")
j.sal.nettools.waitConnectionTest("localhost", self.port, 5)
c = j.clients.tarantool.client_get(name=self.name)
c.call("ping")
        # IF WE USE THE FOLLOWING, WE HAVE SECURITY ISSUES BECAUSE IT WILL RUN AS THE TARANTOOL USER
# j.sal.fs.chown(self.path,"tarantool")
# j.sal.process.execute("tarantoolctl stop %s"%self.name)
# j.sal.process.execute("tarantoolctl start %s"%self.name)
# j.sal.process.executeInteractive("tarantoolctl enter %s"%self.name)
def connect_shell(self):
"""
connect over tcp to the running tarantool
"""
cmd = "tarantoolctl connect %s:%s@%s:%s" % (self.login, self.adminsecret, self.addr, self.port)
j.sal.process.executeInteractive(cmd)
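# --- Illustrative usage sketch (editorial addition, not part of the original
# module). It shows how the class above might be driven from a js9/JumpScale
# environment; the name, secret and port values are the constructor defaults,
# and it assumes a local tarantool binary plus tmux are available.
if __name__ == '__main__':
    db = TarantoolDB(name="test", adminsecret="admin007", port=3301)
    db.start()          # render the config and launch tarantool inside tmux
    db.connect_shell()  # attach an interactive tarantoolctl session over TCP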
| Jumpscale/core9 | JumpScale9/clients/tarantool/TarantoolDB.py | Python | apache-2.0 | 3,082 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('primary', '0006_weight'),
]
operations = [
migrations.AddField(
model_name='usersettings',
name='motto',
field=models.CharField(default=b'', max_length=100),
),
migrations.AddField(
model_name='usersettings',
name='platform1',
field=models.CharField(default=b'', max_length=1000),
),
migrations.AddField(
model_name='usersettings',
name='platform2',
field=models.CharField(default=b'', max_length=1000),
),
]
| ThirdCoalition/primary | primary/migrations/0007_auto_20150512_0301.py | Python | mit | 757 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: play_response.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import plus_profile_pb2 as plus__profile__pb2
import response_messages_pb2 as response__messages__pb2
import play_plus_profile_pb2 as play__plus__profile__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='play_response.proto',
package='PlayResponse',
syntax='proto2',
serialized_pb=_b('\n\x13play_response.proto\x12\x0cPlayResponse\x1a\x12plus_profile.proto\x1a\x17response_messages.proto\x1a\x17play_plus_profile.proto\"\x9b\x01\n\x0bPlayPayload\x12M\n\x1boBSOLETEPlusProfileResponse\x18\x01 \x01(\x0b\x32(.PlayPlusProfile.PlayPlusProfileResponse\x12=\n\x13plusProfileResponse\x18\x02 \x01(\x0b\x32 .PlusProfile.PlusProfileResponse\"\xdd\x01\n\x13PlayResponseWrapper\x12*\n\x07payload\x18\x01 \x01(\x0b\x32\x19.PlayResponse.PlayPayload\x12\x32\n\x08\x63ommands\x18\x02 \x01(\x0b\x32 .ResponseMessages.ServerCommands\x12,\n\x08preFetch\x18\x03 \x03(\x0b\x32\x1a.ResponseMessages.PreFetch\x12\x38\n\x0eserverMetadata\x18\x04 \x01(\x0b\x32 .ResponseMessages.ServerMetadataB0\n com.google.android.finsky.protosB\x0cPlayResponse')
,
dependencies=[plus__profile__pb2.DESCRIPTOR,response__messages__pb2.DESCRIPTOR,play__plus__profile__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_PLAYPAYLOAD = _descriptor.Descriptor(
name='PlayPayload',
full_name='PlayResponse.PlayPayload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='oBSOLETEPlusProfileResponse', full_name='PlayResponse.PlayPayload.oBSOLETEPlusProfileResponse', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='plusProfileResponse', full_name='PlayResponse.PlayPayload.plusProfileResponse', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=108,
serialized_end=263,
)
_PLAYRESPONSEWRAPPER = _descriptor.Descriptor(
name='PlayResponseWrapper',
full_name='PlayResponse.PlayResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='payload', full_name='PlayResponse.PlayResponseWrapper.payload', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='commands', full_name='PlayResponse.PlayResponseWrapper.commands', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='preFetch', full_name='PlayResponse.PlayResponseWrapper.preFetch', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='serverMetadata', full_name='PlayResponse.PlayResponseWrapper.serverMetadata', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=266,
serialized_end=487,
)
_PLAYPAYLOAD.fields_by_name['oBSOLETEPlusProfileResponse'].message_type = play__plus__profile__pb2._PLAYPLUSPROFILERESPONSE
_PLAYPAYLOAD.fields_by_name['plusProfileResponse'].message_type = plus__profile__pb2._PLUSPROFILERESPONSE
_PLAYRESPONSEWRAPPER.fields_by_name['payload'].message_type = _PLAYPAYLOAD
_PLAYRESPONSEWRAPPER.fields_by_name['commands'].message_type = response__messages__pb2._SERVERCOMMANDS
_PLAYRESPONSEWRAPPER.fields_by_name['preFetch'].message_type = response__messages__pb2._PREFETCH
_PLAYRESPONSEWRAPPER.fields_by_name['serverMetadata'].message_type = response__messages__pb2._SERVERMETADATA
DESCRIPTOR.message_types_by_name['PlayPayload'] = _PLAYPAYLOAD
DESCRIPTOR.message_types_by_name['PlayResponseWrapper'] = _PLAYRESPONSEWRAPPER
PlayPayload = _reflection.GeneratedProtocolMessageType('PlayPayload', (_message.Message,), dict(
DESCRIPTOR = _PLAYPAYLOAD,
__module__ = 'play_response_pb2'
# @@protoc_insertion_point(class_scope:PlayResponse.PlayPayload)
))
_sym_db.RegisterMessage(PlayPayload)
PlayResponseWrapper = _reflection.GeneratedProtocolMessageType('PlayResponseWrapper', (_message.Message,), dict(
DESCRIPTOR = _PLAYRESPONSEWRAPPER,
__module__ = 'play_response_pb2'
# @@protoc_insertion_point(class_scope:PlayResponse.PlayResponseWrapper)
))
_sym_db.RegisterMessage(PlayResponseWrapper)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n com.google.android.finsky.protosB\014PlayResponse'))
# @@protoc_insertion_point(module_scope)
| mmcloughlin/finsky | finsky/protos/play_response_pb2.py | Python | mit | 6,214 |
"""Assign a WFO to sites in the metadata tables that have no WFO set."""
from pyiem.util import get_dbconn, logger
LOG = logger()
def main():
"""Go Main"""
mesosite = get_dbconn("mesosite")
postgis = get_dbconn("postgis")
mcursor = mesosite.cursor()
mcursor2 = mesosite.cursor()
pcursor = postgis.cursor()
# Find sites we need to check on
mcursor.execute(
"select s.id, s.iemid, s.network, st_x(geom) as lon, "
"st_y(geom) as lat from stations s WHERE "
"(s.wfo IS NULL or s.wfo = '') and s.country = 'US'"
)
for row in mcursor:
sid = row[0]
iemid = row[1]
network = row[2]
        # Look for the WFO whose CWA polygon contains this site
pcursor.execute(
"select wfo from cwa WHERE "
"ST_Contains(the_geom, "
" ST_SetSrid(ST_GeomFromEWKT('POINT(%s %s)'), 4326)) ",
(row[3], row[4]),
)
if pcursor.rowcount == 0:
LOG.info(
"IEMID: %s ID: %s NETWORK: %s not within CWAs, calc dist",
iemid,
sid,
network,
)
pcursor.execute(
"SELECT wfo, ST_Distance(the_geom, "
" ST_SetSrid(ST_GeomFromEWKT('POINT(%s %s)'), 4326)) as dist "
"from cwa ORDER by dist ASC LIMIT 1",
(row[3], row[4]),
)
wfo, dist = pcursor.fetchone()
if dist > 3:
LOG.info(
" closest CWA %s found >3 degrees away %.2f",
wfo,
dist,
)
continue
else:
row2 = pcursor.fetchone()
wfo = row2[0][:3]
LOG.info(
"Assinging WFO: %s to IEMID: %s ID: %s NETWORK: %s",
wfo,
iemid,
sid,
network,
)
mcursor2.execute(
"UPDATE stations SET wfo = %s WHERE iemid = %s", (wfo, iemid)
)
mcursor.close()
mcursor2.close()
mesosite.commit()
mesosite.close()
if __name__ == "__main__":
main()
| akrherz/iem | scripts/dbutil/set_wfo.py | Python | mit | 2,127 |
from unittest import TestCase
import pandas as pd
from .test_trading_calendar import ExchangeCalendarTestBase
from zipline.utils.calendars.exchange_calendar_ice import ICEExchangeCalendar
class ICECalendarTestCase(ExchangeCalendarTestBase, TestCase):
answer_key_filename = 'ice'
calendar_class = ICEExchangeCalendar
MAX_SESSION_HOURS = 22
def test_hurricane_sandy_one_day(self):
self.assertFalse(
self.calendar.is_session(pd.Timestamp("2012-10-29", tz='UTC'))
)
# ICE wasn't closed on day 2 of hurricane sandy
self.assertTrue(
self.calendar.is_session(pd.Timestamp("2012-10-30", tz='UTC'))
)
def test_2016_holidays(self):
# 2016 holidays:
# new years: 2016-01-01
# good friday: 2016-03-25
# christmas (observed): 2016-12-26
for date in ["2016-01-01", "2016-03-25", "2016-12-26"]:
self.assertFalse(
self.calendar.is_session(pd.Timestamp(date, tz='UTC'))
)
def test_2016_early_closes(self):
# 2016 early closes
# mlk: 2016-01-18
# presidents: 2016-02-15
# mem day: 2016-05-30
# independence day: 2016-07-04
# labor: 2016-09-05
# thanksgiving: 2016-11-24
for date in ["2016-01-18", "2016-02-15", "2016-05-30", "2016-07-04",
"2016-09-05", "2016-11-24"]:
dt = pd.Timestamp(date, tz='UTC')
self.assertTrue(dt in self.calendar.early_closes)
market_close = self.calendar.schedule.loc[dt].market_close
self.assertEqual(
13, # all ICE early closes are 1 pm local
market_close.tz_localize("UTC").tz_convert(
self.calendar.tz
).hour
)
| humdings/zipline | tests/calendars/test_ice_calendar.py | Python | apache-2.0 | 1,818 |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2011, Max Leuthaeuser
License: GPL (see LICENSE.txt for details)
"""
__author__ = 'Max Leuthaeuser'
__license__ = 'GPL'
from abc import ABCMeta
class NamedElement:
'''
NamedElement defines a simple meta class for all elements
in a meta language based object oriented language.
'''
'''
This class is an abstract base class. You cannot instantiate this class.
Write subclasses and implement all methods marked with '@abstractmethod'.
@see http://docs.python.org/library/abc.html
'''
__metaclass__ = ABCMeta
_name = ""
_modifier = []
def __init__(self, name, modifier=[]):
'''
Constructor
@param name: the name as string of this named element
@param modifier: a list containing optional modifier
@raise ValueError: if name is None or empty
'''
self.set_name(name)
self.set_modifier(modifier)
def set_name(self, name):
'''
Set a name for this named element.
@precondition: name != None and name != ""
@postcondition: self._name = name
@param name: the name as string of this named element
@raise ValueError: if name is None or empty
'''
if name and name != "":
self._name = name
else:
raise ValueError('The name should not be None or empty!')
def get_name(self):
'''
@return: the name of this named element.
'''
return self._name
def set_modifier(self, modifier):
'''
Set optional modifier for this named element.
@precondition: modifier is a list of modifier
@postcondition: self._modifier = modifier
@param modifier: a list containing optional modifier
'''
        self._modifier = modifier
| max-leuthaeuser/naoservice | CodeGeneration/AbstractMetaModel/NamedElement.py | Python | gpl-3.0 | 1929
# Generated by Django 3.2.7 on 2021-11-05 08:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('a4comments', '0008_comment_is_deleted_empty_text'),
]
operations = [
migrations.AddField(
model_name='comment',
name='is_blocked',
field=models.BooleanField(default=False),
),
]
| liqd/adhocracy4 | adhocracy4/comments/migrations/0009_comment_is_blocked.py | Python | agpl-3.0 | 406 |
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import os
import unittest
import doctest
from itertools import chain
from lxml import etree
from decimal import Decimal
from io import BytesIO
from mock import Mock, patch
import trytond.tests.test_tryton
from trytond.tests.test_tryton import ModuleTestCase
from trytond.tests.test_tryton import DB_NAME, USER, CONTEXT
from trytond.transaction import Transaction
from trytond.exceptions import UserError
from trytond.modules.account_payment_sepa.payment import CAMT054
from trytond.pool import Pool
def setup_environment():
pool = Pool()
Address = pool.get('party.address')
Company = pool.get('company.company')
Currency = pool.get('currency.currency')
Party = pool.get('party.party')
Bank = pool.get('bank')
company, = Company.search([
('rec_name', '=', 'Dunder Mifflin'),
])
euro, = Currency.create([{
'name': 'Euro',
'symbol': 'EUR',
'code': 'EUR',
}])
company.currency = euro
company.party.sepa_creditor_identifier = 'BE68539007547034'
company.party.save()
company.save()
bank_party = Party(name='European Bank')
bank_party.save()
bank = Bank(party=bank_party, bic='BICODEBBXXX')
bank.save()
customer = Party(name='Customer')
address = Address(street='street', streetbis='street bis',
zip='1234', city='City')
customer.addresses = [address]
customer.save()
return {
'company': company,
'bank': bank,
'customer': customer,
}
def setup_accounts(bank, company, customer):
pool = Pool()
Account = pool.get('bank.account')
return Account.create([{
'bank': bank,
'owners': [('add', [company.party])],
'currency': company.currency.id,
'numbers': [('create', [{
'type': 'iban',
'number': 'ES8200000000000000000000',
}])]}, {
'bank': bank,
'owners': [('add', [customer])],
'currency': company.currency.id,
'numbers': [('create', [{
'type': 'iban',
'number': 'ES3600000000050000000001',
}])]}])
def setup_mandate(company, customer, account):
pool = Pool()
Mandate = pool.get('account.payment.sepa.mandate')
Date = pool.get('ir.date')
return Mandate.create([{
'company': company,
'party': customer,
'account_number': account.numbers[0],
'identification': 'MANDATE',
'type': 'recurrent',
'signature_date': Date.today(),
'state': 'validated',
}])[0]
def setup_journal(flavor, kind, company, account):
pool = Pool()
Journal = pool.get('account.payment.journal')
journal = Journal()
journal.name = flavor
journal.company = company
journal.currency = company.currency
journal.process_method = 'sepa'
journal.sepa_bank_account_number = account.numbers[0]
journal.sepa_payable_flavor = 'pain.001.001.03'
journal.sepa_receivable_flavor = 'pain.008.001.02'
setattr(journal, 'sepa_%s_flavor' % kind, flavor)
journal.save()
return journal
def validate_file(flavor, kind, xsd=None):
'Test generated files are valid'
pool = Pool()
Payment = pool.get('account.payment')
PaymentGroup = pool.get('account.payment.group')
Date = pool.get('ir.date')
ProcessPayment = pool.get('account.payment.process', type='wizard')
if xsd is None:
xsd = flavor
environment = setup_environment()
company = environment['company']
bank = environment['bank']
customer = environment['customer']
company_account, customer_account = setup_accounts(
bank, company, customer)
setup_mandate(company, customer, customer_account)
journal = setup_journal(flavor, kind, company, company_account)
payment, = Payment.create([{
'company': company,
'party': customer,
'journal': journal,
'kind': kind,
'amount': Decimal('1000.0'),
'state': 'approved',
'description': 'PAYMENT',
'date': Date.today(),
}])
session_id, _, _ = ProcessPayment.create()
process_payment = ProcessPayment(session_id)
with Transaction().set_context(active_ids=[payment.id]):
_, data = process_payment.do_process(None)
group, = PaymentGroup.browse(data['res_id'])
message, = group.sepa_messages
assert message.type == 'out', message.type
assert message.state == 'waiting', message.state
sepa_string = message.message.encode('utf-8')
sepa_xml = etree.fromstring(sepa_string)
schema_file = os.path.join(os.path.dirname(__file__),
'%s.xsd' % xsd)
schema = etree.XMLSchema(etree.parse(schema_file))
schema.assertValid(sepa_xml)
class AccountPaymentSepaTestCase(ModuleTestCase):
'Test Account Payment SEPA module'
module = 'account_payment_sepa'
def test_pain001_001_03(self):
'Test pain001.001.03 xsd validation'
with Transaction().start(DB_NAME, USER, context=CONTEXT):
validate_file('pain.001.001.03', 'payable')
def test_pain001_001_05(self):
'Test pain001.001.05 xsd validation'
with Transaction().start(DB_NAME, USER, context=CONTEXT):
validate_file('pain.001.001.05', 'payable')
def test_pain001_003_03(self):
'Test pain001.003.03 xsd validation'
with Transaction().start(DB_NAME, USER, context=CONTEXT):
validate_file('pain.001.003.03', 'payable')
def test_pain008_001_02(self):
'Test pain008.001.02 xsd validation'
with Transaction().start(DB_NAME, USER, context=CONTEXT):
validate_file('pain.008.001.02', 'receivable')
def test_pain008_001_04(self):
'Test pain008.001.04 xsd validation'
with Transaction().start(DB_NAME, USER, context=CONTEXT):
validate_file('pain.008.001.04', 'receivable')
def test_pain008_003_02(self):
'Test pain008.003.02 xsd validation'
with Transaction().start(DB_NAME, USER, context=CONTEXT):
validate_file('pain.008.003.02', 'receivable')
def test_sepa_mandate_sequence(self):
'Test SEPA mandate sequence'
with Transaction().start(DB_NAME, USER, context=CONTEXT):
pool = Pool()
Configuration = pool.get('account.configuration')
Sequence = pool.get('ir.sequence')
Party = pool.get('party.party')
Mandate = pool.get('account.payment.sepa.mandate')
party = Party(name='Test')
party.save()
mandate = Mandate(party=party)
mandate.save()
self.assertFalse(mandate.identification)
sequence = Sequence(name='Test',
code='account.payment.sepa.mandate')
sequence.save()
config = Configuration(1)
config.sepa_mandate_sequence = sequence
config.save()
mandate = Mandate(party=party)
mandate.save()
self.assertTrue(mandate.identification)
def test_identification_unique(self):
'Test unique identification constraint'
with Transaction().start(DB_NAME, USER, context=CONTEXT):
pool = Pool()
Party = pool.get('party.party')
Mandate = pool.get('account.payment.sepa.mandate')
same_id = '1'
party = Party(name='Test')
party.save()
mandate = Mandate(party=party, identification=same_id)
mandate.save()
for i in range(2):
mandate = Mandate(party=party)
mandate.save()
mandate = Mandate(party=party, identification='')
mandate.save()
self.assertEqual(mandate.identification, None)
Mandate.write([mandate], {
'identification': '',
})
self.assertEqual(mandate.identification, None)
self.assertRaises(UserError, Mandate.create, [{
'party': party.id,
'identification': same_id,
}])
def test_payment_sepa_bank_account_number(self):
'Test Payment.sepa_bank_account_number'
with Transaction().start(DB_NAME, USER, context=CONTEXT):
pool = Pool()
Payment = pool.get('account.payment')
Mandate = pool.get('account.payment.sepa.mandate')
AccountNumber = pool.get('bank.account.number')
Party = pool.get('party.party')
BankAccount = pool.get('bank.account')
account_number = AccountNumber()
mandate = Mandate(account_number=account_number)
payment = Payment(kind='receivable', sepa_mandate=mandate)
self.assertEqual(id(payment.sepa_bank_account_number),
id(account_number))
other_account_number = AccountNumber(type='other')
iban_account_number = AccountNumber(type='iban')
bank_account = BankAccount(
numbers=[other_account_number, iban_account_number])
party = Party(
bank_accounts=[bank_account])
payment = Payment(kind='payable', party=party)
self.assertEqual(id(payment.sepa_bank_account_number),
id(iban_account_number))
def test_payment_sequence_type(self):
'Test payment sequence type'
with Transaction().start(DB_NAME, USER, context=CONTEXT):
pool = Pool()
Date = pool.get('ir.date')
Payment = pool.get('account.payment')
ProcessPayment = pool.get('account.payment.process', type='wizard')
environment = setup_environment()
company = environment['company']
bank = environment['bank']
customer = environment['customer']
company_account, customer_account = setup_accounts(
bank, company, customer)
setup_mandate(company, customer, customer_account)
journal = setup_journal('pain.008.001.02', 'receivable',
company, company_account)
payment, = Payment.create([{
'company': company,
'party': customer,
'journal': journal,
'kind': 'receivable',
'amount': Decimal('1000.0'),
'state': 'approved',
'description': 'PAYMENT',
'date': Date.today(),
}])
session_id, _, _ = ProcessPayment.create()
process_payment = ProcessPayment(session_id)
with Transaction().set_context(active_ids=[payment.id]):
_, data = process_payment.do_process(None)
self.assertEqual(payment.sepa_mandate_sequence_type, 'FRST')
payments = Payment.create([{
'company': company,
'party': customer,
'journal': journal,
'kind': 'receivable',
'amount': Decimal('2000.0'),
'state': 'approved',
'description': 'PAYMENT',
'date': Date.today(),
}, {
'company': company,
'party': customer,
'journal': journal,
'kind': 'receivable',
'amount': Decimal('3000.0'),
'state': 'approved',
'description': 'PAYMENT',
'date': Date.today(),
},
])
session_id, _, _ = ProcessPayment.create()
process_payment = ProcessPayment(session_id)
payment_ids = [p.id for p in payments]
with Transaction().set_context(active_ids=payment_ids):
_, data = process_payment.do_process(None)
for payment in payments:
self.assertEqual(payment.sepa_mandate_sequence_type, 'RCUR')
def handle_camt054(self, flavor):
'Handle camt.054'
with Transaction().start(DB_NAME, USER, context=CONTEXT):
pool = Pool()
Message = pool.get('account.payment.sepa.message')
message_file = os.path.join(os.path.dirname(__file__),
'%s.xml' % flavor)
message = open(message_file).read()
namespace = Message.get_namespace(message)
self.assertEqual(namespace,
'urn:iso:std:iso:20022:tech:xsd:%s' % flavor)
payment = Mock()
Payment = Mock()
Payment.search.return_value = [payment]
handler = CAMT054(BytesIO(message), Payment)
self.assertEqual(handler.msg_id, 'AAAASESS-FP-00001')
Payment.search.assert_called_with([
('sepa_end_to_end_id', '=', 'MUELL/FINP/RA12345'),
('kind', '=', 'payable'),
])
Payment.succeed.assert_called_with([payment])
payment.reset_mock()
Payment.reset_mock()
with patch.object(CAMT054, 'is_returned') as is_returned:
is_returned.return_value = True
handler = CAMT054(BytesIO(message), Payment)
Payment.save.assert_called_with([payment])
Payment.fail.assert_called_with([payment])
def test_camt054_001_01(self):
'Test camt.054.001.01 handling'
self.handle_camt054('camt.054.001.01')
def test_camt054_001_02(self):
'Test camt.054.001.02 handling'
self.handle_camt054('camt.054.001.02')
def test_camt054_001_03(self):
'Test camt.054.001.03 handling'
self.handle_camt054('camt.054.001.03')
def test_camt054_001_04(self):
'Test camt.054.001.04 handling'
self.handle_camt054('camt.054.001.04')
def test_sepa_mandate_report(self):
'Test sepa mandate report'
with Transaction().start(DB_NAME, USER, context=CONTEXT):
pool = Pool()
Report = pool.get('account.payment.sepa.mandate', type='report')
environment = setup_environment()
company = environment['company']
bank = environment['bank']
customer = environment['customer']
company_account, customer_account = setup_accounts(
bank, company, customer)
mandate = setup_mandate(company, customer, customer_account)
oext, content, _, _ = Report.execute([mandate.id], {})
self.assertEqual(oext, 'odt')
self.assertTrue(content)
def suite():
suite = trytond.tests.test_tryton.suite()
from trytond.modules.company.tests import test_company
from trytond.modules.account.tests import test_account
for test in chain(test_company.suite(), test_account.suite()):
if test not in suite and not isinstance(test, doctest.DocTestCase):
suite.addTest(test)
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(
AccountPaymentSepaTestCase))
return suite
| kret0s/gnuhealth-live | tryton/server/trytond-3.8.3/trytond/modules/account_payment_sepa/tests/test_account_payment_sepa.py | Python | gpl-3.0 | 15,736 |
# -*- coding: utf-8 -*-
# Copyright(c) 2016-2020 Jonas Sjöberg <[email protected]>
# Source repository: https://github.com/jonasjberg/autonameow
#
# This file is part of autonameow.
#
# autonameow is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# autonameow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with autonameow. If not, see <http://www.gnu.org/licenses/>.
from unittest import TestCase
from util.disk.sanitize import sanitize_filename
class TestSanitizeFilename(TestCase):
"""
NOTE: This class was lifted as-is from the "youtube-dl" project.
https://github.com/rg3/youtube-dl/blob/master/youtube_dl/test/test_utils.py
Commit: 5552c9eb0fece567f7dda13810939fca32d7d65a
"""
def test_sanitize_filename(self):
self.assertEqual(sanitize_filename('abc'), 'abc')
self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')
self.assertEqual(sanitize_filename('123'), '123')
self.assertEqual('abc_de', sanitize_filename('abc/de'))
self.assertFalse('/' in sanitize_filename('abc/de///'))
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
self.assertEqual('yes no', sanitize_filename('yes? no'))
self.assertEqual('this - that', sanitize_filename('this: that'))
self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
aumlaut = 'ä'
self.assertEqual(sanitize_filename(aumlaut), aumlaut)
tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430'
self.assertEqual(sanitize_filename(tests), tests)
self.assertEqual(
sanitize_filename('New World record at 0:12:34'),
'New World record at 0_12_34')
self.assertEqual(sanitize_filename('--gasdgf'), '_-gasdgf')
self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf')
self.assertEqual(sanitize_filename('.gasdgf'), 'gasdgf')
self.assertEqual(sanitize_filename('.gasdgf', is_id=True), '.gasdgf')
forbidden = '"\0\\/'
for fc in forbidden:
for fbc in forbidden:
self.assertTrue(fbc not in sanitize_filename(fc))
def test_sanitize_filename_restricted(self):
self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')
self.assertEqual(sanitize_filename('123', restricted=True), '123')
self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
tests = 'aäb\u4e2d\u56fd\u7684c'
self.assertEqual(sanitize_filename(tests, restricted=True), 'aab_c')
self.assertTrue(sanitize_filename('\xf6', restricted=True) != '') # No empty filename
forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
for fc in forbidden:
for fbc in forbidden:
self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))
# Handle a common case more neatly
self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song')
self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech')
# .. but make sure the file name is never empty
self.assertTrue(sanitize_filename('-', restricted=True) != '')
self.assertTrue(sanitize_filename(':', restricted=True) != '')
self.assertEqual(sanitize_filename(
'ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', restricted=True),
'AAAAAAAECEEEEIIIIDNOOOOOOOOEUUUUUYPssaaaaaaaeceeeeiiiionooooooooeuuuuuypy')
def test_sanitize_ids(self):
self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw')
self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')
| jonasjberg/autonameow | tests/unit/test_util_disk_sanitize.py | Python | gpl-2.0 | 4,830 |
"""
"""
from nose.plugins import Plugin
from .notifications import Notification
class AlertPlugin(Plugin):
"""
Plugin that should fire os level notification after tests are finished.
"""
name = 'alert'
def get_notification(self, result):
"""
Returns ``Notification`` instance for given nosetest ``result``.
:param result: nosetest result that is passed to ``Plugin.finalize``
method at the end of tests
"""
return Notification(
fails=len(result.failures),
errors=len(result.errors),
total=result.testsRun,
)
def finalize(self, result):
"""
Shows notification about success or failure.
"""
notification = self.get_notification(result)
notification.send()
class WatchPlugin(Plugin):
"""
    Plugin that uses watchdog for continuous test runs.
"""
name = 'watch'
is_watching = False
def finalize(self, result):
import sys
from subprocess import call
argv = list(sys.argv)
argv.remove('--with-watch')
watchcmd = 'clear && ' + ' '.join(argv)
call(['watchmedo', 'shell-command', '-c', watchcmd, '-R', '-p', '*.py', '.'])
| lukaszb/nose-alert | nosealert/plugin.py | Python | bsd-2-clause | 1,243 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import unittest
from timeit import default_timer as timer
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.dataset.wmt16 as wmt16
os.environ['FLAGS_eager_delete_tensor_gb'] = "0.0"
from parallel_executor_test_base import TestParallelExecutorBase
from test_parallel_executor_transformer import get_feed_data_reader, transformer
# NOTE(dzhwinter): test different strategy collisions.
# open the eager delete tensor strategy by default.
class TestTransformerWithIR(TestParallelExecutorBase):
def test_main(self):
if core.is_compiled_with_cuda():
# check python transpiler
self.check_network_convergence(
transformer,
use_cuda=True,
feed_data_reader=get_feed_data_reader(),
use_ir_memory_optimize=False,
iter=2)
# check IR memory optimize
self.check_network_convergence(
transformer,
use_cuda=True,
feed_data_reader=get_feed_data_reader(),
use_ir_memory_optimize=True,
iter=2)
if __name__ == '__main__':
unittest.main()
| chengduoZH/Paddle | python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py | Python | apache-2.0 | 1,817 |
# Prime Palindrome Challenge
# Solution 06: Using Sieve of Atkin
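# --- Illustrative sketch only (editorial addition; the author's solution body
# below is still an empty stub). It shows one possible shape for this solution,
# assuming the usual CodeEval form of the challenge (print the largest
# palindromic prime below 1000); the helper names are made up for the example.
def atkin_sieve(limit):
    """Return all primes <= limit using the Sieve of Atkin."""
    if limit < 2:
        return []
    sieve = [False] * (limit + 1)
    root = int(limit ** 0.5) + 1
    for x in range(1, root):
        for y in range(1, root):
            n = 4 * x * x + y * y          # candidates with n % 12 == 1 or 5
            if n <= limit and n % 12 in (1, 5):
                sieve[n] = not sieve[n]
            n = 3 * x * x + y * y          # candidates with n % 12 == 7
            if n <= limit and n % 12 == 7:
                sieve[n] = not sieve[n]
            n = 3 * x * x - y * y          # candidates with n % 12 == 11
            if x > y and n <= limit and n % 12 == 11:
                sieve[n] = not sieve[n]
    for r in range(5, root):               # weed out multiples of prime squares
        if sieve[r]:
            for composite in range(r * r, limit + 1, r * r):
                sieve[composite] = False
    return ([p for p in (2, 3) if p <= limit]
            + [n for n in range(5, limit + 1) if sieve[n]])
def largest_prime_palindrome(limit=1000):
    """Largest prime below `limit` whose digits read the same both ways."""
    return max(p for p in atkin_sieve(limit - 1) if str(p) == str(p)[::-1])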
if __name__ == "__main__":
    # Not Implemented yet
    pass
| madoodia/codeLab | python/codeeavl.com/Easy/Prime Palindrome/solution_06.py | Python | mit | 120 |
#!venv/bin/python3
# whats_on_tv.py - randomly selects media files to play from input directory
import sys
import os
import logging
import argparse
import random
import subprocess
import shlex
import re
import videoLister
import mff
def main():
# set up logging
logger = logging.getLogger(__name__)
logging.basicConfig(filename='whats_on_tv.log', filemode='w', level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# set up argparse
parser = argparse.ArgumentParser()
parser.add_argument('input', help='Input directory to search.')
# Set up custom parser for viewing time range.
def parseNumList(string):
"""
https://stackoverflow.com/questions/6512280/accept-a-range-of-numbers-in-the-form-of-0-5-using-pythons-argparse
"""
m = re.match(r'(\d+)(?:-(\d+))?$', string)
start = m.group(1)
end = m.group(2) or start
logger.debug("List of values: {}".format([int(start), int(end)]))
if start == end:
return [int(start)]
else:
return [int(start), int(end)]
parser.add_argument('-t', '--time', help='Viewing time in minutes', type=parseNumList, default=[300])
parser.add_argument('-n', '--num', help='Number of videos to queue', type=int, default=1)
parser.add_argument('-s', '--search', help='String to search for', type=str, default="")
args = parser.parse_args()
if args.input:
in_path = args.input
else:
parser.print_help()
sys.exit(1)
# One time loop to generate a list of available media files in path
m_list = []
for item in videoLister.videoDir(in_path, args.search):
logger.debug("Found: {}".format(item)) # Can write really long log files
m_list.append(item)
# Check that we matched at least args.num
if len(m_list) == 0:
print("Search term not found, exiting...")
raise SystemExit
elif len(m_list) < args.num:
print("Number of matches found: {}, fewer than number to queue, exiting...".format(len(m_list)))
raise SystemExit
# set min/max durations
if len(args.time) >= 2:
duration_max = args.time[-1]
duration_min = args.time[0]
else:
duration_max = args.time[0]
duration_min = 0
# Randomly select a video to play
random.seed()
p_list = []
for x in range(args.num):
        duration = duration_max + 1  # Fix this, it's hacky to get the loop to run...
while duration not in range(duration_min, duration_max): # Find a file with a duration in the allotted range.
choice = random.choice(m_list)
m_list.remove(choice) # remove the choice from the list
m_file = mff.format_info(choice) # get file details
duration = round(float(m_file['duration']) / 60) # convert to integer minutes
logger.info("Selected: {}".format(os.path.basename(choice)))
logger.info("Running time: {} min".format(duration))
logger.info("Added to playlist: {}".format(os.path.basename(choice)))
p_list.append(choice)
logger.info("Playlist: {}".format(p_list))
# Launch selected video with MPV in full screen
play_command = 'mpv {} --really-quiet --fs &'.format(' '.join('"{}"'.format(p) for p in p_list))
proc = subprocess.Popen(shlex.split(play_command))
# use proc.terminate() to kill
if __name__ == '__main__':
main()
| samcheck/PyMedia | whats_on_tv.py | Python | mit | 3,488 |
from robot.utils import normalizing
def _int_any_base(i, base=0):
try:
return int(i, base)
except TypeError:
return i
def int_any_base(i, base=0):
try:
return _int_any_base(i, base)
except ValueError:
raise RuntimeError('Could not parse integer "%s"' % i)
def list_any_input(data):
"""Convert any input to list.
data can be:
basestring: '0x10 0x22 0xab' => [16, 32, 171]
list: [16, 32, 171] => [16, 32, 171]
value: 16 => [16]
value: 0x10 => [16]
"""
if isinstance(data, basestring):
data = [_int_any_base(d) for d in data.split(' ')]
elif isinstance(data, tuple):
data = [d for d in data]
elif isinstance(data, list):
data = data
else:
data = [_int_any_base(data)]
return data
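# --- Illustrative usage sketch (editorial addition, not part of the original
# module). It demonstrates the conversions documented in the docstrings above;
# importing the module assumes robotframework is installed (robot.utils), and
# the code is Python 2 (it relies on `basestring`).
if __name__ == '__main__':
    assert int_any_base('0x10') == 16
    assert list_any_input('0x10 0x22 0xab') == [16, 32, 171]
    assert list_any_input((16, 32)) == [16, 32]
    assert list_any_input(0x10) == [16]
    print(list_any_input('0x10 0x22 0xab'))  # -> [16, 32, 171]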
| kontron/robotframework-aardvarklibrary | src/AardvarkLibrary/utils.py | Python | apache-2.0 | 832 |
import yaml
from pydrive2.files import GoogleDriveFile
with open('config/settings.yaml', 'r') as yaml_config:
config_map = yaml.load(yaml_config, Loader=yaml.SafeLoader)
def delete_drive_permission_job(context):
job_data = context.job.context
drive_root_folder: GoogleDriveFile = job_data['folder_obj']
permission = job_data['permission_obj']
request_message = job_data['request_message']
drive_root_folder.DeletePermission(permission['id'])
context.bot.send_message(
chat_id=config_map["dev_group_chatid"],
text=f"Permessi ritirati",
reply_to_message_id=request_message.message_id,
)
| UNICT-DMI/Telegram-DMI-Bot | module/utils/drive_contribute_utils.py | Python | gpl-3.0 | 648 |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from os import environ
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component using the time service.
"""
@inlineCallbacks
def onJoin(self, details):
print("session attached")
try:
now = yield self.call(u'com.timeservice.now')
except Exception as e:
print("Error: {}".format(e))
else:
print("Current time from time service: {}".format(now))
self.leave()
def onDisconnect(self):
print("disconnected")
reactor.stop()
if __name__ == '__main__':
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/ws"),
u"crossbardemo",
debug_wamp=False, # optional; log many WAMP details
debug=False, # optional; log even more details
)
runner.run(Component)
| hzruandd/AutobahnPython | examples/twisted/wamp/rpc/timeservice/frontend.py | Python | mit | 2,292 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'DocumentSetFormEntry.document'
db.alter_column(u'crowdataapp_documentsetformentry', 'document_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['crowdataapp.Document']))
def backwards(self, orm):
# Changing field 'DocumentSetFormEntry.document'
db.alter_column(u'crowdataapp_documentsetformentry', 'document_id', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['crowdataapp.Document']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'crowdataapp.document': {
'Meta': {'object_name': 'Document'},
'document_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documents'", 'to': u"orm['crowdataapp.DocumentSet']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': "'512'"})
},
u'crowdataapp.documentset': {
'Meta': {'object_name': 'DocumentSet'},
'entries_threshold': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'128'"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'template_function': ('django.db.models.fields.TextField', [], {'default': "'// Javascript function to insert the document into the DOM.\\n// Receives the URL of the document as its only parameter.\\n// Must be called insertDocument\\n// JQuery is available\\n// resulting element should be inserted into div#document-viewer-container\\nfunction insertDocument(document_url) {\\n}\\n'"})
},
u'crowdataapp.documentsetfieldentry': {
'Meta': {'object_name': 'DocumentSetFieldEntry'},
'entry': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['crowdataapp.DocumentSetFormEntry']"}),
'field_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True'})
},
u'crowdataapp.documentsetform': {
'Meta': {'object_name': 'DocumentSetForm'},
'button_text': ('django.db.models.fields.CharField', [], {'default': "u'Submit'", 'max_length': '50'}),
'document_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'form'", 'unique': 'True', 'to': u"orm['crowdataapp.DocumentSet']"}),
'email_copies': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'email_from': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_subject': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intro': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'send_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'default': '[1]', 'to': u"orm['sites.Site']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'crowdataapp.documentsetformentry': {
'Meta': {'object_name': 'DocumentSetFormEntry'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'form_entries'", 'null': 'True', 'to': u"orm['crowdataapp.Document']"}),
'entry_time': ('django.db.models.fields.DateTimeField', [], {}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': u"orm['crowdataapp.DocumentSetForm']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
u'crowdataapp.documentsetformfield': {
'Meta': {'object_name': 'DocumentSetFormField'},
'autocomplete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'choices': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'default': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'field_type': ('django.db.models.fields.IntegerField', [], {}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['crowdataapp.DocumentSetForm']"}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'placeholder_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
    complete_apps = ['crowdataapp']
| the-engine-room/crowdata | crowdataapp/migrations/0004_auto__chg_field_documentsetformentry_document.py | Python | mit | 10383 |
# -*- coding: utf-8 -*-
from decimal import Decimal
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mptt.models import MPTTModel, TreeForeignKey
from fuzzy_modeling.models.norms import NormModel
from fuzzy_modeling.utils import get_class_by_python_path
from fuzzy_modeling.models.adjectives import AdjectiveModel
from fuzzy_modeling.models.utils import PyFuzzyMixin
class OperatorModel(MPTTModel, PyFuzzyMixin):
"""
    A fuzzy operator base model.
    This single model handles all operator types.
"""
class Meta:
app_label = 'fuzzy_modeling'
class MPTTMeta:
parent_attr = 'compound_inputs'
# order_insertion_by = ['id']
OPERATOR_TYPES = (
('fuzzy.operator.Compound.Compound', _('Compound')),
('fuzzy.operator.Const.Const', _('Const')),
('fuzzy.operator.Input.Input', _('Input')),
('fuzzy.operator.Not.Not', _('Not')),
)
operator_type = models.CharField(_("Operator Type"),
choices=OPERATOR_TYPES,
max_length=250,
blank=False, null=False,
default=OPERATOR_TYPES[0][0]
)
compound_norm = models.ForeignKey(NormModel, null=True, blank=True)
compound_inputs = TreeForeignKey(
'self',
related_name="compound_inputs_children",
null=True,
blank=True,
verbose_name=_('Input')
)
const_value = models.DecimalField(
_("Constant Value"),
max_digits=10,
decimal_places=2,
        default=Decimal("0"),
blank=True,
null=True
)
input_adjective = models.ForeignKey(AdjectiveModel, blank=True, null=True)
not_input = models.ForeignKey('self', blank=True, null=True, related_name='not_input_set')
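    # Which fields are relevant depends on operator_type (see get_pyfuzzy below):
    #   Compound -> compound_norm and the compound_inputs tree children
    #   Const    -> const_value
    #   Input    -> input_adjective
    #   Not      -> not_input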
def _get_adj_instance(self, system):
"""
        Return the existing adjective instance that is used by this operator.
        Pyfuzzy needs the instances to be the same objects, so that the
        inference stays consistent when it is run.
"""
if self.input_adjective.ovar:
var_model = self.input_adjective.ovar
else:
var_model = self.input_adjective.ivar
var = system.variables[var_model.name]
adj = var.adjectives[self.input_adjective.name]
return adj
def _get_pyfuzzy_compound(self, system=None):
"""
        Return the pyfuzzy operator instance for the Compound type.
"""
Compound = get_class_by_python_path(self.operator_type)
norm = self.compound_norm.get_pyfuzzy()
inputs = [op.get_pyfuzzy(system=system) for op in self.get_children()]
compound = Compound(norm, *inputs)
return compound
def _get_pyfuzzy_const(self):
"""
        Return the pyfuzzy operator instance for the Const type.
"""
Const = get_class_by_python_path(self.operator_type)
value = self.const_value
const = Const(value)
return const
def _get_pyfuzzy_input(self, system=None):
"""
        Return the pyfuzzy operator instance for the Input type.
"""
Input = get_class_by_python_path(self.operator_type)
# try:
adjective = self._get_adj_instance(system)
# except:
# adjective = self.input_adjective.get_pyfuzzy()
input_op = Input(adjective)
return input_op
def _get_pyfuzzy_not(self, system=None):
"""
        Return the pyfuzzy operator instance for the Not type.
"""
Not = get_class_by_python_path(self.operator_type)
op = self.not_input.get_pyfuzzy(system=system)
not_op = Not(op)
return not_op
def get_pyfuzzy(self, system=None):
"""
        Return the pyfuzzy operator instance corresponding to this model.
"""
if self.operator_type == 'fuzzy.operator.Compound.Compound':
return self._get_pyfuzzy_compound(system=system)
if self.operator_type == 'fuzzy.operator.Const.Const':
return self._get_pyfuzzy_const()
if self.operator_type == 'fuzzy.operator.Input.Input':
return self._get_pyfuzzy_input(system=system)
if self.operator_type == 'fuzzy.operator.Not.Not':
return self._get_pyfuzzy_not(system=system)
return None
@classmethod
def _from_pyfuzzy_compound(cls, pyfuzzy, system=None, systemModel=None):
op_model = cls(operator_type='fuzzy.operator.Compound.Compound')
# norm
norm_model = cls.compound_norm.field.related.parent_model.from_pyfuzzy(pyfuzzy.norm)
op_model.compound_norm = norm_model
op_model.save()
# inputs
for op_input in pyfuzzy.inputs:
op_i_model = cls.from_pyfuzzy(op_input, system=system, systemModel=systemModel)
op_model.compound_inputs_children.add(op_i_model)
op_model.save()
return op_model
@classmethod
def _from_pyfuzzy_const(cls, pyfuzzy):
op_model = cls(operator_type='fuzzy.operator.Const.Const', const_value=pyfuzzy.value)
op_model.save()
return op_model
@classmethod
def _from_pyfuzzy_input(cls, pyfuzzy, system=None, systemModel=None):
op_model = cls(operator_type='fuzzy.operator.Input.Input')
# adj
# try:
adj_model = cls.input_adjective.field.related.parent_model._get_existing_adjective_model(system, systemModel, pyfuzzy.adjective)
# except:
# adj_model = cls.input_adjective.field.related.parent_model.from_pyfuzzy(pyfuzzy.adjective)
op_model.input_adjective = adj_model
op_model.save()
return op_model
@classmethod
def _from_pyfuzzy_not(cls, pyfuzzy, system=None, systemModel=None):
op_model = cls(operator_type='fuzzy.operator.Not.Not')
# operator
op_not_model = cls.not_input.field.related.parent_model.from_pyfuzzy(pyfuzzy.input, system=system, systemModel=systemModel)
op_model.not_input = op_not_model
op_model.save()
return op_model
@classmethod
def from_pyfuzzy(cls, pyfuzzy, system=None, systemModel=None):
"""
        Return the model representation of the given pyfuzzy operator instance.
"""
if pyfuzzy.__class__.__name__ == 'Compound':
return cls._from_pyfuzzy_compound(pyfuzzy, system=system, systemModel=systemModel)
if pyfuzzy.__class__.__name__ == 'Const':
return cls._from_pyfuzzy_const(pyfuzzy)
if pyfuzzy.__class__.__name__ == 'Input':
return cls._from_pyfuzzy_input(pyfuzzy, system=system, systemModel=systemModel)
if pyfuzzy.__class__.__name__ == 'Not':
return cls._from_pyfuzzy_not(pyfuzzy, system=system, systemModel=systemModel)
def __unicode__(self):
return "%s - %s" % (self.get_operator_type_display(), self.id)
| arruda/cloudfuzzy | fuzzy_modeling/models/operators.py | Python | mit | 6,898 |