code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int64, 3-1.05M)
---|---|---|---|---|---|
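# Jython example (hence the Python 2 print statement): it uses Java reflection to read
# a Java object's private field, showing that "private" is not an absolute guarantee.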
import Confidential
message = Confidential('top secret text')
secret_field = Confidential.getDeclaredField('secret')
secret_field.setAccessible(True) # break the lock!
print 'message.secret =', secret_field.get(message)
| YuxuanLing/trunk | trunk/code/study/python/Fluent-Python-example-code/09-pythonic-obj/private/expose.py | Python | gpl-3.0 | 228 |
from django.http import HttpRequest, HttpResponse
from zerver.lib.actions import notify_attachment_update
from zerver.lib.attachments import access_attachment_by_id, remove_attachment, user_attachments
from zerver.lib.response import json_success
from zerver.models import UserProfile
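# Views for listing a user's attachments and for removing an attachment the user owns.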
def list_by_user(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
return json_success(
{
"attachments": user_attachments(user_profile),
"upload_space_used": user_profile.realm.currently_used_upload_space_bytes(),
}
)
def remove(request: HttpRequest, user_profile: UserProfile, attachment_id: str) -> HttpResponse:
attachment = access_attachment_by_id(user_profile, int(attachment_id), needs_owner=True)
remove_attachment(user_profile, attachment)
notify_attachment_update(user_profile, "remove", {"id": int(attachment_id)})
return json_success()
| eeshangarg/zulip | zerver/views/attachments.py | Python | apache-2.0 | 917 |
#!/usr/bin/env python
# test_copy.py - unit test for COPY support
#
# Copyright (C) 2010-2011 Daniele Varrazzo <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import sys
import string
from testutils import unittest, ConnectingTestCase, decorate_all_tests
from testutils import skip_if_no_iobase, skip_before_postgres
from cStringIO import StringIO
from itertools import cycle, izip
import psycopg2
import psycopg2.extensions
from testutils import skip_copy_if_green
if sys.version_info[0] < 3:
_base = object
else:
from io import TextIOBase as _base
class MinimalRead(_base):
"""A file wrapper exposing the minimal interface to copy from."""
def __init__(self, f):
self.f = f
def read(self, size):
return self.f.read(size)
def readline(self):
return self.f.readline()
class MinimalWrite(_base):
"""A file wrapper exposing the minimal interface to copy to."""
def __init__(self, f):
self.f = f
def write(self, data):
return self.f.write(data)
class CopyTests(ConnectingTestCase):
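    """Tests for cursor.copy_from(), copy_to() and copy_expert()."""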
def setUp(self):
ConnectingTestCase.setUp(self)
self._create_temp_table()
def _create_temp_table(self):
curs = self.conn.cursor()
curs.execute('''
CREATE TEMPORARY TABLE tcopy (
id serial PRIMARY KEY,
data text
)''')
def test_copy_from(self):
curs = self.conn.cursor()
try:
self._copy_from(curs, nrecs=1024, srec=10*1024, copykw={})
finally:
curs.close()
def test_copy_from_insane_size(self):
# Trying to trigger a "would block" error
curs = self.conn.cursor()
try:
self._copy_from(curs, nrecs=10*1024, srec=10*1024,
copykw={'size': 20*1024*1024})
finally:
curs.close()
def test_copy_from_cols(self):
curs = self.conn.cursor()
f = StringIO()
for i in xrange(10):
f.write("%s\n" % (i,))
f.seek(0)
curs.copy_from(MinimalRead(f), "tcopy", columns=['id'])
curs.execute("select * from tcopy order by id")
self.assertEqual([(i, None) for i in range(10)], curs.fetchall())
def test_copy_from_cols_err(self):
curs = self.conn.cursor()
f = StringIO()
for i in xrange(10):
f.write("%s\n" % (i,))
f.seek(0)
def cols():
raise ZeroDivisionError()
yield 'id'
self.assertRaises(ZeroDivisionError,
curs.copy_from, MinimalRead(f), "tcopy", columns=cols())
def test_copy_to(self):
curs = self.conn.cursor()
try:
self._copy_from(curs, nrecs=1024, srec=10*1024, copykw={})
self._copy_to(curs, srec=10*1024)
finally:
curs.close()
@skip_if_no_iobase
def test_copy_text(self):
self.conn.set_client_encoding('latin1')
self._create_temp_table() # the above call closed the xn
if sys.version_info[0] < 3:
abin = ''.join(map(chr, range(32, 127) + range(160, 256)))
about = abin.decode('latin1').replace('\\', '\\\\')
else:
            abin = bytes(list(range(32, 127)) + list(range(160, 256))).decode('latin1')
about = abin.replace('\\', '\\\\')
curs = self.conn.cursor()
curs.execute('insert into tcopy values (%s, %s)',
(42, abin))
import io
f = io.StringIO()
curs.copy_to(f, 'tcopy', columns=('data',))
f.seek(0)
self.assertEqual(f.readline().rstrip(), about)
@skip_if_no_iobase
def test_copy_bytes(self):
self.conn.set_client_encoding('latin1')
self._create_temp_table() # the above call closed the xn
if sys.version_info[0] < 3:
abin = ''.join(map(chr, range(32, 127) + range(160, 255)))
about = abin.replace('\\', '\\\\')
else:
            abin = bytes(list(range(32, 127)) + list(range(160, 255))).decode('latin1')
about = abin.replace('\\', '\\\\').encode('latin1')
curs = self.conn.cursor()
curs.execute('insert into tcopy values (%s, %s)',
(42, abin))
import io
f = io.BytesIO()
curs.copy_to(f, 'tcopy', columns=('data',))
f.seek(0)
self.assertEqual(f.readline().rstrip(), about)
@skip_if_no_iobase
def test_copy_expert_textiobase(self):
self.conn.set_client_encoding('latin1')
self._create_temp_table() # the above call closed the xn
if sys.version_info[0] < 3:
abin = ''.join(map(chr, range(32, 127) + range(160, 256)))
abin = abin.decode('latin1')
about = abin.replace('\\', '\\\\')
else:
            abin = bytes(list(range(32, 127)) + list(range(160, 256))).decode('latin1')
about = abin.replace('\\', '\\\\')
import io
f = io.StringIO()
f.write(about)
f.seek(0)
curs = self.conn.cursor()
psycopg2.extensions.register_type(
psycopg2.extensions.UNICODE, curs)
curs.copy_expert('COPY tcopy (data) FROM STDIN', f)
curs.execute("select data from tcopy;")
self.assertEqual(curs.fetchone()[0], abin)
f = io.StringIO()
curs.copy_expert('COPY tcopy (data) TO STDOUT', f)
f.seek(0)
self.assertEqual(f.readline().rstrip(), about)
# same tests with setting size
f = io.StringIO()
f.write(about)
f.seek(0)
exp_size = 123
# hack here to leave file as is, only check size when reading
real_read = f.read
def read(_size, f=f, exp_size=exp_size):
self.assertEqual(_size, exp_size)
return real_read(_size)
f.read = read
curs.copy_expert('COPY tcopy (data) FROM STDIN', f, size=exp_size)
curs.execute("select data from tcopy;")
self.assertEqual(curs.fetchone()[0], abin)
def _copy_from(self, curs, nrecs, srec, copykw):
f = StringIO()
for i, c in izip(xrange(nrecs), cycle(string.ascii_letters)):
l = c * srec
f.write("%s\t%s\n" % (i,l))
f.seek(0)
curs.copy_from(MinimalRead(f), "tcopy", **copykw)
curs.execute("select count(*) from tcopy")
self.assertEqual(nrecs, curs.fetchone()[0])
curs.execute("select data from tcopy where id < %s order by id",
(len(string.ascii_letters),))
for i, (l,) in enumerate(curs):
self.assertEqual(l, string.ascii_letters[i] * srec)
def _copy_to(self, curs, srec):
f = StringIO()
curs.copy_to(MinimalWrite(f), "tcopy")
f.seek(0)
ntests = 0
for line in f:
n, s = line.split()
if int(n) < len(string.ascii_letters):
self.assertEqual(s, string.ascii_letters[int(n)] * srec)
ntests += 1
self.assertEqual(ntests, len(string.ascii_letters))
def test_copy_expert_file_refcount(self):
class Whatever(object):
pass
f = Whatever()
curs = self.conn.cursor()
self.assertRaises(TypeError,
curs.copy_expert, 'COPY tcopy (data) FROM STDIN', f)
def test_copy_no_column_limit(self):
cols = [ "c%050d" % i for i in range(200) ]
curs = self.conn.cursor()
curs.execute('CREATE TEMPORARY TABLE manycols (%s)' % ',\n'.join(
[ "%s int" % c for c in cols]))
curs.execute("INSERT INTO manycols DEFAULT VALUES")
f = StringIO()
curs.copy_to(f, "manycols", columns = cols)
f.seek(0)
self.assertEqual(f.read().split(), ['\\N'] * len(cols))
f.seek(0)
curs.copy_from(f, "manycols", columns = cols)
curs.execute("select count(*) from manycols;")
self.assertEqual(curs.fetchone()[0], 2)
@skip_before_postgres(8, 2) # they don't send the count
def test_copy_rowcount(self):
curs = self.conn.cursor()
curs.copy_from(StringIO('aaa\nbbb\nccc\n'), 'tcopy', columns=['data'])
self.assertEqual(curs.rowcount, 3)
curs.copy_expert(
"copy tcopy (data) from stdin",
StringIO('ddd\neee\n'))
self.assertEqual(curs.rowcount, 2)
curs.copy_to(StringIO(), "tcopy")
self.assertEqual(curs.rowcount, 5)
curs.execute("insert into tcopy (data) values ('fff')")
curs.copy_expert("copy tcopy to stdout", StringIO())
self.assertEqual(curs.rowcount, 6)
def test_copy_rowcount_error(self):
curs = self.conn.cursor()
curs.execute("insert into tcopy (data) values ('fff')")
self.assertEqual(curs.rowcount, 1)
self.assertRaises(psycopg2.DataError,
curs.copy_from, StringIO('aaa\nbbb\nccc\n'), 'tcopy')
self.assertEqual(curs.rowcount, -1)
decorate_all_tests(CopyTests, skip_copy_if_green)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
| kylelwm/ponus | venv/build/psycopg2/tests/test_copy.py | Python | mit | 9,939 |
# ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Runs tests for the current model and adapter """
from diplomacy_research.models.policy.tests.policy_adapter_test_setup import PolicyAdapterTestSetup
from diplomacy_research.models.policy.token_based import PolicyAdapter, BaseDatasetBuilder
from diplomacy_research.models.policy.token_based.v001_markovian_no_film import PolicyModel, load_args
from diplomacy_research.models.value.v001_val_relu_7 import ValueModel, load_args as load_value_args
from diplomacy_research.models.self_play.algorithms.a2c import Algorithm as A2CAlgo, load_args as a2c_args
from diplomacy_research.models.self_play.algorithms.ppo import Algorithm as PPOAlgo, load_args as ppo_args
from diplomacy_research.models.self_play.algorithms.reinforce import Algorithm as ReinforceAlgo,\
load_args as reinforce_args
from diplomacy_research.models.self_play.algorithms.tests.algorithm_test_setup import AlgorithmSetup
from diplomacy_research.utils.process import run_in_separate_process
# ----------- Testable Class --------------
class BaseTestClass(AlgorithmSetup):
""" Tests the algorithm """
def __init__(self, algorithm_ctor, algo_load_args):
""" Constructor """
AlgorithmSetup.__init__(self, algorithm_ctor, algo_load_args, 'token_based')
def get_policy_model(self):
""" Returns the PolicyModel """
return PolicyModel
def get_policy_builder(self):
""" Returns the Policy's BaseDatasetBuilder """
return BaseDatasetBuilder
def get_policy_adapter(self):
""" Returns the PolicyAdapter """
return PolicyAdapter
def get_policy_load_args(self):
""" Returns the policy args """
return load_args()
# ----------- Launch Scripts --------------
def launch_a2c():
""" Launches tests for a2c """
test_object = BaseTestClass(A2CAlgo, a2c_args)
test_object.run_tests()
def launch_ppo():
""" Launches tests for ppo """
test_object = BaseTestClass(PPOAlgo, ppo_args)
test_object.run_tests()
def launch_reinforce():
""" Launches tests for reinforce """
test_object = BaseTestClass(ReinforceAlgo, reinforce_args)
test_object.run_tests()
def launch_adapter():
""" Launches the tests """
testable_class = PolicyAdapterTestSetup(policy_model_ctor=PolicyModel,
value_model_ctor=ValueModel,
draw_model_ctor=None,
dataset_builder=BaseDatasetBuilder(),
policy_adapter_ctor=PolicyAdapter,
load_policy_args=load_args,
load_value_args=load_value_args,
load_draw_args=None,
strict=False)
testable_class.run_tests()
# ----------- Tests --------------
def test_run_a2c():
""" Runs the a2c test """
run_in_separate_process(target=launch_a2c, timeout=240)
def test_run_ppo():
""" Runs the ppo test """
run_in_separate_process(target=launch_ppo, timeout=240)
def test_run_reinforce():
""" Runs the reinforce test """
run_in_separate_process(target=launch_reinforce, timeout=240)
def test_run_adapter():
""" Runs the adapter test """
run_in_separate_process(target=launch_adapter, timeout=240)
| diplomacy/research | diplomacy_research/models/policy/token_based/v001_markovian_no_film/tests/test_model.py | Python | mit | 4,189 |
#!/usr/bin/env python
'''
File: dir-inventory.py
Author: Jonas Gorauskas [JGG]
Created: 2011-01-07 11:35:52
Modified: 2011-01-07 11:38:56
Description:
This script will recursively list the information about
files inside a root folder that is passed in as a parameter
History:
2011-01-07 11:38:56 - JGG
Initial version
'''
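# Example invocation (hypothetical paths):
# ./dir-inventory.py --root /data/projects --output /tmp/inventory.csv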
import os
import sys
import time
import stat
import argparse
def main():
'''Main entry point for the script'''
args = init()
outlst = ['FilePath,FileSizeBytes,CreatedDate,ModifiedDate'] # Column headers
flst = walk_tree(args.root_folder, visit_file, outlst) # Get list of file information
data = '\n'.join(['%s' % item for item in flst]) # Convert list to string data
save_to_file(data, args.output_file)
def init():
'''Parse the command line parameters'''
parser = argparse.ArgumentParser(description='Recursively lists the information about files inside a root folder that is passed in as a parameter')
parser.add_argument('-V', '--version', action='version',
version='%(prog)s 0.1.0 - written by Jonas Gorauskas')
parser.add_argument('-r', '--root', required=True,
help='The path to the folder to be inventoried',
metavar='FULL_PATH_TO_FOLDER', dest='root_folder')
parser.add_argument('-o', '--output', required=True,
help='The path to the output file that will hold the data',
metavar='FULL_PATH_TO_OUTPUT_FILE', dest='output_file')
return parser.parse_args()
def walk_tree(root_path, file_callback, out_list):
'''Recursively walks down a directory tree and gather information about
each file into a list'''
for item in os.listdir(root_path):
itempath = os.path.join(root_path, item)
itemmode = os.stat(itempath)[stat.ST_MODE]
if stat.S_ISDIR(itemmode): #this is a folder: recurse
walk_tree(itempath, file_callback, out_list)
elif stat.S_ISREG(itemmode): #this is a regular file
out_list.append(file_callback(itempath))
else: #this is something else: skip it
pass
return out_list
def visit_file(filepath):
'''Gathers information about a single file in comma delimited format'''
str_format = '"%s",%d,"%s","%s"'
fileinfo = os.stat(filepath)
created = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(fileinfo[stat.ST_CTIME]))
modified = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(fileinfo[stat.ST_MTIME]))
return str_format % (filepath, fileinfo[stat.ST_SIZE], created, modified)
def save_to_file(data, filepath):
'''Saves the data to file path'''
f = open(filepath, 'w')
f.write(data)
f.close()
if __name__ == '__main__':
main()
| gorauskas/Hacks | python/dir-inventory.py | Python | mit | 3,010 |
# LICENSE: AGPL 3.0
# Author: Name: Davide, Surname: Setti, email: [email protected]
# Copyright: Fondazione Bruno Kessler (www.fbk.eu), 2008-2010
from django.conf.urls.defaults import *
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
(r'^admin/(.*)', admin.site.root),
(r'^$', 'django.views.generic.simple.redirect_to', {'url': '/meetings/'}),
(r'^meetings/', include('nevede.meetings.urls')),
(r'^comments/', include('django.contrib.comments.urls')),
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^meetings/media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
| vad/django-nevede | nevede/urls.py | Python | agpl-3.0 | 1,032 |
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
first = head
        second = head.next if first is not None else None
        while first and second:
            first_val = first.val
            second_val = second.val
            if first_val == second_val:
                if second.next is None:
first.next = None
break
second = second.next
else:
first.next = second
second = second.next
first = first.next
return head
| scream7/leetcode | algorithms/python/83.py | Python | apache-2.0 | 799 |
# -*- coding: utf-8 -*-
# Third Party
import pytest
from django.core.urlresolvers import reverse
# AskCoding
from askcoding.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
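# View tests for the account profile pages: anonymous access, logged-in access, and profile update.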
@pytest.fixture
def user():
return UserFactory.create()
def test_get_user_profile_non_logged_in(client):
url = reverse('account_profile')
response = client.get(url)
assert response.status_code == 200
assert 'login' in response.content.decode('utf-8')
def test_get_user_profile_logged_in(client, user):
url = reverse('account_profile')
client.login(email=user.email, password='test')
response = client.get(url)
assert response.status_code == 200
assert user.name in response.content.decode('utf-8')
def test_get_user_profile_update(client, user):
url = reverse('account_profile_update')
client.login(email=user.email, password='test')
response = client.get(url)
assert response.status_code == 200
assert user.name in response.content.decode('utf-8')
def test_post_user_profile_update(client, user):
url = reverse('account_profile_update')
client.login(email=user.email, password='test')
new_name = 'something new'
data = {
'name': new_name,
}
response = client.post(url, data, follow=True)
assert response.status_code == 200
assert new_name in response.content.decode('utf-8')
| akarambir/askcoding | askcoding/users/tests/test_views.py | Python | mit | 1,389 |
# Using the following encoding: utf-8
# Python 2
from ConfigParser import ConfigParser, NoOptionError
# End Python 2
# Python 3
# from configparser import ConfigParser, NoOptionError
# End Python 3
import requests
import os
import shutil
import fnmatch
import time
import getpass
import itertools
from ltk import exceptions
from ltk.apicalls import ApiCalls
from ltk.utils import *
from ltk.managers import DocumentManager, FolderManager
from ltk.constants import CONF_DIR, CONF_FN, SYSTEM_FILE, ERROR_FN
import json
from ltk.logger import logger
from ltk.git_auto import Git_Auto
from tabulate import tabulate
class Action:
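    """Per-project state for the Lingotek CLI: configuration read from the project's
    conf file, the local document/folder databases, and an ApiCalls client."""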
def __init__(self, path, watch=False, timeout=60):
self.host = ''
self.access_token = ''
self.project_id = ''
self.project_name = ''
self.path = path
self.community_id = ''
self.workflow_id = '' # default workflow id; MT phase only
self.locale = ''
self.clone_option = 'on'
self.auto_format_option = ''
self.download_option = 'clone'
self.download_dir = None # directory where downloaded translation will be stored
self.watch_locales = set() # if specified, add these target locales to any files in the watch folder
self.git_autocommit = None
self.git_username = ''
self.git_password = ''
self.append_option = 'none'
self.locale_folders = {}
if not self._is_initialized():
raise exceptions.UninitializedError("This project is not initialized. Please run init command.")
self._initialize_self()
self.watch = watch
self.doc_manager = DocumentManager(self.path)
self.folder_manager = FolderManager(self.path)
self.timeout = timeout
self.api = ApiCalls(self.host, self.access_token, self.watch, self.timeout)
self.git_auto = Git_Auto(self.path)
self.error_file_name = os.path.join(self.path, CONF_DIR, ERROR_FN)
def _is_initialized(self):
actual_path = find_conf(self.path)
if not actual_path:
return False
self.path = os.path.join(actual_path, '')
if not is_initialized(self.path):
return False
return True
def _initialize_self(self):
config_file_name = os.path.join(self.path, CONF_DIR, CONF_FN)
conf_parser = ConfigParser()
conf_parser.read(config_file_name)
self.host = conf_parser.get('main', 'host')
self.access_token = conf_parser.get('main', 'access_token')
self.project_id = conf_parser.get('main', 'project_id')
self.community_id = conf_parser.get('main', 'community_id')
self.workflow_id = conf_parser.get('main', 'workflow_id')
self.locale = conf_parser.get('main', 'default_locale')
self.locale = self.locale.replace('_','-')
try:
if conf_parser.has_option('main', 'auto_format'):
self.auto_format_option = conf_parser.get('main', 'auto_format')
else:
self.update_config_file('auto_format', 'on', conf_parser, config_file_name, "")
if conf_parser.has_option('main', 'project_name'):
self.project_name = conf_parser.get('main', 'project_name')
if conf_parser.has_option('main', 'download_folder'):
self.download_dir = conf_parser.get('main', 'download_folder')
else:
self.download_dir = None
self.update_config_file('download_folder', json.dumps(self.download_dir), conf_parser, config_file_name, "")
if conf_parser.has_option('main', 'watch_locales'):
watch_locales = conf_parser.get('main', 'watch_locales')
if watch_locales:
self.watch_locales = set(watch_locales.split(','))
else:
# there are no watch locales, so set it to an empty set
self.watch_locales = set()
else:
self.watch_locales = set()
self.update_config_file('watch_locales', json.dumps(list(self.watch_locales)), conf_parser, config_file_name, "")
if conf_parser.has_option('main', 'locale_folders'):
self.locale_folders = json.loads(conf_parser.get('main', 'locale_folders'))
locale_folders = {}
#for key, value in self.locale_folders.items():
# key = key.replace('_', '-');
# locale_folders[key] = value
#self.locale_folders = locale_folders
else:
self.locale_folders = {}
self.update_config_file('locale_folders', json.dumps(self.locale_folders), conf_parser, config_file_name, "")
if conf_parser.has_option('main', 'download_option'):
self.download_option = conf_parser.get('main', 'download_option')
else:
self.download_option = 'clone'
self.update_config_file('download_option', self.download_option, conf_parser, config_file_name, "")
if conf_parser.has_option('main', 'clone_option'):
self.clone_option = conf_parser.get('main', 'clone_option')
else:
self.clone_option = 'on'
self.update_config_file('clone_option', self.clone_option, conf_parser, config_file_name, "")
if conf_parser.has_option('main', 'git_autocommit'):
self.git_autocommit = conf_parser.get('main', 'git_autocommit')
else:
self.git_autocommit = ''
self.update_config_file('git_autocommit', self.git_autocommit, conf_parser, config_file_name, "")
if conf_parser.has_option('main', 'git_username'):
self.git_username = conf_parser.get('main', 'git_username')
else:
self.git_username = ''
self.update_config_file('git_username', self.git_username, conf_parser, config_file_name, "")
if conf_parser.has_option('main', 'git_password'):
self.git_password = conf_parser.get('main', 'git_password')
else:
self.git_password = ''
self.update_config_file('git_password', self.git_password, conf_parser, config_file_name, "")
if conf_parser.has_option('main', 'append_option'):
self.append_option = conf_parser.get('main', 'append_option')
else:
self.append_option = 'none'
self.update_config_file('append_option', self.append_option, conf_parser, config_file_name, "")
except NoOptionError as e:
if not self.project_name:
self.api = ApiCalls(self.host, self.access_token)
project_info = self.api.get_project_info(self.community_id)
self.project_name = project_info[self.project_id]
config_file_name, conf_parser = self.init_config_file()
log_info = 'Updated project name'
self.update_config_file('project_name', self.project_name, conf_parser, config_file_name, log_info)
def _add_document(self, file_name, title, doc_id):
""" adds a document to db """
now = time.time()
# doc_id = json['properties']['id']
full_path = os.path.join(self.path, file_name)
last_modified = os.stat(full_path).st_mtime
self.doc_manager.add_document(title, now, doc_id, last_modified, now, file_name)
def _update_document(self, file_name):
""" updates a document in the db """
now = time.time()
file_path = os.path.join(self.path, file_name)
# sys_last_modified = os.stat(file_name).st_mtime
sys_last_modified = os.stat(file_path).st_mtime
entry = self.doc_manager.get_doc_by_prop('file_name', file_name)
doc_id = entry['id']
self.doc_manager.update_document('last_mod', now, doc_id)
self.doc_manager.update_document('sys_last_mod', sys_last_modified, doc_id)
# whenever a document is updated, it should have new translations
self.doc_manager.update_document('downloaded', [], doc_id)
def close(self):
self.doc_manager.close_db()
def open(self):
self.doc_manager.open_db()
def init_config_file(self):
config_file_name = os.path.join(self.path, CONF_DIR, CONF_FN)
conf_parser = ConfigParser()
conf_parser.read(config_file_name)
return config_file_name, conf_parser
def update_config_file(self, option, value, conf_parser, config_file_name, log_info):
try:
conf_parser.set('main', option, value)
with open(config_file_name, 'w') as new_file:
conf_parser.write(new_file)
self._initialize_self()
if (len(log_info)):
logger.info(log_info+"\n")
except IOError as e:
print(e.errno)
print(e)
def get_relative_path(self, path):
return get_relative_path(self.path, path)
def get_current_path(self, path):
cwd = os.getcwd()
if cwd in path:
path = path.replace(cwd,"")
return path
else:
cwd_relative_path = cwd.replace(self.path,"")
return path.replace(cwd_relative_path+os.sep,"")
def get_current_abs(self, path):
# print("orig path: "+str(path))
cwd = os.getcwd()
if cwd in path:
path = path.replace(cwd,"")
else:
# print("cwd: "+cwd)
# print("self.path: "+self.path)
cwd_relative_path = cwd.replace(self.path,"")
# print("cwd relative path: "+cwd_relative_path)
cwd_path = path.replace(cwd_relative_path+os.sep,"")
# print("cwd path: "+cwd_path)
path = cwd_path
# print("current path: "+path)
# print("abs path: "+os.path.abspath(path))
return os.path.abspath(path)
def norm_path(self, file_location):
# print("original path: "+str(file_location))
if file_location:
file_location = os.path.normpath(file_location)
# abspath=os.path.abspath(file_location)
# print("abspath: "+str(os.path.abspath(os.path.expanduser(file_location))))
# print("self.path: "+self.path)
# print("cwd: "+str(os.getcwd()))
norm_path = os.path.abspath(os.path.expanduser(file_location)).replace(self.path, '')
# print("normalized path: "+norm_path)
# print("joined path: "+str(os.path.join(self.path,file_location)))
# if file_location == ".." and self.path.rstrip('/') in norm_path:
# return norm_path.replace(self.path.rstrip('/'), '')
if file_location is not "." and ".." not in file_location and os.path.exists(os.path.join(self.path,file_location)):
# print("returning original path: "+str(file_location))
return file_location.replace(self.path, '')
elif ".." in file_location and file_location != "..":
# print("returning norm path: "+norm_path)
return norm_path.replace(self.path,'')
if not os.path.exists(os.path.join(self.path,norm_path)) and os.path.exists(os.path.join(self.path,file_location)):
# print("Starting path at project directory: "+file_location.replace(self.path, ''))
return os.path.abspath(os.path.expanduser(file_location.replace(self.path, ''))).replace(self.path, '')
elif file_location == "..":
return os.path.abspath(os.path.expanduser(file_location.replace(self.path, ''))).replace(self.path, '')
return norm_path
else:
return None
def get_docs_in_path(self, path):
files = get_files(path)
db_files = self.doc_manager.get_file_names()
docs = []
if files:
for file in files:
file_name = self.norm_path(file)
if file_name in db_files:
docs.append(self.doc_manager.get_doc_by_prop('file_name',file_name))
return docs
def get_doc_filenames_in_path(self, path):
files = get_files(path)
db_files = self.doc_manager.get_file_names()
docs = []
if files:
for file in files:
file_name = self.norm_path(file)
if file_name in db_files:
docs.append(file_name)
return docs
def get_doc_locales(self, doc_id, doc_name):
locales = []
response = self.api.document_translation_status(doc_id)
if response.status_code != 200:
if check_response(response) and response.json()['messages'] and 'No translations exist' in response.json()['messages'][0]:
return locales
if doc_name:
raise_error(response.json(), 'Failed to check target locales for document '+doc_name, True, doc_id)
else:
raise_error(response.json(), 'Failed to check target locales for document '+doc_id, True, doc_id)
try:
if 'entities' in response.json():
for entry in response.json()['entities']:
locales.append(entry['properties']['locale_code'])
except KeyError as e:
print("Error listing translations")
return
# return detailed_status
return locales
def is_locale_folder_taken(self, new_locale, path):
# Python 2
for locale, folder in self.locale_folders.iteritems():
# End Python 2
# Python 3
# for locale, folder in self.locale_folders.items():
# End Python 3
if path == folder and not locale == new_locale:
return locale
return False
def update_document_action(self, file_name, title=None, **kwargs):
try:
relative_path = self.norm_path(file_name)
entry = self.doc_manager.get_doc_by_prop('file_name', relative_path)
try:
document_id = entry['id']
except TypeError as e:
log_error(self.error_file_name, e)
logger.error("Document name specified for update doesn't exist: {0}".format(title))
return
if title:
response = self.api.document_update(document_id, file_name, title=title, **kwargs)
else:
response = self.api.document_update(document_id, file_name)
if response.status_code != 202:
raise_error(response.json(), "Failed to update document {0}".format(file_name), True)
self._update_document(relative_path)
return True
except Exception as e:
log_error(self.error_file_name, e)
if 'string indices must be integers' in str(e) or 'Expecting value: line 1 column 1' in str(e):
logger.error("Error connecting to Lingotek's TMS")
else:
logger.error("Error on updating document"+str(file_name)+": "+str(e))
def _target_action_db(self, to_delete, locales, document_id):
if to_delete:
curr_locales = self.doc_manager.get_doc_by_prop('id', document_id)['locales']
updated_locales = set(curr_locales) - set(locales)
self.doc_manager.update_document('locales', updated_locales, document_id)
else:
self.doc_manager.update_document('locales', list(locales), document_id)
def update_doc_locales(self, document_id):
try:
locale_map = self.import_locale_info(document_id)
locale_info = list(iter(locale_map))
except exceptions.RequestFailedError as e:
log_error(self.error_file_name, e)
locale_info = []
self.doc_manager.update_document('locales', locale_info, document_id)
def added_folder_of_file(self, file_path):
folders = self.folder_manager.get_file_names()
if not folders:
#print("not folders")
return
for folder in folders:
folder = os.path.join(self.path, folder)
if folder in file_path:
return folder
def get_new_name(self, file_name, curr_path):
i = 1
file_path = os.path.join(curr_path, file_name)
name, extension = os.path.splitext(file_name)
while os.path.isfile(file_path):
new_name = '{name}({i}){ext}'.format(name=name, i=i, ext=extension)
file_path = os.path.join(curr_path, new_name)
i += 1
return file_path
def import_locale_info(self, document_id, poll=False):
locale_progress = {}
response = self.api.document_translation_status(document_id)
if response.status_code != 200:
if poll or response.status_code == 404:
return {}
else:
# raise_error(response.json(), 'Failed to get locale details of document', True)
raise exceptions.RequestFailedError('Failed to get locale details of document')
try:
for entry in response.json()['entities']:
curr_locale = entry['properties']['locale_code']
curr_progress = int(entry['properties']['percent_complete'])
curr_locale = curr_locale.replace('-', '_')
locale_progress[curr_locale] = curr_progress
except KeyError:
pass
return locale_progress
def delete_local(self, title, document_id, message=None):
# print('local delete:', title, document_id)
if not title:
title = document_id
message = '{0} has been deleted locally'.format(title) if not message else message
try:
file_name = self.doc_manager.get_doc_by_prop('id', document_id)['file_name']
except TypeError:
logger.info('Document to remove not found in the local database')
return
try:
os.remove(os.path.join(self.path, file_name))
logger.info(message)
except OSError:
logger.info('Something went wrong trying to delete the local file')
def delete_local_translation(self, file_name):
try:
            if not file_name:
                logger.info('Please provide a valid file name')
                return
            os.remove(os.path.join(self.path, file_name))
            logger.info('{0} (local translation) has been deleted'.format(self.get_relative_path(file_name)))
        except OSError:
            logger.info('Something went wrong trying to delete the local translation')
def delete_local_path(self, path, message=None):
path = self.norm_path(path)
message = '{0} has been deleted locally.'.format(path) if not message else message
try:
os.remove(path)
logger.info(message)
except OSError:
logger.info('Something went wrong trying to delete the local file')
def raise_error(json, error_message, is_warning=False, doc_id=None, file_name=None):
try:
        if json:
            error = json['messages'][0]
            if file_name is not None and doc_id is not None:
                file_name = file_name.replace("Status of ", "")
                error = error.replace(doc_id, file_name+" ("+doc_id+")")
# Sometimes api returns vague errors like 'Unknown error'
if error == 'Unknown error':
error = error_message
if not is_warning:
raise exceptions.RequestFailedError(error)
# warnings.warn(error)
logger.error(error)
except (AttributeError, IndexError):
if not is_warning:
raise exceptions.RequestFailedError(error_message)
# warnings.warn(error_message)
logger.error(error_message)
def is_initialized(project_path):
ltk_path = os.path.join(project_path, CONF_DIR)
if os.path.isdir(ltk_path) and os.path.isfile(os.path.join(ltk_path, CONF_FN)) and \
os.stat(os.path.join(ltk_path, CONF_FN)).st_size:
return True
return False
def choice_mapper(info):
mapper = {}
import operator
#sorted_info = sorted(info.iteritems(), key=operator.itemgetter(1))
sorted_info = sorted(info.items(), key = operator.itemgetter(1))
index = 0
for entry in sorted_info:
if entry[0] and entry[1]:
mapper[index] = {entry[0]: entry[1]}
index += 1
table = []
for k,v in mapper.items():
try:
for values in v:
table.append({
"ID": k,
"Name": v[values],
"UUID": values
})
except UnicodeEncodeError:
continue
print(tabulate(table, headers="keys"), "\n")
return mapper
def find_conf(curr_path):
"""
check if the conf folder exists in current directory's parent directories
"""
if os.path.isdir(os.path.join(curr_path, CONF_DIR)):
return curr_path
elif curr_path == os.path.abspath(os.sep):
return None
else:
return find_conf(os.path.abspath(os.path.join(curr_path, os.pardir)))
def printResponseMessages(response):
for message in response.json()['messages']:
logger.info(message)
def get_files(patterns):
""" gets all files matching pattern from root
pattern supports any unix shell-style wildcards (not same as RE) """
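    # Illustrative: patterns are expanded via getRegexFiles(), matched directories are walked
    # recursively, and desktop.ini, Thumbs.db and ehthumbs.db files are skipped.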
cwd = os.getcwd()
if isinstance(patterns,str):
patterns = [patterns]
allPatterns = []
if isinstance(patterns,list) or isinstance(patterns,tuple):
for pattern in patterns:
basename = os.path.basename(pattern)
if basename and basename != "":
allPatterns.extend(getRegexFiles(pattern,cwd))
else:
allPatterns.append(pattern)
else:
basename = os.path.basename(patterns)
if basename and basename != "":
allPatterns.extend(getRegexFiles(patterns,cwd))
else:
allPatterns.append(patterns)
matched_files = []
# print("all patterns: "+str(allPatterns))
for pattern in allPatterns:
path = os.path.abspath(pattern)
# print("looking at path "+str(path))
# check if pattern contains subdirectory
if os.path.exists(path):
if os.path.isdir(path):
for root, subdirs, files in os.walk(path):
# split_path = root.split(os.sep)
# print("split_path: {0}".format(split_path))
for file in files:
if not (("desktop.ini" in file) or ('Thumbs.db' in file) or ('ehthumbs.db' in file)): # don't add desktop.ini, Thumbs.db, or ehthumbs.db files
matched_files.append(os.path.join(root, file))
else:
matched_files.append(path)
# else:
# logger.info("File not found: "+pattern)
# subdir_pat, fn_pat = os.path.split(pattern)
# if not subdir_pat:
# for path, subdirs, files in os.walk(root):
# for fn in fnmatch.filter(files, pattern):
# matched_files.append(os.path.join(path, fn))
# else:
# for path, subdirs, files in os.walk(root):
# # print os.path.split(path)
# # subdir = os.path.split(path)[1] # get current subdir
# search_root = os.path.join(root, '')
# subdir = path.replace(search_root, '')
# # print subdir, subdir_pat
# if fnmatch.fnmatch(subdir, subdir_pat):
# for fn in fnmatch.filter(files, fn_pat):
# matched_files.append(os.path.join(path, fn))
if len(matched_files) == 0:
return None
return matched_files
def getRegexFiles(pattern,path):
dir_name = os.path.dirname(pattern)
if dir_name:
path = os.path.join(path,dir_name)
pattern_name = os.path.basename(pattern)
# print("path: "+path)
# print("pattern: "+str(pattern))
matched_files = []
if pattern_name and not "*" in pattern:
return [pattern]
for path, subdirs, files in os.walk(path):
for fn in fnmatch.filter(files, pattern):
matched_files.append(os.path.join(path, fn))
# print("matched files: "+str(matched_files))
return matched_files
def log_id_names(json):
"""
logs the id and titles from a json object
"""
ids = []
titles = []
for entity in json['entities']:
ids.append(entity['properties']['id'])
titles.append(entity['properties']['title'])
return ids, titles
| Lingotek/translation-utility | python2/ltk/actions/action.py | Python | mit | 24,789 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import taggit.managers
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(help_text='Designates that this user has all permissions without explicitly assigning them.', default=False, verbose_name='superuser status')),
('first_name', models.CharField(max_length=30, blank=True, verbose_name='first name')),
('last_name', models.CharField(max_length=30, blank=True, verbose_name='last name')),
('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
('is_staff', models.BooleanField(help_text='Designates whether the user can log into this admin site.', default=False, verbose_name='staff status')),
('is_active', models.BooleanField(help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', default=True, verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('title', models.CharField(max_length=255, blank=True)),
('department', models.CharField(max_length=255, blank=True)),
('position', models.TextField(blank=True)),
('bio', models.TextField(blank=True)),
('research_interests', models.CharField(max_length=255, blank=True)),
('website', models.URLField(blank=True)),
('orcid', models.CharField(max_length=100, blank=True)),
('photo', models.ImageField(blank=True, upload_to='profile_photos')),
('groups', models.ManyToManyField(blank=True, related_query_name='user', to='auth.Group', help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', verbose_name='groups')),
],
options={
'swappable': 'AUTH_USER_MODEL',
'verbose_name_plural': 'users',
'verbose_name': 'user',
},
),
migrations.CreateModel(
name='Institution',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('name', models.CharField(max_length=255)),
('website', models.URLField()),
],
),
migrations.CreateModel(
name='UserLogin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('date_created', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='user',
name='institution',
field=models.ForeignKey(blank=True, null=True, to='accounts.Institution'),
),
migrations.AddField(
model_name='user',
name='tags_following',
field=taggit.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AddField(
model_name='user',
name='user_permissions',
field=models.ManyToManyField(blank=True, related_query_name='user', to='auth.Permission', help_text='Specific permissions for this user.', related_name='user_set', verbose_name='user permissions'),
),
]
| HMSBeagle1831/rapidscience | rlp/accounts/migrations/0001_initial.py | Python | mit | 4,298 |
# Copyright 2011-2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from datetime import datetime, timedelta, tzinfo
import re
try:
import pytz
except ImportError:
pytz = None
try:
from dateutil import parser
except ImportError:
parser = None
from touchdown.core import errors
REGEX_DELTA = re.compile(
    r'(\d+)\s?(m|minute|minutes|h|hour|hours|d|day|days|w|week|weeks)(?: ago)?'
)
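# Illustrative: parse_datetime('2 hours ago') or parse_datetime('90m') is resolved with
# REGEX_DELTA and UNITS; other formats fall back to dateutil.parser when it is installed.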
UNITS = {
'm': 60,
'h': 60 * 60,
'd': 60 * 60 * 24,
'w': 60 * 60 * 24 * 7,
}
if not pytz:
class UTC(tzinfo):
def __repr__(self):
return "<UTC>"
def utcoffset(self, value):
return timedelta(0)
def tzname(self, value):
return "UTC"
def dst(self, value):
return timedelta(0)
def localize(self, value):
            return value.replace(tzinfo=self)
utc = UTC()
else:
utc = pytz.utc
def now():
return datetime.utcnow().replace(tzinfo=utc)
def parse_datetime(value):
match = REGEX_DELTA.match(value)
if match:
amount, unit = match.groups()
return now() - timedelta(
seconds=int(amount) * UNITS[unit[0]],
)
if parser:
try:
return parser.parse(value)
except Exception:
raise errors.Error(
"Unable to parse {} as a date or time".format(value)
)
raise errors.Error(
"Unable to parse {} as a date or time".format(value)
)
def as_seconds(value):
return int(value.strftime("%s")) * 1000
| mitchellrj/touchdown | touchdown/core/datetime.py | Python | apache-2.0 | 2,101 |
from __future__ import print_function
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import io
import os
import sys
import contestcolld
here = os.path.abspath(os.path.dirname(__file__))
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
long_description = read('README.txt')
setup(
    name='contestcolld',
    version=contestcolld.__version__,
url='https://github.com/hgn/contestcolld',
license='Public Domain',
author='Hagen Paul Pfeifer',
install_requires=['Flask>=0.10.1'],
author_email='[email protected]',
description='Continuous Test Collector Daemon',
long_description=long_description,
packages=['contestcolld'],
include_package_data=True,
platforms='any',
classifiers = [
'Programming Language :: Python',
'Development Status :: 4 - Beta',
'Natural Language :: English',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]
)
| hgn/hippod | setup.py | Python | mit | 1,469 |
"""
For Django < 3.1, rely on django-jsonfield-backport for JSONField
functionality
https://github.com/laymonage/django-jsonfield-backport#installation
https://github.com/laymonage/django-jsonfield-backport#why-create-another-one
"""
try:
from django.db.models import JSONField # noqa
except ImportError:
from django_jsonfield_backport.models import JSONField # noqa
| pinax/pinax-eventlog | pinax/eventlog/compat.py | Python | mit | 379 |
print('local settings enabled')
DEBUG = True
UID_SALT = ''
CACHES = {}
CACHES['default'] = {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
INLINE_CSS = False
SESSION_COOKIE_SECURE = False
| decadecity/ct | caffeine_tracker/settings/local_example.py | Python | unlicense | 212 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import unicode_literals
import types
import unittest
from collections import Mapping
import mo_dots
from mo_collections.unique_index import UniqueIndex
from mo_dots import coalesce, literal_field, unwrap, wrap
from mo_future import text_type
from mo_future import zip_longest
from mo_logs import Log, Except, suppress_exception
from mo_logs.strings import expand_template
from mo_math import Math
class FuzzyTestCase(unittest.TestCase):
"""
COMPARE STRUCTURE AND NUMBERS!
ONLY THE ATTRIBUTES IN THE expected STRUCTURE ARE TESTED TO EXIST
EXTRA ATTRIBUTES ARE IGNORED.
NUMBERS ARE MATCHED BY ...
* places (UP TO GIVEN SIGNIFICANT DIGITS)
* digits (UP TO GIVEN DECIMAL PLACES, WITH NEGATIVE MEANING LEFT-OF-UNITS)
* delta (MAXIMUM ABSOLUTE DIFFERENCE FROM expected)
"""
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.default_places=15
def set_default_places(self, places):
"""
WHEN COMPARING float, HOW MANY DIGITS ARE SIGNIFICANT BY DEFAULT
"""
self.default_places=places
def assertAlmostEqual(self, test_value, expected, msg=None, digits=None, places=None, delta=None):
if delta or digits:
assertAlmostEqual(test_value, expected, msg=msg, digits=digits, places=places, delta=delta)
else:
assertAlmostEqual(test_value, expected, msg=msg, digits=digits, places=coalesce(places, self.default_places), delta=delta)
def assertEqual(self, test_value, expected, msg=None, digits=None, places=None, delta=None):
self.assertAlmostEqual(test_value, expected, msg=msg, digits=digits, places=places, delta=delta)
def assertRaises(self, problem, function, *args, **kwargs):
try:
function(*args, **kwargs)
except Exception as e:
f = Except.wrap(e)
if isinstance(problem, text_type):
if problem in f:
return
Log.error(
"expecting an exception returning {{problem|quote}} got something else instead",
problem=problem,
cause=f
)
elif not isinstance(f, problem) and not isinstance(e, problem):
Log.error("expecting an exception of type {{type}} to be raised", type=problem)
else:
return
Log.error("Expecting an exception to be raised")
def assertAlmostEqual(test, expected, digits=None, places=None, msg=None, delta=None):
show_detail = True
test = unwrap(test)
expected = unwrap(expected)
try:
if test is None and expected is None:
return
elif test is expected:
return
elif isinstance(expected, text_type):
assertAlmostEqualValue(test, expected, msg=msg, digits=digits, places=places, delta=delta)
elif isinstance(test, UniqueIndex):
if test ^ expected:
Log.error("Sets do not match")
elif isinstance(expected, Mapping) and isinstance(test, Mapping):
for k, v2 in unwrap(expected).items():
v1 = test.get(k)
assertAlmostEqual(v1, v2, msg=msg, digits=digits, places=places, delta=delta)
elif isinstance(expected, Mapping):
for k, v2 in expected.items():
if isinstance(k, text_type):
v1 = mo_dots.get_attr(test, literal_field(k))
else:
v1 = test[k]
assertAlmostEqual(v1, v2, msg=msg, digits=digits, places=places, delta=delta)
elif isinstance(test, (set, list)) and isinstance(expected, set):
test = set(wrap(t) for t in test)
if len(test) != len(expected):
Log.error(
"Sets do not match, element count different:\n{{test|json|indent}}\nexpecting{{expectedtest|json|indent}}",
test=test,
expected=expected
)
for e in expected:
for t in test:
try:
assertAlmostEqual(t, e, msg=msg, digits=digits, places=places, delta=delta)
break
except Exception as _:
pass
else:
Log.error("Sets do not match. {{value|json}} not found in {{test|json}}", value=e, test=test)
elif isinstance(expected, types.FunctionType):
return expected(test)
elif hasattr(test, "__iter__") and hasattr(expected, "__iter__"):
if test == None and not expected:
return
if expected == None:
expected = [] # REPRESENT NOTHING
for a, b in zip_longest(test, expected):
assertAlmostEqual(a, b, msg=msg, digits=digits, places=places, delta=delta)
else:
assertAlmostEqualValue(test, expected, msg=msg, digits=digits, places=places, delta=delta)
except Exception as e:
Log.error(
"{{test|json|limit(10000)}} does not match expected {{expected|json|limit(10000)}}",
test=test if show_detail else "[can not show]",
expected=expected if show_detail else "[can not show]",
cause=e
)
def assertAlmostEqualValue(test, expected, digits=None, places=None, msg=None, delta=None):
"""
Snagged from unittest/case.py, then modified (Aug2014)
"""
if expected.__class__.__name__ == "NullOp":
if test == None:
return
else:
raise AssertionError(expand_template("{{test}} != {{expected}}", locals()))
if expected == None: # None has no expectations
return
if test == expected:
# shortcut
return
if not Math.is_number(expected):
# SOME SPECIAL CASES, EXPECTING EMPTY CONTAINERS IS THE SAME AS EXPECTING NULL
if isinstance(expected, list) and len(expected) == 0 and test == None:
return
if isinstance(expected, Mapping) and not expected.keys() and test == None:
return
if test != expected:
raise AssertionError(expand_template("{{test}} != {{expected}}", locals()))
return
num_param = 0
if digits != None:
num_param += 1
if places != None:
num_param += 1
if delta != None:
num_param += 1
if num_param>1:
raise TypeError("specify only one of digits, places or delta")
if digits is not None:
with suppress_exception:
diff = Math.log10(abs(test-expected))
if diff < digits:
return
standardMsg = expand_template("{{test}} != {{expected}} within {{digits}} decimal places", locals())
elif delta is not None:
if abs(test - expected) <= delta:
return
standardMsg = expand_template("{{test}} != {{expected}} within {{delta}} delta", locals())
else:
if places is None:
places = 15
with suppress_exception:
diff = Math.log10(abs(test-expected))
if diff < Math.ceiling(Math.log10(abs(test)))-places:
return
standardMsg = expand_template("{{test|json}} != {{expected|json}} within {{places}} places", locals())
raise AssertionError(coalesce(msg, "") + ": (" + standardMsg + ")")
| klahnakoski/JsonSchemaToMarkdown | vendor/mo_testing/fuzzytestcase.py | Python | mpl-2.0 | 7,712 |
# Generated by Django 2.2.24 on 2021-09-28 11:52
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('astrobin', '0122_gear_migration_flag_reviewer_decision'),
]
operations = [
migrations.AlterField(
model_name='gear',
name='migration_content_type',
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to='contenttypes.ContentType'
),
),
migrations.AlterField(
model_name='gear',
name='migration_flag_reviewer_decision',
field=models.CharField(
blank=True,
choices=[
('APPROVED', 'Approved'),
('REJECTED_INCORRECT_STRATEGY', 'Rejected: incorrect migration strategy'),
('REJECTED_WRONG_MIGRATION_TARGET', 'Rejected: wrong migration target'),
('REJECTED_BAD_MIGRATION_TARGET', 'Rejected: bad migration target'),
('REJECTED_OTHER', 'Rejected: other')
],
max_length=32,
null=True
),
),
]
| astrobin/astrobin | astrobin/migrations/0123_rename_accepted_to_approved_for_consistency.py | Python | agpl-3.0 | 1,305 |
#! /usr/bin/env python
#
# test_errors.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for error handling
"""
import unittest
import nest
import sys
class ErrorTestCase(unittest.TestCase):
"""Tests if errors are handled correctly"""
def test_Raise(self):
"""Error raising"""
nest.ResetKernel()
try:
raise nest.NESTError('test')
self.fail('an error should have risen!') # should not be reached
except nest.NESTError:
info = sys.exc_info()[1]
if not "test" in info.__str__():
self.fail('could not pass error message to NEST!')
# another error has been thrown, this is wrong
except:
self.fail('wrong error has been thrown')
def test_StackUnderFlow(self):
"""Stack underflow"""
nest.ResetKernel()
try:
nest.sr('clear ;')
self.fail('an error should have risen!') # should not be reached
except nest.NESTError:
info = sys.exc_info()[1]
if not "StackUnderflow" in info.__str__():
self.fail('wrong error message')
# another error has been thrown, this is wrong
except:
self.fail('wrong error has been thrown')
def test_DivisionByZero(self):
"""Division by zero"""
nest.ResetKernel()
try:
nest.sr('1 0 div')
self.fail('an error should have risen!') # should not be reached
except nest.NESTError:
info = sys.exc_info()[1]
if not "DivisionByZero" in info.__str__():
self.fail('wrong error message')
# another error has been thrown, this is wrong
except:
self.fail('wrong error has been thrown')
def test_UnknownNode(self):
"""Unknown node"""
nest.ResetKernel()
try:
nest.Connect([99],[99])
self.fail('an error should have risen!') # should not be reached
except nest.NESTError:
info = sys.exc_info()[1]
if not "UnknownNode" in info.__str__():
self.fail('wrong error message')
# another error has been thrown, this is wrong
except:
self.fail('wrong error has been thrown')
def test_UnknownModel(self):
"""Unknown model name"""
nest.ResetKernel()
try:
nest.Create(-1)
self.fail('an error should have risen!') # should not be reached
except nest.NESTError:
info = sys.exc_info()[1]
if not "UnknownModelName" in info.__str__():
self.fail('wrong error message')
# another error has been thrown, this is wrong
except:
self.fail('wrong error has been thrown')
def suite():
suite = unittest.makeSuite(ErrorTestCase,'test')
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
| gewaltig/cython-neuron | pynest/nest/tests/test_errors.py | Python | gpl-2.0 | 3,727 |
from django.conf.urls import patterns, include, url
base_urlpatterns = patterns(
'',
url(r'^$', 'status.views.home', name='home'),
url(r'^ops/$', 'ops.views.ops', name='ops'),
url(r'^osd/(\d+)/$', 'status.views.osd_details', name='osd_details'),
url(r'^activity/$', 'status.views.activity', name='activity'),
url(r'^user/(.+?)(/.+?)?(/.+?)?/$', 'ops.views.user_custom',
name='user_custom'),
url(r'^api/$', "status.views.api", name="api"),
)
urlpatterns = patterns('',
url(r'krakendash/', include(base_urlpatterns)),
)
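# Illustrative note (not part of the original source): with the include() above,
# the named views are served under the 'krakendash/' prefix, e.g. the 'ops' view
# resolves to /krakendash/ops/ and the 'api' view to /krakendash/api/.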
| krakendash/krakendash | kraken/urls.py | Python | bsd-3-clause | 560 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from container.utils.visibility import getLogger
logger = getLogger(__name__)
from container import host_only, conductor_only
CAPABILITIES = dict(
BUILD='building container images',
BUILD_CONDUCTOR='building the Conductor image',
DEPLOY='pushing and orchestrating containers remotely',
IMPORT='importing as Ansible Container project',
LOGIN='authenticate with registry',
PUSH='push images to registry',
RUN='orchestrating containers locally',
)
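# Illustrative note (not part of the original source): a concrete engine
# advertises what it supports by flipping the matching CAP_* attributes on its
# BaseEngine subclass, e.g. CAP_BUILD = True pairs with the 'building container
# images' capability above and CAP_RUN = True with 'orchestrating containers
# locally'.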
class BaseEngine(object):
"""
Interface class for implementations of various container engine integrations
into Ansible Container.
"""
# Capabilities of engine implementations
CAP_BUILD_CONDUCTOR = False
CAP_BUILD = False
CAP_DEPLOY = False
CAP_IMPORT = False
CAP_INSTALL = False
CAP_LOGIN = False
CAP_PUSH = False
CAP_RUN = False
CAP_VERSION = False
def __init__(self, project_name, services, debug=False, selinux=True, devel=False, **kwargs):
self.project_name = project_name
self.services = services
self.debug = debug
self.devel = devel
self.selinux = selinux
self.volumes = kwargs.pop('volume_data', None)
@property
def display_name(self):
return __name__.split('.')[-2].capitalize()
@property
def ansible_build_args(self):
"""Additional commandline arguments necessary for ansible-playbook runs during build"""
raise NotImplementedError()
@property
def ansible_orchestrate_args(self):
"""Additional commandline arguments necessary for ansible-playbook runs during orchestrate"""
raise NotImplementedError()
@property
def ansible_exec_path(self):
return u'ansible-playbook'
@property
def python_interpreter_path(self):
return u'/_usr/bin/python'
@property
def default_registry_url(self):
"""Default registry for pushing images"""
raise NotImplementedError()
@property
def default_registry_name(self):
"""Default registry for pushing images"""
raise NotImplementedError()
@property
def registry_name(self):
"""Name of the default registry for pushing images"""
raise NotImplementedError()
@property
def auth_config_path(self):
"""Path to config file where the engine stores registry authentication"""
raise NotImplementedError()
@host_only
def print_version_info(self):
raise NotImplementedError()
@conductor_only
def run_container(self,
image_id,
service_name,
**kwargs):
"""Run a particular container. The kwargs argument contains individual
parameter overrides from the service definition."""
raise NotImplementedError()
@host_only
def run_conductor(self, command, config, base_path, params, engine_name=None, volumes=None):
raise NotImplementedError()
def await_conductor_command(self, command, config, base_path, params, save_container=False):
raise NotImplementedError()
def service_is_running(self, service):
raise NotImplementedError()
def service_exit_code(self, service):
raise NotImplementedError()
def stop_container(self, container_id, forcefully=False):
raise NotImplementedError()
def restart_all_containers(self):
raise NotImplementedError()
def inspect_container(self, container_id):
raise NotImplementedError()
def delete_container(self, container_id, remove_volumes=False):
raise NotImplementedError()
def get_container_name_for_service(self, service_name):
raise NotImplementedError()
def get_container_id_for_service(self, service_name):
raise NotImplementedError()
def get_image_id_by_fingerprint(self, fingerprint):
raise NotImplementedError()
def get_image_id_by_tag(self, tag):
raise NotImplementedError()
def get_latest_image_id_for_service(self, service_name):
raise NotImplementedError()
def get_image_name_for_service(self, service_name):
raise NotImplementedError()
@conductor_only
def commit_role_as_layer(self,
container_id,
service_name,
fingerprint,
metadata,
with_name=False):
raise NotImplementedError()
def tag_image_as_latest(self, service_name, image_id):
raise NotImplementedError()
@conductor_only
def generate_orchestration_playbook(self, url=None, namespace=None, local_images=True):
"""
Generate an Ansible playbook to orchestrate services.
:param url: registry URL where images will be pulled from
:param namespace: registry namespace
:param local_images: bypass pulling images, and use local copies
:return: playbook dict
"""
raise NotImplementedError()
@conductor_only
def push(self, image_id, service_name, **kwargs):
"""
Push an image to a registry.
"""
raise NotImplementedError()
@host_only
def build_conductor_image(self, base_path, base_image, cache=True):
raise NotImplementedError()
def get_runtime_volume_id(self, mount_point):
"""Get the volume ID for the portable python runtime."""
raise NotImplementedError()
@host_only
def import_project(self, base_path, import_from, bundle_files=False, **kwargs):
raise NotImplementedError()
@conductor_only
def login(self, username, password, email, url, config_path):
"""
Authenticate with a registry, and update the engine's config file. Otherwise,
verify there is an existing authentication record within the config file for
the given url. Returns a username.
"""
raise NotImplementedError()
@staticmethod
@conductor_only
def get_registry_username(registry_url, config_path):
"""
        Read authentication data stored at config_path for the registry url, and
return the username
"""
raise NotImplementedError()
@conductor_only
def pre_deployment_setup(self, **kwargs):
"""
Perform any setup tasks required prior to writing the Ansible playbook.
return None
"""
        raise NotImplementedError()
| mhumeSF/ansible-container | container/engine.py | Python | lgpl-3.0 | 6,544
"""
Provides small event framework
"""
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
log = logging.getLogger('event')
_events = {}
class Event(object):
"""Represents one registered event."""
def __init__(self, name, func, priority=128):
self.name = name
self.func = func
self.priority = priority
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def __eq__(self, other):
return self.priority == other.priority
def __lt__(self, other):
return self.priority < other.priority
def __gt__(self, other):
return self.priority > other.priority
def __str__(self):
return '<Event(name=%s,func=%s,priority=%s)>' % (self.name, self.func.__name__, self.priority)
__repr__ = __str__
def __hash__(self):
return hash((self.name, self.func, self.priority))
def event(name, priority=128):
"""Register event to function with a decorator"""
def decorator(func):
add_event_handler(name, func, priority)
return func
return decorator
def get_events(name):
"""
:param String name: event name
:return: List of :class:`Event` for *name* ordered by priority
"""
if name not in _events:
raise KeyError('No such event %s' % name)
_events[name].sort(reverse=True)
return _events[name]
def add_event_handler(name, func, priority=128):
"""
:param string name: Event name
:param function func: Function that acts as event handler
:param priority: Priority for this hook
:return: Event created
:rtype: Event
:raises Exception: If *func* is already registered in an event
"""
events = _events.setdefault(name, [])
for event in events:
if event.func == func:
raise ValueError('%s has already been registered as event listener under name %s' % (func.__name__, name))
log.trace('registered function %s to event %s' % (func.__name__, name))
event = Event(name, func, priority)
events.append(event)
return event
def remove_event_handlers(name):
"""Removes all handlers for given event `name`."""
_events.pop(name, None)
def remove_event_handler(name, func):
"""Remove `func` from the handlers for event `name`."""
for e in list(_events.get(name, [])):
if e.func is func:
_events[name].remove(e)
def fire_event(name, *args, **kwargs):
"""
Trigger an event with *name*. If event is not hooked by anything nothing happens. If a function that hooks an event
returns a value, it will replace the first argument when calling next function.
:param name: Name of event to be called
:param args: List of arguments passed to handler function
:param kwargs: Key Value arguments passed to handler function
"""
if name in _events:
for event in get_events(name):
result = event(*args, **kwargs)
if result is not None:
args = (result,) + args[1:]
return args and args[0]
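# --- Illustrative usage sketch (not part of the original module) ---
# Handlers register against an event name and run in priority order (highest
# first); a handler's non-None return value replaces the first argument passed
# to the next handler, and fire_event() returns the final value.
if __name__ == '__main__':
    @event('example.greet', priority=200)
    def shout(text):
        return text.upper()
    @event('example.greet', priority=100)
    def ignore(text):
        return None
    assert fire_event('example.greet', 'hello') == 'HELLO'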
| jawilson/Flexget | flexget/event.py | Python | mit | 3,149 |
#!/usr/bin/env python
"""
Set to Complete all Import project forms that are referenced from Data Entry.
Technically, only Data Entry forms that have the completion status of
Unverified or Complete are considered. Typically, though, the import process
would set the Entry form to Unverified, so it would take explicit human action
to set the Entry form to Incomplete.
"""
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
from __future__ import print_function
from builtins import range
import os
import re
import sys
import datetime
import argparse
import pandas
import redcap
import requests
import hashlib
import sibispy
from sibispy import sibislogger as slog
from sibispy import cli
#
# Variables
#
date_format_ymd = '%Y-%m-%d'
# List of forms imported from the laptops
all_forms = {
# Forms for Arm 1: Standard Protocol
'dd100': 'delayed_discounting_100',
'dd1000': 'delayed_discounting_1000',
'pasat': 'paced_auditory_serial_addition_test_pasat',
'stroop': 'stroop',
'ssaga_youth': 'ssaga_youth',
'ssaga_parent': 'ssaga_parent',
'youthreport1': 'youth_report_1',
'youthreport1b': 'youth_report_1b',
'youthreport2': 'youth_report_2',
'parentreport': 'parent_report',
'mrireport': 'mri_report',
'lssaga1_youth': 'limesurvey_ssaga_part_1_youth',
'lssaga2_youth': 'limesurvey_ssaga_part_2_youth',
'lssaga3_youth': 'limesurvey_ssaga_part_3_youth',
'lssaga4_youth': 'limesurvey_ssaga_part_4_youth',
'lssaga1_parent': 'limesurvey_ssaga_part_1_parent',
'lssaga2_parent': 'limesurvey_ssaga_part_2_parent',
'lssaga3_parent': 'limesurvey_ssaga_part_3_parent',
'lssaga4_parent': 'limesurvey_ssaga_part_4_parent',
'plus': 'participant_last_use_summary'}
# Preferred format for cli.add_form_param:
all_forms_tuple = [(k, v) for k, v in all_forms.items()]
# Because of all of the issues currently not included:
# 'plus': 'participant_last_use_summary'}
def batch(iterable, n=1):
"""
For batch processing of records
:param iterable:
:param n: batch size
:return: generator
"""
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
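# Illustrative example (not part of the original script): batch(['a', 'b', 'c'], 2)
# yields ['a', 'b'] and then ['c'], which is how record IDs are chunked for
# upload further below.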
#
# MAIN
#
# Setup command line parser
parser = argparse.ArgumentParser(description="Set status of a specific form to complete in entry project and if their is a reference to a form in import project than sets it to complete too",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-v", "--verbose",
help="Verbose operation",
action="store_true")
form_subgroup = parser.add_mutually_exclusive_group(required=True)
form_subgroup.add_argument("-a", "--all-forms",
help="Update all forms of arm 1",
action="store_true")
cli.add_form_param(form_subgroup, eligible_forms=all_forms_tuple)
cli.add_subject_param(parser, dest="study_id")
cli.add_event_param(parser, accepted_regex=r'^(baseline|\dy)$', template='{}')
args = parser.parse_args()
if len(args.event) > 1:
    print("Currently only handling a single event; picking {}"
          .format(args.event[0]))
if len(args.event) > 0:
    args.event = args.event[0]
slog.init_log(args.verbose, None,'change_status_of_complete_field', 'change_status', None)
session = sibispy.Session()
if not session.configure():
if args.verbose:
print("Error: session configure file was not found")
sys.exit()
forms = None
if args.forms:
forms = dict()
for f in args.forms:
if f in list(all_forms.keys()):
forms[f] = all_forms[f]
elif f in list(all_forms.values()):
lookup = [k for (k, v) in all_forms.items() if v == f]
forms[lookup[0]] = f
else:
print("WARNING: no form with name or prefix '%s' defined.\n" % f)
elif args.all_forms:
forms = all_forms
if forms is None:
print("Please define forms to run this script for")
sys.exit(1)
if args.verbose:
print("Processing the following forms:\n\t", '\n\t'.join( sorted(forms.values())))
form_prefixes = list(forms.keys())
form_names = list(forms.values())
# Open connection with REDCap server - Import Project
import_project = session.connect_server('import_laptops', True)
if not import_project :
if args.verbose:
print("Error: Could not connect to Redcap for Import Project")
sys.exit()
# Open connection with REDCap server - Data Entry
redcap_project = session.connect_server('data_entry', True)
if not redcap_project :
if args.verbose:
print("Error: Could not connect to Redcap for Data Entry")
sys.exit()
form_event_mapping = redcap_project.export_fem(format='df')
fem_form_key = session.get_redcap_form_key()
#
# MAIN LOOP
#
for form_prefix, form_name in forms.items():
print("Processing form",form_prefix,"/",form_name)
complete_label = '%s_complete' % form_name
record_label = '%s_record_id' % form_prefix
# Select the events that actually have this form (first, to handle summary forms,
# figure out what actual form the "FORM_complete" field is in)
try:
summary_form_name = [ field['form_name'] for field in redcap_project.metadata if field['field_name'] == complete_label ][0]
except:
# If the above failed (due to empty list, presumably), then this is not a
# hierarchical form and we should just use the given form name
summary_form_name = form_name
event_mapping_tmp = form_event_mapping[form_event_mapping[fem_form_key] == summary_form_name ]['unique_event_name']
event_mapping = event_mapping_tmp[event_mapping_tmp.str.startswith(args.event, na=False)].tolist()
if len(event_mapping) == 0 :
print("ERROR: Event name starting with '"+ args.event + "' for '" + form_name + "' could not be found !")
continue
fields_list = [complete_label,record_label,'visit_ignore']
entry_records = session.redcap_export_records_from_api(time_label= None, api_type = 'data_entry', fields = fields_list, format='df')
if args.study_id :
entry_records = entry_records[entry_records.index.map( lambda key: key[0] in args.study_id) ]
entry_records = entry_records[entry_records.index.map( lambda key: key[1] in event_mapping) ]
if entry_records.empty :
print("No records could be found for form {}; onto next form"
.format(form_name))
continue
# print entry_records.columns
entry_records = entry_records[entry_records['visit_ignore___yes'] != 1 ]
# drop all those where status of complete label is not defined
entry_records = entry_records.dropna(axis=0,subset=[complete_label])
# currently only those that are in unverified status but not complete
# print entry_records[entry_records[complete_label] == 0 ]
# sys.exit()
if record_label in entry_records.columns :
# check all links of unverivied or complete records out
entry_records_unv_or_comp = entry_records[entry_records[complete_label] > 0 ]
# drop any that do not have import record defined
import_records = entry_records_unv_or_comp.dropna(axis=0,subset=[record_label])
if not import_records.empty :
import_complete_records = session.redcap_export_records_from_api(time_label= None, api_type = 'import_laptops', fields = [complete_label], format='df', records=import_records[record_label].tolist())
# for all records that the complete label is not 2 turn it into 2
upload_id_list = import_complete_records[import_complete_records[complete_label] < 2].index
upload_id_len = len(upload_id_list)
if upload_id_len :
print("Number of Records (Import project)", upload_id_len)
# Upload in batches as if one problem is in one record all of them are not uploaded in the batch
for upload_id_batch in batch(upload_id_list,50):
upload_records=list()
for import_id in upload_id_batch :
upload_records.append({'record_id': import_id, '%s_complete' % form_name: '2'})
# import_response = session.redcap_import_record_to_api([{'record_id': import_id, '%s_complete' % form_name: '2'}], 'import_laptops', import_id)
if len(upload_records) :
import_response = session.redcap_import_record_to_api(upload_records, 'import_laptops', '')
# import_response = "TEST"
print("Upload Records (Import project):", upload_id_batch)
print("REDCAP response:", import_response)
else :
print("Warning: '" + record_label + "' does not exist in form '" + form_name + "'")
# Now set entry record to complete
entry_records_not_complete = entry_records[entry_records[complete_label] ==1 ]
# check if their is an import record associated with it
if entry_records_not_complete.empty :
print("Entry project: No entries for form '" + form_name + "' found that were unverified")
continue
print("Number of Records", len(entry_records_not_complete))
print("Upload Records (Entry project):", entry_records_not_complete.index)
upload_records=list()
for key in entry_records_not_complete.index :
upload_records.append({'study_id': key[0], 'redcap_event_name': key[1], '%s_complete' % form_name: '2'})
#to_redcap(session,form_name, '','','', {'study_id': key[0], 'redcap_event_name': key[1], '%s_complete' % form_name: '2'})
import_response = session.redcap_import_record_to_api(upload_records, 'data_entry', '')
print("REDCAP response:", import_response)
| sibis-platform/sibispy | cmds/change_complete_field_in_entry_and_import.py | Python | bsd-3-clause | 10,126 |
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Spotify AB
import fcntl
import select
import subprocess
import sys
import os
class Dummy(object):
def __getattr__(self, name):
def f(*al, **kw):
print ('\033[32m%s\033[0m' % name)
f.__name__ = name
return f
def a(self):
pass
def b(self):
pass
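# Illustrative note (not part of the original source): thanks to __getattr__,
# any attribute looked up on Dummy resolves to a function that simply prints
# its own name in green, e.g. Dummy().step_begin() prints 'step_begin'; this
# lets the dummy actor absorb whatever vertex and edge names the model uses.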
def unblock(fd):
    # make the given file descriptor non-blocking (used on the child's stdout/stderr)
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
class Interactor(object):
def spawn(self):
cmd = "%s graphwalker/cli.py" % sys.executable
cmd += " --planner=Interactive"
cmd += " --reporter=Print"
cmd += " --stopcond=Never"
cmd += " graphwalker/test/examples/ab.graphml"
cmd += " graphwalker.test.interactor.Dummy"
self.log('cmd: %r' % cmd)
self.sub = subprocess.Popen(
cmd.split(),
executable=sys.executable,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=os.path.join(os.path.dirname(__file__), '../..'))
unblock(self.sub.stdout.fileno())
unblock(self.sub.stderr.fileno())
def log(self, what):
r = self.context.get('reporter')
if r:
r.log('test', what)
def setup(self, context):
self.context = context
self.timeout = context.get('timeout', 0.01)
self.patience = int(context.get('wait', 2.0) / self.timeout)
self.last_out = ''
self.last_err = ''
self.spawn()
def push(self, data):
self.look()
self.last_out, self.last_err = '', ''
self.sub.stdin.write(data)
def look(self):
r, w, l = select.select(
[self.sub.stdout, self.sub.stderr], [], [],
self.timeout)
if self.sub.stdout in r:
self.last_out += self.sub.stdout.read()
if self.sub.stderr in r:
self.last_err += self.sub.stderr.read()
return self.last_out, self.last_err
def expect(self, expectation):
def show():
if out:
print ('out' + ' -' * 30)
print ('\n ' + out.strip().replace('\n', '\n '))
if err:
print ('err' + ' -' * 30)
print ('\n ' + err.strip().replace('\n', '\n '))
if out or err:
print ('- -' + ' -' * 30)
if type(expectation) is str:
x = expectation
expectation = lambda out, err: x in out or x in err
for i in range(self.patience):
out, err = self.look()
if expectation(out, err):
show()
return True
else:
show()
raise AssertionError("Did not find expected output")
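    # Illustrative note (not part of the original source): expect() accepts
    # either a plain substring, e.g. self.expect('huh?'), or a predicate over
    # (out, err), e.g. self.expect(lambda out, err: err.endswith('(Pdb) ')).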
def v_startup(self):
self.expect(
lambda out, err: (
out.startswith('starting ab-') and
'[stdout]\x1b[32msetup\x1b[0m\n' in out and
err.startswith('== Currently at: Start')))
def v_debugger(self):
self.expect(lambda out, err: err.endswith('(Pdb) '))
def v_vertex_a(self):
self.expect(lambda out, err: '== Currently at: a [' in err)
def v_vertex_b(self):
self.expect(lambda out, err: '== Currently at: b [' in err)
def v_break_set_a(self):
self.push('b\n')
self.expect(
lambda out, err: (
'breakpoint' in err and
'yes' in err and
'graphwalker/test/interactor.py:17' in err))
def v_break_set_b(self):
self.push('b\n')
self.expect(
lambda out, err: (
'breakpoint' in err and
'yes' in err and
'graphwalker/test/interactor.py:20' in err))
def v_actor_debugger(self):
self.push('self\n')
self.expect(
lambda out, err: (
'<graphwalker.test.interactor.Dummy object at 0x' in err and
err.endswith('(Pdb) ')))
def e_enter(self):
self.push('\n')
self.expect('huh?')
def e_follow_0(self):
self.push('0\n')
self.expect(
lambda out, err: (
'Begin step' in out and
'[stdout]\x1b[32mstep_begin\x1b[0m\n' in out and
'\nPassed step' in out))
def e_follow_9(self):
self.push('9\n')
self.expect('huh?')
def e_debug(self):
self.push('d\n')
def e_continue(self):
self.push('c\n')
def e_bug(self):
self.push('hex(3735928559)\n')
self.expect('deadbeef')
def e_bug_set_a(self):
self.push('self.vert=[v for v in self.g.V.values() if v[1]=="a"][0]\n')
def e_bug_set_b(self):
self.push('self.vert=[v for v in self.g.V.values() if v[1]=="b"][0]\n')
def e_bug_break_a(self):
self.push('import %s as x\n' % __name__)
self.push('tbreak x.Dummy.a\n')
def e_bug_break_b(self):
self.push('import %s as x\n' % __name__)
self.push('tbreak x.Dummy.b\n')
def e_jmp_a(self):
self.push('j a\n')
def e_jmp_b(self):
self.push('j b\n')
def e_goto_a(self):
self.push('g a\n')
def e_goto_b(self):
self.push('g b\n')
| bartvanherck/python-graphwalker | graphwalker/test/interactor.py | Python | apache-2.0 | 5,356 |
import click
import os
import os.path
import ntpath
import serial
import sys
import prosflasher.ports
import prosflasher.upload
import prosconfig
from proscli.utils import default_cfg, AliasGroup
from proscli.utils import get_version
@click.group(cls=AliasGroup)
def flasher_cli():
pass
@flasher_cli.command(short_help='Upload binaries to the microcontroller.', aliases=['upload'])
@click.option('-sfs/-dfs', '--save-file-system/--delete-file-system', is_flag=True, default=False,
help='Specify whether or not to save the file system when writing to the Cortex. Saving the '
'file system takes more time.')
@click.option('-y', is_flag=True, default=False,
help='Automatically say yes to all confirmations.')
@click.option('-f', '-b', '--file', '--binary', default='default', metavar='FILE',
help='Specifies a binary file, project directory, or project config file.')
@click.option('-p', '--port', default='auto', metavar='PORT', help='Specifies the serial port.')
@click.option('--no-poll', is_flag=True, default=False)
@click.option('-r', '--retry', default=2,
help='Specify the number of times the flasher should retry the flash when it detects a failure'
' (default two times).')
@default_cfg
# @click.option('-m', '--strategy', default='cortex', metavar='STRATEGY',
# help='Specify the microcontroller upload strategy. Not currently used.')
def flash(ctx, save_file_system, y, port, binary, no_poll, retry):
"""Upload binaries to the microcontroller. A serial port and binary file need to be specified.
By default, the port is automatically selected (if you want to be pedantic, 'auto').
Otherwise, a system COM port descriptor needs to be used. In Windows/NT, this takes the form of COM1.
In *nx systems, this takes the form of /dev/tty1 or /dev/acm1 or similar.
\b
Specifying 'all' as the COM port will automatically upload to all available microcontrollers.
By default, the CLI will look around for a proper binary to upload to the microcontroller. If one was not found, or
if you want to change the default binary, you can specify it.
"""
click.echo(' ====:: PROS Flasher v{} ::===='.format(get_version()))
if port == 'auto':
ports = prosflasher.ports.list_com_ports()
if len(ports) == 0:
click.echo('No microcontrollers were found. Please plug in a cortex or manually specify a serial port.\n',
err=True)
click.get_current_context().abort()
sys.exit(1)
port = ports[0].device
if len(ports) > 1 and port is not None and y is False:
port = None
for p in ports:
if click.confirm('Download to ' + p.device, default=True):
port = p.device
break
if port is None:
click.echo('No additional ports found.')
click.get_current_context().abort()
sys.exit(1)
if port == 'all':
port = [p.device for p in prosflasher.ports.list_com_ports()]
if len(port) == 0:
click.echo('No microcontrollers were found. Please plug in a cortex or manually specify a serial port.\n',
err=True)
click.get_current_context().abort()
sys.exit(1)
if y is False:
click.confirm('Download to ' + ', '.join(port), default=True, abort=True, prompt_suffix='?')
else:
port = [port]
if binary == 'default':
binary = os.getcwd()
if ctx.verbosity > 3:
click.echo('Default binary selected, new directory is {}'.format(binary))
binary = find_binary(binary)
if binary is None:
click.echo('No binary was found! Ensure you are in a built PROS project (run make) '
'or specify the file with the -f flag',
err=True)
click.get_current_context().exit()
if ctx.verbosity > 3:
click.echo('Final binary is {}'.format(binary))
click.echo('Flashing ' + binary + ' to ' + ', '.join(port))
for p in port:
tries = 1
code = prosflasher.upload.upload(p, y, binary, no_poll, ctx)
while tries <= retry and (not code or code == -1000):
click.echo('Retrying...')
code = prosflasher.upload.upload(p, y, binary, no_poll, ctx)
tries += 1
def find_binary(path):
"""
Helper function for finding the binary associated with a project
The algorithm is as follows:
- if it is a file, then check if the name of the file is 'pros.config':
- if it is 'pros.config', then find the binary based off the pros.config value (or default 'bin/output.bin')
- otherwise, can only assume it is the binary file to upload
- if it is a directory, start recursively searching up until 'pros.config' is found. max 10 times
- if the pros.config file was found, find binary based off of the pros.config value
- if no pros.config file was found, start recursively searching up (from starting path) until a directory
named bin is found
- if 'bin' was found, return 'bin/output.bin'
:param path: starting path to start the search
    :return: path to the binary, or None if no binary could be found
"""
# logger = logging.getLogger(ctx.log_key)
# logger.debug('Finding binary for {}'.format(path))
if os.path.isfile(path):
if ntpath.basename(path) == 'pros.config':
pros_cfg = prosconfig.ProjectConfig(path)
return os.path.join(path, pros_cfg.output)
return path
elif os.path.isdir(path):
try:
cfg = prosconfig.ProjectConfig(path, raise_on_error=True)
if cfg is not None and os.path.isfile(os.path.join(cfg.directory, cfg.output)):
return os.path.join(cfg.directory, cfg.output)
except prosconfig.ConfigNotFoundException:
search_dir = path
for n in range(10):
dirs = [d for d in os.listdir(search_dir)
if os.path.isdir(os.path.join(path, search_dir, d)) and d == 'bin']
if len(dirs) == 1: # found a bin directory
if os.path.isfile(os.path.join(path, search_dir, 'bin', 'output.bin')):
return os.path.join(path, search_dir, 'bin', 'output.bin')
search_dir = ntpath.split(search_dir)[:-1][0] # move to parent dir
return None
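# Illustrative example (not part of the original source): calling
# find_binary('.') from a built PROS project walks up from the current
# directory until it finds a pros.config or a bin/ directory, typically
# returning something like './bin/output.bin'; it returns None when nothing
# is found.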
@flasher_cli.command('poll', short_help='Polls a microcontroller for its system info')
@click.option('-y', '--yes', is_flag=True, default=False,
help='Automatically say yes to all confirmations.')
@click.argument('port', default='all')
@default_cfg
def get_sys_info(cfg, yes, port):
if port == 'auto':
ports = prosflasher.ports.list_com_ports()
if len(ports) == 0:
click.echo('No microcontrollers were found. Please plug in a cortex or manually specify a serial port.\n',
err=True)
sys.exit(1)
port = prosflasher.ports.list_com_ports()[0].device
if port is not None and yes is False:
click.confirm('Poll ' + port, default=True, abort=True, prompt_suffix='?')
if port == 'all':
port = [p.device for p in prosflasher.ports.list_com_ports()]
if len(port) == 0:
click.echo('No microcontrollers were found. Please plug in a cortex or manually specify a serial port.\n',
err=True)
sys.exit(1)
else:
port = [port]
for p in port:
sys_info = prosflasher.upload.ask_sys_info(prosflasher.ports.create_serial(p, serial.PARITY_EVEN), cfg)
click.echo(repr(sys_info))
pass
@flasher_cli.command(short_help='List connected microcontrollers')
@default_cfg
def lsusb(cfg):
if len(prosflasher.ports.list_com_ports()) == 0 or prosflasher.ports.list_com_ports() is None:
click.echo('No serial ports found.')
else:
click.echo('Available Ports:')
click.echo(prosflasher.ports.create_port_list(cfg.verbosity > 0))
# @flasher_cli.command(name='dump-cortex', short_help='Dumps user flash contents to a specified file')
# @click.option('-v', '--verbose', is_flag=True)
# @click.argument('file', default=sys.stdout, type=click.File())
# def dump_cortex(file, verbose):
# pass
| purduesigbots/purdueros-cli | proscli/flasher.py | Python | bsd-3-clause | 8,643 |
##=============== OPENING FITS TABLES =============
import numpy as np
import astropy
from astropy.io import fits
import matplotlib.pyplot as plt
##=============== setting environment ============
fermi = fits.open('3C454.3_604800.lc')
data = fermi[1].data
print data
| chalkyam/Photometry_Pipeline | scripts/open.fits.py | Python | mit | 272
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Gdm(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""GNOME display manager
"""
plugin_name = 'gdm'
profiles = ('desktop',)
def setup(self):
self.add_copy_spec("/etc/gdm/*")
self.add_cmd_output([
"journalctl -u gdm",
"systemctl status gdm.service"
])
# vim: set et ts=4 sw=4 :
| karibou/sosreport | sos/plugins/gdm.py | Python | gpl-2.0 | 1,116 |
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((3068.27, 9568.32, 6185.89), (0, 1, 0), 846)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((3926, 11632.8, 4748.76), (0.7, 0.7, 0.7), 846)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((2476.79, 11079.6, 5805.25), (0.7, 0.7, 0.7), 846)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((3055.8, 10504.5, 6311.86), (0.7, 0.7, 0.7), 846)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((3445.38, 9756.76, 5867.78), (0.7, 0.7, 0.7), 846)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((2926.9, 9631.41, 7241.88), (0.7, 0.7, 0.7), 846)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((3422.21, 11104.8, 8869.58), (0.7, 0.7, 0.7), 846)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((2133.02, 9455.9, 7785.49), (0.7, 0.7, 0.7), 846)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((2300.79, 9173.07, 7694.16), (0.7, 0.7, 0.7), 846)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((2535.45, 8285.94, 8945.56), (0.7, 0.7, 0.7), 846)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((711.963, 8628.86, 8113.9), (0, 1, 0), 846)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((1308.73, 8291.51, 6105.3), (0.7, 0.7, 0.7), 846)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((574.51, 8788.98, 7851.35), (0.7, 0.7, 0.7), 846)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((2260.14, 8107.69, 7924.24), (0.7, 0.7, 0.7), 846)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((1212.42, 8285.99, 7275.27), (0.7, 0.7, 0.7), 846)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((25.6174, 7649.07, 5763.12), (0.7, 0.7, 0.7), 846)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((336.505, 6893.63, 7662.69), (0.7, 0.7, 0.7), 846)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((773.049, 6986.09, 7545.26), (0.7, 0.7, 0.7), 846)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((1786.23, 7432.31, 6266.13), (0.7, 0.7, 0.7), 846)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((1380.72, 8268.31, 5449.63), (0.7, 0.7, 0.7), 846)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((1545.13, 7094.07, 6684.64), (0, 1, 0), 846)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((2912.97, 7812.09, 7285.47), (0.7, 0.7, 0.7), 846)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((2328.31, 7884.83, 5083.72), (0.7, 0.7, 0.7), 846)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((2292.12, 7433.4, 5747.84), (0.7, 0.7, 0.7), 846)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((3508.39, 6626.25, 7477.04), (0.7, 0.7, 0.7), 846)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((3834.92, 7247.35, 5552.04), (0.7, 0.7, 0.7), 846)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((2678.82, 7909.91, 4181.7), (0.7, 0.7, 0.7), 846)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((2757.64, 7435.1, 5393.73), (0.7, 0.7, 0.7), 846)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((2002.8, 6626.82, 6720.33), (0.7, 0.7, 0.7), 846)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((2234.41, 7564.41, 5489.01), (0.7, 0.7, 0.7), 846)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((2731.12, 6243.24, 5535.29), (0, 1, 0), 846)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((3848.91, 5516.98, 4718.48), (0.7, 0.7, 0.7), 846)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((3661.43, 6341.26, 4863.95), (0.7, 0.7, 0.7), 846)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((4805.44, 6489.93, 6251.19), (0.7, 0.7, 0.7), 846)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((4214.09, 7322.93, 6242.85), (0.7, 0.7, 0.7), 846)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((4967.91, 6542.01, 5139.98), (0.7, 0.7, 0.7), 846)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((4821.35, 6553.21, 8035.41), (1, 0.7, 0), 846)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((5466.57, 7317.45, 5546.76), (0.7, 0.7, 0.7), 846)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((5137.78, 5939.17, 6011.91), (0.7, 0.7, 0.7), 846)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((6189.16, 5960.08, 3854.69), (1, 0.7, 0), 846)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((5685.73, 4723.81, 5541.43), (0.7, 0.7, 0.7), 846)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((6434.25, 3725.72, 4521.33), (0.7, 0.7, 0.7), 846)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((6067.51, 4269.11, 4259.98), (0.7, 0.7, 0.7), 846)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((6409.1, 4242.46, 4590.43), (0.7, 0.7, 0.7), 846)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((6244.11, 4312.51, 3412.93), (0.7, 0.7, 0.7), 846)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((6470.82, 4648.04, 4076.96), (0.7, 0.7, 0.7), 846)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((6635.34, 3488.35, 3898.54), (0.7, 0.7, 0.7), 846)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((7083.17, 3716.85, 3352.55), (0.7, 0.7, 0.7), 846)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((7720.15, 3771.84, 3739.05), (0.7, 0.7, 0.7), 846)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((7670.53, 4539.35, 5520.12), (0.7, 0.7, 0.7), 846)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((6263.67, 4243.16, 4507.85), (0.7, 0.7, 0.7), 846)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((7898.26, 4911.28, 5192.69), (0, 1, 0), 846)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((6047.53, 1997.17, 4977.04), (0.7, 0.7, 0.7), 846)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((6902.13, 4173.77, 4239.51), (0.7, 0.7, 0.7), 846)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((8043.41, 4669.9, 3845.53), (0.7, 0.7, 0.7), 846)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((6634.08, 3871.73, 4320.72), (0.7, 0.7, 0.7), 846)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((6542.49, 4624.98, 5414.59), (0.7, 0.7, 0.7), 846)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((7603.46, 4139.14, 3693.44), (0.7, 0.7, 0.7), 846)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((6762.77, 3599.79, 5140.62), (0.7, 0.7, 0.7), 846)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((7841.27, 5057.35, 4614.72), (0.7, 0.7, 0.7), 846)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((9027.38, 3715.11, 4464.85), (0.7, 0.7, 0.7), 846)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((8559.91, 4418.09, 6302.81), (0, 1, 0), 846)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((7894.86, 4563.52, 8295.24), (0.7, 0.7, 0.7), 846)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((8034.54, 3597.15, 6580.02), (0.7, 0.7, 0.7), 846)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((7538.66, 2155.44, 4750), (0.7, 0.7, 0.7), 846)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((9120.98, 2671.53, 5688.41), (0.7, 0.7, 0.7), 846)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((8426.85, 1903.53, 5042.03), (0.7, 0.7, 0.7), 846)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((6861.89, 2182.23, 6015.42), (0.7, 0.7, 0.7), 846)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((7644.65, 948.227, 7404.65), (0.7, 0.7, 0.7), 846)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((7418.95, 1567.37, 6560.17), (0.7, 0.7, 0.7), 846)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((9345.55, 1355.81, 6380.72), (0.7, 0.7, 0.7), 846)
if "particle_71 geometry" not in marker_sets:
s=new_marker_set('particle_71 geometry')
marker_sets["particle_71 geometry"]=s
s= marker_sets["particle_71 geometry"]
mark=s.place_marker((7379.56, 2206.28, 6667.46), (0, 1, 0), 846)
if "particle_72 geometry" not in marker_sets:
s=new_marker_set('particle_72 geometry')
marker_sets["particle_72 geometry"]=s
s= marker_sets["particle_72 geometry"]
mark=s.place_marker((8765.64, 2193.68, 6994.48), (0.7, 0.7, 0.7), 846)
if "particle_73 geometry" not in marker_sets:
s=new_marker_set('particle_73 geometry')
marker_sets["particle_73 geometry"]=s
s= marker_sets["particle_73 geometry"]
mark=s.place_marker((8351.36, 1629.22, 5502.3), (0.7, 0.7, 0.7), 846)
if "particle_74 geometry" not in marker_sets:
s=new_marker_set('particle_74 geometry')
marker_sets["particle_74 geometry"]=s
s= marker_sets["particle_74 geometry"]
mark=s.place_marker((8479.23, 2350.36, 7381.88), (0, 1, 0), 846)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| batxes/4Cin | Six_mouse_models/Six_mouse_models_final_output_0.2_-0.1_11000/mtx1_models/Six_mouse_models17239.py | Python | gpl-3.0 | 18,216 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class RestoreRequest(ProxyOnlyResource):
"""Description of a restore request.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param storage_account_url: SAS URL to the container.
:type storage_account_url: str
:param blob_name: Name of a blob which contains the backup.
:type blob_name: str
:param overwrite: <code>true</code> if the restore operation can overwrite
target app; otherwise, <code>false</code>. <code>true</code> is needed if
trying to restore over an existing app.
:type overwrite: bool
:param site_name: Name of an app.
:type site_name: str
:param databases: Collection of databases which should be restored. This
list has to match the list of databases included in the backup.
:type databases: list[~azure.mgmt.web.models.DatabaseBackupSetting]
    :param ignore_conflicting_host_names: Changes the logic when restoring an
app with custom domains. <code>true</code> to remove custom domains
automatically. If <code>false</code>, custom domains are added to
the app's object when it is being restored, but that might fail due to
conflicts during the operation. Default value: False .
:type ignore_conflicting_host_names: bool
:param ignore_databases: Ignore the databases and only restore the site
content. Default value: False .
:type ignore_databases: bool
:param app_service_plan: Specify app service plan that will own restored
site.
:type app_service_plan: str
:param operation_type: Operation type. Possible values include: 'Default',
'Clone', 'Relocation', 'Snapshot'. Default value: "Default" .
:type operation_type: str or
~azure.mgmt.web.models.BackupRestoreOperationType
:param adjust_connection_strings: <code>true</code> if
SiteConfig.ConnectionStrings should be set in new app; otherwise,
<code>false</code>.
:type adjust_connection_strings: bool
:param hosting_environment: App Service Environment name, if needed (only
when restoring an app to an App Service Environment).
:type hosting_environment: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'storage_account_url': {'required': True},
'overwrite': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'storage_account_url': {'key': 'properties.storageAccountUrl', 'type': 'str'},
'blob_name': {'key': 'properties.blobName', 'type': 'str'},
'overwrite': {'key': 'properties.overwrite', 'type': 'bool'},
'site_name': {'key': 'properties.siteName', 'type': 'str'},
'databases': {'key': 'properties.databases', 'type': '[DatabaseBackupSetting]'},
'ignore_conflicting_host_names': {'key': 'properties.ignoreConflictingHostNames', 'type': 'bool'},
'ignore_databases': {'key': 'properties.ignoreDatabases', 'type': 'bool'},
'app_service_plan': {'key': 'properties.appServicePlan', 'type': 'str'},
'operation_type': {'key': 'properties.operationType', 'type': 'BackupRestoreOperationType'},
'adjust_connection_strings': {'key': 'properties.adjustConnectionStrings', 'type': 'bool'},
'hosting_environment': {'key': 'properties.hostingEnvironment', 'type': 'str'},
}
def __init__(self, storage_account_url, overwrite, kind=None, blob_name=None, site_name=None, databases=None, ignore_conflicting_host_names=False, ignore_databases=False, app_service_plan=None, operation_type="Default", adjust_connection_strings=None, hosting_environment=None):
super(RestoreRequest, self).__init__(kind=kind)
self.storage_account_url = storage_account_url
self.blob_name = blob_name
self.overwrite = overwrite
self.site_name = site_name
self.databases = databases
self.ignore_conflicting_host_names = ignore_conflicting_host_names
self.ignore_databases = ignore_databases
self.app_service_plan = app_service_plan
self.operation_type = operation_type
self.adjust_connection_strings = adjust_connection_strings
self.hosting_environment = hosting_environment
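# --- Illustrative usage sketch (not part of the generated SDK code) ---
# Only storage_account_url and overwrite are required; the remaining keyword
# arguments are optional, for example:
#   request = RestoreRequest(
#       storage_account_url='<SAS URL to the backup container>',
#       overwrite=True,
#       blob_name='site_backup.zip',
#       ignore_databases=True,
#   )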
| lmazuel/azure-sdk-for-python | azure-mgmt-web/azure/mgmt/web/models/restore_request.py | Python | mit | 5,149 |
#!/usr/bin/env python3
PKG = 'lg_common'
NAME = 'test_managed_application'
import gc
import os
import unittest
import weakref
from lg_msg_defs.msg import ApplicationState
from appctl_support import ProcController
from lg_common import ManagedApplication, ManagedWindow
TEST_CMD = ['/usr/bin/python']
class MockWindow(ManagedWindow):
def __init__(self, *args, **kwargs):
self.converge_count = 0
super(MockWindow, self).__init__(*args, **kwargs)
def converge(self):
self.converge_count += 1
class MockController(ProcController):
def __init__(self, *args, **kwargs):
super(MockController, self).__init__(*args, **kwargs)
self.start_count = 0
self.stop_count = 0
def start(self):
self.start_count += 1
def stop(self):
self.stop_count += 1
class TestManagedApplication(unittest.TestCase):
def setUp(self):
window = MockWindow(w_instance='NULL', visible=False)
self.app = ManagedApplication(cmd=TEST_CMD, window=window)
def test_init(self):
self.app.proc = MockController(TEST_CMD)
self.assertEqual(ApplicationState.STOPPED, self.app.get_state())
self.assertEqual(0, self.app.window.converge_count)
self.assertEqual(0, self.app.proc.start_count)
self.assertEqual(0, self.app.proc.stop_count)
def test_set_state_suspended(self):
self.app.proc = MockController(TEST_CMD)
self.app.set_state(ApplicationState.SUSPENDED)
self.assertFalse(self.app.window.is_visible)
self.assertEqual(1, self.app.window.converge_count)
self.assertEqual(1, self.app.proc.start_count)
self.assertEqual(0, self.app.proc.stop_count)
self.app.set_state(ApplicationState.STOPPED)
self.assertFalse(self.app.window.is_visible)
self.assertEqual(1, self.app.window.converge_count)
self.assertEqual(1, self.app.proc.start_count)
self.assertEqual(1, self.app.proc.stop_count)
def test_set_state_hidden(self):
self.app.proc = MockController(TEST_CMD)
self.app.set_state(ApplicationState.HIDDEN)
self.assertFalse(self.app.window.is_visible)
self.assertEqual(1, self.app.window.converge_count)
self.assertEqual(1, self.app.proc.start_count)
self.assertEqual(0, self.app.proc.stop_count)
self.app.set_state(ApplicationState.STOPPED)
self.assertFalse(self.app.window.is_visible)
self.assertEqual(1, self.app.window.converge_count)
self.assertEqual(1, self.app.proc.start_count)
self.assertEqual(1, self.app.proc.stop_count)
def test_set_state_visible(self):
self.app.proc = MockController(TEST_CMD)
self.app.set_state(ApplicationState.VISIBLE)
self.assertTrue(self.app.window.is_visible)
self.assertEqual(1, self.app.window.converge_count)
self.assertEqual(1, self.app.proc.start_count)
self.assertEqual(0, self.app.proc.stop_count)
self.app.set_state(ApplicationState.STOPPED)
self.assertFalse(self.app.window.is_visible)
self.assertEqual(1, self.app.window.converge_count)
self.assertEqual(1, self.app.proc.start_count)
self.assertEqual(1, self.app.proc.stop_count)
class TestCleanup(unittest.TestCase):
def test_cleanup(self):
app = ManagedApplication(cmd=TEST_CMD)
app_ref = weakref.ref(app)
app.close()
app = None
gc.collect()
self.assertIsNone(app_ref())
if __name__ == '__main__':
import rostest
rostest.rosrun(PKG, NAME, TestManagedApplication)
rostest.rosrun(PKG, NAME, TestCleanup)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| EndPointCorp/lg_ros_nodes | lg_common/test/offline/test_managed_application.py | Python | apache-2.0 | 3,712 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example use of Neo4j related operators.
"""
from datetime import datetime
from airflow import DAG
from airflow.providers.neo4j.operators.neo4j import Neo4jOperator
dag = DAG(
'example_neo4j',
start_date=datetime(2021, 1, 1),
tags=['example'],
catchup=False,
)
# [START run_query_neo4j_operator]
neo4j_task = Neo4jOperator(
task_id='run_neo4j_query',
neo4j_conn_id='neo4j_conn_id',
sql='MATCH (tom {name: "Tom Hanks"}) RETURN tom',
dag=dag,
)
# [END run_query_neo4j_operator]
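# A minimal sketch of chaining a second query after the task above; the Cypher
# statement and task_id below are illustrative assumptions, not part of the
# provider's documented examples.
#
# neo4j_count_task = Neo4jOperator(
#     task_id='run_neo4j_count_query',
#     neo4j_conn_id='neo4j_conn_id',
#     sql='MATCH (n) RETURN count(n)',
#     dag=dag,
# )
# neo4j_task >> neo4j_count_task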
| apache/incubator-airflow | airflow/providers/neo4j/example_dags/example_neo4j.py | Python | apache-2.0 | 1,304 |
from setuptools import setup, find_packages
setup(
version='4.3.0',
name='vcdriver',
description='A vcenter driver based on pyvmomi, fabric and pywinrm',
url='https://github.com/Lantero/vcdriver',
author='Carlos Ruiz Lantero',
author_email='[email protected]',
license='MIT',
install_requires=['colorama', 'Fabric3', 'pyvmomi', 'pywinrm', 'six'],
packages=find_packages(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development',
],
)
| Lantero/vcdriver | setup.py | Python | mit | 1,071 |
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2012,2013,2014,2015,2016,2017,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the del chassis command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDelChassis(TestBrokerCommand):
def test_100_del_ut3c5_used(self):
self.dsdb_expect_delete(self.net["unknown0"].usable[6])
command = "del chassis --chassis ut3c5.aqd-unittest.ms.com"
out = self.badrequesttest(command.split(" "))
self.matchoutput(out, "Chassis ut3c5.aqd-unittest.ms.com is "
"still in use by 3 machines or network devices. "
"Use --clear_slots if you really want to delete it.",
command.split(" "))
def test_101_del_ut3c5(self):
self.dsdb_expect_delete(self.net["unknown0"].usable[6])
command = "del chassis --chassis ut3c5.aqd-unittest.ms.com --clear_slots"
self.noouttest(command.split(" "))
self.dsdb_verify()
def test_105_verify_ut3c5(self):
command = "show chassis --chassis ut3c5.aqd-unittest.ms.com"
self.notfoundtest(command.split(" "))
def test_106_del_ut3c5_again(self):
command = ["del_chassis", "--chassis", "ut3c5.aqd-unittest.ms.com"]
out = self.notfoundtest(command)
self.matchoutput(out,
"DnsRecord ut3c5.aqd-unittest.ms.com, "
"DNS environment internal not found.",
command)
def test_110_del_ut3c1(self):
command = "del chassis --chassis ut3c1.aqd-unittest.ms.com"
self.noouttest(command.split(" "))
def test_115_verify_ut3c1(self):
command = "show chassis --chassis ut3c1.aqd-unittest.ms.com"
self.notfoundtest(command.split(" "))
def test_120_del_ut9_chassis(self):
for i in range(1, 8):
self.dsdb_expect_delete(self.net["ut9_chassis"].usable[i])
command = "del chassis --chassis ut9c%d.aqd-unittest.ms.com" % i
self.noouttest(command.split(" "))
self.dsdb_verify()
def test_125_verify_ut9_chassis(self):
for i in range(1, 6):
command = "show chassis --chassis ut9c%d.aqd-unittest.ms.com" % i
self.notfoundtest(command.split(" "))
def test_130_del_np3c5(self):
self.noouttest(["del_chassis", "--chassis", "np3c5.one-nyp.ms.com"])
def test_140_del_aurora_chassis(self):
self.noouttest(["del_chassis", "--chassis", "oy604c2.ms.com"])
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDelChassis)
unittest.TextTestRunner(verbosity=2).run(suite)
| quattor/aquilon | tests/broker/test_del_chassis.py | Python | apache-2.0 | 3,407 |
import sqlite3
from flask import Flask, g, render_template, request
from flask import abort, session, flash, redirect, url_for
# configuration
DATABASE = '/tmp/flaskr.db'
DEBUG = True
USERNAME = 'admin'
PASSWORD = 'admin'
SECRET_KEY = 'Sou4Gi3zQVZ1lJ8lUlUB'
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
return sqlite3.connect(app.config['DATABASE'])
@app.before_request
def before_request():
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
@app.route('/')
def show_entries():
cursor = g.db.execute('select title, text from entries order by id desc')
entries = [dict(title=row[0], text=row[1]) for row in cursor.fetchall()]
return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
if not session.get('logged_in'):
abort(401)
g.db.execute('insert into entries(title, text) values(?, ?)',
[request.form['title'], request.form['text']])
g.db.commit()
flash('New entry was successfully posted')
return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
# login success
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('show_entries'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
if __name__ == '__main__':
app.run(host='0.0.0.0')
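# A minimal sketch of exercising the routes above with Flask's built-in test
# client; it assumes the 'admin'/'admin' credentials configured at the top of
# this file and an initialized database.
#
#     with app.test_client() as client:
#         client.post('/login', data={'username': 'admin', 'password': 'admin'},
#                     follow_redirects=True)
#         client.post('/add', data={'title': 'hello', 'text': 'world'},
#                     follow_redirects=True)
#         page = client.get('/')  # rendered list with the new entry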
| decimalbell/devnull | python/flask/flaskr/flaskr.py | Python | mit | 1,943 |
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the prioritisetransaction mining RPC."""
import time
from test_framework.messages import COIN, MAX_BLOCK_BASE_SIZE
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, create_confirmed_utxos, create_lots_of_big_transactions, gen_return_txouts
class PrioritiseTransactionTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [["-printpriority=1"], ["-printpriority=1"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Test `prioritisetransaction` required parameters
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction)
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '')
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0)
# Test `prioritisetransaction` invalid extra parameters
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0, 0, 0)
# Test `prioritisetransaction` invalid `txid`
assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].prioritisetransaction, txid='foo', fee_delta=0)
assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000')", self.nodes[0].prioritisetransaction, txid='Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000', fee_delta=0)
# Test `prioritisetransaction` invalid `dummy`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-1, "JSON value is not a number as expected", self.nodes[0].prioritisetransaction, txid, 'foo', 0)
assert_raises_rpc_error(-8, "Priority is no longer supported, dummy argument to prioritisetransaction must be 0.", self.nodes[0].prioritisetransaction, txid, 1, 0)
# Test `prioritisetransaction` invalid `fee_delta`
assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].prioritisetransaction, txid=txid, fee_delta='foo')
self.txouts = gen_return_txouts()
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
utxo_count = 90
utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count)
base_fee = self.relayfee*100 # our transactions are smaller than 100kb
txids = []
# Create 3 batches of transactions at 3 different fee rate levels
range_size = utxo_count // 3
for i in range(3):
txids.append([])
start_range = i * range_size
end_range = start_range + range_size
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)
# Make sure that the size of each group of transactions exceeds
# MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create
# more transactions.
mempool = self.nodes[0].getrawmempool(True)
sizes = [0, 0, 0]
for i in range(3):
for j in txids[i]:
assert j in mempool
sizes[i] += mempool[j]['size']
assert sizes[i] > MAX_BLOCK_BASE_SIZE # Fail => raise utxo_count
# add a fee delta to something in the cheapest bucket and make sure it gets mined
# also check that a different entry in the cheapest bucket is NOT mined
self.nodes[0].prioritisetransaction(txid=txids[0][0], fee_delta=int(3*base_fee*COIN))
self.nodes[0].generate(1)
mempool = self.nodes[0].getrawmempool()
self.log.info("Assert that prioritised transaction was mined")
assert txids[0][0] not in mempool
assert txids[0][1] in mempool
high_fee_tx = None
for x in txids[2]:
if x not in mempool:
high_fee_tx = x
# Something high-fee should have been mined!
assert high_fee_tx is not None
# Add a prioritisation before a tx is in the mempool (de-prioritising a
# high-fee transaction so that it's now low fee).
self.nodes[0].prioritisetransaction(txid=high_fee_tx, fee_delta=-int(2*base_fee*COIN))
# Add everything back to mempool
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Check to make sure our high fee rate tx is back in the mempool
mempool = self.nodes[0].getrawmempool()
assert high_fee_tx in mempool
# Now verify the modified-high feerate transaction isn't mined before
# the other high fee transactions. Keep mining until our mempool has
# decreased by all the high fee size that we calculated above.
while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
self.nodes[0].generate(1)
# High fee transaction should not have been mined, but other high fee rate
# transactions should have been.
mempool = self.nodes[0].getrawmempool()
self.log.info("Assert that de-prioritised transaction is still in mempool")
assert high_fee_tx in mempool
for x in txids[2]:
if (x != high_fee_tx):
assert x not in mempool
# Create a free transaction. Should be rejected.
utxo_list = self.nodes[0].listunspent()
assert len(utxo_list) > 0
utxo = utxo_list[0]
inputs = []
outputs = {}
inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[0].getnewaddress()] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
tx_hex = self.nodes[0].signrawtransactionwithwallet(raw_tx)["hex"]
tx_id = self.nodes[0].decoderawtransaction(tx_hex)["txid"]
# This will raise an exception due to min relay fee not being met
assert_raises_rpc_error(-26, "min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex)
assert tx_id not in self.nodes[0].getrawmempool()
# This is a less than 1000-byte transaction, so just set the fee
# to be the minimum for a 1000-byte transaction and check that it is
# accepted.
self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=int(self.relayfee*COIN))
self.log.info("Assert that prioritised free transaction is accepted to mempool")
assert_equal(self.nodes[0].sendrawtransaction(tx_hex), tx_id)
assert tx_id in self.nodes[0].getrawmempool()
# Test that calling prioritisetransaction is sufficient to trigger
# getblocktemplate to (eventually) return a new block.
mock_time = int(time.time())
self.nodes[0].setmocktime(mock_time)
template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=-int(self.relayfee*COIN))
self.nodes[0].setmocktime(mock_time+10)
new_template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
assert template != new_template
if __name__ == '__main__':
PrioritiseTransactionTest().main()
| r8921039/bitcoin | test/functional/mining_prioritisetransaction.py | Python | mit | 7,584 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent layers backed by cuDNN.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.layers.recurrent import RNN
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_cudnn_rnn_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.util.tf_export import tf_export
class _CuDNNRNN(RNN):
"""Private base class for CuDNNGRU and CuDNNLSTM layers.
Arguments:
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
time_major: Boolean (default False). If true, the inputs and outputs will be
in shape `(timesteps, batch, ...)`, whereas in the False case, it will
be `(batch, timesteps, ...)`.
"""
def __init__(self,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
time_major=False,
**kwargs):
# We invoke the base layer's initializer directly here because we do not
# want to create RNN cell instance.
super(RNN, self).__init__(**kwargs) # pylint: disable=bad-super-call
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
self.stateful = stateful
self.time_major = time_major
self.supports_masking = False
self.input_spec = [InputSpec(ndim=3)]
if hasattr(self.cell.state_size, '__len__'):
state_size = self.cell.state_size
else:
state_size = [self.cell.state_size]
self.state_spec = [InputSpec(shape=(None, dim)) for dim in state_size]
self.constants_spec = None
self._states = None
self._num_constants = None
self._num_inputs = None
self._vector_shape = constant_op.constant([-1])
def _canonical_to_params(self, weights, biases):
weights = [array_ops.reshape(x, self._vector_shape) for x in weights]
biases = [array_ops.reshape(x, self._vector_shape) for x in biases]
return array_ops.concat(weights + biases, axis=0)
def call(self, inputs, mask=None, training=None, initial_state=None):
if isinstance(mask, list):
mask = mask[0]
if mask is not None:
raise ValueError('Masking is not supported for CuDNN RNNs.')
# input shape: `(samples, time (padded with zeros), input_dim)`
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
if isinstance(inputs, list):
initial_state = inputs[1:]
inputs = inputs[0]
elif initial_state is not None:
pass
elif self.stateful:
initial_state = self.states
else:
initial_state = self.get_initial_state(inputs)
if len(initial_state) != len(self.states):
raise ValueError('Layer has ' + str(len(self.states)) +
' states but was passed ' + str(len(initial_state)) +
' initial states.')
if self.go_backwards:
# Reverse time axis.
inputs = K.reverse(inputs, 1)
output, states = self._process_batch(inputs, initial_state)
if self.stateful:
updates = []
for i in range(len(states)):
updates.append(state_ops.assign(self.states[i], states[i]))
self.add_update(updates, inputs)
if self.return_state:
return [output] + states
else:
return output
def get_config(self):
config = {
'return_sequences': self.return_sequences,
'return_state': self.return_state,
'go_backwards': self.go_backwards,
'stateful': self.stateful,
'time_major': self.time_major,
}
base_config = super( # pylint: disable=bad-super-call
RNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
@property
def trainable_weights(self):
if self.trainable and self.built:
return [self.kernel, self.recurrent_kernel, self.bias]
return []
@property
def non_trainable_weights(self):
if not self.trainable and self.built:
return [self.kernel, self.recurrent_kernel, self.bias]
return []
@property
def losses(self):
return super(RNN, self).losses
def get_losses_for(self, inputs=None):
return super( # pylint: disable=bad-super-call
RNN, self).get_losses_for(inputs=inputs)
@tf_export('keras.layers.CuDNNGRU')
class CuDNNGRU(_CuDNNRNN):
"""Fast GRU implementation backed by cuDNN.
More information about cuDNN can be found on the [NVIDIA
developer website](https://developer.nvidia.com/cudnn).
Can only be run on GPU.
Arguments:
units: Positive integer, dimensionality of the output space.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation").
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
return_sequences: Boolean. Whether to return the last output in the output
sequence, or the full sequence.
return_state: Boolean. Whether to return the last state in addition to the
output.
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
stateful: Boolean (default False). If True, the last state for each sample
at index i in a batch will be used as initial state for the sample of
index i in the following batch.
"""
def __init__(self,
units,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
**kwargs):
self.units = units
cell_spec = collections.namedtuple('cell', 'state_size')
self._cell = cell_spec(state_size=self.units)
super(CuDNNGRU, self).__init__(
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
@property
def cell(self):
return self._cell
def build(self, input_shape):
super(CuDNNGRU, self).build(input_shape)
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_dim = int(input_shape[-1])
self.kernel = self.add_weight(
shape=(input_dim, self.units * 3),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
self.bias = self.add_weight(
shape=(self.units * 6,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.built = True
def _process_batch(self, inputs, initial_state):
if not self.time_major:
inputs = array_ops.transpose(inputs, perm=(1, 0, 2))
input_h = initial_state[0]
input_h = array_ops.expand_dims(input_h, axis=0)
params = self._canonical_to_params(
weights=[
self.kernel[:, self.units:self.units * 2],
self.kernel[:, :self.units],
self.kernel[:, self.units * 2:],
self.recurrent_kernel[:, self.units:self.units * 2],
self.recurrent_kernel[:, :self.units],
self.recurrent_kernel[:, self.units * 2:],
],
biases=[
self.bias[self.units:self.units * 2],
self.bias[:self.units],
self.bias[self.units * 2:self.units * 3],
self.bias[self.units * 4:self.units * 5],
self.bias[self.units * 3:self.units * 4],
self.bias[self.units * 5:],
],
)
outputs, h, _, _ = gen_cudnn_rnn_ops.cudnn_rnn(
inputs,
input_h=input_h,
input_c=0,
params=params,
is_training=True,
rnn_mode='gru')
if self.stateful or self.return_state:
h = h[0]
if self.return_sequences:
if self.time_major:
output = outputs
else:
output = array_ops.transpose(outputs, perm=(1, 0, 2))
else:
output = outputs[-1]
return output, [h]
def get_config(self):
config = {
'units': self.units,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(CuDNNGRU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.CuDNNLSTM')
class CuDNNLSTM(_CuDNNRNN):
"""Fast LSTM implementation backed by cuDNN.
More information about cuDNN can be found on the [NVIDIA
developer website](https://developer.nvidia.com/cudnn).
Can only be run on GPU.
Arguments:
units: Positive integer, dimensionality of the output space.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
the linear transformation of the inputs.
unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate
at initialization. Setting it to true will also force
`bias_initializer="zeros"`. This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation").
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
    return_sequences: Boolean. Whether to return the last output in the
      output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state in addition to the
output.
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
stateful: Boolean (default False). If True, the last state for each sample
at index i in a batch will be used as initial state for the sample of
index i in the following batch.
"""
def __init__(self,
units,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
**kwargs):
self.units = units
cell_spec = collections.namedtuple('cell', 'state_size')
self._cell = cell_spec(state_size=(self.units, self.units))
super(CuDNNLSTM, self).__init__(
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
@property
def cell(self):
return self._cell
def build(self, input_shape):
super(CuDNNLSTM, self).build(input_shape)
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_dim = int(input_shape[-1])
self.kernel = self.add_weight(
shape=(input_dim, self.units * 4),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return array_ops.concat([
self.bias_initializer((self.units * 5,), *args, **kwargs),
initializers.Ones()((self.units,), *args, **kwargs),
self.bias_initializer((self.units * 2,), *args, **kwargs),
], axis=0)
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.units * 8,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.built = True
def _process_batch(self, inputs, initial_state):
if not self.time_major:
inputs = array_ops.transpose(inputs, perm=(1, 0, 2))
input_h = initial_state[0]
input_c = initial_state[1]
input_h = array_ops.expand_dims(input_h, axis=0)
input_c = array_ops.expand_dims(input_c, axis=0)
params = self._canonical_to_params(
weights=[
self.kernel[:, :self.units],
self.kernel[:, self.units:self.units * 2],
self.kernel[:, self.units * 2:self.units * 3],
self.kernel[:, self.units * 3:],
self.recurrent_kernel[:, :self.units],
self.recurrent_kernel[:, self.units:self.units * 2],
self.recurrent_kernel[:, self.units * 2:self.units * 3],
self.recurrent_kernel[:, self.units * 3:],
],
biases=[
self.bias[:self.units],
self.bias[self.units:self.units * 2],
self.bias[self.units * 2:self.units * 3],
self.bias[self.units * 3:self.units * 4],
self.bias[self.units * 4:self.units * 5],
self.bias[self.units * 5:self.units * 6],
self.bias[self.units * 6:self.units * 7],
self.bias[self.units * 7:],
],
)
outputs, h, c, _ = gen_cudnn_rnn_ops.cudnn_rnn(
inputs,
input_h=input_h,
input_c=input_c,
params=params,
is_training=True)
if self.stateful or self.return_state:
h = h[0]
c = c[0]
if self.return_sequences:
if self.time_major:
output = outputs
else:
output = array_ops.transpose(outputs, perm=(1, 0, 2))
else:
output = outputs[-1]
return output, [h, c]
def get_config(self):
config = {
'units': self.units,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(CuDNNLSTM, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
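# A minimal usage sketch; it assumes a CUDA-capable GPU, the `tf.keras` export
# paths declared by the tf_export decorators above, and placeholder values for
# `timesteps` and `features`.
#
#     model = tf.keras.Sequential([
#         tf.keras.layers.CuDNNLSTM(64, return_sequences=True,
#                                   input_shape=(timesteps, features)),
#         tf.keras.layers.CuDNNGRU(32),
#         tf.keras.layers.Dense(10, activation='softmax'),
#     ])
#     model.compile(optimizer='adam', loss='categorical_crossentropy')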
| hehongliang/tensorflow | tensorflow/python/keras/layers/cudnn_recurrent.py | Python | apache-2.0 | 20,929 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0045_pendingcallback_planned_for_datetime'),
]
operations = [
migrations.AlterField(
model_name='widget',
name='callback_type',
field=models.CharField(default=b'linear', max_length=10, verbose_name=b'\xd0\x9f\xd0\xbe\xd1\x80\xd1\x8f\xd0\xb4\xd0\xbe\xd0\xba \xd0\xb4\xd0\xbe\xd0\xb7\xd0\xb2\xd0\xbe\xd0\xbd\xd0\xb0', choices=[(b'ringall', b'\xd0\x9e\xd0\xb4\xd0\xbd\xd0\xbe\xd0\xb2\xd1\x80\xd0\xb5\xd0\xbc\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe'), (b'linear', b'\xd0\x9f\xd0\xbe \xd0\xbe\xd1\x87\xd0\xb5\xd1\x80\xd0\xb5\xd0\xb4\xd0\xb8')]),
preserve_default=True,
),
]
| vesellov/callfeed.net | mainapp/migrations/0046_auto_20150807_1626.py | Python | mit | 828 |
#!/usr/bin/env python
# -*-coding:utf-8-*-
# File Name : f.py
# Description :
# Author :
# Creation Date : 2021-10-31
# Last Modified : 2021-10-31 (Sunday) 19:00:05
# Created By : lsl
def f(grid, k):
    # Breadth-first search over (flattened grid string, position, steps taken,
    # remaining obstacle eliminations): walks from the top-left cell toward the
    # bottom-right cell, stepping through '0' cells freely and through at most
    # k obstacle ('1') cells; visited cells are marked '3'.  Returns the smallest
    # number of marked cells over all completed walks, or -1 if unreachable.
def h(grid):
ans = ''
for i in range(len(grid)):
for j in range(len(grid[0])):
ans += str(grid[i][j])
return ans
def f1(s, i, count, k):
nonlocal n, m
s = list(s)
for j in [-1, 1, -n, n]:
if i % n == 0 and j == -1:
continue
if i % n == n-1 and j == 1:
continue
if 0<= i+j < len(s):
a = s[i+j]
if s[i+j] == '0':
s[i+j] = '3'
yield ''.join(s),i+j, count+1, k
elif s[i+j] == '1':
if k > 0:
s[i+j] = '3'
yield ''.join(s),i+j, count+1, k-1
s[i+j] = a
m, n = len(grid), len(grid[0])
seen = set()
queue = [(h(grid), 0, 0, k)]
ans = float('inf')
while queue:
s,i,count, k = queue.pop(0)
if i == len(s) - 1:
ans = min(ans, s.count('3'))
seen.add(s)
for j in f1(s, i, count, k):
if j[0] not in seen:
queue.append(j)
if ans == float('inf'):
return -1
return ans
grid = [[0,0,0],
[1,1,0],
[0,0,0],
[0,1,1],
[0,0,0],
[0,0,0],
]
k = 1
#ret = f(grid, k)
#print(ret)
grid = [[0,1,0,1],[0,1,0,0],[0,0,1,0],[1,0,0,1],[0,1,0,0]]
k = 18
ret = f(grid, k)
print(ret)
| uxlsl/uxlsl.github.io | demo/code/2021-10-31/f.py | Python | mit | 1,631 |
# This file is part of ReText
# Copyright: Dmitry Shachnev 2012
# License: GNU GPL v2 or higher
from ReText import *
from ReText.highlighter import ReTextHighlighter
class HtmlDialog(QDialog):
def __init__(self, parent=None):
QDialog.__init__(self, parent)
self.resize(700, 600)
verticalLayout = QVBoxLayout(self)
self.textEdit = QTextEdit(self)
self.textEdit.setReadOnly(True)
self.textEdit.setFont(monofont)
self.hl = ReTextHighlighter(self.textEdit.document())
self.hl.docType = DOCTYPE_HTML
verticalLayout.addWidget(self.textEdit)
buttonBox = QDialogButtonBox(self)
buttonBox.setStandardButtons(QDialogButtonBox.Close)
self.connect(buttonBox, SIGNAL("rejected()"), self.close)
verticalLayout.addWidget(buttonBox)
| codemedic/retext | ReText/htmldialog.py | Python | gpl-3.0 | 746 |
import sys
import unittest
from cStringIO import StringIO
from pylint.checkers import similar
class SimilarTC(unittest.TestCase):
"""test the similar command line utility"""
def test(self):
sys.stdout = StringIO()
try:
similar.run(['--ignore-comments', 'input/similar1', 'input/similar2'])
output = sys.stdout.getvalue()
finally:
sys.stdout = sys.__stdout__
self.assertEquals(output.strip(), """
7 similar lines in 2 files
==input/similar1:5
==input/similar2:5
same file as this one.
more than 4
identical lines should
be
detected
TOTAL lines=38 duplicates=7 percent=0.184210526316
""".strip())
def test_help(self):
sys.stdout = StringIO()
try:
try:
similar.run(['--help'])
except SystemExit, ex:
self.assertEquals(ex.code, 0)
else:
self.fail()
finally:
sys.stdout = sys.__stdout__
def test_no_args(self):
sys.stdout = StringIO()
try:
try:
similar.run([])
except SystemExit, ex:
self.assertEquals(ex.code, 1)
else:
self.fail()
finally:
sys.stdout = sys.__stdout__
if __name__ == '__main__':
unittest.main()
| FrankBian/kuma | vendor/packages/pylint/test/test_similar.py | Python | mpl-2.0 | 1,399 |
# -*- coding: utf-8 -*-
class DecodeError(Exception):
pass
class EncodeError(Exception):
pass
class ConnectionError(Exception):
pass
class ResponseError(Exception):
def __init__(self, message, response):
super(ResponseError, self).__init__(message)
self.response = response
class ClientError(ResponseError):
pass
class ServerError(ResponseError):
pass
class SerializationError(Exception):
pass
| VaclavDedik/infinispan-py | infinispan/error.py | Python | mit | 452 |
#!/usr/bin/env python
# coding: utf-8
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import errno
import filecmp
import os
import plistlib
import shutil
import stat
import subprocess
import sys
import time
def _stat_or_none(path, root):
"""Calls os.stat or os.lstat to obtain information about a path.
This program traverses parallel directory trees, which may have subtle
differences such as directory entries that are present in fewer than all
trees. It also operates on symbolic links directly, instead of on their
targets.
Args:
path: The path to call os.stat or os.lstat on.
root: True if called on the root of a tree to be merged, False
otherwise. See the discussion below.
Returns:
The return value of os.stat or os.lstat, or possibly None if the path
does not exist.
When root is True, indicating that path is at the root of one of these
trees, this permissiveness is disabled, as all roots are required to be
present. If one is absent, an exception will be raised. When root is True,
os.stat will be used, as this is the one case when it is desirable to
operate on a symbolic link’s target.
When root is False, os.lstat will be used to operate on symbolic links
directly, and a missing path will cause None to be returned.
"""
if root:
return os.stat(path)
try:
return os.lstat(path)
except OSError as e:
if e.errno == errno.ENOENT:
return None
raise
def _file_type_for_stat(st):
"""Returns a string indicating the type of directory entry in st.
Args:
st: The return value of os.stat or os.lstat.
Returns:
'symbolic link', 'file', or 'directory'.
"""
if stat.S_ISLNK(st.st_mode):
return 'symbolic_link'
if stat.S_ISREG(st.st_mode):
return 'file'
if stat.S_ISDIR(st.st_mode):
return 'directory'
    raise Exception('unknown file type for mode 0o%o' % st.st_mode)
def _sole_list_element(l, exception_message):
"""Assures that every element in a list is identical.
Args:
l: The list to consider.
exception_message: A message used to convey failure if every element in
l is not identical.
Returns:
The value of each identical element in the list.
"""
s = set(l)
if len(s) != 1:
raise Exception(exception_message)
return l[0]
def _read_plist(path):
"""Reads a macOS property list, API compatibility adapter."""
with open(path, 'rb') as file:
try:
# New API, available since Python 3.4.
return plistlib.load(file)
except AttributeError:
# Old API, available (but deprecated) until Python 3.9.
return plistlib.readPlist(file)
def _write_plist(value, path):
"""Writes a macOS property list, API compatibility adapter."""
with open(path, 'wb') as file:
try:
# New API, available since Python 3.4.
plistlib.dump(value, file)
except AttributeError:
# Old API, available (but deprecated) until Python 3.9.
plistlib.writePlist(value, file)
class CantMergeException(Exception):
"""Raised when differences exist between input files such that they cannot
be merged successfully.
"""
pass
def _merge_info_plists(input_paths, output_path):
"""Merges multiple macOS Info.plist files.
Args:
        input_paths: A list of paths containing Info.plist files to be merged.
        output_path: The path of the merged Info.plist to create.
Raises:
CantMergeException if all input_paths could not successfully be merged
into output_path.
A small number of differences are tolerated in the input Info.plists. If a
key identifying the build environment (OS or toolchain) is different in any
of the inputs, it will be removed from the output. There are valid reasons
to produce builds for different architectures using different toolchains or
SDKs, and there is no way to rationalize these differences into a single
value.
If present, the Chrome KSChannelID family of keys are rationalized by using
“universal” to identify the architecture (compared to, for example,
“arm64”.)
"""
input_plists = [_read_plist(x) for x in input_paths]
output_plist = input_plists[0]
for index in range(1, len(input_plists)):
input_plist = input_plists[index]
for key in set(input_plist.keys()) | set(output_plist.keys()):
if input_plist.get(key, None) == output_plist.get(key, None):
continue
if key in ('BuildMachineOSBuild', 'DTCompiler', 'DTPlatformBuild',
'DTPlatformName', 'DTPlatformVersion', 'DTSDKBuild',
'DTSDKName', 'DTXcode', 'DTXcodeBuild'):
if key in input_plist:
del input_plist[key]
if key in output_plist:
del output_plist[key]
elif key == 'KSChannelID' or key.startswith('KSChannelID-'):
# These keys are Chrome-specific, where it’s only present in the
# outer browser .app’s Info.plist.
#
# Ensure that the values match the expected format as a
# prerequisite to what follows.
key_tail = key[len('KSChannelID'):]
input_value = input_plist.get(key, '')
output_value = output_plist.get(key, '')
assert input_value.endswith(key_tail)
assert output_value.endswith(key_tail)
# Find the longest common trailing sequence of hyphen-separated
# elements, and use that as the trailing sequence of the new
# value.
input_parts = reversed(input_value.split('-'))
output_parts = output_value.split('-')
output_parts.reverse()
new_parts = []
for input_part, output_part in zip(input_parts, output_parts):
if input_part == output_part:
new_parts.append(output_part)
else:
break
# Prepend “universal” to the entire value if it’s not already
# there.
if len(new_parts) == 0 or new_parts[-1] != 'universal':
new_parts.append('universal')
output_plist[key] = '-'.join(reversed(new_parts))
assert output_plist[key] != ''
else:
raise CantMergeException(input_paths[index], output_path)
_write_plist(output_plist, output_path)
def _universalize(input_paths, output_path, root):
"""Merges multiple trees into a “universal” tree.
This function provides the recursive internal implementation for
universalize.
Args:
input_paths: The input directory trees to be merged.
output_path: The merged tree to produce.
root: True if operating at the root of the input and output trees.
"""
input_stats = [_stat_or_none(x, root) for x in input_paths]
for index in range(len(input_paths) - 1, -1, -1):
if input_stats[index] is None:
del input_paths[index]
del input_stats[index]
input_types = [_file_type_for_stat(x) for x in input_stats]
type = _sole_list_element(
input_types,
'varying types %r for input paths %r' % (input_types, input_paths))
if type == 'file':
identical = True
for index in range(1, len(input_paths)):
if (not filecmp.cmp(input_paths[0], input_paths[index]) and
input_paths[index].find('.pak') == -1 and
input_paths[index].find('/Sparkle.framework/') == -1 and
input_paths[index].find('/Resources/vivaldi/') == -1):
identical = False
if (os.path.basename(output_path) == 'Info.plist' or
os.path.basename(output_path).endswith('-Info.plist')):
_merge_info_plists(input_paths, output_path)
else:
command = ['lipo', '-create']
command.extend(input_paths)
command.extend(['-output', output_path])
subprocess.check_call(command)
if identical:
shutil.copyfile(input_paths[0], output_path)
elif type == 'directory':
os.mkdir(output_path)
entries = set()
for input in input_paths:
entries.update(os.listdir(input))
for entry in entries:
input_entry_paths = [os.path.join(x, entry) for x in input_paths]
output_entry_path = os.path.join(output_path, entry)
_universalize(input_entry_paths, output_entry_path, False)
elif type == 'symbolic_link':
targets = [os.readlink(x) for x in input_paths]
target = _sole_list_element(
targets, 'varying symbolic link targets %r for input paths %r' %
(targets, input_paths))
os.symlink(target, output_path)
input_permissions = [stat.S_IMODE(x.st_mode) for x in input_stats]
permission = _sole_list_element(
input_permissions, 'varying permissions %r for input paths %r' %
(['0o%o' % x for x in input_permissions], input_paths))
os.lchmod(output_path, permission)
if type != 'file' or identical:
input_mtimes = [x.st_mtime for x in input_stats]
if len(set(input_mtimes)) == 1:
times = (time.time(), input_mtimes[0])
try:
# follow_symlinks is only available since Python 3.3.
os.utime(output_path, times, follow_symlinks=False)
except TypeError:
# If it’s a symbolic link and this version of Python isn’t able
# to set its timestamp, just leave it alone.
if type != 'symbolic_link':
os.utime(output_path, times)
elif type == 'directory':
# Always touch directories, in case a directory is a bundle, as a
# cue to LaunchServices to invalidate anything it may have cached
# about the bundle as it was being built.
os.utime(output_path, None)
def universalize(input_paths, output_path):
"""Merges multiple trees into a “universal” tree.
Args:
input_paths: The input directory trees to be merged.
output_path: The merged tree to produce.
input_paths are expected to be parallel directory trees. Each directory
entry at a given subpath in the input_paths, if present, must be identical
to all others when present, with these exceptions:
- Mach-O files that are not identical are merged using lipo.
- Info.plist files that are not identical are merged by _merge_info_plists.
"""
rmtree_on_error = not os.path.exists(output_path)
try:
return _universalize(input_paths, output_path, True)
except:
if rmtree_on_error and os.path.exists(output_path):
shutil.rmtree(output_path)
raise
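# A minimal usage sketch mirroring what main() does with command-line
# arguments; the bundle paths are hypothetical.
#
#     universalize(
#         ['out/Release_x86_64/Example.app', 'out/Release_arm64/Example.app'],
#         'out/Release_universal/Example.app')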
def main(args):
parser = argparse.ArgumentParser(
description='Merge multiple single-architecture directory trees into a '
'single universal tree.')
parser.add_argument(
'inputs',
nargs='+',
metavar='input',
help='An input directory tree to be merged. At least two inputs must '
'be provided.')
parser.add_argument('output', help='The merged directory tree to produce.')
parsed = parser.parse_args(args)
if len(parsed.inputs) < 2:
raise Exception('too few inputs')
universalize(parsed.inputs, parsed.output)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| ric2b/Vivaldi-browser | chromium/chrome/installer/mac/universalizer.py | Python | bsd-3-clause | 12,026 |
"""
This extension provides preprocessing functionality to the compiler. The following
instructions are supported:
define X Y - replaces the occurrences of X with Y. This can be very useful for defining
constants
import x - imports the file located at the path x
"""
from malicehelpers import promptErrorMessage
from re import sub
import sys
#Takes the file
'''
Returns a list of all the imports from a file
'''
def __getImports(lines):
i = 0
importedFiles = []
while(i<len(lines) and lines[i].count("import")>0):
if(lines[i].startswith("import")):
aux = lines[i].split(" ")
importedFiles.append(aux[1])
i+=1
return importedFiles
'''
Returns a list of all the definitions from a file
'''
def __getDefinitions(lines):
i = 0
definitionsDict = {}
while(i<len(lines)):
if(lines[i].startswith("define")):
aux = lines[i].split(" ")
if definitionsDict.has_key(aux[1]):
promptErrorMessage("Preprocessing error:",aux[1], "defined twice")
definitionsDict.update({aux[1]:aux[2]})
i+=1
return definitionsDict
'''
Clears the file from 'imports' and 'defines'
'''
def __clearFile(malice_file):
malice_text = malice_file
while(malice_text.count("import") or malice_text.count("define")):
malice_text = malice_text[malice_text.index("\n")+1:]
return malice_text
'''
Checks (using the DFS algorithm) if the file has cyclic imports:
'''
def has_cyclic_imports(imp, file_path, viz):
for i in imp:
if viz.count(i) > 0:
return True
viz.append(i)
iFile = open(file_path + i, "r").read()
lines = iFile.split("\n")
im = __getImports(lines)
for j in im:
imp.append(j)
return False
'''
The function where the whole preprocessing is done:
'''
def preprocess(malice_file, file_path):
lines = malice_file.split("\n")
imp = __getImports(lines)
defs = __getDefinitions(lines)
malice_text = __clearFile(malice_file)
for definition in defs:
malice_text = sub("(?<![A-Za-z0-9_])" + definition + "(?![A-Za-z0-9_])", defs[definition], malice_text)
if(has_cyclic_imports(imp, file_path, [])):
promptErrorMessage("Preprocessing error: cyclic dependencies in imports detected")
for imported_file in imp:
myFile = open(file_path + imported_file, "r")
file_text = myFile.read()
file_text = preprocess(file_text, file_path)
print "INDEX:", malice_file.index("The")
print malice_text
index = malice_text.index("The") - 1
malice_text = malice_text[:index] + '\n' + file_text + '\n' + malice_text[index:]
return malice_text
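# A minimal sketch of the word-boundary substitution used for 'define' above,
# with hypothetical values; `sub` is `re.sub`, imported at the top of this file.
#
#     sub("(?<![A-Za-z0-9_])" + "SIZE" + "(?![A-Za-z0-9_])", "10", "SIZE + MAXSIZE")
#     # -> "10 + MAXSIZE" (the SIZE inside MAXSIZE is left untouched)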
| IrinaDmt/MAlice | preprocessing.py | Python | mit | 2,695 |
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from arm_generic import *
root = LinuxArmFSSystem(mem_mode='timing',
mem_class=DDR3_1600_x64,
cpu_class=MinorCPU,
num_cpus=2).create_root()
| nilmini20s/gem5-2016-08-13 | tests/configs/realview-minor-dual.py | Python | bsd-3-clause | 2,338 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'createProfileMothur.ui'
#
# Created: Fri Dec 16 13:39:59 2011
# by: PyQt4 UI code generator 4.8.5
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_CreateProfileMothurDlg(object):
def setupUi(self, CreateProfileMothurDlg):
CreateProfileMothurDlg.setObjectName(_fromUtf8("CreateProfileMothurDlg"))
CreateProfileMothurDlg.resize(396, 144)
CreateProfileMothurDlg.setWindowTitle(QtGui.QApplication.translate("CreateProfileMothurDlg", "Create profile", None, QtGui.QApplication.UnicodeUTF8))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/icons/programIcon.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
CreateProfileMothurDlg.setWindowIcon(icon)
self.verticalLayout_2 = QtGui.QVBoxLayout(CreateProfileMothurDlg)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.formLayout = QtGui.QFormLayout()
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.lblTaxonomyFile = QtGui.QLabel(CreateProfileMothurDlg)
self.lblTaxonomyFile.setText(QtGui.QApplication.translate("CreateProfileMothurDlg", "Taxonomy file:", None, QtGui.QApplication.UnicodeUTF8))
self.lblTaxonomyFile.setObjectName(_fromUtf8("lblTaxonomyFile"))
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.lblTaxonomyFile)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.txtTaxonomyFile = QtGui.QLineEdit(CreateProfileMothurDlg)
self.txtTaxonomyFile.setObjectName(_fromUtf8("txtTaxonomyFile"))
self.horizontalLayout.addWidget(self.txtTaxonomyFile)
self.btnTaxonomyFile = QtGui.QPushButton(CreateProfileMothurDlg)
self.btnTaxonomyFile.setText(QtGui.QApplication.translate("CreateProfileMothurDlg", "Load", None, QtGui.QApplication.UnicodeUTF8))
self.btnTaxonomyFile.setObjectName(_fromUtf8("btnTaxonomyFile"))
self.horizontalLayout.addWidget(self.btnTaxonomyFile)
self.formLayout.setLayout(0, QtGui.QFormLayout.FieldRole, self.horizontalLayout)
self.label_2 = QtGui.QLabel(CreateProfileMothurDlg)
self.label_2.setText(QtGui.QApplication.translate("CreateProfileMothurDlg", "Groups file:", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_2)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.txtGroupsFile = QtGui.QLineEdit(CreateProfileMothurDlg)
self.txtGroupsFile.setObjectName(_fromUtf8("txtGroupsFile"))
self.horizontalLayout_2.addWidget(self.txtGroupsFile)
self.btnGroupsFile = QtGui.QPushButton(CreateProfileMothurDlg)
self.btnGroupsFile.setText(QtGui.QApplication.translate("CreateProfileMothurDlg", "Load", None, QtGui.QApplication.UnicodeUTF8))
self.btnGroupsFile.setObjectName(_fromUtf8("btnGroupsFile"))
self.horizontalLayout_2.addWidget(self.btnGroupsFile)
self.formLayout.setLayout(1, QtGui.QFormLayout.FieldRole, self.horizontalLayout_2)
self.label_3 = QtGui.QLabel(CreateProfileMothurDlg)
self.label_3.setText(QtGui.QApplication.translate("CreateProfileMothurDlg", "Names file (optional):", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_3)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.txtNamesFile = QtGui.QLineEdit(CreateProfileMothurDlg)
self.txtNamesFile.setObjectName(_fromUtf8("txtNamesFile"))
self.horizontalLayout_3.addWidget(self.txtNamesFile)
self.btnNamesFile = QtGui.QPushButton(CreateProfileMothurDlg)
self.btnNamesFile.setText(QtGui.QApplication.translate("CreateProfileMothurDlg", "Load", None, QtGui.QApplication.UnicodeUTF8))
self.btnNamesFile.setObjectName(_fromUtf8("btnNamesFile"))
self.horizontalLayout_3.addWidget(self.btnNamesFile)
self.formLayout.setLayout(2, QtGui.QFormLayout.FieldRole, self.horizontalLayout_3)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.btnCreateProfile = QtGui.QPushButton(CreateProfileMothurDlg)
self.btnCreateProfile.setText(QtGui.QApplication.translate("CreateProfileMothurDlg", "Create STAMP profile", None, QtGui.QApplication.UnicodeUTF8))
self.btnCreateProfile.setObjectName(_fromUtf8("btnCreateProfile"))
self.horizontalLayout_4.addWidget(self.btnCreateProfile)
self.btnCancel = QtGui.QPushButton(CreateProfileMothurDlg)
self.btnCancel.setText(QtGui.QApplication.translate("CreateProfileMothurDlg", "Cancel", None, QtGui.QApplication.UnicodeUTF8))
self.btnCancel.setDefault(True)
self.btnCancel.setObjectName(_fromUtf8("btnCancel"))
self.horizontalLayout_4.addWidget(self.btnCancel)
self.formLayout.setLayout(3, QtGui.QFormLayout.FieldRole, self.horizontalLayout_4)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.formLayout.setItem(3, QtGui.QFormLayout.LabelRole, spacerItem)
self.verticalLayout_2.addLayout(self.formLayout)
self.retranslateUi(CreateProfileMothurDlg)
QtCore.QMetaObject.connectSlotsByName(CreateProfileMothurDlg)
def retranslateUi(self, CreateProfileMothurDlg):
pass
| dparks1134/STAMP | stamp/GUI/createProfileMothurUI.py | Python | gpl-3.0 | 5,930 |
from django.contrib import admin
from markedit.widgets import AdminMarkEdit
class MarkEditAdmin(admin.ModelAdmin):
class MarkEdit:
fields = ['text', ]
options = {}
class Media:
css = {'all': ('css/jquery-ui-1.10.3.min.css', 'css/jquery.markedit.css', )}
js = ('js/jquery.admin.js',
'js/jquery-ui-1.10.3.custom.min.js',
'js/jquery.markedit.js',
'js/showdown.js', )
def formfield_for_dbfield(self, db_field, **kwargs):
formfield = super(MarkEditAdmin, self).formfield_for_dbfield(db_field, **kwargs)
if db_field.name in self.MarkEdit.fields:
formfield.widget = AdminMarkEdit(attrs={
'options': self.MarkEdit.options,
})
return formfield
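# A minimal usage sketch; the Article model and its 'body' text field are
# hypothetical, only the MarkEditAdmin hooks above are taken as given.
#
#     class ArticleAdmin(MarkEditAdmin):
#         class MarkEdit:
#             fields = ['body', ]
#             options = {}
#
#     admin.site.register(Article, ArticleAdmin)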
| Diwahars/pycon | markedit/admin.py | Python | bsd-3-clause | 790 |
"""CSS selector structure items."""
import copyreg
from collections.abc import Hashable, Mapping
__all__ = (
'Selector',
'SelectorNull',
'SelectorTag',
'SelectorAttribute',
'SelectorContains',
'SelectorNth',
'SelectorLang',
'SelectorList',
'Namespaces',
'CustomSelectors'
)
SEL_EMPTY = 0x1
SEL_ROOT = 0x2
SEL_DEFAULT = 0x4
SEL_INDETERMINATE = 0x8
SEL_SCOPE = 0x10
SEL_DIR_LTR = 0x20
SEL_DIR_RTL = 0x40
SEL_IN_RANGE = 0x80
SEL_OUT_OF_RANGE = 0x100
SEL_DEFINED = 0x200
SEL_PLACEHOLDER_SHOWN = 0x400
class Immutable(object):
"""Immutable."""
__slots__ = ('_hash',)
def __init__(self, **kwargs):
"""Initialize."""
temp = []
for k, v in kwargs.items():
temp.append(type(v))
temp.append(v)
super(Immutable, self).__setattr__(k, v)
super(Immutable, self).__setattr__('_hash', hash(tuple(temp)))
@classmethod
def __base__(cls):
"""Get base class."""
return cls
def __eq__(self, other):
"""Equal."""
return (
isinstance(other, self.__base__()) and
all([getattr(other, key) == getattr(self, key) for key in self.__slots__ if key != '_hash'])
)
def __ne__(self, other):
"""Equal."""
return (
not isinstance(other, self.__base__()) or
any([getattr(other, key) != getattr(self, key) for key in self.__slots__ if key != '_hash'])
)
def __hash__(self):
"""Hash."""
return self._hash
def __setattr__(self, name, value):
"""Prevent mutability."""
raise AttributeError("'{}' is immutable".format(self.__class__.__name__))
def __repr__(self): # pragma: no cover
"""Representation."""
return "{}({})".format(
self.__base__(), ', '.join(["{}={!r}".format(k, getattr(self, k)) for k in self.__slots__[:-1]])
)
__str__ = __repr__
class ImmutableDict(Mapping):
"""Hashable, immutable dictionary."""
def __init__(self, *args, **kwargs):
"""Initialize."""
arg = args[0] if args else kwargs
is_dict = isinstance(arg, dict)
if (
is_dict and not all([isinstance(v, Hashable) for v in arg.values()]) or
not is_dict and not all([isinstance(k, Hashable) and isinstance(v, Hashable) for k, v in arg])
):
raise TypeError('All values must be hashable')
self._d = dict(*args, **kwargs)
self._hash = hash(tuple([(type(x), x, type(y), y) for x, y in sorted(self._d.items())]))
def __iter__(self):
"""Iterator."""
return iter(self._d)
def __len__(self):
"""Length."""
return len(self._d)
def __getitem__(self, key):
"""Get item: `namespace['key']`."""
return self._d[key]
def __hash__(self):
"""Hash."""
return self._hash
def __repr__(self): # pragma: no cover
"""Representation."""
return "{!r}".format(self._d)
__str__ = __repr__
class Namespaces(ImmutableDict):
"""Namespaces."""
def __init__(self, *args, **kwargs):
"""Initialize."""
# If there are arguments, check the first index.
# `super` should fail if the user gave multiple arguments,
# so don't bother checking that.
arg = args[0] if args else kwargs
is_dict = isinstance(arg, dict)
if is_dict and not all([isinstance(k, str) and isinstance(v, str) for k, v in arg.items()]):
raise TypeError('Namespace keys and values must be Unicode strings')
elif not is_dict and not all([isinstance(k, str) and isinstance(v, str) for k, v in arg]):
raise TypeError('Namespace keys and values must be Unicode strings')
super(Namespaces, self).__init__(*args, **kwargs)
class CustomSelectors(ImmutableDict):
"""Custom selectors."""
def __init__(self, *args, **kwargs):
"""Initialize."""
# If there are arguments, check the first index.
# `super` should fail if the user gave multiple arguments,
# so don't bother checking that.
arg = args[0] if args else kwargs
is_dict = isinstance(arg, dict)
if is_dict and not all([isinstance(k, str) and isinstance(v, str) for k, v in arg.items()]):
raise TypeError('CustomSelectors keys and values must be Unicode strings')
elif not is_dict and not all([isinstance(k, str) and isinstance(v, str) for k, v in arg]):
raise TypeError('CustomSelectors keys and values must be Unicode strings')
super(CustomSelectors, self).__init__(*args, **kwargs)
class Selector(Immutable):
"""Selector."""
__slots__ = (
'tag', 'ids', 'classes', 'attributes', 'nth', 'selectors',
'relation', 'rel_type', 'contains', 'lang', 'flags', '_hash'
)
def __init__(
self, tag, ids, classes, attributes, nth, selectors,
relation, rel_type, contains, lang, flags
):
"""Initialize."""
super(Selector, self).__init__(
tag=tag,
ids=ids,
classes=classes,
attributes=attributes,
nth=nth,
selectors=selectors,
relation=relation,
rel_type=rel_type,
contains=contains,
lang=lang,
flags=flags
)
class SelectorNull(Immutable):
"""Null Selector."""
def __init__(self):
"""Initialize."""
super(SelectorNull, self).__init__()
class SelectorTag(Immutable):
"""Selector tag."""
__slots__ = ("name", "prefix", "_hash")
def __init__(self, name, prefix):
"""Initialize."""
super(SelectorTag, self).__init__(
name=name,
prefix=prefix
)
class SelectorAttribute(Immutable):
"""Selector attribute rule."""
__slots__ = ("attribute", "prefix", "pattern", "xml_type_pattern", "_hash")
def __init__(self, attribute, prefix, pattern, xml_type_pattern):
"""Initialize."""
super(SelectorAttribute, self).__init__(
attribute=attribute,
prefix=prefix,
pattern=pattern,
xml_type_pattern=xml_type_pattern
)
class SelectorContains(Immutable):
"""Selector contains rule."""
__slots__ = ("text", "_hash")
def __init__(self, text):
"""Initialize."""
super(SelectorContains, self).__init__(
text=text
)
class SelectorNth(Immutable):
"""Selector nth type."""
__slots__ = ("a", "n", "b", "of_type", "last", "selectors", "_hash")
def __init__(self, a, n, b, of_type, last, selectors):
"""Initialize."""
super(SelectorNth, self).__init__(
a=a,
n=n,
b=b,
of_type=of_type,
last=last,
selectors=selectors
)
class SelectorLang(Immutable):
"""Selector language rules."""
__slots__ = ("languages", "_hash",)
def __init__(self, languages):
"""Initialize."""
super(SelectorLang, self).__init__(
languages=tuple(languages)
)
def __iter__(self):
"""Iterator."""
return iter(self.languages)
def __len__(self): # pragma: no cover
"""Length."""
return len(self.languages)
def __getitem__(self, index): # pragma: no cover
"""Get item."""
return self.languages[index]
class SelectorList(Immutable):
"""Selector list."""
__slots__ = ("selectors", "is_not", "is_html", "_hash")
def __init__(self, selectors=tuple(), is_not=False, is_html=False):
"""Initialize."""
super(SelectorList, self).__init__(
selectors=tuple(selectors),
is_not=is_not,
is_html=is_html
)
def __iter__(self):
"""Iterator."""
return iter(self.selectors)
def __len__(self):
"""Length."""
return len(self.selectors)
def __getitem__(self, index):
"""Get item."""
return self.selectors[index]
def _pickle(p):
return p.__base__(), tuple([getattr(p, s) for s in p.__slots__[:-1]])
def pickle_register(obj):
"""Allow object to be pickled."""
copyreg.pickle(obj, _pickle)
pickle_register(Selector)
pickle_register(SelectorNull)
pickle_register(SelectorTag)
pickle_register(SelectorAttribute)
pickle_register(SelectorContains)
pickle_register(SelectorNth)
pickle_register(SelectorLang)
pickle_register(SelectorList)
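# Illustrative sketch (not part of the original module): the immutable types above
# compare and hash by value, and the ``pickle_register`` calls make them picklable.
#
#   tag = SelectorTag('div', None)
#   assert tag == SelectorTag('div', None)
#   assert hash(tag) == hash(SelectorTag('div', None))
#
#   ns = Namespaces({'svg': 'http://www.w3.org/2000/svg'})
#   assert ns['svg'] == 'http://www.w3.org/2000/svg'
#
#   import pickle
#   assert pickle.loads(pickle.dumps(tag)) == tag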
| SickGear/SickGear | lib/soupsieve_py3/css_types.py | Python | gpl-3.0 | 8,916 |
from vt_manager.communication.sfa.rspecs.elements.element import Element
class Spectrum(Element):
fields = []
| dana-i2cat/felix | vt_manager/src/python/vt_manager/communication/sfa/rspecs/elements/spectrum.py | Python | apache-2.0 | 116 |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
#
import logging
from PyQt4 import QtGui, QtCore, Qt
from .uic_generated.FAUcardPaymentDialog import Ui_FAUcardPaymentDialog
from ..faucardPayment.faucardStates import Status, Info
from decimal import Decimal
class FAUcardPaymentDialog(QtGui.QDialog, Ui_FAUcardPaymentDialog):
"""
The FAUcardPaymentDialog works as the GUI for the FAUcardThread. It informs the user about the current state
of the payment process and gives the ability to hold and cancel the FAUcardThread. The process waits after each
    step until the FAUcardPaymentDialog signals readiness to continue further.
"""
# Signal to tell the FAUcardThread to send response acknowledge to the MagnaBox + optional cancel of the process
response_ack = QtCore.pyqtSignal(bool)
# Signal to tell the payment handler to terminate the thread
request_termination = QtCore.pyqtSignal()
def __init__(self, parent, amount):
"""
        Initializes the FAUcardPaymentDialog. It sets its member variables and sets up the GUI, including
a QTimer to periodically update the GUI to show life sign.
:param parent: parent of the Dialog
:type parent: QObject
:param amount: Amount the user has to pay (only used to display)
:type amount: Decimal
"""
QtGui.QDialog.__init__(self, parent)
logging.info("FAUcardPayment: started")
self.setupUi(self)
self.setModal(True)
assert isinstance(amount, Decimal), "PayupFAUCard: Amount to pay not Decimal or float"
# Set up member variables and fill GUI
self.amount = amount
self.label_betrag.setText(u'{} €'.format(unicode(self.amount)).replace('.', ','))
self.label_status.setText(u'Starte FAUcard-Zahlung\n')
self.counter = 0
self.thread_aborted = False
self.thread_broken = False
self.timer_terminate = QtCore.QTimer()
self.timer_terminate.timeout.connect(self.request_thread_termination)
self.timer_terminate.setSingleShot(True)
self.status = Status.initializing
# Start a timer to periodically update the GUI (show life sign)
self.utimer = QtCore.QTimer()
QtCore.QObject.connect(self.utimer, QtCore.SIGNAL("timeout()"), self.show_active)
QtCore.QObject.connect(self.pushButton_abbrechen, QtCore.SIGNAL("clicked()"), self.reject)
self.utimer.start(1000)
@QtCore.pyqtSlot()
def thread_terminated(self):
"""
A Slot to recognize if the Thread was terminated and tell the user
"""
logging.error("FAUcardPayment: Thread was terminated")
self.thread_aborted = True
self.update_gui([Info.unknown_error])
@QtCore.pyqtSlot(bool)
def set_cancel_button_enabled(self, enabled):
"""
Sets self.pushButton_abbrechen.setEnabled to the given bool enabled
:param enabled: bool if Button should be enabled
:type enabled: bool
"""
self.pushButton_abbrechen.setEnabled(enabled)
@QtCore.pyqtSlot()
def show_transaction_error(self):
"""
        A Slot to inform the user that a transaction error occurred.
"""
QtGui.QMessageBox.warning(self, "Zahlung mit FAU-Karte", "Das Programm hat einen Fehler in der Abbuchung \
festgestellt.\nFalls dir von der Karte mehr abgebucht wurde als es sollte melde \
dich bitte unter [email protected] mit Datum, Uhrzeit und Betrag.")
@QtCore.pyqtSlot()
def process_aborted(self):
"""
SLOT is called by the process's process_aborted signal, which is emitted if the process terminated on an
expected error
thread_aborted must be set to be able to abort the payment
"""
self.thread_aborted = True
@QtCore.pyqtSlot(list)
def update_gui(self, response):
"""
Displays different Messages on the GUI according to the thread's response which executes the payment.
:param response: List of response data. First index always contains Status code or Info code
:type response: list[Status] list[Info]
"""
assert isinstance(response, list), "Thread response no list!"
assert len(response) > 0, "Thread response is empty!"
assert isinstance(response[0], (Status, Info)), "Thread response code is not Status or Info!"
self.thread_broken = False
if self.timer_terminate.isActive():
self.timer_terminate.stop()
if isinstance(response[0], Info):
# Abort if checking the last transaction failed
if response[0] == Info.check_transaction_failed:
self.utimer.stop()
self.label_status.setText(u'Letzte Transaktion konnte nicht überprüft werden.\nBitte wechseln Sie die Zahlungsmethode')
self.utimer.singleShot(10000, self.reject)
return
# Abort if balance underflow would occur
elif response[0] == Info.balance_underflow:
self.utimer.stop()
self.label_status.setText(u'Zu wenig Guthaben\nBitte wechseln Sie die Zahlungsmethode')
self.utimer.singleShot(10000, self.reject)
self.response_ack.emit(True)
return
# Abort on an unknown / not processable error
elif response[0] == Info.unknown_error:
logging.error("FAUcardPayment: terminated on error")
self.utimer.stop()
self.label_status.setText(u'Fehler\nBitte wechseln Sie die Zahlungsmethode')
self.response_ack.emit(True)
return
# Inform the user about the lost connection to the MagnaBox
elif response[0] == Info.con_error:
self.label_status.setText(u'Verbindung zum Terminal verloren.\n Versuche wieder zu verbinden')
logging.warning("FAUcardPayment: Verbindung zur MagnaBox beim abbuchen verloren.")
return
# Inform the user about the reestablished connection to the MagnaBox
elif response[0] == Info.con_back:
self.label_status.setText(u'Verbindung zum Terminal wieder da.\nBuche {}€ ab\n'.format(unicode(self.amount)).replace('.', ','))
logging.warning("FAUcardPayment: Verbindung zur MagnaBox wieder aufgebaut.")
return
elif isinstance(response[0], Status):
self.status = response[0]
# Initializing: Do nothing
if response[0] == Status.initializing:
self.response_ack.emit(False)
# Initialized: Inform user to lay card on reader
elif response[0] == Status.waiting_card:
self.label_status.setText(u'Warte auf Karte\n')
self.response_ack.emit(False)
# Card and Balance read: Check if balance is enough and inform user the balance will be decreased
elif response[0] == Status.decreasing_balance: # Card and Balance recognized
self.label_status.setText(u'Buche {}€ ab\n'.format(unicode(self.amount)).replace('.', ','))
self.response_ack.emit(False)
# Successfully decreased: Inform the user the payment is done and close after 2 seconds
elif response[0] == Status.decreasing_done:
self.utimer.stop()
self.label_status.setText(u"Vielen Dank für deine Zahlung von {}.\nBitte das Aufräumen nicht vergessen!".format(unicode(self.amount)))
self.utimer.singleShot(5000, self.accept)
self.response_ack.emit(False)
self.pushButton_abbrechen.hide()
logging.info("FAUcardPayment: successfully payed")
@QtCore.pyqtSlot()
def show_active(self):
"""
Periodically updates the GUI to show sign of life
Checks if payment thread is done
"""
self.counter = (self.counter+1) % 4
if self.counter == 0:
self.label_status.setText(self.label_status.text().replace('.', ''))
else:
self.label_status.setText(self.label_status.text() + ".")
if self.thread_aborted is True:
self.label_status.setText("Breche Bezahlung ab.")
self.pushButton_abbrechen.hide()
Qt.QTimer.singleShot(2000,self.reject_final)
@QtCore.pyqtSlot()
def reject(self):
"""
        SLOT that handles aborting the payment process.
        If the process already terminated on an error it tries to abort immediately, otherwise it retries in 1 second (to let the process finish)
"""
if not self.pushButton_abbrechen.isEnabled():
QtGui.QMessageBox.warning(None, "FAUCard Zahlung", "Du kannst zu diesem Zeitpunkt nicht mehr abbrechen")
return
if self.thread_aborted is not True:
Qt.QTimer.singleShot(1000, self.try_reject)
else:
self.try_reject()
@QtCore.pyqtSlot()
def try_reject(self):
"""
        Tries to perform the final abort.
        If the thread is still not finished, tell the user that we are waiting for the thread to abort.
        If the thread is finished, tell the user the process is aborting and perform the final abort in 2 seconds.
"""
if self.thread_aborted is not True:
QtGui.QMessageBox.warning(None, u"FAUCard Zahlung",
u"Abbrechen gerade nicht möglich.\nBitte warten Sie, bis das Programm an der nächst \
möglichen Stelle abbricht. Beachte bitte, das falls dein Geld schon abgebucht \
wurde, es nicht automatisch rückabgewickelt wird.")
self.label_status.setText(u"Warte auf Möglichkeit zum Abbruch")
if not self.timer_terminate.isActive():
self.timer_terminate.start(60000)
else:
self.label_status.setText("Breche Bezahlung ab.")
self.pushButton_abbrechen.hide()
self.timer_terminate.stop()
Qt.QTimer.singleShot(2000,self.reject_final)
@QtCore.pyqtSlot()
def reject_final(self):
"""
Final rejection of the Payment
"""
QtGui.QDialog.reject(self)
@QtCore.pyqtSlot()
def request_thread_termination(self):
"""
Sets the thread_broken flag to let the user terminate the thread if necessary.
"""
self.thread_broken = True
terminate = QtGui.QMessageBox.question(None, u'FAUCard Zahlung',
u'Willst du den Thread terminieren? Wenn du dir nicht sicher bist, antworte mit nein.',
QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if terminate == QtGui.QMessageBox.Yes:
logging.error("FAUcardPayment: thread termination was requested")
self.request_termination.emit()
Qt.QTimer.singleShot(500, self.reject_final)
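# Illustrative wiring sketch (not part of the original module): how a payment
# worker *might* be connected to this dialog. The worker signal/slot names used
# below are assumptions; the real FAUcardThread API may differ.
#
#   dialog = FAUcardPaymentDialog(parent=None, amount=Decimal('3.50'))
#   worker.response.connect(dialog.update_gui)              # hypothetical signal
#   worker.process_aborted.connect(dialog.process_aborted)  # hypothetical signal
#   dialog.response_ack.connect(worker.on_response_ack)     # hypothetical slot
#   dialog.request_termination.connect(worker.terminate)    # hypothetical slot
#   dialog.exec_()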
| fau-fablab/FabLabKasse | FabLabKasse/UI/FAUcardPaymentDialogCode.py | Python | gpl-3.0 | 11,138 |
#!/usr/bin/env python
# ===--- generate_harness.py ---------------------------------------------===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===//
# Generate CMakeLists.txt and utils/main.swift from templates.
import glob
import os
import re
import jinja2
script_dir = os.path.dirname(os.path.realpath(__file__))
perf_dir = os.path.realpath(os.path.join(script_dir, '../..'))
single_source_dir = os.path.join(perf_dir, 'single-source')
multi_source_dir = os.path.join(perf_dir, 'multi-source')
template_map = {
'CMakeLists.txt_template': os.path.join(perf_dir, 'CMakeLists.txt'),
'main.swift_template': os.path.join(perf_dir, 'utils/main.swift')
}
ignored_run_funcs = ["Ackermann", "Fibonacci"]
template_loader = jinja2.FileSystemLoader(searchpath="/")
template_env = jinja2.Environment(loader=template_loader, trim_blocks=True,
lstrip_blocks=True)
if __name__ == '__main__':
# CMakeList single-source
test_files = glob.glob(os.path.join(single_source_dir, '*.swift'))
tests = sorted(os.path.basename(x).split('.')[0] for x in test_files)
# CMakeList multi-source
class MultiSourceBench(object):
def __init__(self, path):
self.name = os.path.basename(path)
self.files = [x for x in os.listdir(path)
if x.endswith('.swift')]
if os.path.isdir(multi_source_dir):
multisource_benches = [
MultiSourceBench(os.path.join(multi_source_dir, x))
for x in os.listdir(multi_source_dir)
if os.path.isdir(os.path.join(multi_source_dir, x))
]
else:
multisource_benches = []
# main.swift imports
imports = sorted(tests + [msb.name for msb in multisource_benches])
# main.swift run functions
def get_run_funcs(filepath):
content = open(filepath).read()
matches = re.findall(r'func run_(.*?)\(', content)
return filter(lambda x: x not in ignored_run_funcs, matches)
def find_run_funcs(dirs):
ret_run_funcs = []
for d in dirs:
for root, _, files in os.walk(d):
for name in filter(lambda x: x.endswith('.swift'), files):
run_funcs = get_run_funcs(os.path.join(root, name))
ret_run_funcs.extend(run_funcs)
return ret_run_funcs
run_funcs = sorted(
[(x, x)
for x in find_run_funcs([single_source_dir, multi_source_dir])],
key=lambda x: x[0]
)
# Replace originals with files generated from templates
for template_file in template_map:
template_path = os.path.join(script_dir, template_file)
template = template_env.get_template(template_path)
print(template_map[template_file])
open(template_map[template_file], 'w').write(
template.render(tests=tests,
multisource_benches=multisource_benches,
imports=imports,
run_funcs=run_funcs)
)
| IngmarStein/swift | benchmark/scripts/generate_harness/generate_harness.py | Python | apache-2.0 | 3,411 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide the JSON property.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from .primitive import String
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'JSON',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class JSON(String):
''' Accept JSON string values.
The value is transmitted and received by BokehJS as a *string*
containing JSON content. i.e., you must use ``JSON.parse`` to unpack
the value into a JavaScript hash.
Args:
default (string or None, optional) :
A default value for attributes created from this property to
have (default: None)
help (str or None, optional) :
A documentation string for this property. It will be automatically
used by the :ref:`bokeh.sphinxext.bokeh_prop` extension when
            generating Sphinx documentation. (default: None)
serialized (bool, optional) :
Whether attributes created from this property should be included
in serialization (default: True)
readonly (bool, optional) :
Whether attributes created from this property are read-only.
(default: False)
'''
def validate(self, value, detail=True):
super().validate(value, detail)
if value is None: return
try:
import json
json.loads(value)
except ValueError:
msg = "" if not detail else "expected JSON text, got %r" % value
raise ValueError(msg)
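# Illustrative sketch (not part of this module): a model assumed to use this
# property might declare and assign it roughly as follows; ``MyModel`` is
# hypothetical and shown for demonstration only.
#
#   from bokeh.core.has_props import HasProps
#
#   class MyModel(HasProps):
#       payload = JSON(default=None, help="arbitrary JSON payload")
#
#   m = MyModel(payload='{"a": 1}')   # accepted: valid JSON text
#   m.payload = 'not json'            # rejected by validate() with a ValueError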
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| ericmjl/bokeh | bokeh/core/property/json.py | Python | bsd-3-clause | 2,991 |
"""
This config file contains the information specific to the individual usage of ask-a-tech.
Please rename this file to
"config.py" and add or replace the information with your own.
"""
__author__ = 'ericpoe'
## Google Apps Connection Information
# The user email/password defined should have edit-rights to the appropriate spreadsheet in Google Drive
gapps = {} # DO NOT CHANGE THIS LINE
gapps['email'] = '' # eg. '[email protected]'
gapps['password'] = ''
gapps['project_name'] = 'Ask-A-Tech'
## Google Spreadsheet Information
# The spreadsheet's key is found in the URL for the spreadsheet between the "key=" and the "&".
# The sheet's "sheet" and "archive" IDs should not change. However, if they do, they can be found in the output for
# 'feed = gd_client.GetWorksheetsFeed(config.sheet['key'])'
sheet = {} # DO NOT CHANGE THIS LINE
sheet['key'] = ''
sheet['sheet'] = 'od6'
sheet['archive'] = 'od7'
## Web Help Desk Information
# The apikey is generated by a tech in WHD by following the instructions at http://www.webhelpdesk.com/api/#auth
#
# The easiest way to figure out the JSON data is create a bogus ticket within WHD that has the problem type,
# location, status, and priority that you want all of your Ask-A-Tech tickets to have in common. Once that ticket is
# created, grab the JSON generated from following http://www.webhelpdesk.com/api/#tickets-detail
whd = {} # DO NOT CHANGE THIS LINE
whd['apikey'] = ''
whd['host'] = '' # eg. 'helpdesk.example.com'
whd['json'] = {} # DO NOT CHANGE THIS LINE
whd['json']['ProblemType'] = 0 # eg. 42
whd['json']['Location'] = 0 # eg. 17
whd['json']['StatusType'] = 0 # eg. 1
whd['json']['PriorityType'] = 0 # eg. 1 | ericpoe/ask-a-tech | config-default.py | Python | mit | 1,682 |
##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
import GafferScene
##########################################################################
# Metadata
##########################################################################
Gaffer.Metadata.registerNode(
GafferScene.ExecutableRender,
"description",
"""
Base class for nodes which perform batch rendering.
""",
plugs = {
"in" : [
"description",
"""
The scene to be rendered.
""",
"nodule:type", "GafferUI::StandardNodule"
],
"out" : [
"description",
"""
A pass-through of the input scene.
""",
],
}
)
| chippey/gaffer | python/GafferSceneUI/ExecutableRenderUI.py | Python | bsd-3-clause | 2,410 |
#!/usr/bin/env python
#coding: utf-8
#### FUNCTIONS ####
def header(string):
"""
Display header
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print '\n', timeInfo, "****", string, "****"
def subHeader(string):
"""
Display subheader
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, "**", string, "**"
def info(string):
"""
Display basic information
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, string
#### MAIN ####
## Import modules ##
import argparse
import sys
import os.path
import formats
import time
import scipy.stats as stats
import numpy as np
from matplotlib import pyplot as plt
## Get user's input ##
parser = argparse.ArgumentParser(description= """""")
parser.add_argument('inputVCF', help='multi-sample VCF file containing genotyped MEI')
parser.add_argument('metadata', help='Text file in tabular format containing donor_id/project_code equivalencies')
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.' )
args = parser.parse_args()
inputVCF = args.inputVCF
metadata = args.metadata
outDir = args.outDir
scriptName = os.path.basename(sys.argv[0])
## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "inputVCF: ", inputVCF
print "metadata: ", metadata
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print
## Start ##
#### 1. Read input multi-sample VCF and generate a VCF object
#############################################################
header("1. Process multi-sample VCF as input")
VCFObj = formats.VCF()
donorIdList = VCFObj.read_VCF_multiSample(inputVCF)
#### 2. Read metadata file
##########################
# Make five dictionaries with the following structure:
# - dict1a: key(projectId) -> dict2a: key(donorId) -> totalNbMEI
# - dict1b: key(projectId) -> dict2a: key(donorId) -> nbHeterozygousMEI
# - dict1c: key(projectId) -> dict2b: key(donorId) -> nbHomozygousMEI
# - dict1d: key(donorId) -> projectCode
# - dict1e: key(projectId) -> dict2a: key(donorId) -> totalNbMEIAlleles
# Note: nbHeterozygousMEI and nbHomozygousMEI are the number of heterozygous and homozygous MEI for a given donor
# Note: nbHeterozygousMEI and nbHomozygousMEI are initialized with a value of 0
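# e.g. (hypothetical project codes / donor ids) the dictionaries end up shaped like:
#   nbHeterozMEIDict = {'PROJ-A': {'DONOR1': 0, 'DONOR2': 0}, 'PROJ-B': {...}}
#   donorIdProjectCodes = {'DONOR1': 'PROJ-A', 'DONOR2': 'PROJ-A'}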
header("2. Read metadata file")
metadataFile = open(metadata, 'r')
totalNbMEIDict = {}
nbHeterozMEIDict = {}
nbHomozMEIDict = {}
donorIdProjectCodes = {}
totalNbMEIAllelesDict = {}
for line in metadataFile:
line = line.rstrip('\n')
line = line.split('\t')
projectCode = line[0]
donorId = line[1]
## Dict1a, Dict1b, Dict1c and Dict1e
if projectCode not in nbHeterozMEIDict:
# Create dictionary
totalNbMEIDict[projectCode] = {}
nbHeterozMEIDict[projectCode] = {}
nbHomozMEIDict[projectCode] = {}
totalNbMEIAllelesDict[projectCode] = {}
# Initialize to 0 values:
totalNbMEIDict[projectCode][donorId] = 0
nbHeterozMEIDict[projectCode][donorId] = 0
nbHomozMEIDict[projectCode][donorId] = 0
totalNbMEIAllelesDict[projectCode][donorId] = 0
## Dict1d
donorIdProjectCodes[donorId] = projectCode
#print "totalNbMEIAllelesDict-empty: ", len(totalNbMEIAllelesDict), len(totalNbMEIDict), totalNbMEIAllelesDict
#### 3. Compute parameters:
###########################
# - Variant allele frequency per MEI across PCAWG cohort
# - Variant allele count per MEI across PCAWG cohort
# - Total number of MEI per donor (Save into dictionary generated in step 2)
# - Total number of MEI alleles per donor (Save into dictionary generated in step 2)
# - Number of heterozygous MEI per donor (Save into dictionary generated in step 2)
# - Number of homozygous MEI per donor (Save into dictionary generated in step 2)
# - Predicted zygosity based on number of supporting reads for heterozygous MEI
# - Predicted zygosity based on number of supporting reads for homozygous MEI
totalAlleleCountList = []
L1AlleleCountList = []
L1AlleleFreqList = []
AluAlleleCountList = []
AluAlleleFreqList = []
SvaAlleleCountList = []
SvaAlleleFreqList = []
ErvkAlleleCountList = []
ErvkAlleleFreqList = []
zygosityHeterozList = []
zygosityHomozList = []
### Make table containing for each germline insertion its VAF
## Two columns:
# insertionsId(chrom:pos) VAF
# Open output file
outFilePath = outDir + '/MEI_CLASS_VAF.txt'
outFile = open(outFilePath, 'a')
# Write header:
row = '#MEI' + "\t" + 'CLASS' + "\t" +'VAF' + "\n"
outFile.write(row)
header("3. Compute parameters")
for MEIObj in VCFObj.lineList:
## Total number of chromosome copies in the population
# Number of donors * 2 (diploid, two copies of a given chromosome)
totalNbChrom = len(MEIObj.genotypesDict) * 2
    ## Compute MEI allele count, genotyping VAF and update counters of heterozygous and homozygous MEI per donor
    # MEI allele count: number of chromosomes in the population harbouring the MEI
# Genotyping VAF: Ratio (number_reads_supporting_MEI)/(total_nb_reads_covering_MEI)
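    # Worked example (hypothetical numbers): with 3 donors, totalNbChrom = 6;
    # one 0/1 donor contributes 1 allele and one 1/1 donor contributes 2,
    # giving alleleCount = 3 and alleleFrequency = 3 / 6 = 0.5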
alleleCount = 0
# print "MEI: ", MEIObj.chrom, MEIObj.pos
for donorId, genotypeField in MEIObj.genotypesDict.iteritems():
projectCode = donorIdProjectCodes[donorId]
genotypeFieldList = genotypeField.split(":")
genotype = genotypeFieldList[0]
nbReadsMEI = float(genotypeFieldList[1])
totalNbReads = float(genotypeFieldList[2])
# Compute genotyping VAF:
if totalNbReads == 0:
zygosity = 0
else:
zygosity = nbReadsMEI / totalNbReads
## Update counters and store VAF values
# A) Heterozygous
if (genotype == "0/1"):
totalNbMEIDict[projectCode][donorId] += 1
totalNbMEIAllelesDict[projectCode][donorId] += 1
alleleCount += 1
nbHeterozMEIDict[projectCode][donorId] += 1
zygosityHeterozList.append(zygosity)
# print "test: ", projectCode, donorId, genotype, totalNbMEIAllelesDict[projectCode][donorId]
# B) Homozygous
elif (genotype == "1/1"):
totalNbMEIDict[projectCode][donorId] += 1
totalNbMEIAllelesDict[projectCode][donorId] += 2
alleleCount += 2
nbHomozMEIDict[projectCode][donorId] += 1
zygosityHomozList.append(zygosity)
# print "test: ", projectCode, donorId, genotype, totalNbMEIAllelesDict[projectCode][donorId]
## Compute MEI allele frequency:
alleleFrequency = float(alleleCount) / totalNbChrom
## Save into list. One per MEI type
totalAlleleCountList.append(alleleCount)
if (MEIObj.infoDict['CLASS'] == 'L1'):
L1AlleleCountList.append(alleleCount)
L1AlleleFreqList.append(alleleFrequency)
elif (MEIObj.infoDict['CLASS'] == 'Alu'):
AluAlleleCountList.append(alleleCount)
AluAlleleFreqList.append(alleleFrequency)
elif (MEIObj.infoDict['CLASS'] == 'SVA'):
SvaAlleleCountList.append(alleleCount)
SvaAlleleFreqList.append(alleleFrequency)
else:
ErvkAlleleCountList.append(alleleCount)
ErvkAlleleFreqList.append(alleleFrequency)
## Add row to the table
row = MEIObj.chrom + ":" + str(MEIObj.pos) + "\t" + MEIObj.infoDict['CLASS'] + "\t" + str(alleleFrequency) + "\n"
outFile.write(row)
# print "***********************"
print "totalNbMEIAllelesDict-filled: ", len(totalNbMEIAllelesDict), totalNbMEIAllelesDict
#### 4. Make plots:
#####################
header("4. Make plots")
# - Variant allele frequencies histogram across PCAWG donors (done)
# - Number of MEI per donor and tumor type boxplot (done)
# - Predicted zygosity histogram for homozygous and heterozygous variants (done)
# - Variant allele counts line graph (to do)
#### 4.1 Variant allele frequencies histogram across PCAWG donors
header("4.1 Make variant allele frequencies plot")
fig = plt.figure(figsize=(16,13))
fig.suptitle('Variant allele frequencies (VAF) across PCAWG donors', fontsize=20)
### a) L1
## Make plot
ax1 = fig.add_subplot(2, 2, 1)
ax1.set_title("LINE-1", fontsize=16)
plt.hist(L1AlleleFreqList, bins=40, color='#008000', alpha=0.75)
plt.xlabel("VAF", fontsize=14)
plt.ylabel("# MEI")
plt.xlim(0, 1)
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
## Customize ticks
plt.xticks(np.arange(0, 1.01, 0.1))
locs, labels = plt.xticks()
plt.setp(labels, rotation=30)
### b) ALU
## Make plot
ax2 = fig.add_subplot(2, 2, 2)
ax2.set_title("ALU", fontsize=16)
plt.hist(AluAlleleFreqList, bins=40, color='#008000', alpha=0.75)
plt.xlabel("VAF", fontsize=14)
plt.ylabel("# MEI")
plt.xlim(0, 1)
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax2.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax2.set_axisbelow(True)
## Customize ticks
plt.xticks(np.arange(0, 1.01, 0.1))
locs, labels = plt.xticks()
plt.setp(labels, rotation=30)
### c) SVA
## Make plot
ax3 = fig.add_subplot(2, 2, 3)
ax3.set_title("SVA", fontsize=16)
plt.hist(SvaAlleleFreqList, bins=40, color='#008000', alpha=0.75)
plt.xlabel("VAF", fontsize=14)
plt.ylabel("# MEI")
plt.xlim(0, 1)
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax3.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax3.set_axisbelow(True)
## Customize ticks
plt.xticks(np.arange(0, 1.01, 0.1))
locs, labels = plt.xticks()
plt.setp(labels, rotation=30)
### d) ERVK (no ERVK for now.. we need to improve script for identifying ERVK TSD)
## Make plot
#ax4 = fig.add_subplot(2, 2, 4)
#ax4.set_title("ERVK", fontsize=16)
#plt.hist(ErvkAlleleFreqList, bins=40, color='#008000', alpha=0.75)
#plt.xlabel("VAF", fontsize=14)
#plt.ylabel("# MEI")
#plt.xlim(0, 1)
#plt.ylim(0, max(ErvkAlleleFreqList) + 10)
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
#ax4.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
# alpha=0.5)
#ax4.set_axisbelow(True)
## Customize ticks
#plt.xticks(np.arange(0, 1.01, 0.1))
#locs, labels = plt.xticks()
#plt.setp(labels, rotation=30)
#plt.yticks(np.arange(0, max(ErvkAlleleFreqList)))
## Save figure
fileName = outDir + "/PCAWG_cohortVAFs_hist.pdf"
plt.savefig(fileName)
#### 4.2 Make total number and number of heterozygous and homozygous MEI per donor and tumor type boxplots
header("4.2 Make boxplots")
## Organize the data for plotting
tupleListTotalMEI = []
tupleListTotalMEIAlleles = []
tupleListHeterozMEI = []
tupleListHomozMEI = []
for projectCode in sorted(nbHeterozMEIDict):
# Make tuple (projectCode, number of MEI per donor) for total number of MEI, number of heterozygous and homozygous MEI
projectCodeNbTotalMEITuple = (projectCode, totalNbMEIDict[projectCode].values())
projectCodeNbTotalMEIAllelesTuple = (projectCode, totalNbMEIAllelesDict[projectCode].values())
projectCodeNbHeterozMEITuple = (projectCode, nbHeterozMEIDict[projectCode].values())
projectCodeNbHomozMEITuple = (projectCode, nbHomozMEIDict[projectCode].values())
# Add tuple to the list
tupleListTotalMEI.append(projectCodeNbTotalMEITuple)
tupleListTotalMEIAlleles.append(projectCodeNbTotalMEIAllelesTuple)
tupleListHeterozMEI.append(projectCodeNbHeterozMEITuple)
tupleListHomozMEI.append(projectCodeNbHomozMEITuple)
print "tuple-list: ", tupleListTotalMEIAlleles
## Make nested list with the following format:
# [donor1_nbMEI, donor2_nbMEI, ..., donorN_nbMEI], [donor1_nbMEI, donor2_nbMEI, ..., donorN_nbMEI] , ... [donor1_nbMEI, donor2_nbMEI, ..., donorN_nbMEI]
# project1_list project2_list projectN_list
# Total MEI
tmpList = map(list, zip(*tupleListTotalMEI))
projectCodesListTotal = tmpList[0]
nbMEIPerDonorTotal = tmpList[1]
# Total MEI alleles
tmpList = map(list, zip(*tupleListTotalMEIAlleles))
projectCodesListTotalAlleles = tmpList[0]
nbMEIPerDonorTotalAlleles = tmpList[1]
# Heterozygous MEI
tmpList = map(list, zip(*tupleListHeterozMEI))
projectCodesListHet = tmpList[0]
nbMEIPerDonorHet = tmpList[1]
# Homozygous MEI
tmpList = map(list, zip(*tupleListHomozMEI))
projectCodesListHom = tmpList[0]
nbMEIPerDonorHom = tmpList[1]
#### A) Make boxplot for total number of MEI
fig = plt.figure(figsize=(14,9))
fig.suptitle('Total # MEI per donor', fontsize=18)
ax1 = fig.add_subplot(1, 1, 1)
# Create the boxplot
bp = ax1.boxplot(nbMEIPerDonorTotal)
plt.ylabel("# MEI", fontsize=18)
## Customize boxplot:
# change outline color, fill color and linewidth of the boxes
for box in bp['boxes']:
# change outline color
box.set( color='#696969', linewidth=1)
# change color and linewidth of the whiskers
for whisker in bp['whiskers']:
whisker.set(color='#696969', linewidth=1)
# change color and linewidth of the caps
for cap in bp['caps']:
cap.set(color='#696969', linewidth=1)
# change color and linewidth of the medians
for median in bp['medians']:
median.set(color='#8b0000', linewidth=2)
# Add the project codes to the x-axis
ax1.set_xticklabels(projectCodesListTotal)
locs, labels = plt.xticks()
plt.setp(labels, rotation=75)
# Remove top and right axes
ax1.get_xaxis().tick_bottom()
ax1.get_yaxis().tick_left()
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
## Save figure
fileName = outDir + "/PCAWG_totalNbMEIperDonor_boxplot.pdf"
fig.savefig(fileName)
#### B) Make boxplot for total number of MEI
fig = plt.figure(figsize=(14,9))
fig.suptitle('# MEI Alleles per donor', fontsize=18)
ax2 = fig.add_subplot(1, 1, 1)
# Create the boxplot
bp = ax2.boxplot(nbMEIPerDonorTotalAlleles)
plt.ylabel("# MEI Alleles", fontsize=18)
## Customize boxplot:
# change outline color, fill color and linewidth of the boxes
for box in bp['boxes']:
# change outline color
box.set( color='#696969', linewidth=1)
# change color and linewidth of the whiskers
for whisker in bp['whiskers']:
whisker.set(color='#696969', linewidth=1)
# change color and linewidth of the caps
for cap in bp['caps']:
cap.set(color='#696969', linewidth=1)
# change color and linewidth of the medians
for median in bp['medians']:
median.set(color='#8b0000', linewidth=2)
# Add the project codes to the x-axis
ax2.set_xticklabels(projectCodesListTotalAlleles)
locs, labels = plt.xticks()
plt.setp(labels, rotation=75)
# Remove top and right axes
ax2.get_xaxis().tick_bottom()
ax2.get_yaxis().tick_left()
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax2.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax2.set_axisbelow(True)
## Save figure
fileName = outDir + "/PCAWG_totalNbMEIAllelesPerDonor_boxplot.pdf"
fig.savefig(fileName)
#### C) Make boxplots for number of homozygous and heterozygous MEI
fig = plt.figure(figsize=(18,12))
fig.suptitle('# MEI per donor', fontsize=24)
### C.a) Heterozygous MEI
ax3 = fig.add_subplot(2, 1, 1)
# Create the boxplot
bp = ax3.boxplot(nbMEIPerDonorHet)
plt.ylabel("# Heterozygous MEI", fontsize=18)
## Customize boxplot:
# change outline color, fill color and linewidth of the boxes
for box in bp['boxes']:
# change outline color
box.set( color='#696969', linewidth=1)
# change color and linewidth of the whiskers
for whisker in bp['whiskers']:
whisker.set(color='#696969', linewidth=1)
# change color and linewidth of the caps
for cap in bp['caps']:
cap.set(color='#696969', linewidth=1)
# change color and linewidth of the medians
for median in bp['medians']:
median.set(color='#8b0000', linewidth=2)
# Add the project codes to the x-axis
ax3.set_xticklabels(projectCodesListHet)
locs, labels = plt.xticks()
plt.setp(labels, rotation=75)
# Remove top and right axes
ax3.get_xaxis().tick_bottom()
ax3.get_yaxis().tick_left()
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax3.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax3.set_axisbelow(True)
### C.b) Homozygous MEI
ax4 = fig.add_subplot(2, 1, 2)
# Create the boxplot
bp = ax4.boxplot(nbMEIPerDonorHom)
plt.ylabel("# Homozygous MEI", fontsize=18)
## Customize boxplot:
# change outline color, fill color and linewidth of the boxes
for box in bp['boxes']:
# change outline color
box.set( color='#696969', linewidth=1)
# change color and linewidth of the whiskers
for whisker in bp['whiskers']:
whisker.set(color='#696969', linewidth=1)
# change color and linewidth of the caps
for cap in bp['caps']:
cap.set(color='#696969', linewidth=1)
# change color and linewidth of the medians
for median in bp['medians']:
median.set(color='#8b0000', linewidth=2)
# Add the project codes to the x-axis
ax4.set_xticklabels(projectCodesListHom)
locs, labels = plt.xticks()
plt.setp(labels, rotation=75)
# Remove top and right axes
ax4.get_xaxis().tick_bottom()
ax4.get_yaxis().tick_left()
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax4.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax4.set_axisbelow(True)
## Save figure
fileName = outDir + "/PCAWG_nbMEIperDonor_boxplot.pdf"
fig.savefig(fileName)
#### 4.3 Predicted zygosity histogram for homozygous and heterozygous variants
header("4.3 Make zygosity histogram")
fig = plt.figure(figsize=(8,6))
fig.suptitle('Predicted zygosity', fontsize=14)
## Make plot
ax5 = fig.add_subplot(1, 1, 1)
plt.hist(zygosityHeterozList, bins=20, color='#008000', edgecolor='#000000', alpha=0.75, label='Heterozygous')
plt.hist(zygosityHomozList, bins=5, color='#A67D3D', edgecolor='#000000', alpha=0.75, label='Homozygous')
plt.xlabel("Zygosity", fontsize=12)
plt.ylabel("# MEI", fontsize=12)
plt.xlim(0, 1)
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax5.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax5.set_axisbelow(True)
## Customize ticks
plt.xticks(np.arange(0, 1.01, 0.1))
locs, labels = plt.xticks()
plt.setp(labels, rotation=30)
## Legend
plt.legend(fontsize=12, loc='upper left')
## Save figure
fileName = outDir + "/PCAWG_zygosity_histogram.pdf"
fig.savefig(fileName)
#### 4.4 Variant allele counts line chart
header("4.4 Make allele counts scatterplot")
### Organize the data for plotting
alleleCountNbMEITuple = [(i, totalAlleleCountList.count(i)) for i in set(totalAlleleCountList)]
tmpList = map(list, zip(*alleleCountNbMEITuple))
alleleCountList = tmpList[0]
nbMEIList = tmpList[1]
### Make plot
fig = plt.figure(figsize=(8,8))
fig.suptitle('Allele count spectrum', fontsize=16)
ax6 = fig.add_subplot(1, 1, 1)
plt.scatter(alleleCountList, nbMEIList, color='#008000', alpha=.4)
plt.xlabel("Variant allele count", fontsize=14)
plt.ylabel("# MEI")
ax6.set_xscale('log', basex=10)
ax6.set_yscale('log', basex=10)
plt.xlim(0.5, max(alleleCountList) + 100)
plt.ylim(0.5, max(nbMEIList) + 100)
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax6.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax6.set_axisbelow(True)
## Customize ticks
# X axis
xPosList = [ 1, 5, 10, 50, 100, 200, 300, 400, 500, 1000, 2500, max(alleleCountList) ]
ax6.set_xticks(xPosList)
ax6.set_xticklabels(xPosList)
locs, labels = plt.xticks()
plt.setp(labels, rotation=90)
# y axis
yPosList = [ 1, 10, 100, 1000, max(nbMEIList) ]
ax6.set_yticks(yPosList)
ax6.set_yticklabels(yPosList)
## Save figure
fileName = outDir + "/PCAWG_alleleCount_scatterplot.pdf"
fig.savefig(fileName)
####
header("Finished")
| brguez/TEIBA | src/python/genotypedMEI.stats.py | Python | gpl-3.0 | 20,614 |
from urllib.request import urlopen
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
def is_number(s):
"""
check if a string represents an integer or float number
parameter
--------
s: a string
return
------
return True iff S represents a number (int or float)
"""
try:
float(s)
return True
except ValueError:
return False
def get_daily_data(y, m, d, icao):
"""
grab daily weather data for an airport from wunderground.com
parameter
---------
y: year
m: month
d: day
ICAO: ICAO identification number for an airport
return
------
a dictionary containing
"Min Temperature": daily minimum temperature
"Max Temperature": daily maximum temperature
"Precipitation": daily precipitation
"Max Humidity": daily maximum humidity
"Min Humidify": daily minimum humidify
"""
# construct url from (y, m, d)
url = "http://www.wunderground.com/history/airport/" + icao + '/'+\
str(y) + "/" + str(m) + "/" + str(d) + "/DailyHistory.html"
page = urlopen(url)
# parse html
soup = BeautifulSoup(page, 'html5lib')
# return dictionary
daily_data = {'Min Temperature':'nan', 'Max Temperature':'nan',
'Precipitation':'nan', 'Maximum Humidity':'nan', 'Minimum Humidity':'nan'}
# find rows in the main table
all_rows = soup.find(id="historyTable").find_all('tr')
for row in all_rows:
# attempt to find item name
try:
item_name = row.findAll('td', class_='indent')[0].get_text()
except Exception as e:
# if run into error, skip this row
continue
# temperature and precipitation
if item_name in ('Min Temperature','Max Temperature', 'Precipitation'):
try:
val = row.find_all('span', class_='wx-value')[0].get_text()
except Exception as e:
continue
if is_number(val):
daily_data[item_name] = val
if item_name in ('Maximum Humidity', 'Minimum Humidity'):
try:
val = row.find_all('td')[1].get_text()
except Exception as e:
continue
if is_number(val):
daily_data[item_name] = val
return daily_data
def write2csv(time_range, icao_list, filename):
"""
scrape Weather Underground for weather info for airports
listed in ICAO_LIST for the period in TIME_RANGE
and write data into FILENAME
parameter
---------
time_range: a timestamp/datetime iterator
icao_list: a list of standard 4 character strings
representing a list of airports
filename: name of the output file
return
------
None
output to file FILENAME
"""
f = open(filename, 'w')
for date in time_range:
for icao in icao_list:
y, m, d = date.year, date.month, date.day
# get data from Weather Underground
daily_data = get_daily_data(y, m, d, icao)
min_temp = daily_data['Min Temperature']
max_temp = daily_data['Max Temperature']
min_hum = daily_data['Minimum Humidity']
max_hum = daily_data['Maximum Humidity']
prec = daily_data['Precipitation']
str2write = ','.join([str(date), icao, min_temp, max_temp, \
min_hum, max_hum, prec])
str2write +='\n'
# print(str(date), icao)
f.write(str2write)
# Done getting data! Close file.
f.close()
def fetch_df(date):
"""
build a pandas DataFrame holding weather data from a csv file specified by DATE
parameter
--------
date: pandas DatetimeIndex or python Datetime obj
return
------
    a DataFrame with the following info for 50 cities
date, icao, min_temperature, max_temperature,
min_humidity, max_humidity, precipitation
"""
filename = 'hw_5_data/weather_data/' + date.strftime('%Y') + '/'+ \
date.strftime('%Y%m%d')+'.csv'
col_names = ('date', 'icao', 'min_temp', 'max_temp', 'min_hum', 'max_hum', 'prec')
df = pd.read_csv(filename, header=None, names=col_names)
return df
def lat_lon_2_distance(lat1, lon1, lat2, lon2):
"""
return distance (in km) between two locations (lat1, lon1) and (lat2, lon2)
parameter
---------
lat1, lat2: latitude in degrees
lon1, lon2: longitude in degrees
return
------
distance in km
"""
from math import sin, cos, sqrt, atan2, radians
# approximate radius of earth in km
R = 6373.0
lat1 = radians(lat1)
lon1 = radians(lon1)
lat2 = radians(lat2)
lon2 = radians(lon2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = R * c
return distance
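# Illustrative usage (not part of the original script); the coordinates below are
# example values (roughly Berlin -> Munich, which should come out near 500 km):
#   lat_lon_2_distance(52.52, 13.405, 48.137, 11.575)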
#--------------------------------------------------
#
# NOT YET FINISHED BELOW
#
#--------------------------------------------------
def AIO_get_data_from_soup(soup):
"""
    find the weather info from SOUP, an HTML document parsed by BeautifulSoup
    - Taken from function get_daily_data(y, m, d, icao)
    - reimplemented to use together with the AsyncIO library to speed up IO
parameter
---------
    soup: an HTML document parsed by BeautifulSoup
return
------
same as get_daily_data
"""
# return dictionary
daily_data = {'Min Temperature':'nan', 'Max Temperature':'nan',
'Precipitation':'nan', 'Maximum Humidity':'nan', 'Minimum Humidity':'nan'}
# find rows in the main table
all_rows = soup.find(id="historyTable").find_all('tr')
for row in all_rows:
# attempt to find item name
try:
item_name = row.findAll('td', class_='indent')[0].get_text()
except Exception as e:
# if run into error, skip this row
continue
# temperature and precipitation
if item_name in ('Min Temperature','Max Temperature', 'Precipitation'):
try:
val = row.find_all('span', class_='wx-value')[0].get_text()
except Exception as e:
continue
if is_number(val):
daily_data[item_name] = val
if item_name in ('Maximum Humidity', 'Minimum Humidity'):
try:
val = row.find_all('td')[1].get_text()
except Exception as e:
continue
if is_number(val):
daily_data[item_name] = val
return daily_data
| YuguangTong/AY250-hw | hw_5/util.py | Python | mit | 6,967 |
from django.db import models
import datetime
from django.utils import timezone
# Create your models here.
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
    def was_published_recently(self):
return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
class Choice(models.Model):
question = models.ForeignKey(Question)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
| okaram/django | tutorial/polls/models.py | Python | unlicense | 641 |
from basescript import BaseScript
import os
import gc
import sys
import json
import time
import code
import inspect
import logging
import resource
import string
import random
import threading
import msgpack
import cStringIO
import traceback
import urlparse
from multiprocessing.pool import ThreadPool
import statsd
import requests
import tornado.ioloop
import tornado.iostream
import tornado.web
import tornado.websocket
import tornado.iostream
from tornado.template import BaseLoader, Template
from tornado.web import StaticFileHandler, HTTPError
MSG_TYPE_INFO = 0
MSG_TYPE_CONSOLE = 1
MSG_TYPE_LOG = 2
MAX_LOG_FILE_SIZE = 100 * 1024 * 1024 # 100MB
def disable_requests_debug_logs():
# set the logging level of requests module to warning
# otherwise it swamps with too many logs
logging.getLogger("requests").setLevel(logging.WARNING)
def tag(*tags):
"""
Constructs a decorator that tags a function with specified
strings (@tags). The tags on the decorated function are
available via fn.tags
"""
def dfn(fn):
_tags = getattr(fn, "tags", set())
_tags.update(tags)
fn.tags = _tags
return fn
return dfn
def get_fn_tags(fn):
return getattr(fn, "tags", set())
def mime(mime):
"""
Constructs a decorator that sets the preferred mime type
to be written in the http response when returning the
function result.
"""
def dfn(fn):
fn.mime = mime
return fn
return dfn
def raw(mime="application/octet-stream"):
"""
Constructs a decorator that marks the fn
as raw response format
"""
def dfn(fn):
tags = getattr(fn, "tags", set())
tags.add("raw")
fn.tags = tags
fn.mime = getattr(fn, "mime", mime)
return fn
return dfn
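# Illustrative sketch (not part of the original module): how an API class might
# combine these decorators. ``Reports`` and its methods are hypothetical.
#
#   class Reports(object):
#       @tag("admin")
#       @mime("application/json")
#       def summary(self):
#           return {"ok": True}
#
#       @raw(mime="text/csv")
#       def export_csv(self):
#           return "a,b\n1,2\n"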
class RPCCallException(Exception):
pass
class BaseHandler(tornado.web.RequestHandler):
def __init__(self, application, request, **kwargs):
super(BaseHandler, self).__init__(application, request, **kwargs)
a = self.application
self.server = s = a.funcserver
self.stats = s.stats
self.log = s.log
self.api = s.api
def get_template_namespace(self):
ns = super(BaseHandler, self).get_template_namespace()
ns.update(sys.funcserver.define_template_namespace())
return ns
class PyInterpreter(code.InteractiveInterpreter):
def __init__(self, *args, **kwargs):
code.InteractiveInterpreter.__init__(self, *args, **kwargs)
self.output = []
def write(self, data):
self.output.append(data)
class WSConnection(tornado.websocket.WebSocketHandler):
"""
Websocket based communication channel between a
client and the server.
"""
WRITE_BUFFER_THRESHOLD = 1 * 1024 * 1024 # 1MB
def open(self, pysession_id):
"""
Called when client opens connection. Initialization
is done here.
"""
self.id = id(self)
self.funcserver = self.application.funcserver
self.pysession_id = pysession_id
# register this connection with node
self.state = self.funcserver.websocks[self.id] = {"id": self.id, "sock": self}
def on_message(self, msg):
"""
Called when client sends a message.
Supports a python debugging console. This forms
the "eval" part of a standard read-eval-print loop.
Currently the only implementation of the python
console is in the WebUI but the implementation
of a terminal based console is planned.
"""
msg = json.loads(msg)
psession = self.funcserver.pysessions.get(self.pysession_id, None)
if psession is None:
interpreter = PyInterpreter(self.funcserver.define_python_namespace())
psession = dict(interpreter=interpreter, socks=set([self.id]))
self.funcserver.pysessions[self.pysession_id] = psession
else:
interpreter = psession["interpreter"]
psession["socks"].add(self.id)
code = msg["code"]
msg_id = msg["id"]
stdout = sys.stdout
try:
sys.stdout = cStringIO.StringIO()
interpreter.runsource(code)
output = sys.stdout.getvalue() or interpreter.output
if isinstance(output, list):
output = "".join(output)
interpreter.output = []
finally:
sys.stdout = stdout
msg = {"type": MSG_TYPE_CONSOLE, "id": msg_id, "data": output}
self.send_message(msg)
def on_close(self):
"""
Called when client closes this connection. Cleanup
is done here.
"""
if self.id in self.funcserver.websocks:
self.funcserver.websocks[self.id] = None
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.add_callback(lambda: self.funcserver.websocks.pop(self.id, None))
psession = self.funcserver.pysessions.get(self.pysession_id, None)
if psession:
psession["socks"].remove(self.id)
if not psession["socks"]:
del self.funcserver.pysessions[self.pysession_id]
def send_message(self, msg, binary=False):
# TODO: check if following two lines are required
# tornado documentation seems to indicate that
# this might be handled internally.
if not isinstance(msg, str):
msg = json.dumps(msg)
try:
if self.ws_connection:
self.write_message(msg, binary=binary)
except tornado.iostream.StreamClosedError:
self.on_close()
@property
def is_buffer_full(self):
bsize = sum([len(x) for x in self.stream._write_buffer])
return bsize >= self.WRITE_BUFFER_THRESHOLD
def _msg_from(self, msg):
return {"type": msg.get("type", ""), "id": msg["id"]}
def call(fn):
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.add_callback(fn)
def make_handler(template, handler):
class SimpleHandler(handler):
def get(self):
return self.render(template)
return SimpleHandler
def resolve_path(path):
return (
path if os.path.isabs(path) else os.path.join(os.path.dirname(__file__), path)
)
class TemplateLoader(BaseLoader):
def __init__(self, dirs=None, **kwargs):
super(TemplateLoader, self).__init__(**kwargs)
self.dirs = dirs or []
def add_dir(self, d):
self.dirs.append(d)
def del_dir(self, d):
self.dirs.remove(d)
def resolve_path(self, name, parent_path=None):
for d in reversed(self.dirs):
p = os.path.join(d, name)
if not os.path.exists(p):
continue
return os.path.abspath(p)
return name
def _create_template(self, name):
f = open(name, "rb")
template = Template(f.read(), name=name, loader=self)
f.close()
return template
class CustomStaticFileHandler(StaticFileHandler):
PATHS = []
@classmethod
def get_absolute_path(cls, root, path):
for p in reversed(cls.PATHS):
ap = os.path.join(p, path)
if not os.path.exists(ap):
continue
return ap
return path
def validate_absolute_path(self, root, absolute_path):
if os.path.isdir(absolute_path) and self.default_filename is not None:
# need to look at the request.path here for when path is empty
# but there is some prefix to the path that was already
# trimmed by the routing
if not self.request.path.endswith("/"):
self.redirect(self.request.path + "/", permanent=True)
return
absolute_path = os.path.join(absolute_path, self.default_filename)
if not os.path.exists(absolute_path):
raise HTTPError(404)
if not os.path.isfile(absolute_path):
raise HTTPError(403, "%s is not a file", self.path)
return absolute_path
class RPCHandler(BaseHandler):
def __init__(self, *args, **kwargs):
super(RPCHandler, self).__init__(*args, **kwargs)
# stop_iteration is turned on when the connection closes
# or self.finish is called. This is a signal to stop
# the iteration of any generator that is writing to the client.
self.stop_iteration = False
def on_finish(self):
super(RPCHandler, self).on_finish()
self.stop_iteration = True
def on_connection_close(self):
super(RPCHandler, self).on_connection_close()
self.stop_iteration = True
def _get_apifn(self, fn_name):
obj = self.api
for part in fn_name.split("."):
obj = getattr(obj, part)
return obj
def _clean_kwargs(self, kwargs, fn):
"""
Remove unexpected keyword arguments from the
set of received keyword arguments.
"""
        # Do not do the cleaning if the server config
        # doesn't ask to ignore unexpected kwargs
if not self.server.IGNORE_UNEXPECTED_KWARGS:
return kwargs
expected_kwargs = set(inspect.getargspec(fn).args)
got_kwargs = set(kwargs.keys())
unexpected_kwargs = got_kwargs - expected_kwargs
for k in unexpected_kwargs:
del kwargs[k]
return kwargs
def _handle_single_call(self, request, m):
fn_name = m.get("fn", None)
t = time.time()
tags = {"fn": fn_name or "unknown", "success": False}
try:
fn = self._get_apifn(fn_name)
args = m["args"]
kwargs = self._clean_kwargs(m["kwargs"], fn)
self.server.on_api_call_start(fn_name, args, kwargs, self)
if self.get_status() == 304:
return
r = fn(*args, **kwargs)
r = {"success": True, "result": r}
tags["success"] = True
except Exception, e:
tags["success"] = False
self.log.exception(
"RPC failed", fn=fn_name, args=m.get("args"), kwargs=m.get("kwargs")
)
r = {"success": False, "result": repr(e)}
finally:
tdiff = (time.time() - t) * 1000
(self.stats.measure("api", **tags).count(invoked=1).time(duration=tdiff))
try:
_r = self.server.on_api_call_end(fn_name, args, kwargs, self, r)
if _r is not None:
r = _r
except (SystemExit, KeyboardInterrupt):
raise
except:
self.log.exception(
"on_api_call_end failed", fn=fn_name, args=args, kwargs=kwargs
)
return r
def _handle_call(self, request, fn, m, protocol):
if fn != "__batch__":
r = self._handle_single_call(request, m)
else:
# Batch calls
r = []
for call in m["calls"]:
_r = self._handle_single_call(request, call)
# If the func invoked above is a streaming function, then fail
# this operation as we don't handle streaming functions in batch mode
if inspect.isgenerator(_r.get("result")):
raise APIException("Cannot invoke streaming API fn in batch mode")
if isinstance(_r, dict) and "success" in _r:
_r = _r["result"] if _r["success"] else None
r.append(_r)
if self.get_status() == 304:
return
# Get the API function object
fnobj = self._get_apifn(fn) if fn != "__batch__" else (lambda: 0)
# Set response header based on chosen serialization mechanism
mime = getattr(fnobj, "mime", self.get_mime(protocol))
self.set_header("Content-Type", mime)
is_raw = "raw" in get_fn_tags(fnobj)
serializer = (lambda x: x) if is_raw else self.get_serializer(protocol)
if fn == "__batch__" or not r["success"]:
r = serializer(r)
self.set_header("Content-Length", len(r))
self.write(r)
return
result = r["result"]
if not inspect.isgenerator(result):
# Full response is available - Write it out in one shot
r = serializer(r)
self.set_header("Content-Length", len(r))
self.write(r)
return
# Iterating over the results until iteration completes or
# the connection is closed.
try:
while not self.stop_iteration:
part = next(result)
part = serializer(part)
self.write(part)
sep = "" if is_raw else self.get_record_separator(protocol)
if sep:
self.write(sep)
self.flush()
except StopIteration:
# TODO iter stats that it completed successfully
pass
except tornado.iostream.StreamClosedError:
# TODO iter stats that it was closed
self.log.warning("stream closed by client")
pass
finally:
del result
gc.collect()
def get_record_separator(self, protocol):
return {"msgpack": "", "json": "\n", "python": "\n"}.get(
protocol, self.server.SERIALIZER_RECORD_SEP
)
def get_serializer(self, name):
return {"msgpack": msgpack.packb, "json": json.dumps, "python": repr}.get(
name, self.server.SERIALIZER
)
def get_deserializer(self, name):
return {"msgpack": msgpack.packb, "json": json.loads, "python": eval}.get(
name, self.server.DESERIALIZER
)
def get_mime(self, name):
return {
"msgpack": "application/x-msgpack",
"json": "application/json",
"python": "application/x-python",
}.get(name, self.server.MIME)
def _handle_call_wrapper(self, request, fn, m, protocol):
try:
return self._handle_call(request, fn, m, protocol)
except Exception, e:
self.log.exception(
"RPC failed", fn=m.get("fn"), args=m.get("args"), kwargs=m.get("kwargs")
)
self.clear()
self.set_status(500)
finally:
self.finish()
def _set_headers(self):
for k, v in self.server.define_headers().iteritems():
self.set_header(k, v)
@tornado.web.asynchronous
def post(self, protocol="default"):
self._set_headers()
m = self.get_deserializer(protocol)(self.request.body)
fn = m["fn"]
self.server.threadpool.apply_async(
lambda: self._handle_call_wrapper(self.request, fn, m, protocol)
)
def failsafe_json_decode(self, v):
try:
v = json.loads(v)
except ValueError:
pass
return v
@tornado.web.asynchronous
def get(self, protocol="default"):
self._set_headers()
D = self.failsafe_json_decode
args = dict(
[
(k, D(v[0]) if len(v) == 1 else [D(x) for x in v])
for k, v in self.request.arguments.iteritems()
]
)
fn = args.pop("fn")
m = dict(kwargs=args, fn=fn, args=[])
self.server.threadpool.apply_async(
lambda: self._handle_call_wrapper(self.request, fn, m, protocol)
)
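
# --- Illustrative sketch (editor's addition, not part of the original code) ---
# The handler above treats generator results specially: a plain return value is
# serialized in one response, while a generator is streamed record by record
# (see _handle_call). The class and method names below are made up for the
# example; such an object would typically be returned from Server.prepare_api().
class ExampleStreamingAPI(object):
    def add(self, a, b):
        # ordinary call: the full result is serialized at once
        return a + b

    def countdown(self, n):
        # generator: each yielded item is serialized and flushed separately
        for i in range(n, 0, -1):
            yield i
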
class Server(BaseScript):
NAME = "FuncServer"
DESC = "Default Functionality Server"
DEFAULT_PORT = 9345
VIRTUAL_HOST = r".*"
STATIC_PATH = "static"
TEMPLATE_PATH = "templates"
APP_CLASS = tornado.web.Application
RPC_HANDLER_CLASS = RPCHandler
SERIALIZER = staticmethod(msgpack.packb)
SERIALIZER_RECORD_SEP = ""
DESERIALIZER = staticmethod(msgpack.unpackb)
MIME = "application/x-msgpack"
IGNORE_UNEXPECTED_KWARGS = False
# Number of worker threads in the threadpool
THREADPOOL_WORKERS = 32
DISABLE_REQUESTS_DEBUG_LOGS = True
def dump_stacks(self):
"""
Dumps the stack of all threads. This function
is meant for debugging. Useful when a deadlock happens.
borrowed from: http://blog.ziade.org/2012/05/25/zmq-and-gevent-debugging-nightmares/
"""
dump = []
# threads
threads = dict([(th.ident, th.name) for th in threading.enumerate()])
for thread, frame in sys._current_frames().items():
if thread not in threads:
continue
dump.append("Thread 0x%x (%s)\n" % (thread, threads[thread]))
dump.append("".join(traceback.format_stack(frame)))
dump.append("\n")
return "".join(dump)
@property
def name(self):
return ".".join([x for x in (self.NAME, self.args.name) if x])
def new_pysession(self):
chars = list(set(string.letters + string.digits))
name = "".join([random.choice(chars) for i in xrange(10)])
if name in self.pysessions:
return self.new_pysession()
return name
def define_args(self, parser):
super(Server, self).define_args(parser)
parser.add_argument(
"--port",
default=self.DEFAULT_PORT,
type=int,
help="port to listen on for server",
)
parser.add_argument(
"--debug",
action="store_true",
help="When enabled, auto reloads server on code change",
)
def define_log_pre_format_hooks(self):
"""
adds a hook to send to websocket if the run command was selected
"""
hooks = super(Server, self).define_log_pre_format_hooks()
# NOTE enabling logs only on debug mode
if self.args.func == self.run and self.args.debug:
hooks.append(self._send_log_to_ws)
return hooks
def _send_log_to_ws(self, msg):
websocks = getattr(self, "websocks", None)
if websocks is None or len(websocks) == 0:
return
msg = {"type": MSG_TYPE_LOG, "id": self.log_id, "data": msg}
bad_ws = []
for _id, ws in websocks.iteritems():
if ws is None:
bad_ws.append(_id)
continue
ws["sock"].send_message(msg)
for _id in bad_ws:
del self.websocks[_id]
self.log_id += 1
def prepare_base_handlers(self):
# Tornado URL handlers for core functionality
debug_mode_only = [
(r"/ws/(.*)", WSConnection),
(r"/logs", make_handler("logs.html", BaseHandler)),
(r"/console", make_handler("console.html", BaseHandler)),
]
others = [
(r"/", make_handler("home.html", BaseHandler)),
(r"/rpc(?:/([^/]*)/?)?", self.RPC_HANDLER_CLASS),
]
if self.args.debug:
return debug_mode_only + others
else:
return others
def prepare_handlers(self):
# Tornado URL handlers for additional functionality
return []
def prepare_template_loader(self, loader):
# add additional template dirs by using
# loader.add_dir(path)
return loader
def prepare_static_paths(self, paths):
# add static paths that can contain
        # additional or override files
# eg: paths.append(PATH)
return paths
def prepare_nav_tabs(self, nav_tabs):
# Add additional tab buttons in the UI toolbar
# eg: nav_tabs.append(('MyTab', '/mytab'))
return nav_tabs
def define_python_namespace(self):
return {"server": self, "logging": logging, "call": call, "api": self.api}
def define_template_namespace(self):
return self.define_python_namespace()
def on_api_call_start(self, fn, args, kwargs, handler):
pass
def on_api_call_end(self, fn, args, kwargs, handler, result):
return result
def prepare_api(self):
"""
Prepare the API object that is exposed as
functionality by the Server
"""
return None
def define_headers(self):
"""
the dictionary returned by define_headers will be used as
header key and value in every response to a client.
"""
return {}
def run(self):
""" prepares the api and starts the tornado funcserver """
self.log_id = 0
# all active websockets and their state
self.websocks = {}
# all active python interpreter sessions
self.pysessions = {}
if self.DISABLE_REQUESTS_DEBUG_LOGS:
disable_requests_debug_logs()
self.threadpool = ThreadPool(self.THREADPOOL_WORKERS)
self.api = None
# tornado app object
base_handlers = self.prepare_base_handlers()
handlers = self.prepare_handlers()
self.template_loader = TemplateLoader([resolve_path(self.TEMPLATE_PATH)])
_ = self.prepare_template_loader(self.template_loader)
if _ is not None:
self.template_loader = _
shclass = CustomStaticFileHandler
shclass.PATHS.append(resolve_path(self.STATIC_PATH))
_ = self.prepare_static_paths(shclass.PATHS)
if _ is not None:
shclass.PATHS = _
self.static_handler_class = shclass
self.nav_tabs = [("Home", "/")]
if self.args.debug:
self.nav_tabs += [("Console", "/console"), ("Logs", "/logs")]
self.nav_tabs = self.prepare_nav_tabs(self.nav_tabs)
settings = {
"static_path": "<DUMMY-INEXISTENT-PATH>",
"static_handler_class": self.static_handler_class,
"template_loader": self.template_loader,
"compress_response": True,
"debug": self.args.debug,
}
all_handlers = handlers + base_handlers
self.app = self.APP_CLASS(**settings)
self.app.add_handlers(self.VIRTUAL_HOST, all_handlers)
sys.funcserver = self.app.funcserver = self
self.api = self.prepare_api()
if self.api is not None and not hasattr(self.api, "log"):
self.api.log = self.log
if self.args.port != 0:
self.app.listen(self.args.port)
tornado.ioloop.IOLoop.instance().start()
def _passthrough(name):
def fn(self, *args, **kwargs):
p = self.prefix + "." + name
if self.parent is None:
return self._call(p, args, kwargs)
else:
return self.parent._call(p, args, kwargs)
return fn
class Client(object):
SERIALIZER = staticmethod(msgpack.packb)
DESERIALIZER = staticmethod(msgpack.unpackb)
DISABLE_REQUESTS_DEBUG_LOGS = True
def __init__(self, server_url, prefix=None, parent=None, is_batch=False, auth=None):
self.server_url = server_url
self.rpc_url = urlparse.urljoin(server_url, "rpc")
self.is_batch = is_batch
self.prefix = prefix
self.parent = parent
self._calls = []
self.auth = auth
if self.DISABLE_REQUESTS_DEBUG_LOGS:
disable_requests_debug_logs()
def __getattr__(self, attr):
prefix = self.prefix + "." + attr if self.prefix else attr
return self.__class__(
self.server_url,
prefix=prefix,
parent=self.parent or self,
is_batch=self.is_batch,
)
def __call__(self, *args, **kwargs):
if self.parent is None:
return self._call(self.prefix, args, kwargs)
else:
return self.parent._call(self.prefix, args, kwargs)
def _call(self, fn, args, kwargs):
if not self.is_batch:
return self._do_single_call(fn, args, kwargs)
else:
self._calls.append(dict(fn=fn, args=args, kwargs=kwargs))
__getitem__ = _passthrough("__getitem__")
__setitem__ = _passthrough("__setitem__")
__delitem__ = _passthrough("__delitem__")
__contains__ = _passthrough("__contains__")
__len__ = _passthrough("__len__")
def __nonzero__(self):
return True
def set_batch(self):
self.is_batch = True
def unset_batch(self):
self.is_batch = False
def _do_single_call(self, fn, args, kwargs):
m = self.SERIALIZER(dict(fn=fn, args=args, kwargs=kwargs))
if self.auth:
req = requests.post(self.rpc_url, data=m, auth=self.auth)
else:
req = requests.post(self.rpc_url, data=m)
res = self.DESERIALIZER(req.content)
if not res["success"]:
raise RPCCallException(res["result"])
else:
return res["result"]
def execute(self):
if not self._calls:
return
m = dict(fn="__batch__", calls=self._calls)
m = self.SERIALIZER(m)
if self.auth:
req = requests.post(self.rpc_url, data=m, auth=self.auth)
else:
req = requests.post(self.rpc_url, data=m)
res = self.DESERIALIZER(req.content)
self._calls = []
return res
if __name__ == "__main__":
Server().start()
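
# --- Illustrative sketch (editor's addition, not part of the original code) ---
# A minimal example of how Client could talk to a running Server. The URL and
# the exposed "add" function are assumptions; a single call POSTs to /rpc right
# away, while batch mode queues calls until execute() is invoked.
def _example_client_usage():
    client = Client("http://localhost:9345/")
    single = client.add(2, 3)       # one POST, returns the unwrapped result
    client.set_batch()
    client.add(1, 1)                # queued, nothing sent yet
    client.add(2, 2)
    batched = client.execute()      # one POST with fn="__batch__"
    return single, batched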
| deep-compute/funcserver | funcserver/funcserver.py | Python | mit | 25,163 |
import math
class BoundingBox(object):
def __init__(self, *args, **kwargs):
self.lat_min = None
self.lon_min = None
self.lat_max = None
self.lon_max = None
def get_bounding_box(latitude_in_degrees, longitude_in_degrees, half_side_in_km):
assert half_side_in_km > 0
    assert latitude_in_degrees >= -90.0 and latitude_in_degrees <= 90.0
assert longitude_in_degrees >= -180.0 and longitude_in_degrees <= 180.0
lat = math.radians(latitude_in_degrees)
lon = math.radians(longitude_in_degrees)
radius = 6371
# Radius of the parallel at given latitude
parallel_radius = radius*math.cos(lat)
lat_min = lat - half_side_in_km/radius
lat_max = lat + half_side_in_km/radius
lon_min = lon - half_side_in_km/parallel_radius
lon_max = lon + half_side_in_km/parallel_radius
rad2deg = math.degrees
box = BoundingBox()
box.lat_min = rad2deg(lat_min)
box.lon_min = rad2deg(lon_min)
box.lat_max = rad2deg(lat_max)
box.lon_max = rad2deg(lon_max)
return (box) | insacloud/insacloud-back | insacloud/services/geolocalisation.py | Python | mit | 1,059 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
from qtpy.QtWidgets import QApplication
import matplotlib as mpl
mpl.use('Agg') # noqa
from mantid.simpleapi import CreateSampleWorkspace
from mantidqt.utils.qt.testing import GuiTest
from mantidqt.utils.qt.testing.qt_widget_finder import QtWidgetFinder
from mantidqt.widgets.samplelogs.presenter import SampleLogs
class SampleLogsViewTest(GuiTest, QtWidgetFinder):
def test_deleted_on_close(self):
ws = CreateSampleWorkspace()
pres = SampleLogs(ws)
self.assert_widget_created()
pres.view.close()
QApplication.processEvents()
self.assert_no_toplevel_widgets()
| mganeva/mantid | qt/python/mantidqt/widgets/samplelogs/test/test_samplelogs_view.py | Python | gpl-3.0 | 939 |
# -*- coding: utf-8 -*-
"""Python's builtin :class:`list` is a very fast and efficient
sequence type, but it could be better for certain access patterns,
such as non-sequential insertion into a large lists. ``listutils``
provides a pure-Python solution to this problem.
For utilities for working with iterables and lists, check out
:mod:`iterutils`. For the a :class:`list`-based version of
:class:`collections.namedtuple`, check out :mod:`namedutils`.
"""
from __future__ import print_function, division
import operator
from math import log as math_log
from itertools import chain, islice
try:
from compat import make_sentinel
_MISSING = make_sentinel(var_name='_MISSING')
except ImportError:
_MISSING = object()
try:
xrange
except NameError:
# Python 3 compat
xrange = range
# TODO: expose splaylist?
__all__ = ['BList', 'BarrelList']
# TODO: keep track of list lengths and bisect to the right list for
# faster getitem (and slightly slower setitem and delitem ops)
class BarrelList(list):
"""The ``BarrelList`` is a :class:`list` subtype backed by many
dynamically-scaled sublists, to provide better scaling and random
insertion/deletion characteristics. It is a subtype of the builtin
:class:`list` and has an identical API, supporting indexing,
slicing, sorting, etc. If application requirements call for
something more performant, consider the `blist module available on
PyPI`_.
The name comes by way of Kurt Rose, who said it reminded him of
barrel shifters. Not sure how, but it's BList-like, so the name
stuck. BList is of course a reference to `B-trees`_.
Args:
iterable: An optional iterable of initial values for the list.
>>> blist = BList(xrange(100000))
>>> blist.pop(50000)
50000
>>> len(blist)
99999
>>> len(blist.lists) # how many underlying lists
8
>>> slice_idx = blist.lists[0][-1]
>>> blist[slice_idx:slice_idx + 2]
BarrelList([11637, 11638])
Slicing is supported and works just fine across list borders,
returning another instance of the BarrelList.
.. _blist module available on PyPI: https://pypi.python.org/pypi/blist
.. _B-trees: https://en.wikipedia.org/wiki/B-tree
"""
_size_factor = 1520
"This size factor is the result of tuning using the tune() function below."
def __init__(self, iterable=None):
self.lists = [[]]
if iterable:
self.extend(iterable)
@property
def _cur_size_limit(self):
len_self, size_factor = len(self), self._size_factor
return int(round(size_factor * math_log(len_self + 2, 2)))
def _translate_index(self, index):
if index < 0:
index += len(self)
rel_idx, lists = index, self.lists
for list_idx in range(len(lists)):
len_list = len(lists[list_idx])
if rel_idx < len_list:
break
rel_idx -= len_list
if rel_idx < 0:
return None, None
return list_idx, rel_idx
def _balance_list(self, list_idx):
if list_idx < 0:
list_idx += len(self.lists)
cur_list, len_self = self.lists[list_idx], len(self)
size_limit = self._cur_size_limit
if len(cur_list) > size_limit:
half_limit = size_limit // 2
while len(cur_list) > half_limit:
next_list_idx = list_idx + 1
self.lists.insert(next_list_idx, cur_list[-half_limit:])
del cur_list[-half_limit:]
return True
return False
def insert(self, index, item):
if len(self.lists) == 1:
self.lists[0].insert(index, item)
self._balance_list(0)
else:
list_idx, rel_idx = self._translate_index(index)
if list_idx is None:
raise IndexError()
self.lists[list_idx].insert(rel_idx, item)
self._balance_list(list_idx)
return
def append(self, item):
self.lists[-1].append(item)
def extend(self, iterable):
self.lists[-1].extend(iterable)
def pop(self, *a):
lists = self.lists
if len(lists) == 1 and not a:
return self.lists[0].pop()
index = a and a[0]
if index == () or index is None or index == -1:
ret = lists[-1].pop()
if len(lists) > 1 and not lists[-1]:
lists.pop()
else:
list_idx, rel_idx = self._translate_index(index)
if list_idx is None:
raise IndexError()
ret = lists[list_idx].pop(rel_idx)
self._balance_list(list_idx)
return ret
def iter_slice(self, start, stop, step=None):
iterable = self # TODO: optimization opportunities abound
# start_list_idx, stop_list_idx = 0, len(self.lists)
if start is None:
start = 0
if stop is None:
stop = len(self)
if step is not None and step < 0:
step = -step
start, stop = -start, -stop - 1
iterable = reversed(self)
if start < 0:
start += len(self)
# start_list_idx, start_rel_idx = self._translate_index(start)
if stop < 0:
stop += len(self)
# stop_list_idx, stop_rel_idx = self._translate_index(stop)
return islice(iterable, start, stop, step)
def del_slice(self, start, stop, step=None):
if step is not None and abs(step) > 1: # punt
new_list = chain(self.iter_slice(0, start, step),
self.iter_slice(stop, None, step))
self.lists[0][:] = new_list
self._balance_list(0)
return
if start is None:
start = 0
if stop is None:
stop = len(self)
start_list_idx, start_rel_idx = self._translate_index(start)
stop_list_idx, stop_rel_idx = self._translate_index(stop)
if start_list_idx is None:
raise IndexError()
if stop_list_idx is None:
raise IndexError()
if start_list_idx == stop_list_idx:
del self.lists[start_list_idx][start_rel_idx:stop_rel_idx]
elif start_list_idx < stop_list_idx:
del self.lists[start_list_idx + 1:stop_list_idx]
del self.lists[start_list_idx][start_rel_idx:]
del self.lists[stop_list_idx][:stop_rel_idx]
else:
assert False, ('start list index should never translate to'
' greater than stop list index')
__delslice__ = del_slice
@classmethod
def from_iterable(cls, it):
return cls(it)
def __iter__(self):
return chain(*self.lists)
def __reversed__(self):
return chain.from_iterable(reversed(l) for l in reversed(self.lists))
def __len__(self):
return sum([len(l) for l in self.lists])
def __contains__(self, item):
for cur in self.lists:
if item in cur:
return True
return False
def __getitem__(self, index):
try:
start, stop, step = index.start, index.stop, index.step
except AttributeError:
index = operator.index(index)
else:
iter_slice = self.iter_slice(start, stop, step)
ret = self.from_iterable(iter_slice)
return ret
list_idx, rel_idx = self._translate_index(index)
if list_idx is None:
raise IndexError()
return self.lists[list_idx][rel_idx]
def __delitem__(self, index):
try:
start, stop, step = index.start, index.stop, index.step
except AttributeError:
index = operator.index(index)
else:
self.del_slice(start, stop, step)
return
list_idx, rel_idx = self._translate_index(index)
if list_idx is None:
raise IndexError()
del self.lists[list_idx][rel_idx]
def __setitem__(self, index, item):
try:
start, stop, step = index.start, index.stop, index.step
except AttributeError:
index = operator.index(index)
else:
if len(self.lists) == 1:
self.lists[0][index] = item
else:
tmp = list(self)
tmp[index] = item
self.lists[:] = [tmp]
self._balance_list(0)
return
list_idx, rel_idx = self._translate_index(index)
if list_idx is None:
raise IndexError()
self.lists[list_idx][rel_idx] = item
def __getslice__(self, start, stop):
iter_slice = self.iter_slice(start, stop, 1)
return self.from_iterable(iter_slice)
def __setslice__(self, start, stop, sequence):
if len(self.lists) == 1:
self.lists[0][start:stop] = sequence
else:
tmp = list(self)
tmp[start:stop] = sequence
self.lists[:] = [tmp]
self._balance_list(0)
return
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, list(self))
def sort(self):
# poor pythonist's mergesort, it's faster than sorted(self)
# when the lists' average length is greater than 512.
if len(self.lists) == 1:
self.lists[0].sort()
else:
for li in self.lists:
li.sort()
tmp_sorted = sorted(chain.from_iterable(self.lists))
del self.lists[:]
            self.lists.append(tmp_sorted)
self._balance_list(0)
def reverse(self):
for cur in self.lists:
cur.reverse()
self.lists.reverse()
def count(self, item):
return sum([cur.count(item) for cur in self.lists])
def index(self, item):
len_accum = 0
for cur in self.lists:
try:
rel_idx = cur.index(item)
return len_accum + rel_idx
except ValueError:
len_accum += len(cur)
raise ValueError('%r is not in list' % (item,))
BList = BarrelList
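
# --- Illustrative usage sketch (editor's addition, not part of the library) ---
# BarrelList behaves like a plain list; the sizes below are arbitrary and only
# meant to show that indexing and insertion keep working across the sublists.
def _barrel_list_example():
    bl = BarrelList(xrange(10000))
    bl.insert(5000, 'x')        # random-position insertion
    assert bl[5000] == 'x'
    assert len(bl) == 10001
    return len(bl.lists)        # number of underlying sublists
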
class SplayList(list):
"""Like a `splay tree`_, the SplayList facilitates moving higher
utility items closer to the front of the list for faster access.
.. _splay tree: https://en.wikipedia.org/wiki/Splay_tree
"""
def shift(self, item_index, dest_index=0):
if item_index == dest_index:
return
item = self.pop(item_index)
self.insert(dest_index, item)
def swap(self, item_index, dest_index):
self[dest_index], self[item_index] = self[item_index], self[dest_index]
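
# --- Illustrative usage sketch (editor's addition, not part of the library) ---
# SplayList keeps normal list semantics; shift() and swap() just move
# frequently used items toward the front. The values here are arbitrary.
def _splay_list_example():
    sl = SplayList(['rare', 'common', 'hot'])
    sl.shift(2)     # 'hot' moves to the front -> ['hot', 'rare', 'common']
    sl.swap(1, 2)   # -> ['hot', 'common', 'rare']
    return sl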
# Tests and tuning
if __name__ == '__main__':
def test_splay():
splay = SplayList(xrange(10))
splay.swap(0, 9)
assert splay[0] == 9
assert splay[-1] == 0
splay.shift(-2)
assert splay[0] == 8
assert splay[-1] == 0
assert len(splay) == 10
def main():
import os
bl = BarrelList()
bl.insert(0, 0)
bl.insert(1, 1)
bl.insert(0, -1)
bl.extend(range(100000))
bl._balance_list(0)
bl.pop(50000)
rands = [ord(i) * x for i, x in zip(os.urandom(1024), range(1024))]
bl2 = BarrelList(rands)
bl2.sort()
print(bl2[:10])
print(bl2[:-10:-1])
bl3 = BarrelList(range(int(1e5)))
for i in range(10000):
bl3.insert(0, bl3.pop(len(bl3) // 2))
del bl3[10:5000]
bl3[:20:2] = range(0, -10, -1)
import pdb;pdb.set_trace()
_TUNE_SETUP = """\
from listutils import BarrelList
bl = BarrelList()
bl._size_factor = %s
bl.extend(range(int(%s)))
"""
def tune():
from collections import defaultdict
import gc
from timeit import timeit
data_size = 1e5
old_size_factor = size_factor = 512
all_times = defaultdict(list)
min_times = {}
step = 512
while abs(step) > 4:
gc.collect()
for x in range(3):
tottime = timeit('bl.insert(0, bl.pop(len(bl)//2))',
_TUNE_SETUP % (size_factor, data_size),
number=10000)
all_times[size_factor].append(tottime)
min_time = round(min(all_times[size_factor]), 3)
min_times[size_factor] = min_time
print(size_factor, min_time, step)
if min_time > (min_times[old_size_factor] + 0.002):
step = -step // 2
old_size_factor = size_factor
size_factor += step
print(tottime)
try:
tune() # main()
except Exception as e:
import pdb;pdb.post_mortem()
raise
| mgaitan/boltons | boltons/listutils.py | Python | bsd-3-clause | 12,823 |
#!/usr/bin/env python
#
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This file contains the replication synchronization checker utility. It is used
to check the data consistency between master and slaves (and synchronize the
data if requested by the user).
"""
from mysql.utilities.common.tools import check_python_version
# Check Python version compatibility
check_python_version()
import os
import sys
from mysql.utilities.command.rpl_sync_check import check_data_consistency
from mysql.utilities.common.messages import (
ERROR_MASTER_IN_SLAVES, PARSE_ERR_DISCO_REQ_MASTER,
PARSE_ERR_OPT_REQ_NON_NEGATIVE_VALUE, PARSE_ERR_OPT_REQ_GREATER_VALUE,
PARSE_ERR_OPT_REQ_VALUE, PARSE_ERR_OPTS_EXCLD,
PARSE_ERR_SLAVE_DISCO_REQ
)
from mysql.utilities.common.options import (add_discover_slaves_option,
add_master_option,
add_slaves_option,
add_ssl_options, add_verbosity,
check_server_lists,
db_objects_list_to_dictionary,
setup_common_options,
check_password_security)
from mysql.utilities.common.server import check_hostname_alias, Server
from mysql.utilities.common.tools import check_connector_python
from mysql.utilities.common.topology import parse_topology_connections
from mysql.utilities.exception import UtilError, UtilRplError
# Check for connector/python
if not check_connector_python():
sys.exit(1)
# Constants
NAME = "MySQL Utilities - mysqlrplsync"
DESCRIPTION = "mysqlrplsync - replication synchronization checker utility"
USAGE = ("%prog --master=user:pass@host:port --slaves=user:pass@host:port \\\n"
" [<db_name>[.<tbl_name>]]")
EXTENDED_HELP = """
Introduction
------------
The mysqlrplsync utility is designed to check if replication servers with
GTIDs enabled are synchronized. In other words, it checks the data consistency
between a master and a slave or between two slaves.
The utility permits you to run the check while replication is active. The
synchronization algorithm is applied using GTID information to identify those
transactions that differ (missing, not read, etc.) between the servers. During
the process, the utility waits for the slave to catch up to the master to
ensure all GTIDs have been read prior to performing the data consistency
check.
Note: if replication is not running (e.g., all slaves are stopped), the
utility can still perform the check, but the step to wait for the slave to
catch up to the master will be skipped. If you want to run the utility on a
stopped replication topology, you should ensure the slaves are up to date
first.
By default, all data is included in the comparison. To check specific
databases or tables, list each element as a separated argument for the
utility using full qualified names as shown in the following examples.
# Check the data consistency of a replication topology, explicitly
# specifying the master and slaves.
$ mysqlrplsync --master=root:pass@host1:3306 \\
--slaves=rpl:pass@host2:3306,rpl:pass@host3:3306
# Check the data consistency of a replication topology, specifying the
# master and using the slaves discovery feature.
$ mysqlrplsync --master=root:pass@host1:3306 \\
--discover-slaves-login=rpl:pass
# Check the data consistency only between specific slaves (no check
# performed on the master).
$ mysqlrplsync --slaves=rpl:pass@host2:3306,rpl:pass@host3:3306
# Check the data consistency of a specific database (db1) and table
# (db2.t1), explicitly specifying master and slaves.
$ mysqlrplsync --master=root:pass@host1:3306 \\
--slaves=rpl:pass@host2:3306,rpl:pass@host3:3306 \\
db1 db2.t1
# Check the data consistency of all data excluding a specific database
# (db2) and table (db1.t2), specifying the master and using slave
# discovery.
$ mysqlrplsync --master=root:pass@host1:3306 \\
--discover-slaves-login=rpl:pass --exclude=db2,db1.t2
Helpful Hints
-------------
- The default timeout for performing the table checksum is 5 seconds.
This value can be changed with the --checksum-timeout option.
- The default timeout for waiting for slaves to catch up is 300 seconds.
This value can be changed with the --rpl-timeout option.
- The default interval to periodically verify if a slave has read all of
the GTIDs from the master is 3 seconds. This value can be changed
with the --interval option.
"""
if __name__ == '__main__':
# Setup the command parser (with common options).
parser = setup_common_options(os.path.basename(sys.argv[0]),
DESCRIPTION, USAGE, server=False,
extended_help=EXTENDED_HELP)
# Add the --discover-slaves-login option.
add_discover_slaves_option(parser)
# Add the --master option.
add_master_option(parser)
# Add the --slaves option.
add_slaves_option(parser)
# Add the --ssl options
add_ssl_options(parser)
# Add verbosity option (no --quite option).
add_verbosity(parser, False)
# Add timeout options.
parser.add_option("--rpl-timeout", action="store", dest="rpl_timeout",
type="int", default=300,
help="maximum timeout in seconds to wait for "
"synchronization (slave waiting to catch up to "
"master). Default = 300.")
parser.add_option("--checksum-timeout", action="store",
dest="checksum_timeout", type="int", default=5,
help="maximum timeout in seconds to wait for CHECKSUM "
"query to complete. Default = 5.")
# Add polling interval option.
parser.add_option("--interval", "-i", action="store", dest="interval",
type="int", default="3", help="interval in seconds for "
"polling slaves for sync status. Default = 3.")
# Add option to exclude databases/tables check.
parser.add_option("--exclude", action="store", dest="exclude",
type="string", default=None,
help="databases or tables to exclude. Example: "
"<db_name>[.<tbl_name>]. List multiple names in a "
"comma-separated list.")
# Parse the options and arguments.
opt, args = parser.parse_args()
# Check security settings
check_password_security(opt, args)
# At least one of the options --discover-slaves-login or --slaves is
# required.
if not opt.discover and not opt.slaves:
parser.error(PARSE_ERR_SLAVE_DISCO_REQ)
# The --discover-slaves-login and --slaves options cannot be used
# simultaneously (only one).
if opt.discover and opt.slaves:
parser.error(PARSE_ERR_OPTS_EXCLD.format(
opt1='--discover-slaves-login', opt2='--slaves'
))
if opt.discover and not opt.master:
parser.error(PARSE_ERR_DISCO_REQ_MASTER)
# Check timeout values, must be greater than zero.
if opt.rpl_timeout < 0:
parser.error(
PARSE_ERR_OPT_REQ_NON_NEGATIVE_VALUE.format(opt='--rpl-timeout')
)
if opt.checksum_timeout < 0:
parser.error(
PARSE_ERR_OPT_REQ_NON_NEGATIVE_VALUE.format(
opt='--checksum-timeout'
)
)
# Check interval value, must be greater than zero.
if opt.interval < 1:
parser.error(PARSE_ERR_OPT_REQ_GREATER_VALUE.format(opt='--interval',
val='zero'))
# Check slaves list (master cannot be included in slaves list).
if opt.master:
check_server_lists(parser, opt.master, opt.slaves)
# Parse the master and slaves connection parameters (no candidates).
try:
master_val, slaves_val, _ = parse_topology_connections(
opt, parse_candidates=False
)
except UtilRplError:
_, err, _ = sys.exc_info()
sys.stderr.write("ERROR: {0}\n".format(err.errmsg))
sys.exit(1)
# Check host aliases (master cannot be included in slaves list).
if master_val:
for slave_val in slaves_val:
if check_hostname_alias(master_val, slave_val):
master = Server({'conn_info': master_val})
slave = Server({'conn_info': slave_val})
parser.error(
ERROR_MASTER_IN_SLAVES.format(master_host=master.host,
master_port=master.port,
slaves_candidates="slaves",
slave_host=slave.host,
slave_port=slave.port)
)
# Process list of databases/tables to exclude (check format errors).
data_to_exclude = {}
if opt.exclude:
exclude_list = [val for val in opt.exclude.split(',') if val]
data_to_exclude = db_objects_list_to_dictionary(parser, exclude_list,
'the --exclude option')
elif opt.exclude == '':
# Issue an error if --exclude is used with no value.
parser.error(PARSE_ERR_OPT_REQ_VALUE.format(opt='--exclude'))
# Process list of databases/tables to include (check format errors).
data_to_include = {}
if args:
data_to_include = db_objects_list_to_dictionary(parser, args,
'the database/table '
'arguments')
# Create dictionary of options
options = {
'discover': opt.discover,
'verbosity': 0 if opt.verbosity is None else opt.verbosity,
'rpl_timeout': opt.rpl_timeout,
'checksum_timeout': opt.checksum_timeout,
'interval': opt.interval,
}
# Create a replication synchronizer and check the topology's consistency.
issues_found = 0
try:
issues_found = check_data_consistency(master_val, slaves_val, options,
data_to_include, data_to_exclude)
except UtilError:
_, err, _ = sys.exc_info()
sys.stderr.write("ERROR: {0}\n".format(err.errmsg))
sys.exit(1)
# Exit with the appropriate status.
if issues_found == 0:
sys.exit(0)
else:
sys.exit(1)
| scavarda/mysql-dbcompare | mysql-utilities-1.6.0/scripts/mysqlrplsync.py | Python | apache-2.0 | 11,414 |
from pylab import *
import scipy
import mpmath as mp
import traceback
import numpy
#import fpectl
#fpectl.turnon_sigfpe()
import scipy.linalg as la
import scipy.sparse.linalg as sla
import SlepcDet
import gkcStyle
import iCode
class Species:
def __init__(self, m=0., q=1., T=1., n=0., eta=0., name="Unamed"):
self.m = m
self.q = q
self.T = T
self.n = n
self.eta = eta
self.name = name
############################## Settings for Integral Mode ######################################
Nx = 129
# My setup
#species = [ Species(m=0.,q=-1.,T=1.,n=1., eta=0.,name= "Adiab"), Species(1.,1.,1.,1., 5., "Ion") ]#, Species(1./1836.,-1.,1.,1., 4., "Electron")]
species = [ Species(name= "Adiab"), Species(1.,1.,1.,1.,5., "Ion"), Species(1./1837.,-1.,1.,1., 2., "Electron") ]
#Ln, Ls, Lx, Ly, lambda_D2, ky_list, w0 = 1., 1./0.2, 32., 64., 0., [0.4908] , -0.25 + 0.05j
# adiab Ln, Ls, Lx, Ly, lambda_D2, ky_list, w0 = 1., 1./0.2, 12., 64., 0., [2.55] , -0.25 + 0.05j
Ln, Ls, Lx, Ly, lambda_D2, ky_list, w0 = 1., 1./0.2, 12., 64., 0., [2.55] , -0.3 + 0.06j
#Ln, Ls, Lx, Ly, lambda_D2, ky_list, w0 = 1., 1./0.2, 32., 64., 0., logspace(-1, log10(2*pi), 32 ), -0.25 + 0.05j
## Gao Setup
#species = [ Species(name= "Adiab"), Species(m=1836.,q=1.,T=1.,n=1.,eta=0., name="Ion"), Species(m=1.,q=-1.,T=1.,n=1., eta=3., name="Electron") ]
#Ln, Ls, Lx, Ly, lambda_D2, ky_list, w0 = 1., 0.025, 30., 256., 1., [0.3], -0.01 + 0.025j
######################## Setup Grid ######################
dx, kx_list, dk = Lx/Nx, 2.*pi/Lx * linspace(-(Nx-1)/2., (Nx-1)/2., Nx), 2.*pi/Lx
X = linspace(-Lx/2, Lx/2, Nx)
sol = []
def getMinEigenvalue(M):
eigvals = scipy.linalg.eigvals(M)
idx = argmin(abs(eigvals))
return eigvals[idx]
"""
# get minimal non-negative eigenvalue
eigvals_lz = eigvals[where(real(eigvals) > 0.)]
idx = argmin(real(eigvals_lz))
return eigvals_lz[idx]
"""
def solveDispersion(ky):
L = zeros((Nx,Nx), dtype=complex)
def setup_L(w):
L[:,:] = 0.
iCode.setupMatrixPy(species, w, ky, X, kx_list, Ls, Ln, Nx, L, dk*dx, lambda_D2)
return L
def solveEquation(w):
w = complex(w)
L = setup_L(w)
        # Get the minimum absolute eigenvalue.
        #
        # The non-linear eigenvalue problem obeys
        # det(L) = 0. This requirement is equivalent
        # to an eigenvalue 0 of the linear eigenvalue
        # problem det(L' - lambda I) = 0, i.e. the non-linear
        # requirement is met once lambda = 0.
        #
        # [ This is less sensitive (and thus numerically less demanding)
        #   than calculating the determinant directly ]
val = getMinEigenvalue(L)
#val = det(A)
#(sign, logdet) = np.linalg.slogdet(L)
#val = sign * logdet
print ky, " w : %.8f+%.8f j" % (real(complex(w)), imag(complex(w))) , " Determinant : %.2e " % abs(val)
return val
try :
#omega = complex(mp.findroot(solveEquation, (w0, w0+0.05, w0-0.005j), solver='muller', tol=1.e-15, ftol=1.e-15, maxsteps=5000))
omega = complex(mp.findroot(solveEquation, (w0, w0-0.01, w0+0.001j), solver='muller', tol=1.e-15, ftol=1.e-15, maxsteps=5000))
except:
return float('nan') + 1.j * float('nan')
#traceback.print_exc(file=sys.stdout)
try:
# n = 0
# solution found for w0, get solution vector
werr = solveEquation(omega)
L = setup_L(omega)
# We found our eigenvalue omega, now we use the
# inverse iteration to find the closest eigenvector
# to the eigenvalue
L__lambda_I = L - omega * eye(Nx)
        # Start with a random initial eigenvector
        b = 1. * rand(Nx) + 1.j * rand(Nx)
        # Convergence is fast, thus a large iteration number is not required.
        # However, it is not obvious how to best check the error,
        # e.g. via A dot phi.
        # Also tested: Rayleigh-Quotient iteration, but it was less successful.
for n in range(128):
# rescale b
b = b/real(sqrt(vdot(b,b)))
# inverse iteration
b = solve(L__lambda_I, b)
# calculate error
r = dot(L,b)
residual = real(sqrt(vdot(r,r)))
print("I-I Residual : %.2e " % residual )
if (residual < 1.e-9) : break
clf()
fig = gkcStyle.newFigure(ratio='1.41:1', basesize=9)
fig.suptitle("$\omega_0$ = %.4f %.4fi $\pm$ %.2e %.2e i" % (real(omega), imag(omega), real(werr), imag(werr)))
###################### Plot Fourier Modes ##########3
plot(kx_list, real(b), 'r.-', label="real")
plot(kx_list, imag(b), '.-', label="imag", color=gkcStyle.color_indigo)
xlim((min(kx_list), max(kx_list)))
xlabel("$k_x$")
ylabel("$\phi(k_x)$")
legend(ncol=2).draw_frame(0)
savefig(str(ky) + "_Plot1.pdf", bbox_inches='tight')
################### Plot real modes ########3
clf()
# We have to transform to FFTW format, which has
# form of [ k=0, k=1, ..., k = N/2, k = -(N/2-1), ..., k=-1 ]
F = append(append(b[Nx/2], b[Nx/2+1:]), b[:Nx/2])
K = np.fft.ifft(F)
K = append(append(K[Nx/2], K[Nx/2+1:]), K[:Nx/2])
# Correct for phase
K = K * exp(-1.j*arctan2(imag(sum(K)),real(sum(K))))
#print " Phase : " , arctan2(imag(sum(K)),real(sum(K)))
#F_list = append(append(kx_list[Nx/2], kx_list[Nx/2+1:]), kx_list[:Nx/2])
#print "fft kx_list -----------> " , F_list
plot(X, real(K), 'r.-', label='real')
plot(X, imag(K), '.-', label='imag', color=gkcStyle.color_indigo)
plot(X, abs(K), 'g-', label='abs', linewidth=5., alpha=0.5)
xlim((min(X), max(X)))
xlabel("$x$")
ylabel("$\phi(x)$")
legend(ncol=2).draw_frame(0)
savefig(str(ky) + "_Plot2.pdf", bbox_inches='tight')
################ Plot Contour
clf()
y = linspace(0., Ly, 512)
KxKy = zeros((Nx, 65), dtype=complex)
        nky = int(ky * Ly / (2.*pi))  # ky mode index must be an integer
KxKy[:,nky] = K
#np.fft.ifft(F)
XY = np.fft.irfft(KxKy, axis=1, n=512)
xlabel("$x$")
ylabel("$y$")
contourf(X, y, XY.T, 20, vmin=-abs(XY).max(), vmax=abs(XY).max())
#contourf(X, y, XY.T, 20, vmin=-abs(XY).max(), vmax=abs(XY).max())
colorbar()
savefig(str(ky) + "_Plot3.pdf", bbox_inches='tight')
# append and normalize
sol.append(np.fft.ifft(b/abs(b).max()))
except:
traceback.print_exc(file=sys.stdout)
return omega
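# --- Illustrative sketch (editor's addition) ---
# A stripped-down version of the eigenvector refinement used above: plain
# inverse iteration toward the eigenvector whose eigenvalue lies closest to w.
# The step count is an arbitrary choice for the example.
def inverse_iteration_sketch(L, w, steps=64):
    n = L.shape[0]
    b = rand(n) + 1.j * rand(n)
    shifted = L - w * eye(n)
    for _ in range(steps):
        b = solve(shifted, b)             # one inverse-iteration step
        b = b / sqrt(abs(vdot(b, b)))     # renormalize
    return b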
w_list1 = []
def plotMode():
for ky in ky_list:
wn = solveDispersion(ky)
w_list1.append (wn)
def plotContours():
ky = 0.5
R = linspace(-0.5, 0.5, 8)
I = linspace(-.3, 0.3, 8)
V = zeros((len(R),len(I)), dtype=complex)
fig = figure(figsize=(30,10))
n = 0
for r in range(len(R)):
for i in range(len(I)):
A = zeros((Nx,Nx), dtype=complex)
iCode.setupMatrixPy(species, R[r]+1.j*I[i], ky, X, kx_list, Ls, Ln, Nx, A, dk*dx, lambda_D2)
#val = getMinEigenvalue(A)
(sign, logdet) = np.linalg.slogdet(A)
val = sign * logdet
V[r,i] = val
#print "x, y", R[r], I[i] , " r : ", val
print n, "/", len(R) * len(I)
n = n+1
"""
#subplot(131)
#norm = mpl.colors.Normalize(vmin = -1., vmax = 1.)
#contourf(R,I,real(V), 100, vmin=-1., vmax=1., norm = norm)
xlabel("Real")
ylabel("Imag")
cb = colorbar()
cb.set_clim(vmin=-1, vmax=1)
#subplot(132)
#contourf(R,I,imag(V), 100, vmin=-1., vmax=1.)
#norm = mpl.colors.Normalize(vmin = -1., vmax = 1.)
contourf(R,I,imag(V), 100, vmin=-1., vmax=1., norm = norm)
xlabel("Real")
ylabel("Imag")
cb = colorbar()
cb.set_clim(vmin=-1, vmax=1)
subplot(133)
"""
pcolor(R,I,log10(abs(V)))
xlabel("Real")
ylabel("Imag")
cb = colorbar()
#cb.set_clim(vmin=0., vmax=1)
#pcolor(R,I,imag(V))
savefig("Contour.png")
#print "(Integral) Solution is w : ",w0
#print "(local) Solution is w : ",w_Local
#plotContours()
plotMode()
################################## Plot Figures ############################
### Plot
clf()
ky_list = array(ky_list)
fig = figure(figsize=(30,10))
subplot(211)
semilogx(ky_list, real(w_list1), 'o-', label='real')
subplot(212)
semilogx(ky_list, imag(w_list1), 'o-', label='imag')
legend(ncol=2, loc='best').draw_frame(0)
xlim((min(ky_list), max(ky_list)))
savefig("Results.png")
"""
# Make 3D Plot kx, ky, z
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
import numpy as np
from matplotlib import cm
from matplotlib import pyplot as plt
clf()
Z = array(sol)
ax = fig.add_subplot(121, projection='3d')
_X,_ky = np.meshgrid(X,ky_list)
ax.plot_surface(_X, _ky, real(Z), rstride=1, cstride=1, cmap=cm.jet)
ax = fig.add_subplot(122, projection='3d')
ax.plot_surface(_X, _ky, imag(Z), rstride=1, cstride=1, cmap=cm.jet)
#ax.set_zlim3d(0, 1)
ax.set_xlabel(r'$\phi_\mathrm{real}$')
ax.set_ylabel(r'$\phi_\mathrm{im}$')
ax.w_yaxis.set_scale("log")
savefig("Results_3D.png")
"""
| philscher/gkc | Benchmarks/IntegralCode/solveiCode.py | Python | gpl-3.0 | 9,492 |
# https://www.codewars.com/kata/515e271a311df0350d00000f
def square_sum(numbers):
return sum(x**2 for x in numbers)
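
# Tiny usage sketch (editor's addition): 1**2 + 2**2 + 2**2 == 9.
if __name__ == '__main__':
    assert square_sum([1, 2, 2]) == 9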
| fahadkaleem/CodeWars | 8 kyu/python/Square(n) Sum.py | Python | mit | 121 |
#!/usr/bin/env python
# coding=UTF-8
import os
from multiprocessing import Process, Queue, current_process, Lock
from time import sleep
def ntptest(host):
sleep (10)
return 0
def worker(queue,lock):
lock.acquire()
print 'starting child process with id: ', os.getpid()
print 'parent process:', os.getppid()
lock.release()
host = queue.get()
ntptest(host)
lock.acquire()
print "changing ntp for %s"%host
lock.release()
return 0
def main():
PROCESSUS = 3
hosts = ["10.10.10.100","10.10.10.99","127.0.0.1","192.168.22.2", "172.15.1.1"]
queue = Queue()
lock = Lock()
for host in hosts:
print ">>> Host %s"%host
queue.put(host)
for i in hosts:
proc = Process(target=worker, args=(queue,lock))
proc.start()
#proc.join()
if __name__ == '__main__':
main()
| phocean/netios | test/jc.py | Python | gpl-2.0 | 789 |
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - WSGI application
@copyright: 2003-2008 MoinMoin:ThomasWaldmann,
2008-2008 MoinMoin:FlorianKrupicka
@license: GNU GPL, see COPYING for details.
"""
import os
from MoinMoin import log
logging = log.getLogger(__name__)
from MoinMoin.web.contexts import AllContext, Context, XMLRPCContext
from MoinMoin.web.exceptions import HTTPException
from MoinMoin.web.request import Request, MoinMoinFinish, HeaderSet
from MoinMoin.web.utils import check_forbidden, check_surge_protect, fatal_response, \
redirect_last_visited
from MoinMoin.Page import Page
from MoinMoin import auth, config, i18n, user, wikiutil, xmlrpc, error
from MoinMoin.action import get_names, get_available_actions
def set_umask(new_mask=0777^config.umask):
""" Set the OS umask value (and ignore potential failures on OSes where
this is not supported).
Default: the bitwise inverted value of config.umask
"""
try:
old_mask = os.umask(new_mask)
except:
# maybe we are on win32?
pass
def init(request):
"""
Wraps an incoming WSGI request in a Context object and initializes
several important attributes.
"""
set_umask() # do it once per request because maybe some server
# software sets own umask
if isinstance(request, Context):
context, request = request, request.request
else:
context = AllContext(request)
context.clock.start('total')
context.clock.start('init')
context.lang = setup_i18n_preauth(context)
context.session = context.cfg.session_service.get_session(context)
context.user = setup_user(context, context.session)
context.lang = setup_i18n_postauth(context)
def finish():
pass
context.finish = finish
context.reset()
context.clock.stop('init')
return context
def run(context):
""" Run a context trough the application. """
context.clock.start('run')
request = context.request
# preliminary access checks (forbidden, bots, surge protection)
try:
try:
check_forbidden(context)
check_surge_protect(context)
action_name = context.action
# handle XMLRPC calls
if action_name == 'xmlrpc':
response = xmlrpc.xmlrpc(XMLRPCContext(request))
elif action_name == 'xmlrpc2':
response = xmlrpc.xmlrpc2(XMLRPCContext(request))
else:
response = dispatch(request, context, action_name)
context.cfg.session_service.finalize(context, context.session)
return response
except MoinMoinFinish:
return request
finally:
context.finish()
context.clock.stop('run')
def remove_prefix(path, prefix=None):
""" Remove an url prefix from the path info and return shortened path. """
# we can have all action URLs like this: /action/ActionName/PageName?action=ActionName&...
# this is just for robots.txt being able to forbid them for crawlers
if prefix is not None:
prefix = '/%s/' % prefix # e.g. '/action/'
if path.startswith(prefix):
# remove prefix and action name
path = path[len(prefix):]
action, path = (path.split('/', 1) + ['', ''])[:2]
path = '/' + path
return path
def dispatch(request, context, action_name='show'):
cfg = context.cfg
# The last component in path_info is the page name, if any
path = remove_prefix(request.path, cfg.url_prefix_action)
if path.startswith('/'):
pagename = wikiutil.normalize_pagename(path, cfg)
else:
pagename = None
# need to inform caches that content changes based on:
# * cookie (even if we aren't sending one now)
# * User-Agent (because a bot might be denied and get no content)
# * Accept-Language (except if moin is told to ignore browser language)
hs = HeaderSet(('Cookie', 'User-Agent'))
if not cfg.language_ignore_browser:
hs.add('Accept-Language')
request.headers['Vary'] = str(hs)
# Handle request. We have these options:
# 1. jump to page where user left off
if not pagename and context.user.remember_last_visit and action_name == 'show':
response = redirect_last_visited(context)
# 2. handle action
else:
response = handle_action(context, pagename, action_name)
if isinstance(response, Context):
response = response.request
return response
def handle_action(context, pagename, action_name='show'):
""" Actual dispatcher function for non-XMLRPC actions.
Also sets up the Page object for this request, normalizes and
redirects to canonical pagenames and checks for non-allowed
actions.
"""
_ = context.getText
cfg = context.cfg
# pagename could be empty after normalization e.g. '///' -> ''
# Use localized FrontPage if pagename is empty
if not pagename:
context.page = wikiutil.getFrontPage(context)
else:
context.page = Page(context, pagename)
if '_' in pagename and not context.page.exists():
pagename = pagename.replace('_', ' ')
page = Page(context, pagename)
if page.exists():
url = page.url(context)
return context.http_redirect(url)
msg = None
# Complain about unknown actions
if not action_name in get_names(cfg):
msg = _("Unknown action %(action_name)s.") % {
'action_name': wikiutil.escape(action_name), }
# Disallow non available actions
elif action_name[0].isupper() and not action_name in \
get_available_actions(cfg, context.page, context.user):
msg = _("You are not allowed to do %(action_name)s on this page.") % {
'action_name': wikiutil.escape(action_name), }
if not context.user.valid:
# Suggest non valid user to login
msg += " " + _("Login and try again.")
if msg:
context.theme.add_msg(msg, "error")
context.page.send_page()
# Try action
else:
from MoinMoin import action
handler = action.getHandler(context, action_name)
if handler is None:
msg = _("You are not allowed to do %(action_name)s on this page.") % {
'action_name': wikiutil.escape(action_name), }
if not context.user.valid:
# Suggest non valid user to login
msg += " " + _("Login and try again.")
context.theme.add_msg(msg, "error")
context.page.send_page()
else:
handler(context.page.page_name, context)
return context
def setup_user(context, session):
""" Try to retrieve a valid user object from the request, be it
either through the session or through a login. """
# first try setting up from session
userobj = auth.setup_from_session(context, session)
userobj, olduser = auth.setup_setuid(context, userobj)
context._setuid_real_user = olduser
# then handle login/logout forms
form = context.request.values
if 'login' in form:
params = {
'username': form.get('name'),
'password': form.get('password'),
'attended': True,
'openid_identifier': form.get('openid_identifier'),
'stage': form.get('stage')
}
userobj = auth.handle_login(context, userobj, **params)
elif 'logout' in form:
userobj = auth.handle_logout(context, userobj)
else:
userobj = auth.handle_request(context, userobj)
# if we still have no user obj, create a dummy:
if not userobj:
userobj = user.User(context, auth_method='invalid')
return userobj
def setup_i18n_preauth(context):
""" Determine language for the request in absence of any user info. """
if i18n.languages is None:
i18n.i18n_init(context)
lang = None
if i18n.languages:
cfg = context.cfg
if not cfg.language_ignore_browser:
for l, w in context.request.accept_languages:
logging.debug("client accepts language %r, weight %r" % (l, w))
if l in i18n.languages:
logging.debug("moin supports language %r" % l)
lang = l
break
else:
logging.debug("moin does not support any language client accepts")
if not lang:
if cfg.language_default in i18n.languages:
lang = cfg.language_default
logging.debug("fall back to cfg.language_default (%r)" % lang)
if not lang:
lang = 'en'
logging.debug("emergency fallback to 'en'")
logging.debug("setup_i18n_preauth returns %r" % lang)
return lang
def setup_i18n_postauth(context):
""" Determine language for the request after user-id is established. """
user = context.user
if user and user.valid and user.language:
logging.debug("valid user that has configured some specific language to use in his user profile")
lang = user.language
else:
logging.debug("either no valid user or no specific language configured in user profile, using lang setup by setup_i18n_preauth")
lang = context.lang
logging.debug("setup_i18n_postauth returns %r" % lang)
return lang
class Application(object):
def __init__(self, app_config=None):
class AppRequest(Request):
given_config = app_config
self.Request = AppRequest
def __call__(self, environ, start_response):
try:
request = None
request = self.Request(environ)
context = init(request)
response = run(context)
context.clock.stop('total')
except HTTPException, e:
response = e
except error.ConfigurationError, e:
# this is stuff the user should see on the web interface:
response = fatal_response(e)
except Exception, e:
# we avoid raising more exceptions here to preserve the original exception
url_info = request and ' [%s]' % request.url or ''
# have exceptions logged within the moin logging framework:
logging.exception("An exception has occurred%s." % url_info)
# re-raise exception, so e.g. the debugger middleware gets it
raise
return response(environ, start_response)
#XXX: default application using the default config from disk
application = Application()
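
# Illustrative sketch (editor's addition): one way the default `application`
# object could be served for local testing, using the standard library's
# wsgiref server. The port is an arbitrary choice.
def _serve_for_testing(port=8080):
    from wsgiref.simple_server import make_server
    make_server('localhost', port, application).serve_forever()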
| Glottotopia/aagd | moin/local/moin/build/lib.linux-x86_64-2.6/MoinMoin/wsgiapp.py | Python | mit | 10,909 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform.googletest import GetTempDir
from tensorflow.python.platform.googletest import main
from tensorflow.python.framework.test_util import TensorFlowTestCase as TestCase
from tensorflow.python.framework.test_util import IsGoogleCudaEnabled as IsBuiltWithCuda
get_temp_dir = GetTempDir
| arunhotra/tensorflow | tensorflow/python/platform/test.py | Python | apache-2.0 | 423 |
import json, random, time
import stockfighter_minimal as sf
account = "NOISEBOTS"
venue, symbol = "TESTEX", "FOOBAR"
sf.set_web_url("http://127.0.0.1:8000/ob/api/")
sf.change_api_key("noisekey")
def main():
global account
global venue
global symbol
orderType = "limit"
all_orders = []
while 1:
try:
price = sf.quote(venue, symbol)["last"]
if price == 0:
price = 5000
except:
price = 5000
price += random.randint(-100, 100)
if price < 0:
price = 0
qty = 100
qty += random.randint(-50, 50)
direction = random.choice(["buy", "sell"])
r = sf.execute_d(
{
"price" : price,
"qty" : qty,
"direction" : direction,
"orderType" : orderType,
"account" : account,
"venue" : venue,
"stock" : symbol
},
verbose = True)
try:
id = r["id"]
all_orders.append(id)
except:
print("Trouble getting ID.")
time.sleep(random.uniform(0.3, 0.7))
if len(all_orders) > 10:
id = all_orders.pop(0)
sf.cancel(venue, symbol, id, verbose = True)
if __name__ == "__main__":
main()
| fohristiwhirl/disorderBook | bots/bot_noise.py | Python | mit | 1,391 |
# -*- coding: utf-8 -*-
import codecs
import jieba
from jieba.analyse import extract_tags
import pandas as pd
import numpy as np
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib
import matplotlib.pyplot as plt
from scipy.misc import imread
matplotlib.rcParams['figure.figsize'] = (10.0, 5.0)
st_text = codecs.open('data/origin/st.txt', 'r', encoding='UTF-8').read()
jieba.dt.add_word("章北海")
jieba.dt.add_word("黑暗森林")
def jieba_segment():
"""
    Word segmentation: split the source text with jieba.
"""
word_list = jieba.cut(st_text, cut_all=False)
    # write the segmented words to a file
with open("data/segdata/st_seg_jb.txt", "a", encoding='UTF-8') as f:
f.write(" ".join(word_list))
print("分词完成")
def jieba_keywords():
"""
    Keyword extraction.
"""
key_words = extract_tags(st_text, topK=300, withWeight=True, allowPOS=())
    # stop words
stopwords = pd.read_csv("data/origin/stop_words.txt", index_col=False,
quoting=3, sep="\n", names=['stopword'], encoding='utf-8')
words = [word for word, weight in key_words]
keywords_df = pd.DataFrame({'keywords': words})
    # remove the stop words
keywords_df = keywords_df[~keywords_df.keywords.isin(stopwords.stopword.tolist())]
word_freq = []
for word in keywords_df.keywords.tolist():
for w, k in key_words:
if word == w:
word_freq.append((word, k))
print("================去掉停用词之后================")
print(word_freq)
show_wordCloud(word_freq)
def show_wordCloud(word_freq):
"""
    Render the word cloud and save it to an image file.
    """
    font = r'C:\Windows\Fonts\msyh.ttc'  # a CJK-capable font is required, otherwise rendering fails
    color_mask = imread("resource/timg.jpg")  # read the background image used as the mask
wcloud = WordCloud(
font_path=font,
        # background color
        background_color="white",
        # word cloud shape mask
        mask=color_mask,
        # maximum number of words
        max_words=2000,
        # maximum font size
max_font_size=80)
wcloud.generate_from_frequencies(dict(word_freq))
    # the following code displays the image
plt.imshow(wcloud)
plt.axis("off")
plt.show()
wcloud.to_file("data/wcimage/三体词云_3.png")
if __name__ == '__main__':
# jieba_segment()
# jieba_keywords()
word_list = jieba.cut("韩国 OLENS 奥伦斯清纯超自然巧克力225度韩国 OLENS teen teen natural choco ", cut_all=False)
print(word_list)
rss = ''
for each in word_list:
rss = rss + each + ' / '
print(rss)
| jarvisqi/nlp_learn | segment/jieba_segment.py | Python | mit | 2,548 |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: errors.py
import mcl.status
ERR_SUCCESS = mcl.status.MCL_SUCCESS
ERR_INVALID_PARAM = mcl.status.framework.ERR_START
ERR_FAILED = mcl.status.framework.ERR_START + 1
ERR_MARSHAL_FAILED = mcl.status.framework.ERR_START + 2
ERR_CALLBACK_FAILED = mcl.status.framework.ERR_START + 3
ERR_FUNCTION_NOT_FOUND = mcl.status.framework.ERR_START + 4
ERR_ALLOC_FAILED = mcl.status.framework.ERR_START + 5
ERR_QUERY_FAILED = mcl.status.framework.ERR_START + 6
ERR_EXCEPTION_THROWN = mcl.status.framework.ERR_START + 7
ERR_OPEN_FAILED = mcl.status.framework.ERR_START + 8
errorStrings = {ERR_INVALID_PARAM: 'Invalid parameter(s)',
ERR_FAILED: 'Unspecified failure',
ERR_MARSHAL_FAILED: 'Failed to marshal data',
ERR_CALLBACK_FAILED: 'Failed to send callback data',
ERR_FUNCTION_NOT_FOUND: 'Unable to find a required function',
ERR_ALLOC_FAILED: 'Failed to allocate necessary memory',
ERR_QUERY_FAILED: 'Query failed',
ERR_EXCEPTION_THROWN: 'Exception encountered',
ERR_OPEN_FAILED: 'Unable to open required resource'
} | DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/network/cmd/netconnections/errors.py | Python | unlicense | 1,200 |
#!/usr/bin/env python3
from yr.libyr import Yr
weather = Yr(location_name='Norge/Telemark/Skien/Skien')
wind_speed = dict()
wind_speed['data'] = [{'from': forecast['@from'], 'to': forecast['@to'], 'speed': float(forecast['windSpeed']['@mps'])} for forecast in weather.forecast()]
wind_speed['credit'] = weather.credit
print(wind_speed)
| HugoShamrock/python-yr | yr/examples/wuurrd.py | Python | gpl-3.0 | 340 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2011 Camptocamp SA (http://www.camptocamp.com)
# All Rights Reserved
#
# Author : Guewen Baconnier (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
{
"name" : "Base Products Merge",
"version" : "1.0",
"author" : "Camptocamp",
"category" : "Generic Modules/Base",
"description":"""
To merge 2 products, select them in the list view and execute the Action "Merge Products".
The selected products are deactivated and a new one is created with :
    - When a value is the same on both resources : the value
    - When a value is different between the resources : you can choose the value to keep in a selection list
    - When a value is set on one resource and empty on the other : the non-empty value is set on the new resource
    - All many2many relations of the 2 resources are created on the new resource.
    - All the one2many relations (invoices, sale_orders, ...) are updated in order to link to the new resource.
""",
"website": "http://camptocamp.com",
"depends" : ['product'],
"init_xml" : [],
"demo_xml" : [],
"update_xml" : [
"wizard/base_product_merge_view.xml",
],
"active": False,
"installable": False
}
| gurneyalex/stock-logistics-warehouse | __unported__/base_product_merge/base_product_merge/__openerp__.py | Python | agpl-3.0 | 2,398 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""ErrorRendezvous handler for collecting errors from multiple threads."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import sys
import threading
import time
import six
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
_UNINTERESTING_ERRORS = (errors.CancelledError,)
class ErrorRendezvous(object):
"""Resolve errors from multiple threads during TPU execution.
TPU errors can occur on the infeed or outfeed threads as well as the main
training thread.
Depending on which thread "wins" and receives the session error first, we may
end up showing users a confusing and non-actionable error message (session
cancelled) instead of a root cause (e.g. a bad filename).
The rendezvous object provides a location to capture these errors until all
threads terminate. At that point we can choose the most informative error
to report.
"""
def __init__(self, num_sources):
# string -> (message, traceback)
self._errors = {}
self._num_sources = num_sources
self._session_cancel_timer = None
def record_error(self, source, exc_info, session=None):
"""Report an exception from the given source.
If a session is passed, a timer will be registered to close it after a few
seconds. This is necessary to ensure the main training loop does not hang
    if an infeed/outfeed error occurs. We sleep a few seconds to allow a more
interesting error from another thread to propagate.
Args:
source: string, source of the error
exc_info: Output from `sys.exc_info` (type, value, traceback)
session: Session to close after delay.
"""
_, value, _ = exc_info
self._errors[source] = exc_info
logging.error('Error recorded from %s: %s', source, value)
if session is not None and self._session_cancel_timer is None:
def _cancel_session():
time.sleep(5)
logging.error('Closing session due to error %s' % value)
try:
session.close()
except: # pylint: disable=bare-except
logging.error(
'\n\n\nFailed to close session after error.'
'Other threads may hang.\n\n\n')
self._session_cancel_timer = threading.Thread(target=_cancel_session,)
self._session_cancel_timer.daemon = True
self._session_cancel_timer.start()
def record_done(self, source):
"""Mark execution source `source` as done.
If an error was originally reported from `source` it is left intact.
Args:
source: `str`, source being recorded
"""
logging.info('%s marked as finished', source)
if source not in self._errors:
self._errors[source] = None
@contextlib.contextmanager
def catch_errors(self, source, session=None):
"""Context manager to report any errors within a block."""
try:
yield
except Exception: # pylint: disable=broad-except
self.record_error(source, sys.exc_info(), session)
def raise_errors(self, timeout_sec=0):
"""Wait for up to `timeout` seconds for all error sources to finish.
Preferentially raise "interesting" errors (errors not in the
    _UNINTERESTING_ERRORS set).
Args:
timeout_sec: Seconds to wait for other error sources.
"""
for _ in range(timeout_sec):
if len(self._errors) == self._num_sources:
break
time.sleep(1)
kept_errors = [(k, v) for (k, v) in self._errors.items() if v is not None]
# First check for any interesting errors, then fall back on the session
# cancelled errors etc.
for k, (typ, value, traceback) in kept_errors:
if isinstance(value, _UNINTERESTING_ERRORS):
continue
else:
logging.warn('Reraising captured error')
six.reraise(typ, value, traceback)
for k, (typ, value, traceback) in kept_errors:
logging.warn('Reraising captured error')
six.reraise(typ, value, traceback)
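# Illustrative sketch (not part of the original module): how a coordinator
# might share one rendezvous between a training loop and an infeed thread.
# The names `sess`, `run_infeed_loop` and `run_training_loop` are placeholders.
#
#   rendezvous = ErrorRendezvous(num_sources=2)
#
#   def infeed_thread_fn():
#     with rendezvous.catch_errors(source='infeed', session=sess):
#       run_infeed_loop()
#     rendezvous.record_done('infeed')
#
#   with rendezvous.catch_errors(source='training loop', session=sess):
#     run_training_loop()
#   rendezvous.record_done('training loop')
#
#   # After all sources finish (or fail), surface the most informative error.
#   rendezvous.raise_errors(timeout_sec=60)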
| kevin-coder/tensorflow-fork | tensorflow/python/tpu/error_handling.py | Python | apache-2.0 | 4,683 |
print int("0", base=2)
print long("1", base=2)
| ArcherSys/ArcherSys | skulpt/test/run/t532.py | Python | mit | 47 |
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import trunk_details
from neutron.api import extensions
# NOTE(armax): because of the API machinery, this extension must be on
# its own. This aims at providing subport information for ports that
# are parent in a trunk so that consumers of the Neutron API, like Nova
# can efficiently access trunk information for things like metadata or
# config-drive configuration.
class Trunk_details(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return trunk_details.NAME
@classmethod
def get_alias(cls):
return trunk_details.ALIAS
@classmethod
def get_description(cls):
return trunk_details.DESCRIPTION
@classmethod
def get_updated(cls):
return trunk_details.TIMESTAMP
def get_required_extensions(self):
return trunk_details.REQUIRED_EXTENSIONS or []
def get_optional_extensions(self):
return trunk_details.OPTIONAL_EXTENSIONS or []
def get_extended_resources(self, version):
if version == "2.0":
return trunk_details.RESOURCE_ATTRIBUTE_MAP
else:
return {}
| cloudbase/neutron | neutron/extensions/trunk_details.py | Python | apache-2.0 | 1,741 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from collections import namedtuple
import json
import time
from itertools import groupby
from odoo import api, fields, models, _
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT
from odoo.tools.float_utils import float_compare, float_round
from odoo.exceptions import UserError
from odoo.addons.stock.models.stock_move import PROCUREMENT_PRIORITIES
from operator import itemgetter
class PickingType(models.Model):
_name = "stock.picking.type"
_description = "The operation type determines the picking view"
_order = 'sequence, id'
name = fields.Char('Operation Types Name', required=True, translate=True)
color = fields.Integer('Color')
sequence = fields.Integer('Sequence', help="Used to order the 'All Operations' kanban view")
sequence_id = fields.Many2one('ir.sequence', 'Reference Sequence', required=True)
default_location_src_id = fields.Many2one(
'stock.location', 'Default Source Location',
help="This is the default source location when you create a picking manually with this operation type. It is possible however to change it or that the routes put another location. If it is empty, it will check for the supplier location on the partner. ")
default_location_dest_id = fields.Many2one(
'stock.location', 'Default Destination Location',
help="This is the default destination location when you create a picking manually with this operation type. It is possible however to change it or that the routes put another location. If it is empty, it will check for the customer location on the partner. ")
code = fields.Selection([('incoming', 'Vendors'), ('outgoing', 'Customers'), ('internal', 'Internal')], 'Type of Operation', required=True)
return_picking_type_id = fields.Many2one('stock.picking.type', 'Operation Type for Returns')
show_entire_packs = fields.Boolean('Allow moving packs', help="If checked, this shows the packs to be moved as a whole in the Operations tab all the time, even if there was no entire pack reserved.")
warehouse_id = fields.Many2one(
'stock.warehouse', 'Warehouse', ondelete='cascade',
default=lambda self: self.env['stock.warehouse'].search([('company_id', '=', self.env.user.company_id.id)], limit=1))
active = fields.Boolean('Active', default=True)
use_create_lots = fields.Boolean(
'Create New Lots/Serial Numbers', default=True,
help="If this is checked only, it will suppose you want to create new Lots/Serial Numbers, so you can provide them in a text field. ")
use_existing_lots = fields.Boolean(
'Use Existing Lots/Serial Numbers', default=True,
help="If this is checked, you will be able to choose the Lots/Serial Numbers. You can also decide to not put lots in this operation type. This means it will create stock with no lot or not put a restriction on the lot taken. ")
show_operations = fields.Boolean(
'Show Detailed Operations', default=False,
help="If this checkbox is ticked, the pickings lines will represent detailed stock operations. If not, the picking lines will represent an aggregate of detailed stock operations.")
show_reserved = fields.Boolean(
'Show Reserved', default=True, help="If this checkbox is ticked, Odoo will show which products are reserved (lot/serial number, source location, source package).")
# Statistics for the kanban view
last_done_picking = fields.Char('Last 10 Done Pickings', compute='_compute_last_done_picking')
count_picking_draft = fields.Integer(compute='_compute_picking_count')
count_picking_ready = fields.Integer(compute='_compute_picking_count')
count_picking = fields.Integer(compute='_compute_picking_count')
count_picking_waiting = fields.Integer(compute='_compute_picking_count')
count_picking_late = fields.Integer(compute='_compute_picking_count')
count_picking_backorders = fields.Integer(compute='_compute_picking_count')
rate_picking_late = fields.Integer(compute='_compute_picking_count')
rate_picking_backorders = fields.Integer(compute='_compute_picking_count')
barcode_nomenclature_id = fields.Many2one(
'barcode.nomenclature', 'Barcode Nomenclature')
@api.one
def _compute_last_done_picking(self):
# TDE TODO: true multi
tristates = []
for picking in self.env['stock.picking'].search([('picking_type_id', '=', self.id), ('state', '=', 'done')], order='date_done desc', limit=10):
if picking.date_done > picking.date:
tristates.insert(0, {'tooltip': picking.name or '' + ": " + _('Late'), 'value': -1})
elif picking.backorder_id:
tristates.insert(0, {'tooltip': picking.name or '' + ": " + _('Backorder exists'), 'value': 0})
else:
tristates.insert(0, {'tooltip': picking.name or '' + ": " + _('OK'), 'value': 1})
self.last_done_picking = json.dumps(tristates)
def _compute_picking_count(self):
# TDE TODO count picking can be done using previous two
domains = {
'count_picking_draft': [('state', '=', 'draft')],
'count_picking_waiting': [('state', 'in', ('confirmed', 'waiting'))],
'count_picking_ready': [('state', 'in', ('assigned', 'partially_available'))],
'count_picking': [('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))],
'count_picking_late': [('scheduled_date', '<', time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)), ('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))],
'count_picking_backorders': [('backorder_id', '!=', False), ('state', 'in', ('confirmed', 'assigned', 'waiting', 'partially_available'))],
}
for field in domains:
data = self.env['stock.picking'].read_group(domains[field] +
[('state', 'not in', ('done', 'cancel')), ('picking_type_id', 'in', self.ids)],
['picking_type_id'], ['picking_type_id'])
count = {
x['picking_type_id'][0]: x['picking_type_id_count']
for x in data if x['picking_type_id']
}
for record in self:
record[field] = count.get(record.id, 0)
for record in self:
record.rate_picking_late = record.count_picking and record.count_picking_late * 100 / record.count_picking or 0
record.rate_picking_backorders = record.count_picking and record.count_picking_backorders * 100 / record.count_picking or 0
def name_get(self):
""" Display 'Warehouse_name: PickingType_name' """
# TDE TODO remove context key support + update purchase
res = []
for picking_type in self:
if self.env.context.get('special_shortened_wh_name'):
if picking_type.warehouse_id:
name = picking_type.warehouse_id.name
else:
name = _('Customer') + ' (' + picking_type.name + ')'
elif picking_type.warehouse_id:
name = picking_type.warehouse_id.name + ': ' + picking_type.name
else:
name = picking_type.name
res.append((picking_type.id, name))
return res
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
args = args or []
domain = []
if name:
domain = ['|', ('name', operator, name), ('warehouse_id.name', operator, name)]
picks = self.search(domain + args, limit=limit)
return picks.name_get()
@api.onchange('code')
def onchange_picking_code(self):
if self.code == 'incoming':
self.default_location_src_id = self.env.ref('stock.stock_location_suppliers').id
self.default_location_dest_id = self.env.ref('stock.stock_location_stock').id
elif self.code == 'outgoing':
self.default_location_src_id = self.env.ref('stock.stock_location_stock').id
self.default_location_dest_id = self.env.ref('stock.stock_location_customers').id
def _get_action(self, action_xmlid):
# TDE TODO check to have one view + custo in methods
action = self.env.ref(action_xmlid).read()[0]
if self:
action['display_name'] = self.display_name
return action
def get_action_picking_tree_late(self):
return self._get_action('stock.action_picking_tree_late')
def get_action_picking_tree_backorder(self):
return self._get_action('stock.action_picking_tree_backorder')
def get_action_picking_tree_waiting(self):
return self._get_action('stock.action_picking_tree_waiting')
def get_action_picking_tree_ready(self):
return self._get_action('stock.action_picking_tree_ready')
def get_stock_picking_action_picking_type(self):
return self._get_action('stock.stock_picking_action_picking_type')
class Picking(models.Model):
_name = "stock.picking"
_inherit = ['mail.thread', 'mail.activity.mixin']
_description = "Transfer"
_order = "priority desc, date asc, id desc"
name = fields.Char(
'Reference', default='/',
copy=False, index=True,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
origin = fields.Char(
'Source Document', index=True,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
help="Reference of the document")
note = fields.Text('Notes')
backorder_id = fields.Many2one(
'stock.picking', 'Back Order of',
copy=False, index=True,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
help="If this shipment was split, then this field links to the shipment which contains the already processed part.")
move_type = fields.Selection([
('direct', 'As soon as possible'), ('one', 'When all products are ready')], 'Shipping Policy',
default='direct', required=True,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
help="It specifies goods to be deliver partially or all at once")
state = fields.Selection([
('draft', 'Draft'), ('cancel', 'Cancelled'),
('waiting', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('partially_available', 'Partially Available'),
('assigned', 'Available'), ('done', 'Done')], string='Status', compute='_compute_state',
copy=False, index=True, readonly=True, store=True, track_visibility='onchange',
help=" * Draft: not confirmed yet and will not be scheduled until confirmed\n"
" * Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n"
" * Waiting Availability: still waiting for the availability of products\n"
" * Partially Available: some products are available and reserved\n"
" * Ready to Transfer: products reserved, simply waiting for confirmation.\n"
" * Transferred: has been processed, can't be modified or cancelled anymore\n"
" * Cancelled: has been cancelled, can't be confirmed anymore")
group_id = fields.Many2one(
'procurement.group', 'Procurement Group',
readonly=True, related='move_lines.group_id', store=True)
priority = fields.Selection(
PROCUREMENT_PRIORITIES, string='Priority',
compute='_compute_priority', inverse='_set_priority', store=True,
# default='1', required=True, # TDE: required, depending on moves ? strange
index=True, track_visibility='onchange',
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
help="Priority for this picking. Setting manually a value here would set it as priority for all the moves")
scheduled_date = fields.Datetime(
'Scheduled Date', compute='_compute_scheduled_date', inverse='_set_scheduled_date', store=True,
index=True, track_visibility='onchange',
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
help="Scheduled time for the first part of the shipment to be processed. Setting manually a value here would set it as expected date for all the stock moves.")
date = fields.Datetime(
'Creation Date',
default=fields.Datetime.now, index=True, track_visibility='onchange',
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
help="Creation Date, usually the time of the order")
date_done = fields.Datetime('Date of Transfer', copy=False, readonly=True, help="Completion Date of Transfer")
location_id = fields.Many2one(
'stock.location', "Source Location Zone",
default=lambda self: self.env['stock.picking.type'].browse(self._context.get('default_picking_type_id')).default_location_src_id,
readonly=True, required=True,
states={'draft': [('readonly', False)]})
location_dest_id = fields.Many2one(
'stock.location', "Destination Location",
default=lambda self: self.env['stock.picking.type'].browse(self._context.get('default_picking_type_id')).default_location_dest_id,
readonly=True, required=True,
states={'draft': [('readonly', False)]})
move_lines = fields.One2many('stock.move', 'picking_id', string="Stock Moves", copy=True)
has_scrap_move = fields.Boolean(
'Has Scrap Moves', compute='_has_scrap_move')
picking_type_id = fields.Many2one(
'stock.picking.type', 'Operation Type',
required=True,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
picking_type_code = fields.Selection([
('incoming', 'Vendors'),
('outgoing', 'Customers'),
('internal', 'Internal')], related='picking_type_id.code',
readonly=True)
picking_type_entire_packs = fields.Boolean(related='picking_type_id.show_entire_packs',
readonly=True)
partner_id = fields.Many2one(
'res.partner', 'Partner',
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
company_id = fields.Many2one(
'res.company', 'Company',
default=lambda self: self.env['res.company']._company_default_get('stock.picking'),
index=True, required=True,
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]})
move_line_ids = fields.One2many('stock.move.line', 'picking_id', 'Operations')
move_line_exist = fields.Boolean(
'Has Pack Operations', compute='_compute_move_line_exist',
help='Check the existence of pack operation on the picking')
has_packages = fields.Boolean(
'Has Packages', compute='_compute_has_packages',
help='Check the existence of destination packages on move lines')
owner_id = fields.Many2one(
'res.partner', 'Owner',
states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
help="Default Owner")
printed = fields.Boolean('Printed')
is_locked = fields.Boolean(default=True, help='When the picking is not done this allows changing the '
'initial demand. When the picking is done this allows '
'changing the done quantities.')
# Used to search on pickings
product_id = fields.Many2one('product.product', 'Product', related='move_lines.product_id')
show_operations = fields.Boolean(related='picking_type_id.show_operations')
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per company!'),
]
@api.depends('move_type', 'move_lines.state', 'move_lines.picking_id')
@api.one
def _compute_state(self):
''' State of a picking depends on the state of its related stock.move
- no moves: draft or assigned (launch_pack_operations)
- all moves canceled: cancel
- all moves done (including possible canceled): done
- All at once picking: least of confirmed / waiting / assigned
- Partial picking
- all moves assigned: assigned
- one of the move is assigned or partially available: partially available
- otherwise in waiting or confirmed state
'''
if not self.move_lines:
self.state = 'draft'
elif any(move.state == 'draft' for move in self.move_lines): # TDE FIXME: should be all ?
self.state = 'draft'
elif all(move.state == 'cancel' for move in self.move_lines):
self.state = 'cancel'
elif all(move.state in ['cancel', 'done'] for move in self.move_lines):
self.state = 'done'
else:
self.state = self.move_lines._get_relevant_state_among_moves()
@api.one
@api.depends('move_lines.priority')
def _compute_priority(self):
if self.mapped('move_lines'):
priorities = [priority for priority in self.mapped('move_lines.priority') if priority] or ['1']
self.priority = max(priorities)
else:
self.priority = '1'
@api.one
def _set_priority(self):
self.move_lines.write({'priority': self.priority})
@api.one
@api.depends('move_lines.date_expected')
def _compute_scheduled_date(self):
if self.move_type == 'direct':
self.scheduled_date = min(self.move_lines.mapped('date_expected') or [False])
else:
self.scheduled_date = max(self.move_lines.mapped('date_expected') or [False])
@api.one
def _set_scheduled_date(self):
self.move_lines.write({'date_expected': self.scheduled_date})
@api.one
def _has_scrap_move(self):
# TDE FIXME: better implementation
self.has_scrap_move = bool(self.env['stock.move'].search_count([('picking_id', '=', self.id), ('scrapped', '=', True)]))
@api.one
def _compute_move_line_exist(self):
self.move_line_exist = bool(self.move_line_ids)
@api.one
def _compute_has_packages(self):
has_packages = False
for pack_op in self.move_line_ids:
if pack_op.result_package_id:
has_packages = True
break
self.has_packages = has_packages
@api.onchange('picking_type_id', 'partner_id')
def onchange_picking_type(self):
if self.picking_type_id:
if self.picking_type_id.default_location_src_id:
location_id = self.picking_type_id.default_location_src_id.id
elif self.partner_id:
location_id = self.partner_id.property_stock_supplier.id
else:
customerloc, location_id = self.env['stock.warehouse']._get_partner_locations()
if self.picking_type_id.default_location_dest_id:
location_dest_id = self.picking_type_id.default_location_dest_id.id
elif self.partner_id:
location_dest_id = self.partner_id.property_stock_customer.id
else:
location_dest_id, supplierloc = self.env['stock.warehouse']._get_partner_locations()
self.location_id = location_id
self.location_dest_id = location_dest_id
# TDE CLEANME move into onchange_partner_id
if self.partner_id:
if self.partner_id.picking_warn == 'no-message' and self.partner_id.parent_id:
partner = self.partner_id.parent_id
elif self.partner_id.picking_warn not in ('no-message', 'block') and self.partner_id.parent_id.picking_warn == 'block':
partner = self.partner_id.parent_id
else:
partner = self.partner_id
if partner.picking_warn != 'no-message':
if partner.picking_warn == 'block':
self.partner_id = False
return {'warning': {
'title': ("Warning for %s") % partner.name,
'message': partner.picking_warn_msg
}}
@api.model
def create(self, vals):
# TDE FIXME: clean that brol
defaults = self.default_get(['name', 'picking_type_id'])
if vals.get('name', '/') == '/' and defaults.get('name', '/') == '/' and vals.get('picking_type_id', defaults.get('picking_type_id')):
vals['name'] = self.env['stock.picking.type'].browse(vals.get('picking_type_id', defaults.get('picking_type_id'))).sequence_id.next_by_id()
# TDE FIXME: what ?
# As the on_change in one2many list is WIP, we will overwrite the locations on the stock moves here
# As it is a create the format will be a list of (0, 0, dict)
if vals.get('move_lines') and vals.get('location_id') and vals.get('location_dest_id'):
for move in vals['move_lines']:
if len(move) == 3:
move[2]['location_id'] = vals['location_id']
move[2]['location_dest_id'] = vals['location_dest_id']
return super(Picking, self).create(vals)
@api.multi
def write(self, vals):
res = super(Picking, self).write(vals)
# Change locations of moves if those of the picking change
after_vals = {}
if vals.get('location_id'):
after_vals['location_id'] = vals['location_id']
if vals.get('location_dest_id'):
after_vals['location_dest_id'] = vals['location_dest_id']
if after_vals:
self.mapped('move_lines').filtered(lambda move: not move.scrapped).write(after_vals)
return res
@api.multi
def unlink(self):
self.mapped('move_lines')._action_cancel()
self.mapped('move_lines').unlink() # Checks if moves are not done
return super(Picking, self).unlink()
# Actions
# ----------------------------------------
@api.one
def action_assign_owner(self):
self.move_line_ids.write({'owner_id': self.owner_id.id})
@api.multi
def do_print_picking(self):
self.write({'printed': True})
return self.env.ref('stock.action_report_picking').report_action(self)
@api.multi
def action_confirm(self):
# call `_action_confirm` on every draft move
self.mapped('move_lines')\
.filtered(lambda move: move.state == 'draft')\
._action_confirm()
# call `_action_assign` on every confirmed move which location_id bypasses the reservation
self.filtered(lambda picking: picking.location_id.usage in ('supplier', 'inventory', 'production'))\
.filtered(lambda move: move.state == 'confirmed')\
.mapped('move_lines')._action_assign()
return True
@api.multi
def action_assign(self):
""" Check availability of picking moves.
This has the effect of changing the state and reserve quants on available moves, and may
also impact the state of the picking as it is computed based on move's states.
@return: True
"""
self.filtered(lambda picking: picking.state == 'draft').action_confirm()
moves = self.mapped('move_lines').filtered(lambda move: move.state not in ('draft', 'cancel', 'done'))
if not moves:
raise UserError(_('Nothing to check the availability for.'))
moves._action_assign()
return True
@api.multi
def force_assign(self):
""" Changes state of picking to available if moves are confirmed or waiting.
@return: True
"""
self.mapped('move_lines').filtered(lambda move: move.state in ['confirmed', 'waiting', 'partially_available'])._force_assign()
return True
@api.multi
def action_cancel(self):
self.mapped('move_lines')._action_cancel()
return True
@api.multi
def action_done(self):
"""Changes picking state to done by processing the Stock Moves of the Picking
Normally that happens when the button "Done" is pressed on a Picking view.
@return: True
"""
# TDE FIXME: remove decorator when migration the remaining
# TDE FIXME: draft -> automatically done, if waiting ?? CLEAR ME
todo_moves = self.mapped('move_lines').filtered(lambda self: self.state in ['draft', 'partially_available', 'assigned', 'confirmed'])
# Check if there are ops not linked to moves yet
for pick in self:
# # Explode manually added packages
# for ops in pick.move_line_ids.filtered(lambda x: not x.move_id and not x.product_id):
# for quant in ops.package_id.quant_ids: #Or use get_content for multiple levels
# self.move_line_ids.create({'product_id': quant.product_id.id,
# 'package_id': quant.package_id.id,
# 'result_package_id': ops.result_package_id,
# 'lot_id': quant.lot_id.id,
# 'owner_id': quant.owner_id.id,
# 'product_uom_id': quant.product_id.uom_id.id,
# 'product_qty': quant.qty,
# 'qty_done': quant.qty,
# 'location_id': quant.location_id.id, # Could be ops too
# 'location_dest_id': ops.location_dest_id.id,
# 'picking_id': pick.id
# }) # Might change first element
# # Link existing moves or add moves when no one is related
for ops in pick.move_line_ids.filtered(lambda x: not x.move_id):
# Search move with this product
moves = pick.move_lines.filtered(lambda x: x.product_id == ops.product_id)
if moves: #could search move that needs it the most (that has some quantities left)
ops.move_id = moves[0].id
else:
new_move = self.env['stock.move'].create({
'name': _('New Move:') + ops.product_id.display_name,
'product_id': ops.product_id.id,
'product_uom_qty': ops.qty_done,
'product_uom': ops.product_uom_id.id,
'location_id': pick.location_id.id,
'location_dest_id': pick.location_dest_id.id,
'picking_id': pick.id,
})
ops.move_id = new_move.id
new_move._action_confirm()
todo_moves |= new_move
#'qty_done': ops.qty_done})
todo_moves._action_done()
self.write({'date_done': fields.Datetime.now()})
return True
do_transfer = action_done #TODO:replace later
@api.multi
def _check_entire_pack(self):
""" This function check if entire packs are moved in the picking"""
for picking in self:
origin_packages = picking.move_line_ids.mapped("package_id")
for pack in origin_packages:
all_in = True
packops = picking.move_line_ids.filtered(lambda x: x.package_id == pack)
keys = ['product_id', 'lot_id']
grouped_quants = {}
for k, g in groupby(sorted(pack.quant_ids, key=itemgetter(*keys)), key=itemgetter(*keys)):
grouped_quants[k] = sum(self.env['stock.quant'].concat(*list(g)).mapped('quantity'))
grouped_ops = {}
for k, g in groupby(sorted(packops, key=itemgetter(*keys)), key=itemgetter(*keys)):
grouped_ops[k] = sum(self.env['stock.move.line'].concat(*list(g)).mapped('product_qty'))
if any(grouped_quants[key] - grouped_ops.get(key, 0) != 0 for key in grouped_quants)\
or any(grouped_ops[key] - grouped_quants[key] != 0 for key in grouped_ops):
all_in = False
if all_in and packops:
packops.write({'result_package_id': pack.id})
@api.multi
def do_unreserve(self):
for move in self:
for move_line in move.move_lines:
move_line._do_unreserve()
self.write({'state': 'confirmed'})
@api.multi
def button_validate(self):
self.ensure_one()
if not self.move_lines and not self.move_line_ids:
raise UserError(_('Please add some lines to move'))
# If no lots when needed, raise error
picking_type = self.picking_type_id
no_quantities_done = all(line.qty_done == 0.0 for line in self.move_line_ids)
if picking_type.use_create_lots or picking_type.use_existing_lots:
lines_to_check = self.move_line_ids
if not no_quantities_done:
lines_to_check = lines_to_check.filtered(
lambda line: float_compare(line.qty_done, 0,
precision_rounding=line.product_uom_id.rounding)
)
for line in lines_to_check:
product = line.product_id
if product and product.tracking != 'none' and (line.qty_done == 0 or (not line.lot_name and not line.lot_id)):
raise UserError(_('You need to supply a lot/serial number for %s.') % product.name)
# In draft or with no pack operations edited yet, ask if we can just do everything
if self.state == 'draft' or no_quantities_done:
view = self.env.ref('stock.view_immediate_transfer')
wiz = self.env['stock.immediate.transfer'].create({'pick_id': self.id})
return {
'name': _('Immediate Transfer?'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.immediate.transfer',
'views': [(view.id, 'form')],
'view_id': view.id,
'target': 'new',
'res_id': wiz.id,
'context': self.env.context,
}
# Check backorder should check for other barcodes
if self._check_backorder():
return self.action_generate_backorder_wizard()
self.action_done()
return
def action_generate_backorder_wizard(self):
view = self.env.ref('stock.view_backorder_confirmation')
wiz = self.env['stock.backorder.confirmation'].create({'pick_id': self.id})
return {
'name': _('Create Backorder?'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.backorder.confirmation',
'views': [(view.id, 'form')],
'view_id': view.id,
'target': 'new',
'res_id': wiz.id,
'context': self.env.context,
}
def action_toggle_is_locked(self):
self.is_locked = not self.is_locked
return True
def _check_backorder(self):
self.ensure_one()
quantity_todo = {}
quantity_done = {}
for move in self.move_lines:
quantity_todo.setdefault(move.product_id.id, 0)
quantity_done.setdefault(move.product_id.id, 0)
quantity_todo[move.product_id.id] += move.product_uom_qty
quantity_done[move.product_id.id] += move.quantity_done
for ops in self.move_line_ids.filtered(lambda x: x.package_id and not x.product_id and not x.move_id):
for quant in ops.package_id.quant_ids:
quantity_done.setdefault(quant.product_id.id, 0)
quantity_done[quant.product_id.id] += quant.qty
for pack in self.move_line_ids.filtered(lambda x: x.product_id and not x.move_id):
quantity_done.setdefault(pack.product_id.id, 0)
quantity_done[pack.product_id.id] += pack.qty_done
return any(quantity_done[x] < quantity_todo.get(x, 0) for x in quantity_done)
def _create_backorder(self, backorder_moves=[]):
""" Move all non-done lines into a new backorder picking. If the key 'do_only_split' is given in the context, then move all lines not in context.get('split', []) instead of all non-done lines.
"""
# TDE note: o2o conversion, todo multi
backorders = self.env['stock.picking']
for picking in self:
backorder_moves = backorder_moves or picking.move_lines
if self._context.get('do_only_split'):
not_done_bo_moves = backorder_moves.filtered(lambda move: move.id not in self._context.get('split', []))
else:
not_done_bo_moves = backorder_moves.filtered(lambda move: move.state not in ('done', 'cancel'))
if not not_done_bo_moves:
continue
backorder_picking = picking.copy({
'name': '/',
'move_lines': [],
'move_line_ids': [],
'backorder_id': picking.id
})
picking.message_post(body=_("Back order <em>%s</em> <b>created</b>.") % (backorder_picking.name))
not_done_bo_moves.write({'picking_id': backorder_picking.id})
if not picking.date_done:
picking.write({'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
backorder_picking.action_confirm()
backorder_picking.action_assign()
backorders |= backorder_picking
return backorders
def _put_in_pack(self):
package = False
for pick in self:
operations = pick.move_line_ids.filtered(lambda o: o.qty_done > 0 and not o.result_package_id)
operation_ids = self.env['stock.move.line']
if operations:
package = self.env['stock.quant.package'].create({})
for operation in operations:
if float_compare(operation.qty_done, operation.product_qty, precision_rounding=operation.product_uom_id.rounding) >= 0:
operation_ids |= operation
else:
quantity_left_todo = float_round(
operation.product_qty - operation.qty_done,
precision_rounding=operation.product_uom_id.rounding,
rounding_method='UP')
new_operation = operation.copy(
default={'product_uom_qty': operation.qty_done, 'qty_done': operation.qty_done})
operation.write({'product_uom_qty': quantity_left_todo, 'qty_done': 0.0})
operation_ids |= new_operation
operation_ids.write({'result_package_id': package.id})
else:
raise UserError(_('Please process some quantities to put in the pack first!'))
return package
def put_in_pack(self):
return self._put_in_pack()
def button_scrap(self):
self.ensure_one()
return {
'name': _('Scrap'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.scrap',
'view_id': self.env.ref('stock.stock_scrap_form_view2').id,
'type': 'ir.actions.act_window',
'context': {'default_picking_id': self.id, 'product_ids': self.move_line_ids.mapped('product_id').ids},
'target': 'new',
}
def action_see_move_scrap(self):
self.ensure_one()
action = self.env.ref('stock.action_stock_scrap').read()[0]
scraps = self.env['stock.scrap'].search([('picking_id', '=', self.id)])
action['domain'] = [('id', 'in', scraps.ids)]
return action
def action_see_packages(self):
self.ensure_one()
action = self.env.ref('stock.action_package_view').read()[0]
packages = self.move_line_ids.mapped('result_package_id')
action['domain'] = [('id', 'in', packages.ids)]
action['context'] = {'picking_id': self.id}
return action
| richard-willowit/odoo | addons/stock/models/stock_picking.py | Python | gpl-3.0 | 36,588 |
#!/usr/bin/env python
"""
$push is similar to $addToSet. The difference is that rather than accumulating only unique values
it aggregates all values into an array.
Using an aggregation query, count the number of tweets for each user. In the same $group stage,
use $push to accumulate all the tweet texts for each user. Limit your output to the 5 users
with the most tweets.
Your result documents should include only the fields:
"_id" (screen name of user),
"count" (number of tweets found for the user),
"tweet_texts" (a list of the tweet texts found for the user).
Please modify only the 'make_pipeline' function so that it creates and returns an aggregation
pipeline that can be passed to the MongoDB aggregate function. As in our examples in this lesson,
the aggregation pipeline should be a list of one or more dictionary objects.
Please review the lesson examples if you are unsure of the syntax.
Your code will be run against a MongoDB instance that we have provided. If you want to run this code
locally on your machine, you have to install MongoDB, download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials.
Please note that the dataset you are using here is a smaller version of the twitter dataset used in
examples in this lesson. If you attempt some of the same queries that we looked at in the lesson
examples, your results will be different.
"""
def get_db(db_name):
from pymongo import MongoClient
client = MongoClient('localhost:27017')
db = client[db_name]
return db
def make_pipeline():
# complete the aggregation pipeline
    pipeline = [
        {'$group': {'_id': '$user.screen_name',
                    'count': {'$sum': 1},
                    'tweet_texts': {'$push': '$text'}}},
        {'$sort': {'count': -1}},
        {'$limit': 5}
    ]
return pipeline
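# Illustrative only: shape of one document produced by the pipeline above
# (the screen name, count and texts below are made up):
#
#   {"_id": "some_user", "count": 42, "tweet_texts": ["first tweet", "..."]}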
def aggregate(db, pipeline):
result = db.tweets.aggregate(pipeline)
return result
if __name__ == '__main__':
db = get_db('twitter')
pipeline = make_pipeline()
result = aggregate(db, pipeline)
assert len(result["result"]) == 5
assert result["result"][0]["count"] > result["result"][4]["count"]
import pprint
pprint.pprint(result) | napjon/moocs_solution | DataWranglingMongoDB/Lesson5/using_push.py | Python | mit | 2,299 |
from xdict.prepare_quotes_token_machine import prepare_quotes_token_machine
def tokenize_quotes(machine):
curr_state = "INIT"
rslt = ''
for i in range(0,machine.orig_str.__len__()):
input_symbol = machine.orig_str[i]
action,next_state,trigger_checker = machine.search(curr_state,input_symbol)
#print('----------')
#print(curr_state,trigger_checker,input_symbol,action,next_state)
if(action == machine.do_replace):
ch = action(input_symbol)
#print(ch)
elif(action == machine.do_throw):
ch = ''
action(curr_state,trigger_checker,input_symbol)
else:
ch = input_symbol
#rslt = ''.join((rslt,ch))
rslt = rslt +ch
curr_state = next_state
return(rslt)
#######################
def convert_token_in_quote(j_str,**kwargs):
'''
    #1. use html.escape to escape all quote operators that appear inside single or double quotes
    #2. use html.escape to escape all paired operators that appear inside single or double quotes, such as "{,},[,],(,)"
    #3. use html.escape to escape all other operators that appear inside single or double quotes, such as ':' and ','
    #4. use html.escape to escape separator operators that appear inside single or double quotes, such as '\n'
>>> from xdict.jprint import convert_token_in_quote
>>> from xdict.jprint import help
>>>
>>> convert_token_in_quote('"a b":"cd"')
'"a b":"cd"'
>>> import html
>>> html.unescape('"a b":"cd"')
'"a b":"cd"'
>>> convert_token_in_quote('"a b":cd')
'"a b":cd'
>>>
>>> #help(convert_token_in_quote)
convert_token_in_quote('<a b>:"cd"',quotes_pairs_dict={1: ('"', '"'), 2: ("<", ">")})
'<a b>:"cd"'
'''
machine = prepare_quotes_token_machine(j_str,**kwargs)
rslt = tokenize_quotes(machine)
return(rslt)
| ihgazni2/dlixhict-didactic | xdict/tokenize_quotes.py | Python | mit | 1,990 |
#!/usr/bin/env python
"""
configure DCommands
"""
import sys
import types
import DIRAC
from DIRAC.Core.Base import Script
from COMDIRAC.Interfaces import DConfig, createMinimalConfig, critical, guessProfilesFromCS
from COMDIRAC.Interfaces import getDNFromProxy
class Params:
def __init__ ( self ):
self.minimal = False
self.guessProfile = False
def setMinimal( self, arg ):
self.minimal = True
def getMinimal( self ):
return self.minimal
def setGuessProfiles( self, arg ):
self.guessProfile = True
def getGuessProfiles( self ):
return self.guessProfile
params = Params()
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [options] [section[.option[=value]]]...' % Script.scriptName,
'Arguments:',
' without argument: display whole configuration content',
'++ OR ++',
' section: display all options in section',
'++ OR ++',
' section.option: display option',
'++ OR ++',
' section.option=value: set option value', ] )
)
Script.registerSwitch( "m", "minimal", "verify and fill minimal configuration", params.setMinimal )
Script.registerSwitch( "", "guess", "", params.setGuessProfiles )
Script.disableCS()
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if params.minimal:
createMinimalConfig()
dconfig = DConfig()
modified = False
if params.getGuessProfiles():
Script.enableCS()
result = getDNFromProxy()
if not result["OK"]:
print "ERROR: %s" % result["Message"]
DIRAC.exit( 2 )
dn = result["Value"]
result = guessProfilesFromCS( dn )
if not result["OK"]:
print "ERROR: %s" % result["Message"]
DIRAC.exit( 2 )
profiles = result["Value"]
for p, pOpts in profiles.items():
for opt, val in pOpts.items():
modified |= dconfig.existsOrCreate( p, opt, val )
if modified and not args:
dconfig.write()
if not args:
sections = dconfig.sections()
for s in sections:
retVal = dconfig.get( s, None )
if not retVal[ "OK" ]:
critical( retVal[ "Message" ] )
print "[%s]" % s
for o, v in retVal["Value" ]:
print o, "=", v
print
DIRAC.exit( 0 )
for arg in args:
value = None
section = None
option = None
if "=" in arg:
arg, value = arg.split( "=", 1 )
if "." in arg:
section, option = arg.split( ".", 1 )
else:
section = arg
if value != None:
dconfig.set( section, option, value )
modified = True
else:
retVal = dconfig.get( section, option )
if not retVal[ "OK" ]: critical( retVal[ "Message" ] )
ret = retVal[ "Value" ]
if type( ret ) == types.ListType:
print "[%s]" % section
for o, v in ret:
print o, "=", v
else:
print option, "=", ret
if modified:
dconfig.write()
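# Illustrative invocations following the usage message above (the section and
# option names below are made-up examples):
#
#   dconfig                            # print the whole configuration
#   dconfig session                    # print every option in section [session]
#   dconfig session.group              # print a single option
#   dconfig session.group=dirac_user   # set an option and write it back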
| pigay/COMDIRAC | Interfaces/scripts/dconfig.py | Python | gpl-3.0 | 3,151 |
'''This module contains some glue code encapsulating a "main" process.
The code here is aimed at wrapping the most common tasks involved in creating
and, especially, training a neural network model.
'''
import climate
import datetime
import downhill
import os
import warnings
from . import graph
from . import trainer
logging = climate.get_logger(__name__)
class Experiment:
'''This class encapsulates tasks for training and evaluating a network.
Parameters
----------
    network : :class:`Network <graph.Network>` or str
A specification for obtaining a model. If a string is given, it is
assumed to name a file containing a pickled model; this file will be
loaded and used. If a network instance is provided, it will be used
as the model. If a callable (such as a subclass) is provided, it
will be invoked using the provided keyword arguments to create a
network instance.
'''
def __init__(self, network, *args, **kwargs):
if isinstance(network, str) and os.path.isfile(network):
self.load(network)
elif isinstance(network, graph.Network):
self.network = network
else:
assert network is not graph.Network, \
'use a concrete theanets.Network subclass ' \
'like theanets.{Autoencoder,Regressor,...}'
self.network = network(*args, **kwargs)
def create_trainer(self, train, algo='rmsprop'):
'''Create a trainer.
Additional keyword arguments are passed directly to the trainer.
Parameters
----------
train : str
A string describing a trainer to use.
algo : str
A string describing an optimization algorithm.
Returns
-------
trainer : :class:`Trainer <trainer.Trainer>`
A trainer instance to alter the parameters of our network.
'''
train = train.lower()
if train == 'sample':
return trainer.SampleTrainer(self.network)
if train.startswith('layer') or train.startswith('sup'):
return trainer.SupervisedPretrainer(algo, self.network)
if train.startswith('pre') or train.startswith('unsup'):
return trainer.UnsupervisedPretrainer(algo, self.network)
return trainer.DownhillTrainer(train, self.network)
def create_dataset(self, data, **kwargs):
'''Create a dataset for this experiment.
Parameters
----------
data : sequence of ndarray or callable
The values that you provide for data will be encapsulated inside a
:class:`Dataset <downhill.Dataset>` instance; see that class for
documentation on the types of things it needs. In particular, you
can currently pass in either a list/array/etc. of data, or a
callable that generates data dynamically.
Returns
-------
data : :class:`Dataset <downhill.Dataset>`
A dataset capable of providing mini-batches of data to a training
algorithm.
'''
default_axis = 0
if not callable(data) and not callable(data[0]) and len(data[0].shape) == 3:
default_axis = 1
name = kwargs.get('name', 'dataset')
b, i, s = 'batch_size', 'iteration_size', '{}_batches'.format(name)
return downhill.Dataset(
data,
name=name,
batch_size=kwargs.get(b, 32),
iteration_size=kwargs.get(i, kwargs.get(s)),
axis=kwargs.get('axis', default_axis))
def train(self, *args, **kwargs):
'''Train the network until the trainer converges.
All arguments are passed to :func:`itertrain`.
Returns
-------
training : dict
A dictionary of monitor values computed using the training dataset,
at the conclusion of training. This dictionary will at least contain
a 'loss' key that indicates the value of the loss function. Other
keys may be available depending on the trainer being used.
validation : dict
A dictionary of monitor values computed using the validation
dataset, at the conclusion of training.
'''
monitors = None
for monitors in self.itertrain(*args, **kwargs):
pass
return monitors
def itertrain(self, train, valid=None, algorithm='rmsprop', **kwargs):
'''Train our network, one batch at a time.
This method yields a series of ``(train, valid)`` monitor pairs. The
``train`` value is a dictionary mapping names to monitor values
evaluated on the training dataset. The ``valid`` value is also a
dictionary mapping names to values, but these values are evaluated on
the validation dataset.
Because validation might not occur every training iteration, the
validation monitors might be repeated for multiple training iterations.
It is probably most helpful to think of the validation monitors as being
the "most recent" values that have been computed.
After training completes, the network attribute of this class will
contain the trained network parameters.
Parameters
----------
train : sequence of ndarray or :class:`downhill.Dataset`
A dataset to use when training the network. If this is a
``downhill.Dataset`` instance, it will be used directly as the
training datset. If it is another type, like a numpy array, it will
be converted to a ``downhill.Dataset`` and then used as the training
set.
valid : sequence of ndarray or :class:`downhill.Dataset`, optional
If this is provided, it will be used as a validation dataset. If not
provided, the training set will be used for validation. (This is not
recommended!)
algorithm : str or list of str, optional
One or more optimization algorithms to use for training our network.
If not provided, RMSProp will be used.
Yields
------
training : dict
A dictionary of monitor values computed using the training dataset,
at the conclusion of training. This dictionary will at least contain
a 'loss' key that indicates the value of the loss function. Other
keys may be available depending on the trainer being used.
validation : dict
A dictionary of monitor values computed using the validation
dataset, at the conclusion of training.
'''
# set up datasets
if valid is None:
valid = train
if not isinstance(valid, downhill.Dataset):
valid = self.create_dataset(valid, name='valid', **kwargs)
if not isinstance(train, downhill.Dataset):
train = self.create_dataset(train, name='train', **kwargs)
# set up training algorithm(s)
if 'optimize' in kwargs:
warnings.warn(
'please use the "algorithm" keyword arg instead of "optimize"',
DeprecationWarning)
algorithm = kwargs.pop('optimize')
if isinstance(algorithm, str):
algorithm = algorithm.split()
# set up auto-saving if enabled
progress = kwargs.get('save_progress')
timeout = kwargs.get('save_every', 0)
if timeout < 0: # timeout < 0 is in minutes instead of iterations.
timeout *= 60
# loop over trainers, saving every N minutes/iterations if enabled
for algo in algorithm:
if not callable(getattr(algo, 'itertrain', None)):
algo = self.create_trainer(algo)
start = datetime.datetime.now()
for i, monitors in enumerate(algo.itertrain(train, valid, **kwargs)):
yield monitors
now = datetime.datetime.now()
elapsed = (now - start).total_seconds()
if i and progress and (
(timeout < 0 and elapsed > -timeout) or
(timeout > 0 and i % int(timeout) == 0)):
self.save(progress)
start = now
def save(self, path):
'''Save the current network to a pickle file on disk.
Parameters
----------
path : str
Location of the file to save the network.
'''
self.network.save(path)
def load(self, path):
'''Load a saved network from a pickle file on disk.
This method sets the ``network`` attribute of the experiment to the
loaded network model.
Parameters
----------
filename : str
Load the keyword arguments and parameters of a network from a pickle
file at the named path. If this name ends in ".gz" then the input
will automatically be gunzipped; otherwise the input will be treated
as a "raw" pickle.
Returns
-------
network : :class:`Network <graph.Network>`
A newly-constructed network, with topology and parameters loaded
from the given pickle file.
'''
self.network = graph.Network.load(path)
return self.network
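# Illustrative sketch (not part of the original module): a typical round trip
# with this class. The network type, layer sizes, datasets and file name are
# placeholders.
#
#   import theanets
#
#   exp = theanets.Experiment(theanets.Classifier, layers=(784, 100, 10))
#   exp.train(train_data, valid_data, algorithm='rmsprop')
#   exp.save('model.pkl.gz')
#
#   # later: reload the trained network to keep training or to evaluate
#   exp.load('model.pkl.gz')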
| masterkeywikz/seq2graph | src/theanets-0.6.1/theanets/main.py | Python | mit | 9,363 |
from django.db import models
from epg_core import models as core_models
class File(models.Model):
name = models.CharField(max_length=100)
filename = models.CharField(max_length=255)
def __unicode__(self):
return "%s (%s)" % (self.name, self.filename)
class XMLTVChannel(models.Model):
core_channel = models.ForeignKey(core_models.Channel)
xmltv_id = models.CharField(max_length=150)
file = models.ForeignKey('File')
def __unicode__(self):
return "%s %s" % (self.xmltv_id, self.core_channel.name)
| ansmirnov/epg-manager | epg_manager/import_xmltv/models.py | Python | gpl-2.0 | 545 |
# coding: utf-8
"""
Fail: read it https://developers.google.com/appengine/docs/python/gettingstartedpython27/devenvironment
ide - https://developers.google.com/eclipse/docs/running_and_debugging_2_0
https://developers.google.com/appengine/articles/eclipse !!
Services on GAE:
https://developers.google.com/api-client-library/python/guide/google_app_engine
"""
import webapp2
from google.appengine.ext.webapp.util import run_wsgi_app
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write('Hello, webapp World!')
app = webapp2.WSGIApplication([('/', MainPage)],
debug=True)
def main():
    run_wsgi_app(app)
if __name__ == '__main__':
main() | zaqwes8811/coordinator-tasks | projects/thin-client-impl/gae-python/gae_test/__init__.py | Python | apache-2.0 | 760 |
from flask.ext.script import Manager, Server
from app import app
manager = Manager(app)
manager.add_command("runserver",
Server(host='127.0.0.1',port=5000, use_debugger=True))
if __name__ == '__main__':
manager.run() | Informationretrieval2016/Furnito_web | manage.py | Python | apache-2.0 | 231 |
from flask_restful import reqparse
from flask_restful import Resource
from rpi_pwm_server.pwm import PWMCtl
import logging
LOG = logging.getLogger(__name__)
class PWMResourse(Resource):
def get(self, *args, **kwargs):
return self._get_current_state()
def post(self, *args, **kwargs):
parser = reqparse.RequestParser()
parser.add_argument('mode', required=True)
parser.add_argument('value')
args = parser.parse_args()
mode = args.get('mode')
pwm_value = args.get('value')
PWMCtl.setmode(mode, pwm_value=pwm_value)
return self._get_current_state(), 200
@staticmethod
def _get_current_state():
return dict(mode=PWMCtl.get_mode_display_value(),
value=PWMCtl.get_pwm_display_value())
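# Illustrative client sketch (not part of the original module). The host and
# the route "/pwm" under which PWMResourse is registered are assumptions; the
# real route is defined where the Flask-RESTful Api is configured.
#
#   import requests
#
#   state = requests.get("http://raspberrypi:5000/pwm").json()
#   # -> {"mode": "...", "value": ...}
#
#   requests.post("http://raspberrypi:5000/pwm",
#                 data={"mode": "manual", "value": 50})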
| alanquillin/rpi_pwm_server | rpi_pwm_server/wsgi/pwm.py | Python | gpl-3.0 | 806 |
#!/usr/bin/env python2.7
import subprocess
import sys
import pexpect
import unittest
import shutil
import os
import hashlib
def openssl(args):
with open(os.devnull, "w") as devnull:
proc = subprocess.Popen(['openssl'] + args, stdin=subprocess.PIPE, stderr=devnull)
for i in range(6):
proc.stdin.write("\n")
proc.stdin.close()
proc.communicate()
def setupSkyped():
try:
shutil.rmtree("t/skyped")
except OSError:
pass
os.makedirs("t/skyped")
cwd = os.getcwd()
os.chdir("t/skyped")
try:
shutil.copyfile("../../skyped.cnf", "skyped.cnf")
openssl(['req', '-new', '-x509', '-days', '365', '-nodes', '-config', 'skyped.cnf', '-out', 'skyped.cert.pem', '-keyout', 'skyped.key.pem'])
with open("skyped.conf", "w") as sock:
sock.write("[skyped]\n")
sock.write("username = alice\n")
sock.write("password = %s\n" % hashlib.sha1("foo").hexdigest())
sock.write("cert = %s/skyped.cert.pem\n" % os.getcwd())
sock.write("key = %s/skyped.key.pem\n" % os.getcwd())
sock.write("port = 2727\n")
finally:
os.chdir(cwd)
class Test(unittest.TestCase):
def mock(self, name):
with open("t/skyped.log", "w") as skyped_log,\
open("t/pexpect.log", "w") as pexpect_log:
skyped = subprocess.Popen([sys.executable, "skyped.py",
"-c", "t/skyped/skyped.conf", "-n", "-d", "-m", "t/%s-skyped.mock" % name],
stdout=skyped_log, stderr=subprocess.STDOUT)
try:
bitlbee = pexpect.spawn('../../bitlbee', ['-d', 't/bitlbee'], logfile=pexpect_log)
if os.environ.get('ATTACH_GDB'):
subprocess.Popen(['gdb', '-batch-silent',
'-ex', 'set logging overwrite on',
'-ex', 'set logging file t/gdb-%s.log' % bitlbee.pid,
'-ex', 'set logging on',
'-ex', 'handle all pass nostop noprint',
'-ex', 'handle SIGSEGV pass stop print',
'-ex', 'set pagination 0',
'-ex', 'continue',
'-ex', 'backtrace full',
'-ex', 'info registers',
'-ex', 'thread apply all backtrace',
'-ex', 'quit',
'../../bitlbee', str(bitlbee.pid) ])
bitlbee_mock = open("t/%s-bitlbee.mock" % name)
for i in bitlbee_mock.readlines():
line = i.strip()
if line.startswith(">> "):
bitlbee.expect_exact(line[3:], timeout=10)
elif line.startswith("<< "):
bitlbee.sendline(line[3:])
bitlbee_mock.close()
bitlbee.close()
finally:
skyped.terminate()
skyped.communicate()
def setUp(self):
try:
shutil.rmtree("t/bitlbee")
except OSError:
pass
os.makedirs("t/bitlbee")
def testMsg(self):
self.mock("msg")
def testLogin(self):
self.mock("login")
def testInfo(self):
self.mock("info")
def testCall(self):
self.mock("call")
def testCallFailed(self):
self.mock("call-failed")
def testAddYes(self):
self.mock("add-yes")
def testAddedYes(self):
self.mock("added-yes")
def testAddedNo(self):
self.mock("added-no")
def testGroupchatInvited(self):
self.mock("groupchat-invited")
def testGroupchatInvite(self):
self.mock("groupchat-invite")
def testGroupchatLeave(self):
self.mock("groupchat-leave")
def testGroupchatMsg(self):
self.mock("groupchat-msg")
def testGroupchatTopic(self):
self.mock("groupchat-topic")
def testCalledYes(self):
self.mock("called-yes")
def testCalledNo(self):
self.mock("called-no")
def testFiletransfer(self):
self.mock("filetransfer")
def testGroupRead(self):
self.mock("group-read")
def testGroupAdd(self):
self.mock("group-add")
def testCtcpHelp(self):
self.mock("ctcp-help")
def testSetMoodText(self):
self.mock("set-mood-text")
def testAwaySet(self):
self.mock("away-set")
if __name__ == '__main__':
setupSkyped()
unittest.main()
| meh/bitlbee | protocols/skype/test.py | Python | gpl-2.0 | 3,669 |
import logging
from django.conf.urls import patterns, url
from django.contrib.gis import admin
from django.contrib.messages.api import get_messages
from django.contrib.sites.models import Site
from django.template.response import TemplateResponse
from location.models import (
LocationConsumerSettings,
LocationSnapshot,
LocationSource,
LocationSourceType
)
logger = logging.getLogger('location.admin')
class LocationSourceAdmin(admin.options.OSMGeoAdmin):
list_display = (
'created',
'name',
'user',
'type',
'active'
)
list_filter = [
'type'
]
ordering = ['-created']
raw_id_fields = ('user', )
def get_urls(self):
urls = super(LocationSourceAdmin, self).get_urls()
urls = patterns(
'location.views',
url(
'configure-accounts/',
self.admin_site.admin_view(self.configure_accounts),
name='location_configure_accounts'
)
) + urls
return urls
def configure_accounts(self, request):
logger.info(self.model._meta)
logger.info(self.model._meta.app_label)
return TemplateResponse(
request,
'admin/location/configure.html', {
'messages': get_messages(request),
'title': 'Configure Accounts',
'domain': Site.objects.get_current().domain
}
)
class LocationSnapshotAdmin(admin.options.OSMGeoAdmin):
list_display = (
'date',
'neighborhood',
'nearest_city',
)
date_hierarchy = 'date'
raw_id_fields = ('source', )
list_per_page = 25
ordering = ['-date']
list_filter = [
'source__type'
]
search_fields = [
'source__type',
'source__user__username',
]
def nearest_city(self, obj):
city = obj.city
if city:
return city
nearest = obj.find_nearest_city()
if nearest.distance is not None:
return "%s (%d mi away)" % (
nearest,
nearest.distance.mi
)
return nearest
class LocationConsumerSettingsAdmin(admin.options.OSMGeoAdmin):
raw_id_fields = ('user', )
admin.site.register(LocationSourceType)
admin.site.register(LocationSource, LocationSourceAdmin)
admin.site.register(LocationSnapshot, LocationSnapshotAdmin)
admin.site.register(LocationConsumerSettings, LocationConsumerSettingsAdmin)
| coddingtonbear/django-location | location/admin.py | Python | mit | 2,517 |
from base import *
DOMAIN = "test295"
DOESNT_EXIST = "This_does_not_exist"
CONTENT_404 = "Not found. But it does exist now!"
CONF = """
vserver!295!nick = %s
vserver!295!document_root = %s
vserver!295!rule!1!match = default
vserver!295!rule!1!handler = cgi
vserver!295!error_handler = error_redir
vserver!295!error_handler!404!show = 0
vserver!295!error_handler!404!url = /404.sh
"""
class Test (TestBase):
def __init__ (self):
TestBase.__init__ (self, __file__)
self.name = "Custom Error return code"
self.request = "GET /%s HTTP/1.0\r\n" % (DOESNT_EXIST) + \
"Host: %s\r\n" % (DOMAIN)
self.expected_error = 200
self.expected_content = CONTENT_404
def Prepare (self, www):
d = self.Mkdir (www, DOMAIN)
self.WriteFile (d, "404.sh", 0755, "#!/bin/sh\necho -e 'HTTP/1.0 200 OK\r\nStatus: 200 OK\r\n\r\n" + CONTENT_404 + "'")
self.conf = CONF % (DOMAIN, d)
| lmcro/webserver | qa/295-custom-error-cgi.py | Python | gpl-2.0 | 988 |
# -*- coding: utf-8 -*-
'''
Container Example
==============
This example shows how to add a container to our screen.
A container is simply an empty place on the screen which
could be filled with any other content from a .kv file.
'''
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import ObjectProperty
import os
import kivy
kivy.require('1.8.0')
class RootWidget(BoxLayout):
'''Create a controller that receives a custom widget from the kv lang file.
Add an action to be called from a kv file.
'''
container = ObjectProperty(None)
class EzsApp(App):
'''This is the app itself'''
# setting the path of our directory
path = os.path.abspath(os.path.dirname("."))
error = False
def build(self):
'''This method loads the root.kv file automatically
:rtype: none
'''
# loading the content of root.kv
self.root = Builder.load_file(os.path.join(self.path, 'kv', 'root.kv'))
return self.root
def next_screen(self, screen):
'''Clear container and load the given screen object from file in kv
folder.
:param screen: name of the screen object made from the loaded .kv file
:type screen: str
:rtype: none
'''
filename = screen + '.kv'
# unload the content of the .kv file
# reason: it could have data form previous calls
setattr(self, screen, Builder.unload_file(
os.path.join(self.path, 'kv', filename)))
# load the content of the .kv file
setattr(self, screen, Builder.load_file(
os.path.join(self.path, 'kv', filename)))
# clear the container
self.root.container.clear_widgets()
# add the content of the .kv file to the container
self.root.container.add_widget(getattr(self, screen))
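    # A hypothetical snippet from a .kv file showing how next_screen() might be
    # triggered (the screen name 'about' is an assumption for illustration):
    #
    #     Button:
    #         text: 'About'
    #         on_release: app.next_screen('about')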
if __name__ == '__main__':
'''Start the application'''
EZS = EzsApp()
EZS.run()
| bliz937/kivy | examples/container/main.py | Python | mit | 1,990 |
from setuptools import setup
setup(name='tumblrtokens',
version='0.3',
description='Abstracts out creating Oauth authentication tokens for Tumblr.',
url='https://github.com/mkell43/tumblrtokens',
author='Michael Keller',
author_email='[email protected]',
license='MIT',
packages=['tumblrtokens'],
install_requires=['oauth2'],
zip_safe=False) | mkell43/tumblrtokens | setup.py | Python | mit | 397 |
from nose2.compat import unittest
from nose2.plugins import layers
from nose2 import events, loader, session
from nose2.tests._common import TestCase
class TestLayers(TestCase):
tags = ['unit']
def setUp(self):
self.session = session.Session()
self.loader = loader.PluggableTestLoader(session=self.session)
self.session.testLoader = self.loader
self.plugin = layers.Layers(session=self.session)
def test_simple_layer_inheritance(self):
class L1(object):
pass
class L2(L1):
pass
class T1(unittest.TestCase):
layer = L1
def test(self):
pass
class T2(unittest.TestCase):
layer = L2
def test(self):
pass
suite = unittest.TestSuite([T2('test'), T1('test')])
event = events.StartTestRunEvent(None, suite, None, 0, None)
self.plugin.startTestRun(event)
expect = [['test (nose2.tests.unit.test_layers_plugin.T1)',
['test (nose2.tests.unit.test_layers_plugin.T2)']]]
self.assertEqual(self.names(event.suite), expect)
def test_multiple_inheritance(self):
class L1(object):
pass
class L2(L1):
pass
class L3(L1):
pass
class T1(unittest.TestCase):
layer = L1
def test(self):
pass
class T2(unittest.TestCase):
layer = L2
def test(self):
pass
class T3(unittest.TestCase):
layer = L3
def test(self):
pass
suite = unittest.TestSuite([T2('test'), T1('test'), T3('test')])
event = events.StartTestRunEvent(None, suite, None, 0, None)
self.plugin.startTestRun(event)
expect = [['test (nose2.tests.unit.test_layers_plugin.T1)',
['test (nose2.tests.unit.test_layers_plugin.T2)'],
['test (nose2.tests.unit.test_layers_plugin.T3)']]]
self.assertEqual(self.names(event.suite), expect)
def test_deep_inheritance(self):
class L1(object):
pass
class L2(L1):
pass
class L3(L1):
pass
class L4(L2, L1):
pass
class L5(L4):
pass
class T1(unittest.TestCase):
layer = L1
def test(self):
pass
class T2(unittest.TestCase):
layer = L2
def test(self):
pass
class T3(unittest.TestCase):
layer = L3
def test(self):
pass
class T4(unittest.TestCase):
layer = L4
def test(self):
pass
class T5(unittest.TestCase):
layer = L5
def test(self):
pass
suite = unittest.TestSuite([T2('test'), T1('test'), T3('test'),
T4('test'), T5('test')])
event = events.StartTestRunEvent(None, suite, None, 0, None)
self.plugin.startTestRun(event)
expect = [['test (nose2.tests.unit.test_layers_plugin.T1)',
['test (nose2.tests.unit.test_layers_plugin.T2)',
['test (nose2.tests.unit.test_layers_plugin.T4)',
['test (nose2.tests.unit.test_layers_plugin.T5)']]],
['test (nose2.tests.unit.test_layers_plugin.T3)']]]
self.assertEqual(self.names(event.suite), expect)
def test_mixed_layers_no_layers(self):
class L1(object):
pass
class L2(L1):
pass
class T1(unittest.TestCase):
layer = L1
def test(self):
pass
class T2(unittest.TestCase):
layer = L2
def test(self):
pass
class T3(unittest.TestCase):
def test(self):
pass
suite = unittest.TestSuite([T2('test'), T1('test'), T3('test')])
event = events.StartTestRunEvent(None, suite, None, 0, None)
self.plugin.startTestRun(event)
expect = ['test (nose2.tests.unit.test_layers_plugin.T3)',
['test (nose2.tests.unit.test_layers_plugin.T1)',
['test (nose2.tests.unit.test_layers_plugin.T2)']]]
self.assertEqual(self.names(event.suite), expect)
def test_ordered_layers(self):
class L1(object):
pass
class L2(L1):
position = 1
class L3(L1):
position = 2
class L4(L1):
position = 3
class L5(L2):
position = 4
class T1(unittest.TestCase):
layer = L1
def test(self):
pass
class T2(unittest.TestCase):
layer = L2
def test(self):
pass
class T3(unittest.TestCase):
layer = L3
def test(self):
pass
class T4(unittest.TestCase):
layer = L4
def test(self):
pass
class T5(unittest.TestCase):
layer = L5
def test(self):
pass
suite = unittest.TestSuite([T2('test'), T1('test'),
T3('test'), T4('test'), T5('test')])
event = events.StartTestRunEvent(None, suite, None, 0, None)
self.plugin.startTestRun(event)
expect = [['test (nose2.tests.unit.test_layers_plugin.T1)',
['test (nose2.tests.unit.test_layers_plugin.T2)',
['test (nose2.tests.unit.test_layers_plugin.T5)', ]],
['test (nose2.tests.unit.test_layers_plugin.T3)', ],
['test (nose2.tests.unit.test_layers_plugin.T4)', ]]]
self.assertEqual(self.names(event.suite), expect)
def test_mixin_inheritance(self):
class L1(object):
pass
class L2(object): # a mixin, doesn't share a base w/L1
pass
class L3(L1):
pass
class L4(L3):
pass
class L5(L4):
pass
class L6(L2):
mixins = (L4,)
class T1(unittest.TestCase):
layer = L1
def test(self):
pass
class T3(unittest.TestCase):
layer = L3
def test(self):
pass
class T4(unittest.TestCase):
layer = L4
def test(self):
pass
class T5(unittest.TestCase):
layer = L5
def test(self):
pass
class T6(unittest.TestCase):
layer = L6
def test(self):
pass
suite = unittest.TestSuite([T6('test'), T1('test'),
T3('test'), T4('test'), T5('test')])
event = events.StartTestRunEvent(None, suite, None, 0, None)
self.plugin.startTestRun(event)
expect = [['test (nose2.tests.unit.test_layers_plugin.T1)',
['test (nose2.tests.unit.test_layers_plugin.T3)',
['test (nose2.tests.unit.test_layers_plugin.T4)',
[['test (nose2.tests.unit.test_layers_plugin.T6)']],
['test (nose2.tests.unit.test_layers_plugin.T5)', ]]]]]
self.assertEqual(self.names(event.suite), expect)
def names(self, suite):
return [n for n in self.iternames(suite)]
def iternames(self, suite):
for t in suite:
if isinstance(t, unittest.TestCase):
yield str(t)
else:
yield [n for n in self.iternames(t)]
def _listset(self, l):
n = set([])
for t in l:
if isinstance(t, list):
n.add(self._listset(t))
else:
n.add(t)
return frozenset(n)
| dhenrygithub/QGIS | python/ext-libs/nose2/tests/unit/test_layers_plugin.py | Python | gpl-2.0 | 7,967 |
#!/usr/bin/python
from ConfigParser import SafeConfigParser
import os
import subprocess
import sys
import shlex
import shutil
import os.path
import glob
from MapBuilder import MapBuilder
from ArgParser import parse_args
class Config:
def __init__(self, config_name):
self.config_name = config_name
self.parser = SafeConfigParser()
self.config = {}
self.section = 'progress'
def exists(self):
return os.path.isfile(self.config_name)
def delete(self):
try:
os.remove(config_file_name)
except OSError:
pass
def set(self, key, value):
if not self.exists():
self.parser.add_section(self.section)
config_file = open(self.config_name, 'w+')
self.parser.write(config_file)
config_file.close()
with open(self.config_name, 'w') as config_file:
self.config[key] = value
self.parser.set('progress', str(key), str(value))
self.parser.write(config_file)
def get(self, key, type='bool', default=False):
if type == 'bool':
try:
self.parser.read(self.config_name)
val = self.parser.get(self.section, key) == 'True'
            except:
                # Key missing or unreadable: persist the default, and remember it
                # locally as a real bool (self.set stores the string 'False',
                # which would otherwise read as truthy).
                self.set(key, str(default))
                self.config[key] = default
                return
else:
raise TypeError
self.config[key] = val
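# A minimal usage sketch for the progress tracker above (the file name is an
# assumption for illustration):
#
#     cfg = Config('progress.ini')
#     cfg.get('downloaded')              # reads the flag, defaulting it to False
#     if not cfg.config['downloaded']:
#         pass                           # ... run the download step ...
#         cfg.set('downloaded', True)    # persist completion for the next run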
if __name__ == '__main__':
if len(sys.argv) < 2:
print """Usage:
download.py -d -f folder/
-d = dry-run
-f = force"""
sys.exit(-1)
local_folder = '/scr/data/' + sys.argv[-1]
remote_folder = sys.argv[-1]
try:
os.mkdir(local_folder)
except OSError:
pass
config_file_name = local_folder + '/progress.ini'
configurator = Config(config_file_name)
if '-f' in sys.argv:
configurator.delete()
if configurator.exists():
configurator.get('downloaded')
configurator.get('organized')
configurator.get('map')
configurator.get('multilane')
configurator.get('planefitting')
configurator.get('sync')
print configurator.config
if configurator.config['downloaded'] == False:
cmd = """rsync --progress -a -L --prune-empty-dirs --exclude="*_frames/" \
--exclude="*_radar" --include="*_frames.tar.gz" \
--include="*2.avi" --include="*.out" --include="params.ini" \
--include="*lanes.pickle" --filter="-! */" \
/scail/group/deeplearning/driving_data/q50_data/{remote} \
/scr/data/""".format(
remote=remote_folder)
print cmd
tokens = shlex.split(cmd)
if '-d' in sys.argv:
tokens.insert(1, '--dry-run')
print 'DRY RUN'
subprocess.call(tokens)
if not '-d' in sys.argv:
configurator.set('downloaded', True)
if configurator.config['organized'] == False:
for video in glob.glob(local_folder + '/split_0*2.avi'):
organized_folder = video.replace('split_0_', '').replace('2.avi', '')
try:
os.mkdir(organized_folder)
except OSError:
pass
tokens = organized_folder.split('/')
tokens[-1] = '*' + tokens[-1] + '*'
all_file_glob = '/'.join(tokens)
all_files = glob.glob(all_file_glob)
folder_name = organized_folder.split('/')[-1]
for file in all_files:
file_parts = file.split('/')
if file_parts[-1] != folder_name:
file_parts.insert(-1, folder_name)
old_file = file
new_file = '/'.join(file_parts)
print old_file, new_file
shutil.move(old_file, new_file)
if '.tar.gz' in new_file:
untar_cmd = 'tar xf %s -C %s' % (new_file,
organized_folder)
subprocess.call(untar_cmd.split())
os.remove(new_file)
params = local_folder + '/params.ini'
folder_params = local_folder + '/' + folder_name + '/params.ini'
shutil.copy(params, folder_params)
configurator.set('organized', True)
if configurator.config['map'] == False:
for run in sorted(glob.glob(local_folder + '/*/')):
if os.path.isdir(run):
print run
base = run.split('/')[-2]
temp_vid = base + '2.avi'
args = parse_args(run, temp_vid)
mb = MapBuilder(args, 1, 600, 0.5, 0.1)
output = run + '/' + base + '_bg.npz'
if not os.path.isfile(output):
mb.buildMap(['no-trees'])
mb.exportData(output)
output = run + '/' + base + '_ground.npz'
if not os.path.isfile(output):
mb.buildMap(['no-trees', 'ground'])
mb.exportData(output)
configurator.set('map', True)
if configurator.config['multilane'] == False:
for run in sorted(glob.glob(local_folder + '/*/')):
if os.path.isdir(run):
print run
num_lanes_file_name = run + '/num_lanes.ini'
if os.path.exists(num_lanes_file_name):
with open(num_lanes_file_name, 'r') as num_lanes_file:
lines = num_lanes_file.readlines()
for line in lines:
if 'left' in line.lower():
left = int(line.split()[-1])
else:
right = int(line.split()[-1])
else:
temp_vid = glob.glob(run + '/split_0_*2.avi')[0]
mplayer_cmd = 'mplayer -speed 3 -quiet ' + temp_vid
subprocess.call(mplayer_cmd.split())
print 'Enter number of left lanes:'
left = sys.stdin.readline()
print 'Enter number of right lanes:'
right = sys.stdin.readline()
with open(num_lanes_file_name, 'w+') as num_lanes_file:
num_lanes_file.write('left = ' + str(left))
num_lanes_file.write('right = ' + str(right))
interp = glob.glob(run + '/*interp_lanes.pickle')[0]
bg = glob.glob(run + '/*_bg.npz')[0]
cmd = 'python OffsetLanes.py {interp} {l} {r} {bg} {folder}'
cmd = cmd.format(interp=interp, l=left, r=right, bg=bg, folder=run)
print cmd
subprocess.call(cmd.split())
configurator.set('multilane', True)
if configurator.config['planefitting'] == False:
for run in sorted(glob.glob(local_folder + '/*/')):
if os.path.isdir(run):
print run
if len(glob.glob(run + '/*_planar.npz')) == 0:
video = run.split('/')[-2] + '2.avi'
cmd = 'python PlaneFitting.py {run} {video}'
cmd = cmd.format(run=run, video=video)
print cmd
subprocess.call(cmd.split())
configurator.set('planefitting', True)
if configurator.config['sync'] == False:
driving_data = '/deep/group/driving_data/'
sync_cmd = """rsync --progress -a --exclude=*_frames/ --exclude=*.avi
--exclude=*.pickle --exclude=*~ /scr/data/ \
{driving_data}/jkiske/data""".format(driving_data=driving_data)
print sync_cmd
subprocess.call(sync_cmd.split())
for run in sorted(glob.glob(local_folder + '/*/')):
run = run.split('/')[-2]
video_glob = driving_data + 'q50_data/{remote}/split_*_{run}2.avi' \
.format(remote=remote_folder, run=run)
for video in sorted(glob.glob(video_glob)):
link = driving_data + 'jkiske/data/{remote}/{run}/{video}' \
.format(remote=remote_folder, run=run,
video=video.split('/')[-1])
rm_cmd = 'rm ' + link
cmd = 'ln -s {video} {link}'.format(video=video, link=link)
# print cmd
subprocess.call(cmd.split())
configurator.set('sync', True)
| sameeptandon/sail-car-log | process/LaneCorrectorPipeline.py | Python | bsd-2-clause | 8,473 |
"""
Django settings for shopsite project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SETTING_DIR = os.path.dirname(__file__)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xw&!parp-j@3_sdxu^-g_l^g_)-*+o*-n=8%f$0cp3jy!#*fw)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'shopsite.apps.catalog',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'shopsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(SETTING_DIR,"templates"),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'shopsite.utils.context_processors.shopsite',
],
},
},
]
WSGI_APPLICATION = 'shopsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Configure MySQL; the mysql-connector-python driver is an alternative engine
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
'default':{
'NAME': 'shopsys',
#'ENGINE': 'mysql.connector.django',
'ENGINE': 'django.db.backends.mysql',
'USER': 'shopsys',
'PASSWORD': 'shopsys',
'HOST':'120.25.102.253',
        'PORT': '3306',
'TEST':{}
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
DATE_FORMAT = 'Y-m-d'
TIME_ZONE = 'Asia/Shanghai'
# Whether to enable internationalization; disabling it skips loading the translation machinery for better performance
USE_I18N = False
# Localized formatting support; when True, numbers, dates, etc. are displayed using the system locale
USE_L10N = False
USE_TZ = True
# Whether to send ETags; ETags reduce network traffic but add server-side overhead
USE_ETAGS = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
# Look for static files in these additional paths
STATICFILES_DIRS = (
os.path.join(SETTING_DIR,"static"),
)
STATIC_URL = '/static/'
# User-uploaded media (images)
MEDIA_ROOT = os.path.join(BASE_DIR,"media")
MEDIA_URL = "/media/"
# Site settings
SITE_NAME = '小白购'
META_KEYWORDS = '小白购, 特价男装, 精品女鞋, 计算机图书, 双十一特惠'
META_DESCRIPTION = '''小白购 - 成都最大、最安全的网上交易平台,提供各类服饰、
美容、家居、数码、话费/点卡充值… 2亿优质特价商品,同时提供担保交易(先收货
后付款)、先行赔付、假一赔三、七天无理由退换货、数码免费维修等安全交易保障
服务,让你全面安心享受网上购物乐趣!''' | xueqiang41/shopsite | shopsite/shopsite/settings.py | Python | gpl-2.0 | 4,757 |
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.utils import six
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_response_errors,
webapi_request_fields)
from djblets.webapi.errors import (DOES_NOT_EXIST, INVALID_FORM_DATA,
NOT_LOGGED_IN, PERMISSION_DENIED)
from reviewboard.reviews.forms import DefaultReviewerForm
from reviewboard.reviews.models import DefaultReviewer, Group
from reviewboard.scmtools.models import Repository
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import (webapi_check_login_required,
webapi_check_local_site)
class DefaultReviewerResource(WebAPIResource):
"""Provides information on default reviewers for review requests.
Review Board will apply any default reviewers that match the repository
and any file path in an uploaded diff for new and updated review requests.
A default reviewer entry can list multiple users and groups.
This is useful when different groups own different parts of a codebase.
Adding DefaultReviewer entries ensures that the right people will always
see the review request and discussions.
Default reviewers take a regular expression for the file path matching,
making it flexible.
As a tip, specifying ``.*`` for the regular expression would have this
default reviewer applied to every review request on the matched
repositories.
"""
added_in = '1.6.16'
name = 'default_reviewer'
model = DefaultReviewer
fields = {
'id': {
'type': int,
'description': 'The numeric ID of the default reviewer.',
},
'name': {
'type': six.text_type,
'description': 'The descriptive name of the entry.',
},
'file_regex': {
'type': six.text_type,
'description': 'The regular expression that is used to match '
'files uploaded in a diff.',
},
'repositories': {
'type': six.text_type,
'description': 'A comma-separated list of repository IDs that '
'this default reviewer will match against.',
},
'users': {
'type': six.text_type,
'description': 'A comma-separated list of usernames that '
'this default reviewer applies to matched review '
'requests.',
},
'groups': {
'type': six.text_type,
'description': 'A comma-separated list of group names that '
'this default reviewer applies to matched review '
'requests.',
},
}
uri_object_key = 'default_reviewer_id'
allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
def serialize_repositories_field(self, default_reviewer, **kwargs):
return default_reviewer.repository.all()
def serialize_users_field(self, default_reviewer, **kwargs):
return default_reviewer.people.all()
@webapi_check_login_required
def get_queryset(self, request, is_list=False, local_site=None,
*args, **kwargs):
"""Returns a queryset for DefaultReviewer models.
By default, this returns all default reviewers.
If the queryset is being used for a list of default reviewer
resources, then it can be further filtered by one or more of the
arguments listed in get_list.
"""
queryset = self.model.objects.filter(local_site=local_site)
if is_list:
if 'repositories' in request.GET:
for repo_id in request.GET.get('repositories').split(','):
try:
queryset = queryset.filter(repository=repo_id)
except ValueError:
pass
if 'users' in request.GET:
for username in request.GET.get('users').split(','):
queryset = queryset.filter(people__username=username)
if 'groups' in request.GET:
for name in request.GET.get('groups').split(','):
queryset = queryset.filter(groups__name=name)
return queryset
def has_access_permissions(self, request, default_reviewer,
*args, **kwargs):
return default_reviewer.is_accessible_by(request.user)
def has_modify_permissions(self, request, default_reviewer,
*args, **kwargs):
return default_reviewer.is_mutable_by(request.user)
def has_delete_permissions(self, request, default_reviewer,
*args, **kwargs):
return default_reviewer.is_mutable_by(request.user)
@webapi_check_local_site
@webapi_request_fields(optional={
'groups': {
'type': six.text_type,
'description': 'A comma-separated list of group names that each '
'resulting default reviewer must apply to review '
'requests.',
},
'repositories': {
'type': six.text_type,
'description': 'A comma-separated list of IDs of repositories '
'that each resulting default reviewer must match '
'against.'
},
'users': {
'type': six.text_type,
'description': 'A comma-separated list of usernames that each '
'resulting default reviewer must apply to review '
'requests.',
},
})
@augment_method_from(WebAPIResource)
def get_list(self, request, *args, **kwargs):
"""Retrieves the list of default reviewers on the server.
By default, this lists all default reviewers. This list can be
further filtered down through the query arguments.
"""
pass
@webapi_check_local_site
@augment_method_from(WebAPIResource)
def get(self, *args, **kwargs):
"""Retrieves information on a particular default reviewer."""
pass
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(INVALID_FORM_DATA, NOT_LOGGED_IN,
PERMISSION_DENIED)
@webapi_request_fields(
required={
'name': {
'type': six.text_type,
'description': 'The name of the default reviewer entry.',
},
'file_regex': {
'type': six.text_type,
'description': 'The regular expression used to match file '
'paths in newly uploaded diffs.',
},
},
optional={
'repositories': {
'type': six.text_type,
'description': 'A comma-separated list of repository IDs.',
},
'groups': {
'type': six.text_type,
'description': 'A comma-separated list of group names.',
},
'users': {
'type': six.text_type,
'description': 'A comma-separated list of usernames.',
}
},
)
def create(self, request, local_site=None, *args, **kwargs):
"""Creates a new default reviewer entry.
Note that by default, a default reviewer will apply to review
requests on all repositories, unless one or more repositories are
provided in the default reviewer's list.
"""
if not self.model.objects.can_create(request.user, local_site):
return self.get_no_access_error(request)
code, data = self._create_or_update(local_site, **kwargs)
if code == 200:
return 201, data
else:
return code, data
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(INVALID_FORM_DATA, NOT_LOGGED_IN,
PERMISSION_DENIED)
@webapi_request_fields(
optional={
'name': {
'type': six.text_type,
'description': 'The name of the default reviewer entry.',
},
'file_regex': {
'type': six.text_type,
'description': 'The regular expression used to match file '
'paths in newly uploaded diffs.',
},
'repositories': {
'type': six.text_type,
'description': 'A comma-separated list of repository IDs.',
},
'groups': {
'type': six.text_type,
'description': 'A comma-separated list of group names.',
},
'users': {
'type': six.text_type,
'description': 'A comma-separated list of usernames.',
}
},
)
def update(self, request, local_site=None, *args, **kwargs):
"""Updates an existing default reviewer entry.
If the list of repositories is updated with a blank entry, then
the default reviewer will apply to review requests on all repositories.
"""
try:
default_reviewer = self.get_object(request, local_site=local_site,
*args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
if not self.has_modify_permissions(request, default_reviewer):
return self.get_no_access_error(request)
return self._create_or_update(local_site, default_reviewer, **kwargs)
def _create_or_update(self, local_site, default_reviewer=None, **kwargs):
invalid_fields = {}
form_data = {}
if 'groups' in kwargs:
group_names = [
name
for name in (
name.strip()
for name in kwargs['groups'].split(',')
)
if name
]
group_ids = [
group['pk']
for group in Group.objects.filter(
name__in=group_names, local_site=local_site).values('pk')
]
if len(group_ids) != len(group_names):
invalid_fields['groups'] = [
'One or more groups were not found'
]
form_data['groups'] = group_ids
if 'repositories' in kwargs:
repo_ids = []
try:
repo_ids = [
int(repo_id)
for repo_id in (
repo_id.strip()
for repo_id in kwargs['repositories'].split(',')
)
if repo_id
]
except ValueError:
invalid_fields['repositories'] = [
'One or more repository IDs were not in a valid format.'
]
if repo_ids:
found_count = Repository.objects.filter(
pk__in=repo_ids, local_site=local_site).count()
if len(repo_ids) != found_count:
invalid_fields['repositories'] = [
'One or more repositories were not found'
]
form_data['repository'] = repo_ids
if 'users' in kwargs:
usernames = [
name
for name in (
name.strip()
for name in kwargs['users'].split(',')
)
if name
]
user_ids = [
user['pk']
for user in User.objects.filter(
username__in=usernames).values('pk')
]
if len(user_ids) != len(usernames):
invalid_fields['users'] = [
'One or more users were not found'
]
form_data['people'] = user_ids
if invalid_fields:
return INVALID_FORM_DATA, {
'fields': invalid_fields
}
for field in ('name', 'file_regex'):
if field in kwargs:
form_data[field] = kwargs[field]
if local_site:
form_data['local_site'] = local_site.pk
form = DefaultReviewerForm(data=form_data, instance=default_reviewer,
local_site=local_site)
if not form.is_valid():
# The form uses "people" and "repository", but we expose these
# as "users" and "repositories", so transmogrify the errors a bit.
field_errors = self._get_form_errors(form)
if 'people' in field_errors:
field_errors['users'] = field_errors.pop('people')
if 'repository' in field_errors:
field_errors['repositories'] = field_errors.pop('repository')
return INVALID_FORM_DATA, {
'fields': field_errors,
}
default_reviewer = form.save()
return 200, {
self.item_result_key: default_reviewer,
}
@augment_method_from(WebAPIResource)
def delete(self, *args, **kwargs):
"""Deletes the default reviewer entry.
This will not remove any reviewers from any review requests.
It will only prevent these default reviewer rules from being
applied to any new review requests or updates.
"""
pass
default_reviewer_resource = DefaultReviewerResource()
| brennie/reviewboard | reviewboard/webapi/resources/default_reviewer.py | Python | mit | 13,846 |
#! /usr/bin/env python
# -*- coding: utf8 -*-
###################################################################################################
# RuleUser
# widget.py
#
# Copyright (C) 2012,2013 Andrey Burbovskiy <[email protected]>
#
# Developed specially for ALT Linux School.
# http://www.altlinux.org/LTSP
#
# Computer management and monitoring of users:
# - LTSP servers
# - Linux standalone clients
# - Windows standalone clients(only VNC)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################################
import gtk, pango, os, string, gettext, time, datetime, gobject
import urllib, shutil
import mimetypes
_ = gettext.gettext
from dialogs import *
from tree import *
from util import *
####################################################################################################
def toolbar_button(pixbuf=None, tooltips=None, tooltip=None, toggle=None):
image = gtk.Image()
image.set_from_pixbuf(pixbuf)
if ( toggle ):
button = gtk.ToggleToolButton()
else:
button = gtk.ToolButton()
button.set_icon_widget(image)
button.unset_flags(gtk.CAN_FOCUS)
if ( tooltips and tooltip ):
if ( gtk.pygtk_version < (2, 12, 0) ):
button.set_tooltip(tooltips, tooltip)
else:
button.set_tooltip_text(tooltip)
return button
####################################################################################################
def menu_image_button(pixbuf, label):
button = gtk.ImageMenuItem(label)
image = gtk.Image()
image.set_from_pixbuf(pixbuf)
button.set_image(image)
return button
####################################################################################################
def image_button(pixbuf=None, label=None, tooltips=None, tooltip=None, toggle=None):
if ( toggle ):
button = gtk.ToggleButton(label)
else:
button = gtk.Button(label)
button.unset_flags(gtk.CAN_FOCUS)
#button.set_can_focus(False)
image = gtk.Image()
image.set_from_pixbuf(pixbuf)
button.set_image(image)
if ( tooltips and tooltip ):
if ( gtk.pygtk_version < (2, 12, 0) ):
tooltips.set_tip(button, tooltip)
else:
button.set_tooltip_text(tooltip)
return button
####################################################################################################
class menu_tool_button(gtk.ToolButton):
def __init__(self, pixbuf, tooltips, tooltip=None):
gtk.ToolButton.__init__(self, None)
image = gtk.Image()
image.set_from_pixbuf(pixbuf)
self.set_icon_widget(image)
if ( gtk.pygtk_version < (2, 12, 0) ):
self.set_tooltip(tooltips, tooltip)
else:
self.set_tooltip_text(tooltip)
self.button = self.get_child()
self.button.connect("button-press-event", self.event)
self.menu = gtk.Menu()
def append(self, item):
self.menu.append(item)
def context_menu(self, event):
self.menu.popup(None, None, None, event.button, event.time, None)
self.menu.show_all()
def event(self, data, event):
if (event.button==1):
self.context_menu(event)
####################################################################################################
class label_entry(gtk.Fixed):
def __init__(self, label_text, entry_text, width, length, x1, x2, editable=True, visibility=True):
gtk.Fixed.__init__(self)
self.set_size_request(int(width*7.4), 26)
label = gtk.Label(label_text)
label.set_alignment(0, 0.5)
self.entry = gtk.Entry()
self.entry.set_max_length(length)
self.entry.set_text(entry_text)
self.entry.set_property('width-chars', width)
self.entry_base_color = self.entry.get_style().copy().base[gtk.STATE_NORMAL]
self.set_editable(editable)
if ( visibility == False ):
self.entry.set_visibility(False)
self.put(label, x1, 0)
self.put(self.entry, x2, 0)
def get_text(self):
return self.entry.get_text()
def set_editable(self, editable):
if ( editable == False ):
self.entry.set_property("editable", False)
self.entry.modify_base(gtk.STATE_NORMAL, gtk.gdk.color_parse("light gray"))
else:
self.entry.set_property("editable", True)
self.entry.modify_base(gtk.STATE_NORMAL, self.entry_base_color)
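# A minimal usage sketch for label_entry (label text, sizes and offsets are
# assumptions for illustration; `vbox` is assumed to be an existing container):
#
#     row = label_entry(_("Name"), "", width=30, length=64, x1=0, x2=120)
#     vbox.pack_start(row, expand=False, fill=False, padding=2)
#     print row.get_text()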
####################################################################################################
class file_entry(gtk.Fixed):
def __init__(self, pixbuf, label_text, entry_text, width, x1, x2, editable=True, folder=None):
gtk.Fixed.__init__(self)
self.set_size_request(int(width*7.4), 26)
label = gtk.Label(label_text)
label.set_alignment(0, 0.5)
self.entry = gtk.Entry()
self.entry.set_text(entry_text)
self.entry.set_property('width-chars', width)
self.button = image_button(pixbuf,"")
self.button.set_size_request(25, 25)
self.button.connect("clicked", file_chooser_dialog, self.entry, label_text)
if ( editable == False ):
self.entry.set_property("editable", False)
self.entry.modify_base(gtk.STATE_NORMAL, gtk.gdk.color_parse("light gray"))
self.put(label, x1, 0)
self.put(self.entry, x2, 0)
self.put(self.button, x2+int(width*7.4), 0)
def get_text(self):
return self.entry.get_text()
####################################################################################################
def file_chooser_dialog(self, entry, label_text, folder=None, action=None):
dialog = gtk.FileChooserDialog(label_text, None, gtk.FILE_CHOOSER_ACTION_OPEN,\
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
if ( folder ):
dialog.set_current_folder(os.path.expanduser(folder))
else:
dialog.set_current_folder(os.path.expanduser("~"))
if ( action ):
dialog.set_action(action)
response = dialog.run()
if response == gtk.RESPONSE_OK:
if ( "gtk.Entry" in str(entry) ):
entry.set_text(dialog.get_filename())
elif ( "gtk.ComboBox" in str(entry) ):
entry.insert_text(0,dialog.get_filename())
entry.set_active(0)
dialog.destroy()
####################################################################################################
class file_browser(gtk.VBox):
def __init__(self, cfg, folder, folder_name):
gtk.VBox.__init__(self, False, 1)
self.cfg = cfg
self.folder = folder
self.folder_name = folder_name
self.current_folder = self.folder
self.show_hidden = False
self.file = None
vbox = self
# pb, file, filestat.st_size, modified, type_, sort_key
self.fileList = gtk.ListStore(gtk.gdk.Pixbuf, str, str, str, str, str)
self.fileList.set_default_sort_func(None)
self.treeView = gtk.TreeView(self.fileList)
self.treeView.set_rules_hint(True)
self.treeView.set_headers_visible(True)
self.treeView.set_headers_clickable(True)
self.treeView.set_grid_lines(gtk.TREE_VIEW_GRID_LINES_VERTICAL)
self.treeView.modify_font(pango.FontDescription(self.cfg.fontTree))
self.treeView.connect("button-press-event", self.tree_button_press_event)
self.TARGETS = [
('application/x-kde-urilist', 0, 0),
('x-special/gnome-copied-files', 0, 0),
('text/uri-list', 0, 0),]
self.treeView.connect("drag_data_get", self.drag_data_get_data, self.cfg)
self.treeView.enable_model_drag_source(gtk.gdk.BUTTON1_MASK, self.TARGETS, gtk.gdk.ACTION_COPY)
self.treeView.connect("drag_data_received", self.drag_data_received_data, self.cfg)
self.treeView.enable_model_drag_dest(self.TARGETS, gtk.gdk.ACTION_COPY)
self.treeSelection = self.treeView.get_selection()
self.treeSelection.set_mode(gtk.SELECTION_MULTIPLE)
######################
self.cellpb = gtk.CellRendererPixbuf()
self.cell1 = gtk.CellRendererText()
self.cell1.connect('edited', self.rename_file)
self.cell2 = gtk.CellRendererText()
self.cell3 = gtk.CellRendererText()
column = gtk.TreeViewColumn(_("Name"))
column.set_expand(True)
column.set_resizable(True)
column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
column.pack_start(self.cellpb, False)
column.pack_start(self.cell1, True)
column.set_attributes(self.cellpb, pixbuf=0)
column.set_attributes(self.cell1, text=1)
self.treeView.append_column(column)
column = gtk.TreeViewColumn(_("Size"))
column.set_expand(False)
column.pack_start(self.cell2, True)
column.set_attributes(self.cell2, text=2)
self.treeView.append_column(column)
column = gtk.TreeViewColumn(_("Modified"))
column.set_expand(False)
column.pack_start(self.cell3, True)
column.set_attributes(self.cell3, text=3)
self.treeView.append_column(column)
sw = gtk.ScrolledWindow()
sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
sw.add(self.treeView)
##########################################
self.label = gtk.Label(self.folder_name)
self.label.set_alignment(0, 0.5)
vbox.pack_start(self.label, expand=False, fill=False, padding=5)
toolbar= gtk.Toolbar()
toolbar.set_orientation(gtk.ORIENTATION_HORIZONTAL)
toolbar.set_style(gtk.TOOLBAR_ICONS)
toolbar.set_border_width(0)
toolbar.set_tooltips(True)
button = toolbar_button(self.cfg.pixbuf_action_refresh_16, self.cfg.tooltips, _("Refresh"))
button.connect('clicked',self.refresh)
toolbar.insert(button,-1)
button = toolbar_button(self.cfg.pixbuf_list_up_16, self.cfg.tooltips, _("Up"))
button.connect('clicked',self.up)
toolbar.insert(button,-1)
separator = gtk.SeparatorToolItem()
toolbar.insert(separator,-1)
button = toolbar_button(self.cfg.pixbuf_list_folder_add_16, self.cfg.tooltips, _("New folder"))
button.connect('clicked',self.new_folder)
toolbar.insert(button,-1)
button = toolbar_button(self.cfg.pixbuf_list_file_copy_16, self.cfg.tooltips, _("Copy"))
button.connect('clicked',self.copy)
toolbar.insert(button,-1)
button = toolbar_button(self.cfg.pixbuf_list_file_paste_16, self.cfg.tooltips, _("Paste"))
button.connect('clicked',self.paste)
toolbar.insert(button,-1)
button = toolbar_button(self.cfg.pixbuf_list_file_edit_16, self.cfg.tooltips, _("Rename"))
button.connect('clicked',self.rename)
toolbar.insert(button,-1)
button = toolbar_button(self.cfg.pixbuf_list_file_remove_16, self.cfg.tooltips, _("Remove"))
button.connect('clicked',self.remove)
toolbar.insert(button,-1)
space = gtk.SeparatorToolItem()
space.set_draw(False)
space.set_expand(gtk.EXPAND)
toolbar.insert(space,-1)
button = toolbar_button(self.cfg.pixbuf_list_file_hide_16, self.cfg.tooltips, _("Show hidden files"), True)
button.connect('clicked',self.hidden)
toolbar.insert(button,-1)
vbox.pack_start(toolbar, expand=False, fill=False, padding=0)
vbox.pack_start(sw, expand=True, fill=True, padding=0)
vbox.set_homogeneous(False)
vbox.set_spacing(0)
vbox.show_all()
self.create_fileList()
##############################################
def create_fileList(self, data=None):
self.label.set_text(" "+self.folder_name+self.current_folder[len(self.folder):])
self.fileList.clear()
try:
listdir = os.listdir(self.current_folder)
except:
self.fileList.append([None,"..","","","",""])
return
list = []
for f in listdir:
if ( f[0] == '.' and self.show_hidden == False ):
continue
try:
file = self.current_folder+"/"+f
type_ = "1"
if ( os.path.islink(file) ):
pb = self.cfg.pixbuf_list_link_16
elif ( os.path.ismount(file) ):
pb = self.cfg.pixbuf_list_mount_16
elif ( os.path.isdir(file) ):
pb = self.cfg.pixbuf_list_folder_16
elif ( os.path.isfile(file) ):
type_ = "2"
pb = self.cfg.pixbuf_list_file_16
filestat = os.stat(self.current_folder+"/"+f)
except:
continue
modified = time.strftime("%d.%m.%y %H:%M:%S ", time.gmtime(filestat.st_mtime))
sort_key = ""
for c in unicode(f,'utf-8').lower():
if ( c != " " ):
sort_key += c
if ( c != "." ):
break
list.append([pb, f, filestat.st_size, modified, type_, sort_key])
list.sort(key=lambda tup: (tup[4],tup[5]))
self.fileList.append([None,"..","","","",""])
for z in list:
self.fileList.append(z)
##############################################
def get_filenames(self, mode=None):
model, rows = self.treeSelection.get_selected_rows()
if ( rows == [] ):
return []
if ( mode == "first" ):
            # Expand and select the first position
row = rows[0]
self.treeView.scroll_to_cell(row, None, use_align=True, row_align=0.5, col_align=0.0)
self.treeView.expand_to_path(row)
self.treeSelection.unselect_all()
self.treeSelection.select_path(row)
model, rows = self.treeSelection.get_selected_rows()
list = []
for row in rows:
if ( mode == "name" ):
list.append(self.fileList[row][1])
else:
list.append(self.current_folder+self.fileList[row][1])
return list
##############################################
def copy(self, data=None):
clipboard = gtk.clipboard_get()
clipboard.set_with_data(self.TARGETS, self.copy_files, self.clear_files)
def copy_files(self, clipboard, selectiondata, info, data):
txt = ""
#print selectiondata.target
if ( selectiondata.target == "x-special/gnome-copied-files" ):
txt = txt+"copy\n"
for file in self.get_filenames():
txt = txt+"file://"+urllib.quote(file)+"\n"
selectiondata.set(selectiondata.target, 8, txt)
def clear_files(self, clipboard, data):
pass
##############################################
def paste(self, data=None):
clipboard = gtk.clipboard_get()
#clipboard.request_targets(self.get_targets, user_data=None)
clipboard.request_contents("x-special/gnome-copied-files", self.paste_files)
clipboard.request_contents("application/x-kde-urilist", self.paste_files)
def get_targets(self, d1, d2, d3):
print d1, d2, d3
def paste_files(self, clipboard, selectiondata, udata):
if ( selectiondata.data == None ):
return
#print selectiondata.target
files = selectiondata.data.splitlines()
action = "copy"
if ( selectiondata.target == "x-special/gnome-copied-files" ):
action = files[0]
del files[0]
elif ( selectiondata.target == "application/x-kde-urilist" ):
del files[len(files)-1]
for file in files:
file = urllib.unquote(file)[7:]
try:
if (os.path.isfile(file) == False ):
continue
if ( action == "copy" ):
shutil.copy(unicode(file), self.current_folder)
elif ( action == "cut" ):
shutil.move(unicode(file), self.current_folder)
except:
continue
self.create_fileList()
##############################################
def new_folder(self, data=None):
try:
os.makedirs(self.current_folder+"Новая папка")
except:
return
self.create_fileList()
row = False
for i in range(len(self.fileList)):
if ( self.fileList[i][1] == "Новая папка" ):
row = (i,)
break
if ( row == False ):
return
col = self.treeView.get_column(0)
cell = col.get_cell_renderers()[1]
cell.set_property('editable', True)
self.file = self.current_folder+"Новая папка"
self.treeView.set_cursor_on_cell(row, col, cell, start_editing=True)
def rename(self, data=None):
        # Select the first position
f_list = self.get_filenames("first")
model, rows = self.treeSelection.get_selected_rows()
if ( rows == [] ):
return
row = rows[0]
col = self.treeView.get_column(0)
cell = col.get_cell_renderers()[1]
cell.set_property('editable', True)
self.file = f_list[0]
self.treeView.set_cursor_on_cell(row, col, cell, start_editing=True)
def rename_file(self, cell, path, new_text):
cell.set_property('editable', False)
if ( not self.file or self.file == self.current_folder+new_text ):
return False
try:
os.rename(self.file, self.current_folder+new_text)
except:
return False
self.create_fileList()
##############################################
def hidden(self, data=None):
if ( self.show_hidden ):
self.show_hidden = False
else:
self.show_hidden = True
self.create_fileList()
##############################################
def remove(self, data=None):
dialog_list = self.get_filenames("name")
file_list = self.get_filenames()
window = self.get_toplevel()
if ( message_dialog(window, _("Remove")+" ?\n", dialog_list, True, "str") != True ):
return
for file in file_list:
try:
if ( os.path.isfile(file) == True ):
os.remove(file)
elif ( os.path.isdir(file) == True ):
os.rmdir(file)
except:
continue
self.create_fileList()
##############################################
def up(self, data=None):
if ( self.current_folder == self.folder ):
return
current = self.current_folder.split("/")
up = ""
for x in range(len(current)-2):
up = up+current[x]+"/"
self.current_folder = up
self.create_fileList()
##############################################
def refresh(self, data=None):
self.create_fileList()
##############################################
def selection_changed(self, data=None, event=None):
pass
##############################################
def tree_button_press_event(self, data=None, event=None):
if (event.button==3):
self.context_menu(event)
if (event.type == gtk.gdk._2BUTTON_PRESS):
self.open_file()
def open_file(self, data=None):
f_list = self.get_filenames()
if ( f_list[0][len(self.current_folder):] == ".." ):
self.up()
return
try:
isdir = os.path.isdir(f_list[0])
except:
return
if ( isdir == True ):
self.current_folder = f_list[0]+"/"
self.create_fileList()
return
try:
isfile = os.path.isfile(f_list[0])
except:
return
if ( isfile == True ):
cmd = "xdg-open "+f_list[0].replace(" ", "\\ ")
proc = popen_sub(self.cfg, shlex.split(cmd))
return
def context_menu(self, event):
mouseMenu = gtk.Menu()
item = menu_image_button(self.cfg.pixbuf_list_file_up_16, _("Open"))
item.connect('activate', self.open_file)
mouseMenu.append(item)
item = gtk.SeparatorMenuItem()
mouseMenu.append(item)
item = menu_image_button(self.cfg.pixbuf_list_folder_add_16, _("New folder"))
item.connect('activate', self.new_folder)
mouseMenu.append(item)
item = menu_image_button(self.cfg.pixbuf_list_file_copy_16, _("Copy"))
item.connect('activate', self.copy)
mouseMenu.append(item)
item = menu_image_button(self.cfg.pixbuf_list_file_paste_16, _("Paste"))
item.connect('activate', self.paste)
mouseMenu.append(item)
item = menu_image_button(self.cfg.pixbuf_list_file_edit_16, _("Rename"))
item.connect('activate', self.rename)
mouseMenu.append(item)
item = menu_image_button(self.cfg.pixbuf_list_file_remove_16, _("Remove"))
item.connect('activate', self.remove)
mouseMenu.append(item)
mouseMenu.popup(None, None, None, event.button, event.time, None)
mouseMenu.show_all()
def callback(self, data1=None, data2=None, data3=None):
pass
##############################################
def drag_data_get_data(self, treeview, context, selection, target_id, etime, cfg):
#print selection.target
pass
def drag_data_received_data(self, treeview, context, x, y, selection, info, etime, cfg):
#print selection.target
pass
####################################################################################################
class tooltips_(gtk.Window):
def __init__(self):
gtk.Window.__init__(self, gtk.WINDOW_POPUP)
self.set_resizable(False)
self.set_border_width(5)
self.set_app_paintable(True)
self.label = label = gtk.Label()
label.set_line_wrap(True)
label.set_alignment(0.5, 0.5)
self.add(label)
self.visible = False
def set_text(self, x=None, y=None, text=None):
if ( not x or not y or not text ):
if ( self.visible ):
self.hide_all()
self.visible = False
else:
self.label.set_text(text)
self.move(int(x+10), int(y))
if ( not self.visible ):
self.show_all()
self.visible = True
def show(self):
self.show_all()
    def hide(self):
        # Delegate to the gtk.Window implementation; calling self.hide() here
        # would recurse forever.
        gtk.Window.hide(self)
####################################################################################################
def entry_error(cfg, entry):
if ( cfg.entry_error_busy == True ):
return
cfg.entry_error_busy = True
thread_messageBox = thread_gfunc(cfg, False, True, entry_error_t, cfg, entry)
thread_messageBox.start()
####################################################################################################
def entry_error_t(cfg, entry):
base_color = entry.child.get_style().copy().base[gtk.STATE_NORMAL]
for i in range(3):
gtk.gdk.threads_enter()
entry.child.modify_base(gtk.STATE_NORMAL, gtk.gdk.color_parse("gray"))
gtk.gdk.threads_leave()
time.sleep(0.3)
gtk.gdk.threads_enter()
entry.child.modify_base(gtk.STATE_NORMAL, base_color)
gtk.gdk.threads_leave()
time.sleep(0.3)
cfg.entry_error_busy = False
####################################################################################################
| xak-mcu/ruleuser | widget.py | Python | gpl-2.0 | 22,058 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/cowboy/PycharmProjects/CVP/Recuperacion.ui'
#
# Created: Wed May 27 02:08:30 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(400, 356)
self.label = QtGui.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(100, 60, 201, 17))
self.label.setObjectName(_fromUtf8("label"))
self.txtDocumento = QtGui.QLineEdit(Dialog)
self.txtDocumento.setGeometry(QtCore.QRect(80, 120, 113, 27))
self.txtDocumento.setObjectName(_fromUtf8("txtDocumento"))
self.txtRespuesta = QtGui.QLineEdit(Dialog)
self.txtRespuesta.setGeometry(QtCore.QRect(200, 210, 113, 27))
self.txtRespuesta.setObjectName(_fromUtf8("txtRespuesta"))
self.label_2 = QtGui.QLabel(Dialog)
self.label_2.setGeometry(QtCore.QRect(170, 170, 66, 17))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_3 = QtGui.QLabel(Dialog)
self.label_3.setGeometry(QtCore.QRect(170, 310, 66, 17))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.btnConsultar = QtGui.QPushButton(Dialog)
self.btnConsultar.setGeometry(QtCore.QRect(220, 120, 98, 27))
self.btnConsultar.setObjectName(_fromUtf8("btnConsultar"))
self.label_4 = QtGui.QLabel(Dialog)
self.label_4.setGeometry(QtCore.QRect(90, 220, 71, 17))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.btnComparar = QtGui.QPushButton(Dialog)
self.btnComparar.setGeometry(QtCore.QRect(150, 260, 98, 27))
self.btnComparar.setObjectName(_fromUtf8("btnComparar"))
self.label_5 = QtGui.QLabel(Dialog)
self.label_5.setGeometry(QtCore.QRect(100, 20, 201, 17))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Dialog", None))
self.label.setText(_translate("Dialog", "Escribe Documento deUsuario", None))
self.label_2.setText(_translate("Dialog", "Pregunta", None))
self.label_3.setText(_translate("Dialog", "CLAvE", None))
self.btnConsultar.setText(_translate("Dialog", "consultar", None))
self.label_4.setText(_translate("Dialog", "Respuestas", None))
self.btnComparar.setText(_translate("Dialog", "responder", None))
self.label_5.setText(_translate("Dialog", "Recuperacion de COntraseña", None))
| esvire/CVP | Recuperacion_ui.py | Python | gpl-2.0 | 3,165 |
"""
Views for creating, editing and viewing site-specific user profiles.
"""
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render_to_response
from django.template import RequestContext
def create_profile(request, form_class, success_url=None,
template_name='profiles/private/create_profile.html',
extra_context=None):
"""
Create a profile for the current user, if one doesn't already
exist.
If the user already has a profile, a redirect will be issued to the
:view:`profiles.views.edit_profile` view.
**Optional arguments:**
``extra_context``
A dictionary of variables to add to the template context. Any
callable object in this dictionary will be called to produce
the end result which appears in the context.
``form_class``
The form class to use for validating and creating the user
profile. This form class must define a method named
``save()``, implementing the same argument signature as the
``save()`` method of a standard Django ``ModelForm`` (this
view will call ``save(commit=False)`` to obtain the profile
object, and fill in the user before the final save). If the
profile object includes many-to-many relations, the convention
established by ``ModelForm`` of using a method named
``save_m2m()`` will be used, and so your form class should
also define this method.
``success_url``
The URL to redirect to after successful profile creation. If
this argument is not supplied, this will default to the URL of
:view:`profiles.views.profile_detail` for the newly-created
profile object.
``template_name``
The template to use when displaying the profile-creation
form. If not supplied, this will default to
        :template:`profiles/private/create_profile.html`.
**Context:**
``form``
The profile-creation form.
**Template:**
``template_name`` keyword argument, or
    :template:`profiles/private/create_profile.html`.
"""
try:
profile_obj = request.user.profile
return HttpResponseRedirect(reverse('profiles_edit_profile'))
except ObjectDoesNotExist:
pass
#
# We set up success_url here, rather than as the default value for
# the argument. Trying to do it as the argument's default would
# mean evaluating the call to reverse() at the time this module is
# first imported, which introduces a circular dependency: to
# perform the reverse lookup we need access to profiles/urls.py,
# but profiles/urls.py in turn imports this module.
#
if success_url is None:
success_url = reverse('profiles_profile_detail',
kwargs={'username': request.user.username})
if request.method == 'POST':
form = form_class(data=request.POST, files=request.FILES)
if form.is_valid():
profile_obj = form.save(commit=False)
profile_obj.user = request.user
profile_obj.save()
if hasattr(form, 'save_m2m'):
form.save_m2m()
return HttpResponseRedirect(success_url)
else:
form = form_class()
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
return render_to_response(template_name,
{'form': form},
context_instance=context)
create_profile = login_required(create_profile)
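# Illustrative sketch (not part of the original module): a ``form_class`` that
# satisfies the contract documented above. ``UserProfile`` and ``myapp`` are
# hypothetical names; a ModelForm already provides save(commit=False) and
# save_m2m(), so it slots straight into create_profile().
#
# from django import forms
# from myapp.models import UserProfile
#
# class UserProfileForm(forms.ModelForm):
#     class Meta:
#         model = UserProfile
#         exclude = ('user',)  # the view assigns ``user`` before the final save
#
# # urls.py would then pass {'form_class': UserProfileForm} to create_profile.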
def edit_profile(request, form_class, success_url=None,
template_name='profiles/private/edit_profile.html',
extra_context=None):
"""
Edit the current user's profile.
If the user does not already have a profile, a redirect will be issued to
the :view:`profiles.views.create_profile` view.
**Optional arguments:**
``extra_context``
A dictionary of variables to add to the template context. Any
callable object in this dictionary will be called to produce
the end result which appears in the context.
``form_class``
The form class to use for validating and editing the user
profile. This form class must operate similarly to a standard
Django ``ModelForm`` in that it must accept an instance of the
object to be edited as the keyword argument ``instance`` to
its constructor, and it must implement a method named
``save()`` which will save the updates to the object.
``success_url``
The URL to redirect to following a successful edit. If not
specified, this will default to the URL of
:view:`profiles.views.profile_detail` for the profile object
being edited.
``template_name``
The template to use when displaying the profile-editing
form. If not specified, this will default to
        :template:`profiles/private/edit_profile.html`.
**Context:**
``form``
The form for editing the profile.
``profile``
The user's current profile.
**Template:**
``template_name`` keyword argument or
    :template:`profiles/private/edit_profile.html`.
"""
try:
profile_obj = request.user.profile
except ObjectDoesNotExist:
return HttpResponseRedirect(reverse('profiles_profile_create'))
if success_url is None:
success_url = reverse('profiles_profile_detail',
kwargs={'username': request.user.username})
if request.method == 'POST':
form = form_class(data=request.POST, files=request.FILES, instance=profile_obj)
if form.is_valid():
form.save()
return HttpResponseRedirect(success_url)
else:
form = form_class(instance=profile_obj)
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
return render_to_response(template_name,
{'form': form,
'profile': profile_obj,
'user': profile_obj.user,
},
context_instance=context)
edit_profile = login_required(edit_profile)
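# Illustrative sketch (not part of the original module): using ``extra_context``
# with a callable value. Callables are invoked when the context is built, so
# ``datetime.datetime.now`` below is evaluated per request rather than at
# URLconf import time. The wiring shown is hypothetical.
#
# import datetime
# extra = {'page_title': 'Edit your profile', 'now': datetime.datetime.now}
# # in urls.py, pass {'form_class': UserProfileForm, 'extra_context': extra}
# # as the view keyword arguments for edit_profile.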
def profile_detail(request, username, public_profile_field=None,
template_name='profiles/public/profile_detail.html',
extra_context=None):
"""
Detail view of a user's profile.
If the user has not yet created a profile, ``Http404`` will be
raised.
**Required arguments:**
``username``
The username of the user whose profile is being displayed.
**Optional arguments:**
``extra_context``
A dictionary of variables to add to the template context. Any
callable object in this dictionary will be called to produce
the end result which appears in the context.
``public_profile_field``
The name of a ``BooleanField`` on the profile model; if the
value of that field on the user's profile is ``False``, the
``profile`` variable in the template will be ``None``. Use
this feature to allow users to mark their profiles as not
being publicly viewable.
If this argument is not specified, it will be assumed that all
users' profiles are publicly viewable.
``template_name``
The name of the template to use for displaying the profile. If
not specified, this will default to
        :template:`profiles/public/profile_detail.html`.
**Context:**
``profile``
The user's profile, or ``None`` if the user's profile is not
publicly viewable (see the description of
``public_profile_field`` above).
**Template:**
``template_name`` keyword argument or
    :template:`profiles/public/profile_detail.html`.
"""
user = get_object_or_404(User, username=username)
try:
profile_obj = user.profile
except ObjectDoesNotExist:
raise Http404
if public_profile_field is not None and \
not getattr(profile_obj, public_profile_field):
profile_obj = None
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
return render_to_response(template_name,
{'profile': profile_obj},
context_instance=context)
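# Illustrative sketch (not part of the original module): a profile model with a
# hypothetical ``is_public`` BooleanField, suitable as ``public_profile_field``
# so users can opt out of having a publicly viewable profile.
#
# class UserProfile(models.Model):
#     user = models.OneToOneField(User)
#     is_public = models.BooleanField(default=True)
#
# # in urls.py, pass {'public_profile_field': 'is_public'} to profile_detail.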
| raven47git/readthedocs.org | readthedocs/profiles/views.py | Python | mit | 8,999 |
from erukar.system.engine import MagicEffect
class Divinomorph(MagicEffect):
def enact(self, instigator, target, cmd, mutator):
mutator.set('damage_type', 'divine')
mutator.set('sanctity', 1)
return mutator
| etkirsch/legends-of-erukar | erukar/content/magic/modifiers/Divinomorph.py | Python | agpl-3.0 | 237 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from activitystreams import parse as as_parser
from uuid import uuid4 as uuid
from dino.validation.base import BaseValidator
from dino.environ import ConfigDict
from dino.config import SessionKeys
from dino import environ
__author__ = 'Oscar Eriksson <[email protected]>'
class BaseValidatorTest(TestCase):
def setUp(self):
self.validator = BaseValidator()
environ.env.session = ConfigDict({})
environ.env.session.set(SessionKeys.user_id.value, '1234')
def test_id_on_actor(self):
activity = as_parser(self.get_act())
valid, reason = self.validator.validate_request(activity)
self.assertTrue(valid)
def test_wrong_on_actor(self):
activity = self.get_act()
activity['actor']['id'] = '5678'
activity = as_parser(activity)
valid, reason = self.validator.validate_request(activity)
self.assertFalse(valid)
def test_no_id_on_actor(self):
activity = self.get_act()
del activity['actor']['id']
activity = as_parser(activity)
valid, reason = self.validator.validate_request(activity)
self.assertFalse(valid)
def test_valid_session(self):
is_valid, reason = self.validator.validate_session({
'user_id': '1234',
'user_name': 'Batman',
'token': str(uuid())
})
self.assertTrue(is_valid)
def test_session_is_missing_user_id(self):
is_valid, reason = self.validator.validate_session({
'user_name': 'Batman',
'token': str(uuid())
})
self.assertFalse(is_valid)
def test_session_is_missing_user_name(self):
is_valid, reason = self.validator.validate_session({
'user_id': '1234',
'token': str(uuid())
})
self.assertFalse(is_valid)
def test_session_is_missing_token(self):
is_valid, reason = self.validator.validate_session({
'user_id': '1234',
'user_name': 'Batman'
})
self.assertFalse(is_valid)
def test_session_is_blank_user_id(self):
is_valid, reason = self.validator.validate_session({
'user_id': '',
'user_name': 'Batman',
'token': str(uuid())
})
self.assertFalse(is_valid)
def test_session_is_blank_user_name(self):
is_valid, reason = self.validator.validate_session({
'user_id': '1234',
'user_name': '',
'token': str(uuid())
})
self.assertFalse(is_valid)
def test_session_is_blank_token(self):
is_valid, reason = self.validator.validate_session({
'user_id': '1234',
'user_name': 'Batman',
'token': ''
})
self.assertFalse(is_valid)
def test_session_is_missing_all(self):
is_valid, reason = self.validator.validate_session({})
self.assertFalse(is_valid)
def get_act(self):
return {
'actor': {
'id': '1234'
},
'verb': 'test',
'target': {
'id': '4321'
}
}
| thenetcircle/dino | test/validation/test_validation_base.py | Python | apache-2.0 | 3,720 |
from .user_orders_request import UserOrdersRequest
from .user_orders_schema import UserOrdersSchema
| willrp/willbuyer | backend/util/request/order/user_orders/__init__.py | Python | mit | 100 |
import time
import requiem
sensor = requiem.Sensor("test")
alert = requiem.IDMEFAlert()
alert["alert.classification(0).name"] = "test python"
alert["alert.assessment.impact.severity"] = "low"
alert["alert.assessment.impact.completion"] = "failed"
alert["alert.assessment.impact.type"] = "recon"
alert["alert.detect_time"] = time.time()
alert["alert.source(0).node.address(0).address"] = "10.0.0.1"
alert["alert.target(0).node.address(1).address"] = "10.0.0.2"
alert["alert.target(1).node.address(0).address"] = "10.0.0.3"
alert["alert.additional_data(0).data"] = "something"
sensor.send_alert(alert)
| requiem-forasiem/librequiem | docs/api/examples/python/send_alert.py | Python | gpl-2.0 | 604 |
# -*- coding: iso-8859-1 -*-
#
# The Python Imaging Library.
# $Id$
#
# WAL file handling
#
# History:
# 2003-04-23 fl created
#
# Copyright (c) 2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
# NOTE: This format cannot be automatically recognized, so the reader
# is not registered for use with Image.open(). To open a WAL file, use
# the WalImageFile.open() function instead.
# This reader is based on the specification available from:
# http://www.flipcode.com/tutorials/tut_q2levels.shtml
# and has been tested with a few sample files found using google.
from __future__ import print_function
from PIL import Image, _binary
try:
import builtins
except ImportError:
import __builtin__
builtins = __builtin__
i32 = _binary.i32le
##
# Load texture from a Quake2 WAL texture file.
# <p>
# By default, a Quake2 standard palette is attached to the texture.
# To override the palette, use the <b>putpalette</b> method.
#
# @param filename WAL file name, or an opened file handle.
# @return An image instance.
def open(filename):
# FIXME: modify to return a WalImageFile instance instead of
# plain Image object ?
if hasattr(filename, "read"):
fp = filename
else:
fp = builtins.open(filename, "rb")
# read header fields
header = fp.read(32+24+32+12)
size = i32(header, 32), i32(header, 36)
offset = i32(header, 40)
# load pixel data
fp.seek(offset)
im = Image.frombytes("P", size, fp.read(size[0] * size[1]))
im.putpalette(quake2palette)
im.format = "WAL"
im.format_description = "Quake2 Texture"
# strings are null-terminated
im.info["name"] = header[:32].split(b"\0", 1)[0]
next_name = header[56:56+32].split(b"\0", 1)[0]
if next_name:
im.info["next_name"] = next_name
return im
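# Illustrative sketch (not part of the original module): overriding the default
# Quake2 palette mentioned above with putpalette(). The file name is
# hypothetical; the list built below is a plain 768-entry grayscale palette
# (one R, G, B triple per index).
#
# im = open("floor1_1.wal")
# grayscale = [v for v in range(256) for _ in range(3)]
# im.putpalette(grayscale)
# im.save("floor1_1.png")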
quake2palette = (
# default palette taken from piffo 0.93 by Hans Häggström
b"\x01\x01\x01\x0b\x0b\x0b\x12\x12\x12\x17\x17\x17\x1b\x1b\x1b\x1e"
b"\x1e\x1e\x22\x22\x22\x26\x26\x26\x29\x29\x29\x2c\x2c\x2c\x2f\x2f"
b"\x2f\x32\x32\x32\x35\x35\x35\x37\x37\x37\x3a\x3a\x3a\x3c\x3c\x3c"
b"\x24\x1e\x13\x22\x1c\x12\x20\x1b\x12\x1f\x1a\x10\x1d\x19\x10\x1b"
b"\x17\x0f\x1a\x16\x0f\x18\x14\x0d\x17\x13\x0d\x16\x12\x0d\x14\x10"
b"\x0b\x13\x0f\x0b\x10\x0d\x0a\x0f\x0b\x0a\x0d\x0b\x07\x0b\x0a\x07"
b"\x23\x23\x26\x22\x22\x25\x22\x20\x23\x21\x1f\x22\x20\x1e\x20\x1f"
b"\x1d\x1e\x1d\x1b\x1c\x1b\x1a\x1a\x1a\x19\x19\x18\x17\x17\x17\x16"
b"\x16\x14\x14\x14\x13\x13\x13\x10\x10\x10\x0f\x0f\x0f\x0d\x0d\x0d"
b"\x2d\x28\x20\x29\x24\x1c\x27\x22\x1a\x25\x1f\x17\x38\x2e\x1e\x31"
b"\x29\x1a\x2c\x25\x17\x26\x20\x14\x3c\x30\x14\x37\x2c\x13\x33\x28"
b"\x12\x2d\x24\x10\x28\x1f\x0f\x22\x1a\x0b\x1b\x14\x0a\x13\x0f\x07"
b"\x31\x1a\x16\x30\x17\x13\x2e\x16\x10\x2c\x14\x0d\x2a\x12\x0b\x27"
b"\x0f\x0a\x25\x0f\x07\x21\x0d\x01\x1e\x0b\x01\x1c\x0b\x01\x1a\x0b"
b"\x01\x18\x0a\x01\x16\x0a\x01\x13\x0a\x01\x10\x07\x01\x0d\x07\x01"
b"\x29\x23\x1e\x27\x21\x1c\x26\x20\x1b\x25\x1f\x1a\x23\x1d\x19\x21"
b"\x1c\x18\x20\x1b\x17\x1e\x19\x16\x1c\x18\x14\x1b\x17\x13\x19\x14"
b"\x10\x17\x13\x0f\x14\x10\x0d\x12\x0f\x0b\x0f\x0b\x0a\x0b\x0a\x07"
b"\x26\x1a\x0f\x23\x19\x0f\x20\x17\x0f\x1c\x16\x0f\x19\x13\x0d\x14"
b"\x10\x0b\x10\x0d\x0a\x0b\x0a\x07\x33\x22\x1f\x35\x29\x26\x37\x2f"
b"\x2d\x39\x35\x34\x37\x39\x3a\x33\x37\x39\x30\x34\x36\x2b\x31\x34"
b"\x27\x2e\x31\x22\x2b\x2f\x1d\x28\x2c\x17\x25\x2a\x0f\x20\x26\x0d"
b"\x1e\x25\x0b\x1c\x22\x0a\x1b\x20\x07\x19\x1e\x07\x17\x1b\x07\x14"
b"\x18\x01\x12\x16\x01\x0f\x12\x01\x0b\x0d\x01\x07\x0a\x01\x01\x01"
b"\x2c\x21\x21\x2a\x1f\x1f\x29\x1d\x1d\x27\x1c\x1c\x26\x1a\x1a\x24"
b"\x18\x18\x22\x17\x17\x21\x16\x16\x1e\x13\x13\x1b\x12\x12\x18\x10"
b"\x10\x16\x0d\x0d\x12\x0b\x0b\x0d\x0a\x0a\x0a\x07\x07\x01\x01\x01"
b"\x2e\x30\x29\x2d\x2e\x27\x2b\x2c\x26\x2a\x2a\x24\x28\x29\x23\x27"
b"\x27\x21\x26\x26\x1f\x24\x24\x1d\x22\x22\x1c\x1f\x1f\x1a\x1c\x1c"
b"\x18\x19\x19\x16\x17\x17\x13\x13\x13\x10\x0f\x0f\x0d\x0b\x0b\x0a"
b"\x30\x1e\x1b\x2d\x1c\x19\x2c\x1a\x17\x2a\x19\x14\x28\x17\x13\x26"
b"\x16\x10\x24\x13\x0f\x21\x12\x0d\x1f\x10\x0b\x1c\x0f\x0a\x19\x0d"
b"\x0a\x16\x0b\x07\x12\x0a\x07\x0f\x07\x01\x0a\x01\x01\x01\x01\x01"
b"\x28\x29\x38\x26\x27\x36\x25\x26\x34\x24\x24\x31\x22\x22\x2f\x20"
b"\x21\x2d\x1e\x1f\x2a\x1d\x1d\x27\x1b\x1b\x25\x19\x19\x21\x17\x17"
b"\x1e\x14\x14\x1b\x13\x12\x17\x10\x0f\x13\x0d\x0b\x0f\x0a\x07\x07"
b"\x2f\x32\x29\x2d\x30\x26\x2b\x2e\x24\x29\x2c\x21\x27\x2a\x1e\x25"
b"\x28\x1c\x23\x26\x1a\x21\x25\x18\x1e\x22\x14\x1b\x1f\x10\x19\x1c"
b"\x0d\x17\x1a\x0a\x13\x17\x07\x10\x13\x01\x0d\x0f\x01\x0a\x0b\x01"
b"\x01\x3f\x01\x13\x3c\x0b\x1b\x39\x10\x20\x35\x14\x23\x31\x17\x23"
b"\x2d\x18\x23\x29\x18\x3f\x3f\x3f\x3f\x3f\x39\x3f\x3f\x31\x3f\x3f"
b"\x2a\x3f\x3f\x20\x3f\x3f\x14\x3f\x3c\x12\x3f\x39\x0f\x3f\x35\x0b"
b"\x3f\x32\x07\x3f\x2d\x01\x3d\x2a\x01\x3b\x26\x01\x39\x21\x01\x37"
b"\x1d\x01\x34\x1a\x01\x32\x16\x01\x2f\x12\x01\x2d\x0f\x01\x2a\x0b"
b"\x01\x27\x07\x01\x23\x01\x01\x1d\x01\x01\x17\x01\x01\x10\x01\x01"
b"\x3d\x01\x01\x19\x19\x3f\x3f\x01\x01\x01\x01\x3f\x16\x16\x13\x10"
b"\x10\x0f\x0d\x0d\x0b\x3c\x2e\x2a\x36\x27\x20\x30\x21\x18\x29\x1b"
b"\x10\x3c\x39\x37\x37\x32\x2f\x31\x2c\x28\x2b\x26\x21\x30\x22\x20"
)
if __name__ == "__main__":
im = open("../hacks/sample.wal")
print(im.info, im.mode, im.size)
im.save("../out.png")
| Amechi101/concepteur-market-app | venv/lib/python2.7/site-packages/PIL/WalImageFile.py | Python | mit | 5,524 |
#!/usr/bin/env dls-python
## \namespace read_archiver
# read_archiver.py is a python script to read bits
# from pmac status mbbo records in the archiver
# The intended use case is to query for changes in status bits
#
# Example query:
# \verbatim
# dls-python read_archiver.py BL02I-MO-STEP-05:AXIS1:status3 0x4 2015-02-10@00:00 2015-02-11@11:40
# \endverbatim
# queries for bit 0x4 of status byte 3 which represents following error.
# The output in this case was
# \verbatim
# 1 Mon Feb 9 08:42:44 2015 False 0
# 6 Wed Feb 11 11:26:44 2015 True 0
# 7 Wed Feb 11 11:29:48 2015 False 0
# 11 Wed Feb 11 11:34:04 2015 True 0
# 12 Wed Feb 11 11:36:04 2015 False 0
# \endverbatim
# The output indicates that the axis went into following error twice
#
# Help text from read_archiver.py -h
#\verbatim
#read bits from bit field records in DLS archiver
#Invocation:
# read_archiver.py <pvname> <value> <start_time> <end_time>
#
#Where:
# <pvname> is a bit field pv, e.g. BL23I-MO-STEP-01:PLCDISBITS00
# <value> is a bit field mask in decimal or hex, e.g. 0x800 or 2048
# <start_time> is a time in the format "%Y-%m-%d@%H:%M" (no seconds)
# for example 2012-11-11@17:10
# <end_time> is a time in the format "%Y-%m-%d@%H:%M" (no seconds)
#
#The output is of the form:
#
# count date_time [True/False] severity
#
#Example pvs and masks:
#
# for $(pmac):PLCDISBITS00 use these values
# plc0 0x1
# plc1 0x2
# plc2 0x4
# plc3 0x8
# plc4 0x10
# plc5 0x20
# plc6 0x40
# plc7 0x80
# plc8 0x100
# plc9 0x200
# plc10 0x400
# plc11 0x800
# plc12 0x1000
# plc13 0x2000
# plc14 0x4000
# plc15 0x8000
#
# for $(pmac):AXIS<N>:status1 use these values cf. Turbo SRM p. 310-315
# motor activated 0x8000
# negative end limit set 0x4000
# positive end limit set 0x2000
# amplifier enabled 0x800
# open loop mode 0x400
# desired velocity zero 0x20
# home search in progress 0x4
#
# for $(pmac):AXIS<N>:status3 use these values
# assigned to c.s. 0x8000
# stopped on position limit 0x800
# home complete 0x400
# fatal following error 0x4
# warning following error 0x2
# in position 0x1
#\endverbatim
import time
import sys
from xmlrpclib import ServerProxy, Error
Archiver_URL = "http://archiver.pri.diamond.ac.uk/" \
+ "archive/cgi/ArchiveDataServer.cgi"
def help():
print sys.argv[0] + """
read bits from bit field records in DLS archiver
Invocation:
read_archiver.py <pvname> <value> <start_time> <end_time>
Where:
<pvname> is a bit field pv, e.g. BL23I-MO-STEP-01:PLCDISBITS00
<value> is a bit field mask in decimal or hex, e.g. 0x800 or 2048
<start_time> is a time in the format "%Y-%m-%d@%H:%M" (no seconds)
for example 2012-11-11@17:10
<end_time> is a time in the format "%Y-%m-%d@%H:%M" (no seconds)
The output is of the form:
count date_time [True/False] severity
Example pvs and masks:
for $(pmac):PLCDISBITS00 use these values
plc0 0x1
plc1 0x2
plc2 0x4
plc3 0x8
plc4 0x10
plc5 0x20
plc6 0x40
plc7 0x80
plc8 0x100
plc9 0x200
plc10 0x400
plc11 0x800
plc12 0x1000
plc13 0x2000
plc14 0x4000
plc15 0x8000
for $(pmac):AXIS<N>:status1 use these values cf. Turbo SRM p. 310-315
motor activated 0x8000
negative end limit set 0x4000
positive end limit set 0x2000
amplifier enabled 0x800
open loop mode 0x400
desired velocity zero 0x20
home search in progress 0x4
for $(pmac):AXIS<N>:status3 use these values
assigned to c.s. 0x8000
stopped on position limit 0x800
home complete 0x400
fatal following error 0x4
warning following error 0x2
in position 0x1
"""
def main(pvname, check_value_str, start_str, end_str ):
server = ServerProxy(Archiver_URL)
archiver_key = 1000 # key for "all" archiver
# start time
t1 = time.strptime(start_str, "%Y-%m-%d@%H:%M")
time1 = time.mktime(t1)
t2 = time.strptime(end_str, "%Y-%m-%d@%H:%M")
time2 = time.mktime(t2)
check_value = int(check_value_str,0)
complete = False
no_data = False
count = 1
while not complete:
result = server.archiver.values(archiver_key, [pvname],
int(time1), 0, int(time2), 0, 100, 0)
assert(len(result) == 1)
if len(result[0]['values']) == 0:
no_data = True
if len(result[0]['values']) != 100:
complete = True
for entry in result[0]['values']:
bit_value = entry['value'][0] & check_value == check_value
if count == 1:
last_value = not bit_value
if bit_value != last_value:
print count, time.ctime(entry['secs']), bit_value, entry['sevr']
last_value = bit_value
count = count + 1
#Time of latest value
if not no_data:
if result[0]['values'][-1]['secs'] >= time2:
complete = True
else:
time1 = result[0]['values'][-1]['secs'] + 1
if no_data:
print "no data available"
if __name__ == "__main__":
if len(sys.argv) < 2 or sys.argv[1] == "-h":
help()
sys.exit(1)
if len(sys.argv) < 5:
help()
sys.exit(1)
(pvname, check_value_str, start_str, end_str ) = sys.argv[1:5]
main(pvname, check_value_str, start_str, end_str )
| dls-controls/pmacUtil | pmacUtilApp/src/read_archiver.py | Python | apache-2.0 | 6,153 |
# Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from neutronclient.neutron.v2_0 import metering
from neutronclient.tests.unit import test_cli20
class CLITestV20MeteringJSON(test_cli20.CLITestV20Base):
non_admin_status_resources = ['metering_label', 'metering_label_rule']
def test_create_metering_label(self):
# Create a metering label.
resource = 'metering_label'
cmd = metering.CreateMeteringLabel(
test_cli20.MyApp(sys.stdout), None)
name = 'my label'
myid = 'myid'
description = 'my description'
args = [name, '--description', description, '--shared']
position_names = ['name', 'description', 'shared']
position_values = [name, description, True]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_list_metering_labels(self):
resources = "metering_labels"
cmd = metering.ListMeteringLabel(
test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd)
def test_delete_metering_label(self):
# Delete a metering label.
resource = 'metering_label'
cmd = metering.DeleteMeteringLabel(
test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
def test_show_metering_label(self):
resource = 'metering_label'
cmd = metering.ShowMeteringLabel(
test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id'])
def test_create_metering_label_rule(self):
resource = 'metering_label_rule'
cmd = metering.CreateMeteringLabelRule(
test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
metering_label_id = 'aaa'
remote_ip_prefix = '10.0.0.0/24'
direction = 'ingress'
args = [metering_label_id, remote_ip_prefix, '--direction', direction,
'--excluded']
position_names = ['metering_label_id', 'remote_ip_prefix', 'direction',
'excluded']
position_values = [metering_label_id, remote_ip_prefix,
direction, True]
self._test_create_resource(resource, cmd, metering_label_id,
myid, args, position_names, position_values)
def test_list_metering_label_rules(self):
resources = "metering_label_rules"
cmd = metering.ListMeteringLabelRule(
test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd)
def test_delete_metering_label_rule(self):
resource = 'metering_label_rule'
cmd = metering.DeleteMeteringLabelRule(
test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
def test_show_metering_label_rule(self):
resource = 'metering_label_rule'
cmd = metering.ShowMeteringLabelRule(
test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id'])
| openstack/python-neutronclient | neutronclient/tests/unit/test_cli20_metering.py | Python | apache-2.0 | 3,969 |