repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
HackerEarth/django-allauth | allauth/socialaccount/providers/twitter/views.py | 1 | 1820 |
from django.utils import simplejson
from allauth.socialaccount.providers.oauth.client import OAuth
from allauth.socialaccount.providers.oauth.views import (OAuthAdapter,
OAuthLoginView,
OAuthCallbackView)
from allauth.socialaccount.models import SocialLogin, SocialAccount
from allauth.utils import get_user_model
from provider import TwitterProvider
User = get_user_model()
class TwitterAPI(OAuth):
"""
Verifying twitter credentials
"""
url = 'https://api.twitter.com/1.1/account/verify_credentials.json'
def get_user_info(self):
user = simplejson.loads(self.query(self.url))
return user
class TwitterOAuthAdapter(OAuthAdapter):
provider_id = TwitterProvider.id
request_token_url = 'https://api.twitter.com/oauth/request_token'
access_token_url = 'https://api.twitter.com/oauth/access_token'
# Issue #42 -- this one authenticates over and over again...
# authorize_url = 'https://api.twitter.com/oauth/authorize'
authorize_url = 'https://api.twitter.com/oauth/authenticate'
def complete_login(self, request, app, token):
client = TwitterAPI(request, app.key, app.secret,
self.request_token_url)
extra_data = client.get_user_info()
uid = extra_data['id']
user = User(username=extra_data['screen_name'])
account = SocialAccount(user=user,
uid=uid,
provider=TwitterProvider.id,
extra_data=extra_data)
return SocialLogin(account)
oauth_login = OAuthLoginView.adapter_view(TwitterOAuthAdapter)
oauth_callback = OAuthCallbackView.adapter_view(TwitterOAuthAdapter)
| mit | 985,280,656,928,123,300 | 36.916667 | 75 | 0.636813 | false |
skibaa/smart-sweeper | game/dbext.py | 1 | 2811 |
import logging
from google.appengine.ext import db
from google.appengine.api import datastore_errors
import cPickle
logger=logging.getLogger("smartSweeper.dbext")
class PickledProperty(db.Property):
data_type = db.Blob
def __init__(self, force_type=None, *args, **kw):
self.force_type=force_type
super(PickledProperty, self).__init__(*args, **kw)
def validate(self, value):
value = super(PickledProperty, self).validate(value)
if value is not None and self.force_type and \
not isinstance(value, self.force_type):
raise datastore_errors.BadValueError(
'Property %s must be of type "%s".' % (self.name,
self.force_type))
return value
def get_value_for_datastore(self, model_instance):
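        # Pickle the attribute value and hand it to the datastore as a Text value.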
value = self.__get__(model_instance, model_instance.__class__)
if value is not None:
return db.Text(cPickle.dumps(value))
def make_value_from_datastore(self, value):
if value is not None:
return cPickle.loads(str(value))
class CachedReferenceProperty(db.ReferenceProperty):
def __property_config__(self, model_class, property_name):
super(CachedReferenceProperty, self).__property_config__(model_class,
property_name)
#Just carelessly override what super made
setattr(self.reference_class,
self.collection_name,
_CachedReverseReferenceProperty(model_class, property_name,
self.collection_name))
class _CachedReverseReferenceProperty(db._ReverseReferenceProperty):
def __init__(self, model, prop, collection_name):
super(_CachedReverseReferenceProperty, self).__init__(model, prop)
self.__prop=prop
self.__collection_name = collection_name
def __get__(self, model_instance, model_class):
if model_instance is None:
return self
logger.debug("cached reverse trying")
if self.__collection_name in model_instance.__dict__:# why does it get here at all?
return model_instance.__dict__[self.__collection_name]
logger.info("cached reverse miss %s",self.__collection_name)
query=super(_CachedReverseReferenceProperty, self).__get__(model_instance,
model_class)
#replace the attribute on the instance
res=[]
for c in query:
resolved_name='_RESOLVED_'+self.__prop #WARNING: using internal
setattr(c, resolved_name, model_instance)
res += [c]
model_instance.__dict__[self.__collection_name]=res
return res
def __delete__ (self, model_instance):
if model_instance is not None:
del model_instance.__dict__[self.__collection_name]
| apache-2.0 | -9,060,400,630,525,390,000 | 37.506849 | 91 | 0.626467 | false |
allynt/tings | T/tings/views/api/views_api_users.py | 1 | 1138 |
from rest_framework import generics, permissions
from django.contrib.auth.models import User
# from T.tings.models.models_users import TUserProfile
from T.tings.serializers.serializers_users import TUserSerializer
class TUserPermission(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# anybody can submit GET, HEAD or OPTIONS requests...
if request.method in permissions.SAFE_METHODS:
return True
# only the admin or collection owners can submit PUT, POST, or DELETE requests...
user = request.user
return user.is_superuser or user == obj
class TUserList(generics.ListCreateAPIView):
queryset = User.objects.all()
serializer_class = TUserSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, TUserPermission,)
class TUserDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = User.objects.all()
serializer_class = TUserSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, TUserPermission,)
| mit | 8,404,409,644,057,363,000 | 33.484848 | 89 | 0.742531 | false |
akx/gentry | gore/api/handlers/store.py | 1 | 1862 |
import base64
import json
import logging
import zlib
from datetime import datetime
from django.conf import settings
from django.db import transaction
from django.http import JsonResponse
from django.utils.encoding import force_str
from django.utils.timezone import make_aware
from pytz import UTC
from gore.auth import validate_auth_header
from gore.excs import InvalidAuth
from gore.models import Event
from gore.signals import event_received
from gore.utils.event_grouper import group_event
logger = logging.getLogger(__name__)
def store_event(request, project):
try:
auth_header = validate_auth_header(request, project)
except InvalidAuth as ia:
return JsonResponse({'error': str(ia)}, status=401)
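    # The payload may arrive deflate-compressed, or base64+zlib encoded by older (v5) Raven clients.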
body = request.body
if request.META.get('HTTP_CONTENT_ENCODING') == 'deflate':
body = zlib.decompress(body)
elif auth_header.get('sentry_version') == '5': # Support older versions of Raven
body = zlib.decompress(base64.b64decode(body)).decode('utf8')
body = json.loads(force_str(body))
timestamp = make_aware(datetime.fromtimestamp(float(auth_header['sentry_timestamp'])), timezone=UTC)
with transaction.atomic():
event = Event.objects.create_from_raven(project_id=project, body=body, timestamp=timestamp)
try:
with transaction.atomic():
group = group_event(event.project, event)
group.archived = False
group.cache_values()
group.save()
except: # pragma: no cover
logger.warning('event with ID %s could not be grouped' % event.id, exc_info=True)
try:
event_received.send(sender=event)
except: # pragma: no cover
logger.warning('event_received signal handling failed', exc_info=True)
if settings.DEBUG:
raise
return JsonResponse({'id': event.id}, status=201)
| mit | 4,651,106,081,188,272,000 | 33.481481 | 104 | 0.6971 | false |
mazz/kifu | tests/tests.py | 1 | 1568 |
import unittest
import transaction
from pyramid import testing
from default.models.mymodel import DBSession
class TestMyViewSuccessCondition(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
from sqlalchemy import create_engine
engine = create_engine('sqlite://')
from default.models.mymodel import (
Base,
MyModel,
)
DBSession.configure(bind=engine)
Base.metadata.create_all(engine)
with transaction.manager:
model = MyModel(name='one', value=55)
DBSession.add(model)
def tearDown(self):
DBSession.remove()
testing.tearDown()
def test_passing_view(self):
from default.views.home import my_view
request = testing.DummyRequest()
info = my_view(request)
self.assertEqual(info['one'].name, 'one')
self.assertEqual(info['project'], 'default')
class TestMyViewFailureCondition(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
from sqlalchemy import create_engine
engine = create_engine('sqlite://')
from default.models.mymodel import (
Base,
MyModel,
)
DBSession.configure(bind=engine)
def tearDown(self):
DBSession.remove()
testing.tearDown()
def test_failing_view(self):
from default.views.home import my_view
request = testing.DummyRequest()
info = my_view(request)
self.assertEqual(info.status_int, 500)
| mit | -580,382,816,191,372,500 | 27.509091 | 52 | 0.621811 | false |
django-stars/dash2011 | presence/apps/shout/views.py | 1 | 2081 |
import logging
import json
from django.shortcuts import render_to_response
from django.http import Http404
from django.template import RequestContext
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import HttpResponseForbidden
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from shout.models import Shout
from shout.forms import ShoutForm
logger = logging.getLogger("presence.%s" % __name__)
@login_required
def shout_new(request):
if request.method == "POST":
form = ShoutForm(request.POST)
if form.is_valid():
shout = form.save(user=request.user)
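            # Tuple indexing with the boolean flag: False -> 'public', True -> 'private'.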
logger.info('New %s shout from "%s"' % (('public', 'private')[shout.is_private], shout.user.username))
if request.is_ajax():
return HttpResponse(json.dumps({'response': 'OK'}), mimetype='application/json')
return HttpResponseRedirect(reverse('shout-list'))
else:
if request.is_ajax():
return HttpResponse(json.dumps({'response': 'ERR', 'reason': 'Shout text is required!'}), mimetype='application/json')
else:
form = ShoutForm()
data = {
'form': form,
}
return render_to_response('shout/new.html', data, RequestContext(request))
@login_required
def shout_list(request):
    # custom manager to get non-private shouts, or private ones that belong to the current user
shouts = Shout.objects.filter_for_user(user=request.user)
data = {
'shouts': shouts,
}
return render_to_response('shout/list.html', data, RequestContext(request))
@login_required
def shout_detail(request, shout_id):
try:
shout = Shout.objects.get_for_user(user=request.user, id=shout_id)
except Shout.DoesNotExist:
raise Http404
data = {
'shout': shout,
}
return render_to_response('shout/detail.html', data, RequestContext(request))
| bsd-3-clause | 7,593,957,745,665,859,000 | 31.515625 | 134 | 0.682845 | false |
dprince/tripleo-heat-templates | extraconfig/post_deploy/clouds_yaml.py | 1 | 2821 |
#!/usr/bin/env python
import os
import yaml
AUTH_URL = os.environ['auth_url']
ADMIN_PASSWORD = os.environ['admin_password']
CLOUD_NAME = os.environ['cloud_name']
HOME_DIR = os.environ['home_dir']
IDENTITY_API_VERSION = os.environ['identity_api_version']
PROJECT_NAME = os.environ['project_name']
PROJECT_DOMAIN_NAME = os.environ['project_domain_name']
REGION_NAME = os.environ['region_name']
USER_NAME = os.environ['user_name']
USER_DOMAIN_NAME = os.environ['user_domain_name']
CONFIG_DIR = os.path.join(HOME_DIR, '.config')
OS_DIR = os.path.join(CONFIG_DIR, 'openstack')
USER_CLOUDS_YAML = os.path.join(OS_DIR, 'clouds.yaml')
GLOBAL_OS_DIR = os.path.join('/etc', 'openstack')
GLOBAL_CLOUDS_YAML = os.path.join(GLOBAL_OS_DIR, 'clouds.yaml')
CLOUD = {CLOUD_NAME: {'auth': {'auth_url': AUTH_URL,
'project_name': PROJECT_NAME,
'project_domain_name': PROJECT_DOMAIN_NAME,
'username': USER_NAME,
'user_domain_name': USER_DOMAIN_NAME,
'password': ADMIN_PASSWORD},
'region_name': REGION_NAME,
'identity_api_version': IDENTITY_API_VERSION}
}
def _create_clouds_yaml(clouds_yaml):
with open(clouds_yaml, 'w') as f:
yaml.dump({'clouds': {}}, f, default_flow_style=False)
os.chmod(clouds_yaml, 0o600)
def _read_clouds_yaml(clouds_yaml):
with open(clouds_yaml, 'r') as f:
clouds = yaml.safe_load(f)
if 'clouds' not in clouds:
clouds.update({'clouds': {}})
return clouds
def _write_clouds_yaml(clouds_yaml, clouds):
with open(clouds_yaml, 'w') as f:
yaml.dump(clouds, f, default_flow_style=False)
try:
# Get the uid and gid for the homedir
user_id = os.stat(HOME_DIR).st_uid
group_id = os.stat(HOME_DIR).st_gid
if not os.path.isdir(CONFIG_DIR):
os.makedirs(CONFIG_DIR)
os.chown(CONFIG_DIR, user_id, group_id)
if not os.path.isdir(OS_DIR):
os.makedirs(OS_DIR)
os.chown(OS_DIR, user_id, group_id)
if not os.path.isdir(GLOBAL_OS_DIR):
os.makedirs(GLOBAL_OS_DIR)
if not os.path.isfile(USER_CLOUDS_YAML):
_create_clouds_yaml(USER_CLOUDS_YAML)
if not os.path.isfile(GLOBAL_CLOUDS_YAML):
_create_clouds_yaml(GLOBAL_CLOUDS_YAML)
user_clouds = _read_clouds_yaml(USER_CLOUDS_YAML)
global_clouds = _read_clouds_yaml(GLOBAL_CLOUDS_YAML)
user_clouds['clouds'].update(CLOUD)
global_clouds['clouds'].update(CLOUD)
_write_clouds_yaml(USER_CLOUDS_YAML, user_clouds)
_write_clouds_yaml(GLOBAL_CLOUDS_YAML, global_clouds)
os.chown(USER_CLOUDS_YAML, user_id, group_id)
except Exception:
print('ERROR: Create clouds.yaml failed.')
raise
| apache-2.0 | -4,069,245,254,461,047,000 | 31.056818 | 74 | 0.621765 | false |
Tocknicsu/nctuoj | backend/test/api/bulletin/admin_cross.py | 1 | 2124 |
#!/usr/bin/env python3
import sys
import requests
import json
import unittest
import datetime
from util import TestCase
import config
import common
class TestApiBulletinAdminCross(TestCase):
url = '%s/api/groups/1/bulletins/'%(config.base_url)
cross_url = None
token = common.get_user_info({'account': config.user_admin_account, 'passwd': config.user_admin_password})['token']
title = "Title test @ " + str(datetime.datetime.now())
content = "Content test @ " + str(datetime.datetime.now())
def get_cross_url(self):
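        # Fetch a bulletin id that belongs to group 2, then build its URL under group 1 to exercise the cross-group check.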
if self.cross_url is None:
url = '%s/api/groups/2/bulletins/'%(config.base_url)
data = {
"token": self.token,
}
res = requests.get(url, data=data)
res.connection.close()
self.cross_url='%s/api/groups/1/bulletins/%s/'%(config.base_url, json.loads(res.text)['msg'][0]['id'])
return self.cross_url
def test_get(self):
data = {
"token": self.token,
}
res = requests.get(self.get_cross_url(), data=data)
res.connection.close()
expect_result = {
"status_code": 404,
"body": {
"msg": "Error bulletin id",
}
}
self.assertEqualR(res, expect_result)
def test_put(self):
data = {
"token": self.token,
"title": self.title,
"content": self.content,
}
res = requests.put(self.get_cross_url(), data=data)
res.connection.close()
expect_result = {
"status_code": 404,
"body": {
"msg": "Error bulletin id",
}
}
self.assertEqualR(res, expect_result)
def test_delete(self):
data = {
"token": self.token,
}
        res = requests.delete(self.get_cross_url(), data=data)
res.connection.close()
expect_result = {
"status_code": 404,
"body": {
"msg": "Error bulletin id",
}
}
self.assertEqualR(res, expect_result)
| mit | -9,101,086,923,572,818,000 | 28.915493 | 119 | 0.527778 | false |
JaviMerino/lisa | libs/utils/analysis/frequency_analysis.py | 1 | 24894 |
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Frequency Analysis Module """
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
import operator
from trappy.utils import listify
from devlib.utils.misc import memoized
from collections import namedtuple
from analysis_module import AnalysisModule
# Configure logging
import logging
NON_IDLE_STATE = 4294967295
ResidencyTime = namedtuple('ResidencyTime', ['total', 'active'])
ResidencyData = namedtuple('ResidencyData', ['label', 'residency'])
class FrequencyAnalysis(AnalysisModule):
"""
Support for plotting Frequency Analysis data
:param trace: input Trace object
:type trace: :mod:`libs.utils.Trace`
"""
def __init__(self, trace):
super(FrequencyAnalysis, self).__init__(trace)
###############################################################################
# DataFrame Getter Methods
###############################################################################
def _dfg_cpu_frequency_residency(self, cpu, total=True):
"""
Get per-CPU frequency residency, i.e. amount of
time CPU `cpu` spent at each frequency.
:param cpu: CPU ID
:type cpu: int
:param total: if true returns the "total" time, otherwise the "active"
time is returned
:type total: bool
:returns: :mod:`pandas.DataFrame` - "total" or "active" time residency
at each frequency.
"""
residency = self._getCPUFrequencyResidency(cpu)
if not residency:
return None
if total:
return residency.total
return residency.active
def _dfg_cluster_frequency_residency(self, cluster, total=True):
"""
Get per-Cluster frequency residency, i.e. amount of time CLUSTER
`cluster` spent at each frequency.
:param cluster: this can be either a single CPU ID or a list of CPU IDs
belonging to a cluster or the cluster name as specified in the
platform description
:type cluster: str or int or list(int)
:param total: if true returns the "total" time, otherwise the "active"
time is returned
:type total: bool
:returns: :mod:`pandas.DataFrame` - "total" or "active" time residency
at each frequency.
"""
residency = self._getClusterFrequencyResidency(cluster)
if not residency:
return None
if total:
return residency.total
return residency.active
###############################################################################
# Plotting Methods
###############################################################################
def plotClusterFrequencies(self, title='Clusters Frequencies'):
"""
Plot frequency trend for all clusters. If sched_overutilized events are
available, the plots will also show the intervals of time where the
cluster was overutilized.
:param title: user-defined plot title
:type title: str
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
df = self._dfg_trace_event('cpu_frequency')
pd.options.mode.chained_assignment = None
# Extract LITTLE and big clusters frequencies
# and scale them to [MHz]
if len(self._platform['clusters']['little']):
lfreq = df[df.cpu == self._platform['clusters']['little'][-1]]
lfreq['frequency'] = lfreq['frequency']/1e3
else:
lfreq = []
if len(self._platform['clusters']['big']):
bfreq = df[df.cpu == self._platform['clusters']['big'][-1]]
bfreq['frequency'] = bfreq['frequency']/1e3
else:
bfreq = []
# Compute AVG frequency for LITTLE cluster
avg_lfreq = 0
if len(lfreq) > 0:
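            # Time-weighted average: weight each frequency sample by the interval until the next change.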
lfreq['timestamp'] = lfreq.index
lfreq['delta'] = (lfreq['timestamp'] -lfreq['timestamp'].shift()).fillna(0).shift(-1)
lfreq['cfreq'] = (lfreq['frequency'] * lfreq['delta']).fillna(0)
timespan = lfreq.iloc[-1].timestamp - lfreq.iloc[0].timestamp
avg_lfreq = lfreq['cfreq'].sum()/timespan
# Compute AVG frequency for big cluster
avg_bfreq = 0
if len(bfreq) > 0:
bfreq['timestamp'] = bfreq.index
bfreq['delta'] = (bfreq['timestamp'] - bfreq['timestamp'].shift()).fillna(0).shift(-1)
bfreq['cfreq'] = (bfreq['frequency'] * bfreq['delta']).fillna(0)
timespan = bfreq.iloc[-1].timestamp - bfreq.iloc[0].timestamp
avg_bfreq = bfreq['cfreq'].sum()/timespan
pd.options.mode.chained_assignment = 'warn'
# Setup a dual cluster plot
fig, pltaxes = plt.subplots(2, 1, figsize=(16, 8))
plt.suptitle(title, y=.97, fontsize=16, horizontalalignment='center')
# Plot Cluster frequencies
axes = pltaxes[0]
axes.set_title('big Cluster')
if avg_bfreq > 0:
axes.axhline(avg_bfreq, color='r', linestyle='--', linewidth=2)
axes.set_ylim(
(self._platform['freqs']['big'][0] - 100000)/1e3,
(self._platform['freqs']['big'][-1] + 100000)/1e3
)
if len(bfreq) > 0:
bfreq['frequency'].plot(style=['r-'], ax=axes,
drawstyle='steps-post', alpha=0.4)
else:
logging.warn('NO big CPUs frequency events to plot')
axes.set_xlim(self._trace.x_min, self._trace.x_max)
axes.set_ylabel('MHz')
axes.grid(True)
axes.set_xticklabels([])
axes.set_xlabel('')
self._trace.analysis.status.plotOverutilized(axes)
axes = pltaxes[1]
axes.set_title('LITTLE Cluster')
if avg_lfreq > 0:
axes.axhline(avg_lfreq, color='b', linestyle='--', linewidth=2)
axes.set_ylim(
(self._platform['freqs']['little'][0] - 100000)/1e3,
(self._platform['freqs']['little'][-1] + 100000)/1e3
)
if len(lfreq) > 0:
lfreq['frequency'].plot(style=['b-'], ax=axes,
drawstyle='steps-post', alpha=0.4)
else:
logging.warn('NO LITTLE CPUs frequency events to plot')
axes.set_xlim(self._trace.x_min, self._trace.x_max)
axes.set_ylabel('MHz')
axes.grid(True)
self._trace.analysis.status.plotOverutilized(axes)
# Save generated plots into datadir
figname = '{}/{}cluster_freqs.png'\
.format(self._trace.plots_dir, self._trace.plots_prefix)
pl.savefig(figname, bbox_inches='tight')
logging.info('LITTLE cluster average frequency: %.3f GHz',
avg_lfreq/1e3)
logging.info('big cluster average frequency: %.3f GHz',
avg_bfreq/1e3)
return (avg_lfreq/1e3, avg_bfreq/1e3)
def plotCPUFrequencyResidency(self, cpus=None, pct=False, active=False):
"""
Plot per-CPU frequency residency. big CPUs are plotted first and then
LITTLEs.
Requires the following trace events:
- cpu_frequency
- cpu_idle
:param cpus: List of cpus. By default plot all CPUs
:type cpus: list(str)
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, plot DISABLED!')
return
if cpus is None:
# Generate plots only for available CPUs
cpufreq_data = self._dfg_trace_event('cpu_frequency')
_cpus = range(cpufreq_data.cpu.max()+1)
else:
_cpus = listify(cpus)
# Split between big and LITTLE CPUs ordered from higher to lower ID
_cpus.reverse()
big_cpus = [c for c in _cpus if c in self._platform['clusters']['big']]
little_cpus = [c for c in _cpus if c in
self._platform['clusters']['little']]
_cpus = big_cpus + little_cpus
# Precompute active and total time for each CPU
residencies = []
xmax = 0.0
for cpu in _cpus:
res = self._getCPUFrequencyResidency(cpu)
residencies.append(ResidencyData('CPU{}'.format(cpu), res))
max_time = res.total.max().values[0]
if xmax < max_time:
xmax = max_time
self._plotFrequencyResidency(residencies, 'cpu', xmax, pct, active)
def plotClusterFrequencyResidency(self, clusters=None,
pct=False, active=False):
"""
Plot the frequency residency in a given cluster, i.e. the amount of
time cluster `cluster` spent at frequency `f_i`. By default, both 'big'
and 'LITTLE' clusters data are plotted.
Requires the following trace events:
- cpu_frequency
- cpu_idle
:param clusters: name of the clusters to be plotted (all of them by
default)
:type clusters: str ot list(str)
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, plot DISABLED!')
return
# Assumption: all CPUs in a cluster run at the same frequency, i.e. the
# frequency is scaled per-cluster not per-CPU. Hence, we can limit the
# cluster frequencies data to a single CPU
if not self._trace.freq_coherency:
logging.warn('Cluster frequency is not coherent, plot DISABLED!')
return
# Sanitize clusters
if clusters is None:
_clusters = self._platform['clusters'].keys()
else:
_clusters = listify(clusters)
# Precompute active and total time for each cluster
residencies = []
xmax = 0.0
for cluster in _clusters:
res = self._getClusterFrequencyResidency(
self._platform['clusters'][cluster.lower()])
residencies.append(ResidencyData('{} Cluster'.format(cluster),
res))
max_time = res.total.max().values[0]
if xmax < max_time:
xmax = max_time
self._plotFrequencyResidency(residencies, 'cluster', xmax, pct, active)
###############################################################################
# Utility Methods
###############################################################################
@memoized
def _getCPUActiveSignal(self, cpu):
"""
Build a square wave representing the active (i.e. non-idle) CPU time,
i.e.:
cpu_active[t] == 1 if at least one CPU is reported to be
non-idle by CPUFreq at time t
cpu_active[t] == 0 otherwise
:param cpu: CPU ID
:type cpu: int
"""
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, '
'cannot compute CPU active signal!')
return None
idle_df = self._dfg_trace_event('cpu_idle')
cpu_df = idle_df[idle_df.cpu_id == cpu]
cpu_active = cpu_df.state.apply(
lambda s: 1 if s == NON_IDLE_STATE else 0
)
start_time = 0.0
if not self._trace.ftrace.normalized_time:
start_time = self._trace.ftrace.basetime
if cpu_active.index[0] != start_time:
entry_0 = pd.Series(cpu_active.iloc[0] ^ 1, index=[start_time])
cpu_active = pd.concat([entry_0, cpu_active])
return cpu_active
@memoized
def _getClusterActiveSignal(self, cluster):
"""
Build a square wave representing the active (i.e. non-idle) cluster
time, i.e.:
cluster_active[t] == 1 if at least one CPU is reported to be
non-idle by CPUFreq at time t
cluster_active[t] == 0 otherwise
:param cluster: list of CPU IDs belonging to a cluster
:type cluster: list(int)
"""
cpu_active = {}
for cpu in cluster:
cpu_active[cpu] = self._getCPUActiveSignal(cpu)
active = pd.DataFrame(cpu_active)
active.fillna(method='ffill', inplace=True)
# Cluster active is the OR between the actives on each CPU
# belonging to that specific cluster
cluster_active = reduce(
operator.or_,
[cpu_active.astype(int) for _, cpu_active in
active.iteritems()]
)
return cluster_active
@memoized
def _getClusterFrequencyResidency(self, cluster):
"""
Get a DataFrame with per cluster frequency residency, i.e. amount of
time spent at a given frequency in each cluster.
:param cluster: this can be either a single CPU ID or a list of CPU IDs
belonging to a cluster or the cluster name as specified in the
platform description
:type cluster: str or int or list(int)
:returns: namedtuple(ResidencyTime) - tuple of total and active time
dataframes
:raises: KeyError
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, '
'frequency residency computation not possible!')
return None
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, '
'frequency residency computation not possible!')
return None
if isinstance(cluster, str):
try:
_cluster = self._platform['clusters'][cluster.lower()]
except KeyError:
logging.warn('%s cluster not found!', cluster)
return None
else:
_cluster = listify(cluster)
freq_df = self._dfg_trace_event('cpu_frequency')
# Assumption: all CPUs in a cluster run at the same frequency, i.e. the
# frequency is scaled per-cluster not per-CPU. Hence, we can limit the
# cluster frequencies data to a single CPU. This assumption is verified
# by the Trace module when parsing the trace.
if len(_cluster) > 1 and not self._trace.freq_coherency:
logging.warn('Cluster frequency is NOT coherent,'
'cannot compute residency!')
return None
cluster_freqs = freq_df[freq_df.cpu == _cluster[0]]
# Compute TOTAL Time
time_intervals = cluster_freqs.index[1:] - cluster_freqs.index[:-1]
total_time = pd.DataFrame({
'time': time_intervals,
'frequency': [f/1000.0 for f in cluster_freqs.iloc[:-1].frequency]
})
total_time = total_time.groupby(['frequency']).sum()
# Compute ACTIVE Time
cluster_active = self._getClusterActiveSignal(_cluster)
# In order to compute the active time spent at each frequency we
# multiply 2 square waves:
# - cluster_active, a square wave of the form:
# cluster_active[t] == 1 if at least one CPU is reported to be
# non-idle by CPUFreq at time t
# cluster_active[t] == 0 otherwise
# - freq_active, square wave of the form:
# freq_active[t] == 1 if at time t the frequency is f
# freq_active[t] == 0 otherwise
available_freqs = sorted(cluster_freqs.frequency.unique())
new_idx = sorted(cluster_freqs.index.tolist() +
cluster_active.index.tolist())
cluster_freqs = cluster_freqs.reindex(new_idx, method='ffill')
cluster_active = cluster_active.reindex(new_idx, method='ffill')
nonidle_time = []
for f in available_freqs:
freq_active = cluster_freqs.frequency.apply(
lambda x: 1 if x == f else 0
)
active_t = cluster_active * freq_active
# Compute total time by integrating the square wave
nonidle_time.append(self._trace.integrate_square_wave(active_t))
active_time = pd.DataFrame({'time': nonidle_time},
index=[f/1000.0 for f in available_freqs])
active_time.index.name = 'frequency'
return ResidencyTime(total_time, active_time)
def _getCPUFrequencyResidency(self, cpu):
"""
Get a DataFrame with per-CPU frequency residency, i.e. amount of
time CPU `cpu` spent at each frequency. Both total and active times
will be computed.
:param cpu: CPU ID
:type cpu: int
:returns: namedtuple(ResidencyTime) - tuple of total and active time
dataframes
"""
return self._getClusterFrequencyResidency(cpu)
def _plotFrequencyResidencyAbs(self, axes, residency, n_plots,
is_first, is_last, xmax, title=''):
"""
Private method to generate frequency residency plots.
:param axes: axes over which to generate the plot
:type axes: matplotlib.axes.Axes
:param residency: tuple of total and active time dataframes
:type residency: namedtuple(ResidencyTime)
:param n_plots: total number of plots
:type n_plots: int
:param is_first: if True this is the first plot
:type is_first: bool
:param is_last: if True this is the last plot
:type is_last: bool
:param xmax: x-axes higher bound
:param xmax: double
:param title: title of this subplot
:type title: str
"""
yrange = 0.4 * max(6, len(residency.total)) * n_plots
residency.total.plot.barh(ax=axes, color='g',
legend=False, figsize=(16, yrange))
residency.active.plot.barh(ax=axes, color='r',
legend=False, figsize=(16, yrange))
axes.set_xlim(0, 1.05*xmax)
axes.set_ylabel('Frequency [MHz]')
axes.set_title(title)
axes.grid(True)
if is_last:
axes.set_xlabel('Time [s]')
else:
axes.set_xticklabels([])
if is_first:
# Put title on top of the figure. As of now there is no clean way
# to make the title appear always in the same position in the
# figure because figure heights may vary between different
# platforms (different number of OPPs). Hence, we use annotation
legend_y = axes.get_ylim()[1]
axes.annotate('OPP Residency Time', xy=(0, legend_y),
xytext=(-50, 45), textcoords='offset points',
fontsize=18)
axes.annotate('GREEN: Total', xy=(0, legend_y),
xytext=(-50, 25), textcoords='offset points',
color='g', fontsize=14)
axes.annotate('RED: Active', xy=(0, legend_y),
xytext=(50, 25), textcoords='offset points',
color='r', fontsize=14)
def _plotFrequencyResidencyPct(self, axes, residency_df, label,
n_plots, is_first, is_last, res_type):
"""
Private method to generate PERCENTAGE frequency residency plots.
:param axes: axes over which to generate the plot
:type axes: matplotlib.axes.Axes
:param residency_df: residency time dataframe
:type residency_df: :mod:`pandas.DataFrame`
:param label: label to be used for percentage residency dataframe
:type label: str
:param n_plots: total number of plots
:type n_plots: int
:param is_first: if True this is the first plot
:type is_first: bool
:param is_first: if True this is the last plot
:type is_first: bool
:param res_type: type of residency, either TOTAL or ACTIVE
:type title: str
"""
# Compute sum of the time intervals
duration = residency_df.time.sum()
residency_pct = pd.DataFrame(
{label: residency_df.time.apply(lambda x: x*100/duration)},
index=residency_df.index
)
yrange = 3 * n_plots
residency_pct.T.plot.barh(ax=axes, stacked=True, figsize=(16, yrange))
axes.legend(loc='lower center', ncol=7)
axes.set_xlim(0, 100)
axes.grid(True)
if is_last:
axes.set_xlabel('Residency [%]')
else:
axes.set_xticklabels([])
if is_first:
legend_y = axes.get_ylim()[1]
axes.annotate('OPP {} Residency Time'.format(res_type),
xy=(0, legend_y), xytext=(-50, 35),
textcoords='offset points', fontsize=18)
def _plotFrequencyResidency(self, residencies, entity_name, xmax,
pct, active):
"""
Generate Frequency residency plots for the given entities.
:param residencies:
:type residencies: namedtuple(ResidencyData) - tuple containing:
1) as first element, a label to be used as subplot title
2) as second element, a namedtuple(ResidencyTime)
:param entity_name: name of the entity ('cpu' or 'cluster') used in the
figure name
:type entity_name: str
:param xmax: upper bound of x-axes
:type xmax: double
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
n_plots = len(residencies)
gs = gridspec.GridSpec(n_plots, 1)
fig = plt.figure()
figtype = ""
for idx, data in enumerate(residencies):
if data.residency is None:
plt.close(fig)
return
axes = fig.add_subplot(gs[idx])
is_first = idx == 0
is_last = idx+1 == n_plots
if pct and active:
self._plotFrequencyResidencyPct(axes, data.residency.active,
data.label, n_plots,
is_first, is_last,
'ACTIVE')
figtype = "_pct_active"
continue
if pct:
self._plotFrequencyResidencyPct(axes, data.residency.total,
data.label, n_plots,
is_first, is_last,
'TOTAL')
figtype = "_pct_total"
continue
self._plotFrequencyResidencyAbs(axes, data.residency,
n_plots, is_first,
is_last, xmax,
title=data.label)
figname = '{}/{}{}_freq_residency{}.png'\
.format(self._trace.plots_dir,
self._trace.plots_prefix,
entity_name, figtype)
pl.savefig(figname, bbox_inches='tight')
# vim :set tabstop=4 shiftwidth=4 expandtab
| apache-2.0 | -7,015,758,231,730,057,000 | 37.180982 | 98 | 0.555676 | false |
cburmeister/flask-bones | app/commands.py | 1 | 1163 |
from faker import Faker
import click
from app.database import db
from app.user.models import User
@click.option('--num_users', default=5, help='Number of users.')
def populate_db(num_users):
"""Populates the database with seed data."""
fake = Faker()
users = []
for _ in range(num_users):
users.append(
User(
username=fake.user_name(),
email=fake.email(),
password=fake.word() + fake.word(),
remote_addr=fake.ipv4()
)
)
users.append(
User(
username='cburmeister',
email='[email protected]',
password='test123',
remote_addr=fake.ipv4(),
active=True,
is_admin=True
)
)
for user in users:
db.session.add(user)
db.session.commit()
def create_db():
"""Creates the database."""
db.create_all()
def drop_db():
"""Drops the database."""
if click.confirm('Are you sure?', abort=True):
db.drop_all()
def recreate_db():
"""Same as running drop_db() and create_db()."""
drop_db()
create_db()
| mit | -36,645,924,411,596,520 | 21.803922 | 64 | 0.536543 | false |
rbuffat/pyidf | tests/test_shadingsite.py | 1 | 2163 |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.thermal_zones_and_surfaces import ShadingSite
log = logging.getLogger(__name__)
class TestShadingSite(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_shadingsite(self):
pyidf.validation_level = ValidationLevel.error
obj = ShadingSite()
# alpha
var_name = "Name"
obj.name = var_name
# real
var_azimuth_angle = 180.0
obj.azimuth_angle = var_azimuth_angle
# real
var_tilt_angle = 90.0
obj.tilt_angle = var_tilt_angle
# real
var_starting_x_coordinate = 4.4
obj.starting_x_coordinate = var_starting_x_coordinate
# real
var_starting_y_coordinate = 5.5
obj.starting_y_coordinate = var_starting_y_coordinate
# real
var_starting_z_coordinate = 6.6
obj.starting_z_coordinate = var_starting_z_coordinate
# real
var_length = 7.7
obj.length = var_length
# real
var_height = 8.8
obj.height = var_height
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.shadingsites[0].name, var_name)
self.assertAlmostEqual(idf2.shadingsites[0].azimuth_angle, var_azimuth_angle)
self.assertAlmostEqual(idf2.shadingsites[0].tilt_angle, var_tilt_angle)
self.assertAlmostEqual(idf2.shadingsites[0].starting_x_coordinate, var_starting_x_coordinate)
self.assertAlmostEqual(idf2.shadingsites[0].starting_y_coordinate, var_starting_y_coordinate)
self.assertAlmostEqual(idf2.shadingsites[0].starting_z_coordinate, var_starting_z_coordinate)
self.assertAlmostEqual(idf2.shadingsites[0].length, var_length)
        self.assertAlmostEqual(idf2.shadingsites[0].height, var_height)
| apache-2.0 | -7,739,266,213,189,500,000 | 31.787879 | 101 | 0.645862 | false |
nlgcoin/guldencoin-official | test/functional/interface_rest.py | 2 | 14644 |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the REST API."""
import binascii
from decimal import Decimal
from enum import Enum
from io import BytesIO
import json
from struct import pack, unpack
import http.client
import urllib.parse
from test_framework.test_framework import GuldenTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
hex_str_to_bytes,
)
from test_framework.messages import BLOCK_HEADER_SIZE
class ReqType(Enum):
JSON = 1
BIN = 2
HEX = 3
class RetType(Enum):
OBJ = 1
BYTES = 2
JSON = 3
def filter_output_indices_by_value(vouts, value):
for vout in vouts:
if vout['value'] == value:
yield vout['n']
class RESTTest (GuldenTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [["-rest"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_rest_request(self, uri, http_method='GET', req_type=ReqType.JSON, body='', status=200, ret_type=RetType.JSON):
rest_uri = '/rest' + uri
if req_type == ReqType.JSON:
rest_uri += '.json'
elif req_type == ReqType.BIN:
rest_uri += '.bin'
elif req_type == ReqType.HEX:
rest_uri += '.hex'
conn = http.client.HTTPConnection(self.url.hostname, self.url.port)
self.log.debug('%s %s %s', http_method, rest_uri, body)
if http_method == 'GET':
conn.request('GET', rest_uri)
elif http_method == 'POST':
conn.request('POST', rest_uri, body)
resp = conn.getresponse()
assert_equal(resp.status, status)
if ret_type == RetType.OBJ:
return resp
elif ret_type == RetType.BYTES:
return resp.read()
elif ret_type == RetType.JSON:
return json.loads(resp.read().decode('utf-8'), parse_float=Decimal)
def run_test(self):
self.url = urllib.parse.urlparse(self.nodes[0].url)
self.log.info("Mine blocks and send Gulden to node 1")
# Random address so node1's balance doesn't increase
not_related_address = "2MxqoHEdNQTyYeX1mHcbrrpzgojbosTpCvJ"
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generatetoaddress(100, not_related_address)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.log.info("Test the /tx URI")
json_obj = self.test_rest_request("/tx/{}".format(txid))
assert_equal(json_obj['txid'], txid)
# Check hex format response
hex_response = self.test_rest_request("/tx/{}".format(txid), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than_or_equal(int(hex_response.getheader('content-length')),
json_obj['size']*2)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout']) # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
self.log.info("Query an unspent TXO using the /getutxos URI")
self.nodes[1].generatetoaddress(1, not_related_address)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1"))
# Check chainTip response
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(json_obj['chaintipHash'], bb_hash)
# Make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], Decimal('0.1'))
self.log.info("Query a spent TXO using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
# Check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
# Make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
# Check bitmap
assert_equal(json_obj['bitmap'], "0")
self.log.info("Query two TXOs using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}/{}-{}".format(*(spending + spent)))
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
self.log.info("Query the TXOs using the /getutxos URI with a binary response")
bin_request = b'\x01\x02'
for txid, n in [spending, spent]:
bin_request += hex_str_to_bytes(txid)
bin_request += pack("i", n)
bin_response = self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body=bin_request, ret_type=RetType.BYTES)
output = BytesIO(bin_response)
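        # Binary reply layout: a 4-byte chain height, then the 32-byte chain tip hash in reversed byte order.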
chain_height, = unpack("i", output.read(4))
response_hash = output.read(32)[::-1].hex()
assert_equal(bb_hash, response_hash) # check if getutxo's chaintip during calculation was fine
assert_equal(chain_height, 102) # chain height must be 102
self.log.info("Test the /getutxos URI with and without /checkmempool")
# Create a transaction, check that it's found with /checkmempool, but
# not found without. Then confirm the transaction and check that it's
# found with or without /checkmempool.
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_obj = self.test_rest_request("/tx/{}".format(txid))
# get the spent output to later check for utxo (should be spent by then)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
# get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 0)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 0)
self.nodes[0].generate(1)
self.sync_all()
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
# Do some invalid requests
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.JSON, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos/checkmempool", http_method='POST', req_type=ReqType.JSON, status=400, ret_type=RetType.OBJ)
# Test limits
long_uri = '/'.join(["{}-{}".format(txid, n_) for n_ in range(20)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=400, ret_type=RetType.OBJ)
long_uri = '/'.join(['{}-{}'.format(txid, n_) for n_ in range(15)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=200)
self.nodes[0].generate(1) # generate block to not affect upcoming tests
self.sync_all()
self.log.info("Test the /block, /blockhashbyheight and /headers URIs")
bb_hash = self.nodes[0].getbestblockhash()
# Check result if block does not exists
assert_equal(self.test_rest_request('/headers/1/0000000000000000000000000000000000000000000000000000000000000000'), [])
self.test_rest_request('/block/0000000000000000000000000000000000000000000000000000000000000000', status=404, ret_type=RetType.OBJ)
# Check result if block is not in the active chain
self.nodes[0].invalidateblock(bb_hash)
assert_equal(self.test_rest_request('/headers/1/{}'.format(bb_hash)), [])
self.test_rest_request('/block/{}'.format(bb_hash))
self.nodes[0].reconsiderblock(bb_hash)
# Check binary format
response = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_greater_than(int(response.getheader('content-length')), BLOCK_HEADER_SIZE)
response_bytes = response.read()
# Compare with block header
response_header = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_equal(int(response_header.getheader('content-length')), BLOCK_HEADER_SIZE)
response_header_bytes = response_header.read()
assert_equal(response_bytes[:BLOCK_HEADER_SIZE], response_header_bytes)
# Check block hex format
response_hex = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_hex_bytes = response_hex.read().strip(b'\n')
assert_equal(binascii.hexlify(response_bytes), response_hex_bytes)
# Compare with hex block header
response_header_hex = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_header_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_header_hex_bytes = response_header_hex.read(BLOCK_HEADER_SIZE*2)
assert_equal(binascii.hexlify(response_bytes[:BLOCK_HEADER_SIZE]), response_header_hex_bytes)
# Check json format
block_json_obj = self.test_rest_request("/block/{}".format(bb_hash))
assert_equal(block_json_obj['hash'], bb_hash)
assert_equal(self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']))['blockhash'], bb_hash)
# Check hex/bin format
resp_hex = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_equal(resp_hex.read().decode('utf-8').rstrip(), bb_hash)
resp_bytes = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.BIN, ret_type=RetType.BYTES)
blockhash = resp_bytes[::-1].hex()
assert_equal(blockhash, bb_hash)
# Check invalid blockhashbyheight requests
resp = self.test_rest_request("/blockhashbyheight/abc", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: abc")
resp = self.test_rest_request("/blockhashbyheight/1000000", ret_type=RetType.OBJ, status=404)
assert_equal(resp.read().decode('utf-8').rstrip(), "Block height out of range")
resp = self.test_rest_request("/blockhashbyheight/-1", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: -1")
self.test_rest_request("/blockhashbyheight/", ret_type=RetType.OBJ, status=400)
# Compare with json block header
json_obj = self.test_rest_request("/headers/1/{}".format(bb_hash))
assert_equal(len(json_obj), 1) # ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) # request/response hash should be the same
# Compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
for key in ['hash', 'confirmations', 'height', 'version', 'merkleroot', 'time', 'nonce', 'bits', 'difficulty', 'chainwork', 'previousblockhash']:
assert_equal(json_obj[0][key], rpc_block_json[key])
# See if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
json_obj = self.test_rest_request("/headers/5/{}".format(bb_hash))
assert_equal(len(json_obj), 5) # now we should have 5 header objects
self.log.info("Test tx inclusion in the /mempool and /block URIs")
# Make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
self.sync_all()
# Check that there are exactly 3 transactions in the TX memory pool before generating the block
json_obj = self.test_rest_request("/mempool/info")
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# Check that there are our submitted transactions in the TX memory pool
json_obj = self.test_rest_request("/mempool/contents")
for i, tx in enumerate(txs):
assert tx in json_obj
assert_equal(json_obj[tx]['spentby'], txs[i + 1:i + 2])
assert_equal(json_obj[tx]['depends'], txs[i - 1:i])
# Now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
# Check if the 3 tx show up in the new block
json_obj = self.test_rest_request("/block/{}".format(newblockhash[0]))
non_coinbase_txs = {tx['txid'] for tx in json_obj['tx']
if 'coinbase' not in tx['vin'][0]}
assert_equal(non_coinbase_txs, set(txs))
# Check the same but without tx details
json_obj = self.test_rest_request("/block/notxdetails/{}".format(newblockhash[0]))
for tx in txs:
assert tx in json_obj['tx']
self.log.info("Test the /chaininfo URI")
bb_hash = self.nodes[0].getbestblockhash()
json_obj = self.test_rest_request("/chaininfo")
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
RESTTest().main()
| mit | -9,178,108,057,652,545,000 | 43.646341 | 153 | 0.631931 | false |
mitmedialab/MediaCloud-Web-Tools | server/views/topics/topiccreate.py | 1 | 1138 |
import logging
from flask import jsonify, request
import flask_login
import mediacloud.error
from server import app, mc
from server.auth import user_mediacloud_client
from server.util.request import form_fields_required, api_error_handler, json_error_response, arguments_required
from server.views.topics.topic import topic_summary
logger = logging.getLogger(__name__)
VERSION_1 = 1
COLLECTION_US_TOP_ONLINE = 58722749
@app.route('/api/topics/create', methods=['PUT'])
@flask_login.login_required
@form_fields_required('name', 'description', 'solr_seed_query', 'start_date', 'end_date')
@api_error_handler
def topic_create():
user_mc = user_mediacloud_client()
name = request.form['name']
description = request.form['description']
solr_seed_query = request.form['solr_seed_query']
start_date = request.form['start_date']
end_date = request.form['end_date']
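    # The form may send the literal string 'null' for unset fields; fall back to None / the user's story limit.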
optional_args = {
'max_iterations': request.form['max_iterations'] if 'max_iterations' in request.form and request.form['max_iterations'] != 'null' else None,
'max_stories': request.form['max_stories'] if 'max_stories' in request.form and request.form['max_stories'] != 'null' else flask_login.current_user.profile['limits']['max_topic_stories'],
}
try:
topic_result = user_mc.topicCreate(name=name, description=description, solr_seed_query=solr_seed_query,
start_date=start_date, end_date=end_date,
media_tags_ids=[COLLECTION_US_TOP_ONLINE], # HACK: can't save without one of these in place (for now)
**optional_args,
)['topics'][0]
topics_id = topic_result['topics_id']
logger.info("Created new topic \"{}\" as {}".format(name, topics_id))
# if this includes any of the US-centric collections, add the retweet partisanship subtopic by default
# client will either make a empty snapshot, or a spidering one
return topic_summary(topics_id)
except mediacloud.error.MCException as e:
logging.error("Topic creation failed {}".format(name))
logging.exception(e)
return json_error_response(e.message, e.status_code)
except Exception as e:
logging.error("Topic creation failed {}".format(name))
logging.exception(e)
return json_error_response(str(e), 500)
@app.route('/api/topics/name-exists', methods=['GET'])
@flask_login.login_required
@arguments_required('searchStr')
@api_error_handler
def topic_name_exists():
# Check if topic with name exists already
# Have to do this in a unique method, instead of in topic_search because we need to use an admin connection
# to media cloud to list all topics, but we don't want to return topics a user can't see to them.
# :return: boolean indicating if topic with this name exists for not (case insensive check)
search_str = request.args['searchStr']
topics_id = int(request.args['topicId']) if 'topicId' in request.args else None
matching_topics = mc.topicList(name=search_str, limit=15)
if topics_id:
matching_topic_names = [t['name'].lower().strip() for t in matching_topics['topics']
if t['topics_id'] != topics_id]
else:
matching_topic_names = [t['name'].lower().strip() for t in matching_topics['topics']]
name_in_use = search_str.lower() in matching_topic_names
return jsonify({'nameInUse': name_in_use})
| apache-2.0 | -6,057,934,450,072,376,000 | 48.788732 | 195 | 0.660255 | false |
jeffsilverm/presentation | whats_new_in_python_3.6/type_hints_complicated.py | 1 | 1308 |
#! /usr/bin/python3.6
# -*- coding: utf-8 -*-
import time
import sys
assert sys.version_info.major == 3 and sys.version_info.minor == 6, "Not running python 3.6, running {}".format(
sys.version_info)
class A(object):
def __init__(self, instance_mark) -> None:
self.instance_mark_A = instance_mark
def af_A(self, input):
return input * 2
def afo_A(self, input):
return input * 4
class AA(A):
def __init__(self, instance_marker) -> None:
        super().__init__(instance_marker)  # bare super() builds the proxy but never runs A.__init__
self.instance_marker = instance_marker
def aaf_AA(self, method_input):
return method_input * 20
def afo_A(self, method_input):
return method_input ** 2
class B(object):
def __init__(self):
pass
def bf_B(self, method_input):
return method_input * 9
a = A("marker a")
aa = AA("marker aa")
print("a.af_A(4) ", a.af_A(4))
print("a.afo_A(4) ", a.afo_A(4))
print("aa.aaf_AA(4) ", aa.aaf_AA(4))
print("aa.afo_A(4) ", aa.afo_A(4))
print("a.af_A('4') ", a.af_A('4'))
print("a.afo_A('4') ", a.afo_A('4'))
print("aa.aaf_AA('4') ", aa.aaf_AA('4'), flush=True)
try:
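    # AA.afo_A squares its input, so the string argument '4' raises TypeError here.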
print("aa.afo_A('4') ", aa.afo_A('4'))
except TypeError as t:
time.sleep(1)
print("Exception TypeError was raised, as expected, when calling aa.afo_A('4'))", file=sys.stderr)
| gpl-2.0 | 859,038,877,987,351,700 | 22.357143 | 112 | 0.58104 | false |
Crowdcomputer/CC | crowdcomputer/init_db.py | 1 | 1613 |
'''
Created on Nov 26, 2012
@author: stefanotranquillini
'''
from django.contrib.auth.models import User, Group
from rest_framework.authtoken.models import Token
from general.models import Application
from uuid import uuid4
def init():
initAppsAndCC()
def initAppsAndCC():
try:
user, c = User.objects.get_or_create(username='crowdcomputer',email="[email protected]",password="this.is.spam")
user.save()
print "%s %s"%(user.username,c)
app, c = Application.objects.get_or_create(name="crowdcomputer",url="http://www.crowdcomputer.org",user=user)
if c:
app.token=str(uuid4()).replace('-','')
app.save()
print "%s %s" %(app.name, app.token)
app, c = Application.objects.get_or_create(name="bpmn",url="http://www.crowdcomputer.org",user=user)
if c:
app.token=str(uuid4()).replace('-','')
print "%s %s" %(app.name, app.token)
app.save()
bpmn, c = Group.objects.get_or_create(name='bpmn')
bpmn.save()
except Exception, e:
print e
print 'exception'
def createAdmin(username,password,email):
try:
admin, c = User.objects.get_or_create(email=email)
if c:
admin.set_password(password)
admin.username=username
admin.is_superuser = True
admin.is_staff = True
admin.save()
            print 'created'
else:
admin.set_password(password)
admin.save()
            print 'updated'
except Exception:
print 'exception'
| apache-2.0 | -2,273,612,954,416,833,300 | 28.345455 | 126 | 0.588965 | false |
DailyActie/Surrogate-Model | 01-codes/numpy-master/numpy/matrixlib/defmatrix.py | 1 | 34262 |
from __future__ import division, absolute_import, print_function
__all__ = ['matrix', 'bmat', 'mat', 'asmatrix']
import sys
import numpy.core.numeric as N
from numpy.core.numeric import concatenate, isscalar, binary_repr, identity, asanyarray
from numpy.core.numerictypes import issubdtype
# make translation table
_numchars = '0123456789.-+jeEL'
if sys.version_info[0] >= 3:
class _NumCharTable:
def __getitem__(self, i):
if chr(i) in _numchars:
return chr(i)
else:
return None
_table = _NumCharTable()
def _eval(astr):
str_ = astr.translate(_table)
if not str_:
raise TypeError("Invalid data string supplied: " + astr)
else:
return eval(str_)
else:
_table = [None] * 256
for k in range(256):
_table[k] = chr(k)
_table = ''.join(_table)
_todelete = []
for k in _table:
if k not in _numchars:
_todelete.append(k)
_todelete = ''.join(_todelete)
del k
def _eval(astr):
str_ = astr.translate(_table, _todelete)
if not str_:
raise TypeError("Invalid data string supplied: " + astr)
else:
return eval(str_)
def _convert_from_string(data):
rows = data.split(';')
newdata = []
count = 0
for row in rows:
trow = row.split(',')
newrow = []
for col in trow:
temp = col.split()
newrow.extend(map(_eval, temp))
if count == 0:
Ncols = len(newrow)
elif len(newrow) != Ncols:
raise ValueError("Rows not the same size.")
count += 1
newdata.append(newrow)
return newdata
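# Illustrative note (added, not in the upstream source): semicolons separate
# rows and commas/whitespace separate columns, so for example
#   _convert_from_string('1 2; 3 4')   -> [[1, 2], [3, 4]]
#   _convert_from_string('1, 2; 3, 4') -> [[1, 2], [3, 4]]
# while a ragged string such as '1 2; 3' raises ValueError("Rows not the same size.").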
def asmatrix(data, dtype=None):
"""
Interpret the input as a matrix.
Unlike `matrix`, `asmatrix` does not make a copy if the input is already
a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``.
Parameters
----------
data : array_like
Input data.
dtype : data-type
Data-type of the output matrix.
Returns
-------
mat : matrix
`data` interpreted as a matrix.
Examples
--------
>>> x = np.array([[1, 2], [3, 4]])
>>> m = np.asmatrix(x)
>>> x[0,0] = 5
>>> m
matrix([[5, 2],
[3, 4]])
"""
return matrix(data, dtype=dtype, copy=False)
def matrix_power(M, n):
"""
Raise a square matrix to the (integer) power `n`.
For positive integers `n`, the power is computed by repeated matrix
squarings and matrix multiplications. If ``n == 0``, the identity matrix
of the same shape as M is returned. If ``n < 0``, the inverse
is computed and then raised to the ``abs(n)``.
Parameters
----------
M : ndarray or matrix object
Matrix to be "powered." Must be square, i.e. ``M.shape == (m, m)``,
with `m` a positive integer.
n : int
The exponent can be any integer or long integer, positive,
negative, or zero.
Returns
-------
M**n : ndarray or matrix object
The return value is the same shape and type as `M`;
if the exponent is positive or zero then the type of the
elements is the same as those of `M`. If the exponent is
negative the elements are floating-point.
Raises
------
LinAlgError
If the matrix is not numerically invertible.
See Also
--------
matrix
Provides an equivalent function as the exponentiation operator
(``**``, not ``^``).
Examples
--------
>>> from numpy import linalg as LA
>>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
>>> LA.matrix_power(i, 3) # should = -i
array([[ 0, -1],
[ 1, 0]])
>>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix
matrix([[ 0, -1],
[ 1, 0]])
>>> LA.matrix_power(i, 0)
array([[1, 0],
[0, 1]])
>>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
array([[ 0., 1.],
[-1., 0.]])
Somewhat more sophisticated example
>>> q = np.zeros((4, 4))
>>> q[0:2, 0:2] = -i
>>> q[2:4, 2:4] = i
>>> q # one of the three quaternion units not equal to 1
array([[ 0., -1., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., -1., 0.]])
>>> LA.matrix_power(q, 2) # = -np.eye(4)
array([[-1., 0., 0., 0.],
[ 0., -1., 0., 0.],
[ 0., 0., -1., 0.],
[ 0., 0., 0., -1.]])
"""
M = asanyarray(M)
if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
raise ValueError("input must be a square array")
if not issubdtype(type(n), int):
raise TypeError("exponent must be an integer")
from numpy.linalg import inv
if n == 0:
M = M.copy()
M[:] = identity(M.shape[0])
return M
elif n < 0:
M = inv(M)
n *= -1
result = M
if n <= 3:
for _ in range(n - 1):
result = N.dot(result, M)
return result
# binary decomposition to reduce the number of Matrix
# multiplications for n > 3.
beta = binary_repr(n)
Z, q, t = M, 0, len(beta)
while beta[t - q - 1] == '0':
Z = N.dot(Z, Z)
q += 1
result = Z
for k in range(q + 1, t):
Z = N.dot(Z, Z)
if beta[t - k - 1] == '1':
result = N.dot(result, Z)
return result
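# Illustrative trace (added, not in the upstream source) of the binary
# decomposition above for n = 6, whose binary representation is '110':
#   - square while trailing bits are 0: Z = M^2            (q becomes 1)
#   - result starts as Z = M^2
#   - next bit: Z = M^4, bit is '1'  -> result = M^2 . M^4 = M^6
# so only three matrix products are needed instead of five repeated multiplications.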
class matrix(N.ndarray):
"""
matrix(data, dtype=None, copy=True)
Returns a matrix from an array-like object, or from a string of data.
A matrix is a specialized 2-D array that retains its 2-D nature
through operations. It has certain special operators, such as ``*``
(matrix multiplication) and ``**`` (matrix power).
Parameters
----------
data : array_like or string
If `data` is a string, it is interpreted as a matrix with commas
or spaces separating columns, and semicolons separating rows.
dtype : data-type
Data-type of the output matrix.
copy : bool
If `data` is already an `ndarray`, then this flag determines
whether the data is copied (the default), or whether a view is
constructed.
See Also
--------
array
Examples
--------
>>> a = np.matrix('1 2; 3 4')
>>> print(a)
[[1 2]
[3 4]]
>>> np.matrix([[1, 2], [3, 4]])
matrix([[1, 2],
[3, 4]])
"""
__array_priority__ = 10.0
def __new__(subtype, data, dtype=None, copy=True):
if isinstance(data, matrix):
dtype2 = data.dtype
if (dtype is None):
dtype = dtype2
if (dtype2 == dtype) and (not copy):
return data
return data.astype(dtype)
if isinstance(data, N.ndarray):
if dtype is None:
intype = data.dtype
else:
intype = N.dtype(dtype)
new = data.view(subtype)
if intype != data.dtype:
return new.astype(intype)
if copy:
return new.copy()
else:
return new
if isinstance(data, str):
data = _convert_from_string(data)
# now convert data to an array
arr = N.array(data, dtype=dtype, copy=copy)
ndim = arr.ndim
shape = arr.shape
if (ndim > 2):
raise ValueError("matrix must be 2-dimensional")
elif ndim == 0:
shape = (1, 1)
elif ndim == 1:
shape = (1, shape[0])
order = 'C'
if (ndim == 2) and arr.flags.fortran:
order = 'F'
if not (order or arr.flags.contiguous):
arr = arr.copy()
ret = N.ndarray.__new__(subtype, shape, arr.dtype,
buffer=arr,
order=order)
return ret
def __array_finalize__(self, obj):
self._getitem = False
if (isinstance(obj, matrix) and obj._getitem): return
ndim = self.ndim
if (ndim == 2):
return
if (ndim > 2):
newshape = tuple([x for x in self.shape if x > 1])
ndim = len(newshape)
if ndim == 2:
self.shape = newshape
return
elif (ndim > 2):
raise ValueError("shape too large to be a matrix.")
else:
newshape = self.shape
if ndim == 0:
self.shape = (1, 1)
elif ndim == 1:
self.shape = (1, newshape[0])
return
def __getitem__(self, index):
self._getitem = True
try:
out = N.ndarray.__getitem__(self, index)
finally:
self._getitem = False
if not isinstance(out, N.ndarray):
return out
if out.ndim == 0:
return out[()]
if out.ndim == 1:
sh = out.shape[0]
# Determine when we should have a column array
try:
n = len(index)
except:
n = 0
if n > 1 and isscalar(index[1]):
out.shape = (sh, 1)
else:
out.shape = (1, sh)
return out
def __mul__(self, other):
if isinstance(other, (N.ndarray, list, tuple)):
# This promotes 1-D vectors to row vectors
return N.dot(self, asmatrix(other))
if isscalar(other) or not hasattr(other, '__rmul__'):
return N.dot(self, other)
return NotImplemented
def __rmul__(self, other):
return N.dot(other, self)
def __imul__(self, other):
self[:] = self * other
return self
def __pow__(self, other):
return matrix_power(self, other)
def __ipow__(self, other):
self[:] = self ** other
return self
def __rpow__(self, other):
return NotImplemented
def __repr__(self):
s = repr(self.__array__()).replace('array', 'matrix')
# now, 'matrix' has 6 letters, and 'array' 5, so the columns don't
# line up anymore. We need to add a space.
l = s.splitlines()
for i in range(1, len(l)):
if l[i]:
l[i] = ' ' + l[i]
return '\n'.join(l)
def __str__(self):
return str(self.__array__())
def _align(self, axis):
"""A convenience function for operations that need to preserve axis
orientation.
"""
if axis is None:
return self[0, 0]
elif axis == 0:
return self
elif axis == 1:
return self.transpose()
else:
raise ValueError("unsupported axis")
def _collapse(self, axis):
"""A convenience function for operations that want to collapse
to a scalar like _align, but are using keepdims=True
"""
if axis is None:
return self[0, 0]
else:
return self
# Necessary because base-class tolist expects dimension
# reduction by x[0]
def tolist(self):
"""
Return the matrix as a (possibly nested) list.
See `ndarray.tolist` for full documentation.
See Also
--------
ndarray.tolist
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.tolist()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]
"""
return self.__array__().tolist()
# To preserve orientation of result...
def sum(self, axis=None, dtype=None, out=None):
"""
Returns the sum of the matrix elements, along the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum
Notes
-----
This is the same as `ndarray.sum`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix([[1, 2], [4, 3]])
>>> x.sum()
10
>>> x.sum(axis=1)
matrix([[3],
[7]])
>>> x.sum(axis=1, dtype='float')
matrix([[ 3.],
[ 7.]])
>>> out = np.zeros((1, 2), dtype='float')
>>> x.sum(axis=1, dtype='float', out=out)
matrix([[ 3.],
[ 7.]])
"""
return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis)
# To update docstring from array to matrix...
def squeeze(self, axis=None):
"""
Return a possibly reshaped matrix.
Refer to `numpy.squeeze` for more documentation.
Parameters
----------
axis : None or int or tuple of ints, optional
Selects a subset of the single-dimensional entries in the shape.
If an axis is selected with shape entry greater than one,
an error is raised.
Returns
-------
squeezed : matrix
The matrix, but as a (1, N) matrix if it had shape (N, 1).
See Also
--------
numpy.squeeze : related function
Notes
-----
If `m` has a single column then that column is returned
as the single row of a matrix. Otherwise `m` is returned.
The returned matrix is always either `m` itself or a view into `m`.
Supplying an axis keyword argument will not affect the returned matrix
but it may cause an error to be raised.
Examples
--------
>>> c = np.matrix([[1], [2]])
>>> c
matrix([[1],
[2]])
>>> c.squeeze()
matrix([[1, 2]])
>>> r = c.T
>>> r
matrix([[1, 2]])
>>> r.squeeze()
matrix([[1, 2]])
>>> m = np.matrix([[1, 2], [3, 4]])
>>> m.squeeze()
matrix([[1, 2],
[3, 4]])
"""
return N.ndarray.squeeze(self, axis=axis)
# To update docstring from array to matrix...
def flatten(self, order='C'):
"""
Return a flattened copy of the matrix.
All `N` elements of the matrix are placed into a single row.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order. 'F' means to
flatten in column-major (Fortran-style) order. 'A' means to
flatten in column-major order if `m` is Fortran *contiguous* in
memory, row-major order otherwise. 'K' means to flatten `m` in
the order the elements occur in memory. The default is 'C'.
Returns
-------
y : matrix
A copy of the matrix, flattened to a `(1, N)` matrix where `N`
is the number of elements in the original matrix.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the matrix.
Examples
--------
>>> m = np.matrix([[1,2], [3,4]])
>>> m.flatten()
matrix([[1, 2, 3, 4]])
>>> m.flatten('F')
matrix([[1, 3, 2, 4]])
"""
return N.ndarray.flatten(self, order=order)
def mean(self, axis=None, dtype=None, out=None):
"""
Returns the average of the matrix elements along the given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean
Notes
-----
Same as `ndarray.mean` except that, where that returns an `ndarray`,
this returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.mean()
5.5
>>> x.mean(0)
matrix([[ 4., 5., 6., 7.]])
>>> x.mean(1)
matrix([[ 1.5],
[ 5.5],
[ 9.5]])
"""
return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis)
def std(self, axis=None, dtype=None, out=None, ddof=0):
"""
Return the standard deviation of the array elements along the given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std
Notes
-----
This is the same as `ndarray.std`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.std()
3.4520525295346629
>>> x.std(0)
matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]])
>>> x.std(1)
matrix([[ 1.11803399],
[ 1.11803399],
[ 1.11803399]])
"""
return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
def var(self, axis=None, dtype=None, out=None, ddof=0):
"""
Returns the variance of the matrix elements, along the given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var
Notes
-----
This is the same as `ndarray.var`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.var()
11.916666666666666
>>> x.var(0)
matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]])
>>> x.var(1)
matrix([[ 1.25],
[ 1.25],
[ 1.25]])
"""
return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
def prod(self, axis=None, dtype=None, out=None):
"""
Return the product of the array elements over the given axis.
Refer to `prod` for full documentation.
See Also
--------
prod, ndarray.prod
Notes
-----
Same as `ndarray.prod`, except, where that returns an `ndarray`, this
returns a `matrix` object instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.prod()
0
>>> x.prod(0)
matrix([[ 0, 45, 120, 231]])
>>> x.prod(1)
matrix([[ 0],
[ 840],
[7920]])
"""
return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis)
def any(self, axis=None, out=None):
"""
Test whether any array element along a given axis evaluates to True.
Refer to `numpy.any` for full documentation.
Parameters
----------
axis : int, optional
Axis along which logical OR is performed
out : ndarray, optional
Output to existing array instead of creating new one, must have
same shape as expected output
Returns
-------
any : bool, ndarray
Returns a single bool if `axis` is ``None``; otherwise,
returns `ndarray`
"""
return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis)
def all(self, axis=None, out=None):
"""
Test whether all matrix elements along a given axis evaluate to True.
Parameters
----------
See `numpy.all` for complete descriptions
See Also
--------
numpy.all
Notes
-----
This is the same as `ndarray.all`, but it returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> y = x[0]; y
matrix([[0, 1, 2, 3]])
>>> (x == y)
matrix([[ True, True, True, True],
[False, False, False, False],
[False, False, False, False]], dtype=bool)
>>> (x == y).all()
False
>>> (x == y).all(0)
matrix([[False, False, False, False]], dtype=bool)
>>> (x == y).all(1)
matrix([[ True],
[False],
[False]], dtype=bool)
"""
return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis)
def max(self, axis=None, out=None):
"""
Return the maximum value along an axis.
Parameters
----------
See `amax` for complete descriptions
See Also
--------
amax, ndarray.max
Notes
-----
This is the same as `ndarray.max`, but returns a `matrix` object
where `ndarray.max` would return an ndarray.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.max()
11
>>> x.max(0)
matrix([[ 8, 9, 10, 11]])
>>> x.max(1)
matrix([[ 3],
[ 7],
[11]])
"""
return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis)
def argmax(self, axis=None, out=None):
"""
Indexes of the maximum values along an axis.
Return the indexes of the first occurrences of the maximum values
along the specified axis. If axis is None, the index is for the
flattened matrix.
Parameters
----------
See `numpy.argmax` for complete descriptions
See Also
--------
numpy.argmax
Notes
-----
This is the same as `ndarray.argmax`, but returns a `matrix` object
where `ndarray.argmax` would return an `ndarray`.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.argmax()
11
>>> x.argmax(0)
matrix([[2, 2, 2, 2]])
>>> x.argmax(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.argmax(self, axis, out)._align(axis)
def min(self, axis=None, out=None):
"""
Return the minimum value along an axis.
Parameters
----------
See `amin` for complete descriptions.
See Also
--------
amin, ndarray.min
Notes
-----
This is the same as `ndarray.min`, but returns a `matrix` object
where `ndarray.min` would return an ndarray.
Examples
--------
>>> x = -np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, -1, -2, -3],
[ -4, -5, -6, -7],
[ -8, -9, -10, -11]])
>>> x.min()
-11
>>> x.min(0)
matrix([[ -8, -9, -10, -11]])
>>> x.min(1)
matrix([[ -3],
[ -7],
[-11]])
"""
return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis)
def argmin(self, axis=None, out=None):
"""
Indexes of the minimum values along an axis.
Return the indexes of the first occurrences of the minimum values
along the specified axis. If axis is None, the index is for the
flattened matrix.
Parameters
----------
See `numpy.argmin` for complete descriptions.
See Also
--------
numpy.argmin
Notes
-----
This is the same as `ndarray.argmin`, but returns a `matrix` object
where `ndarray.argmin` would return an `ndarray`.
Examples
--------
>>> x = -np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, -1, -2, -3],
[ -4, -5, -6, -7],
[ -8, -9, -10, -11]])
>>> x.argmin()
11
>>> x.argmin(0)
matrix([[2, 2, 2, 2]])
>>> x.argmin(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.argmin(self, axis, out)._align(axis)
def ptp(self, axis=None, out=None):
"""
Peak-to-peak (maximum - minimum) value along the given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp
Notes
-----
Same as `ndarray.ptp`, except, where that would return an `ndarray` object,
this returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.ptp()
11
>>> x.ptp(0)
matrix([[8, 8, 8, 8]])
>>> x.ptp(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.ptp(self, axis, out)._align(axis)
def getI(self):
"""
Returns the (multiplicative) inverse of invertible `self`.
Parameters
----------
None
Returns
-------
ret : matrix object
If `self` is non-singular, `ret` is such that ``ret * self`` ==
``self * ret`` == ``np.matrix(np.eye(self[0,:].size)`` all return
``True``.
Raises
------
numpy.linalg.LinAlgError: Singular matrix
If `self` is singular.
See Also
--------
linalg.inv
Examples
--------
>>> m = np.matrix('[1, 2; 3, 4]'); m
matrix([[1, 2],
[3, 4]])
>>> m.getI()
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
>>> m.getI() * m
matrix([[ 1., 0.],
[ 0., 1.]])
"""
M, N = self.shape
if M == N:
from numpy.dual import inv as func
else:
from numpy.dual import pinv as func
return asmatrix(func(self))
def getA(self):
"""
Return `self` as an `ndarray` object.
Equivalent to ``np.asarray(self)``.
Parameters
----------
None
Returns
-------
ret : ndarray
`self` as an `ndarray`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.getA()
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
"""
return self.__array__()
def getA1(self):
"""
Return `self` as a flattened `ndarray`.
Equivalent to ``np.asarray(x).ravel()``
Parameters
----------
None
Returns
-------
ret : ndarray
`self`, 1-D, as an `ndarray`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.getA1()
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
"""
return self.__array__().ravel()
def ravel(self, order='C'):
"""
Return a flattened matrix.
Refer to `numpy.ravel` for more documentation.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
The elements of `m` are read using this index order. 'C' means to
index the elements in C-like order, with the last axis index
changing fastest, back to the first axis index changing slowest.
'F' means to index the elements in Fortran-like index order, with
the first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of the
memory layout of the underlying array, and only refer to the order
of axis indexing. 'A' means to read the elements in Fortran-like
index order if `m` is Fortran *contiguous* in memory, C-like order
otherwise. 'K' means to read the elements in the order they occur
in memory, except for reversing the data when strides are negative.
By default, 'C' index order is used.
Returns
-------
ret : matrix
Return the matrix flattened to shape `(1, N)` where `N`
is the number of elements in the original matrix.
A copy is made only if necessary.
See Also
--------
matrix.flatten : returns a similar output matrix but always a copy
matrix.flat : a flat iterator on the array.
numpy.ravel : related function which returns an ndarray
"""
return N.ndarray.ravel(self, order=order)
def getT(self):
"""
Returns the transpose of the matrix.
Does *not* conjugate! For the complex conjugate transpose, use ``.H``.
Parameters
----------
None
Returns
-------
ret : matrix object
The (non-conjugated) transpose of the matrix.
See Also
--------
transpose, getH
Examples
--------
>>> m = np.matrix('[1, 2; 3, 4]')
>>> m
matrix([[1, 2],
[3, 4]])
>>> m.getT()
matrix([[1, 3],
[2, 4]])
"""
return self.transpose()
def getH(self):
"""
Returns the (complex) conjugate transpose of `self`.
Equivalent to ``np.transpose(self)`` if `self` is real-valued.
Parameters
----------
None
Returns
-------
ret : matrix object
complex conjugate transpose of `self`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4)))
>>> z = x - 1j*x; z
matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j],
[ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j],
[ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]])
>>> z.getH()
matrix([[ 0. +0.j, 4. +4.j, 8. +8.j],
[ 1. +1.j, 5. +5.j, 9. +9.j],
[ 2. +2.j, 6. +6.j, 10.+10.j],
[ 3. +3.j, 7. +7.j, 11.+11.j]])
"""
if issubclass(self.dtype.type, N.complexfloating):
return self.transpose().conjugate()
else:
return self.transpose()
T = property(getT, None)
A = property(getA, None)
A1 = property(getA1, None)
H = property(getH, None)
I = property(getI, None)
def _from_string(str, gdict, ldict):
rows = str.split(';')
rowtup = []
for row in rows:
trow = row.split(',')
newrow = []
for x in trow:
newrow.extend(x.split())
trow = newrow
coltup = []
for col in trow:
col = col.strip()
try:
thismat = ldict[col]
except KeyError:
try:
thismat = gdict[col]
except KeyError:
raise KeyError("%s not found" % (col,))
coltup.append(thismat)
rowtup.append(concatenate(coltup, axis=-1))
return concatenate(rowtup, axis=0)
def bmat(obj, ldict=None, gdict=None):
"""
Build a matrix object from a string, nested sequence, or array.
Parameters
----------
obj : str or array_like
Input data. Names of variables in the current scope may be
referenced, even if `obj` is a string.
ldict : dict, optional
A dictionary that replaces local operands in current frame.
Ignored if `obj` is not a string or `gdict` is `None`.
gdict : dict, optional
A dictionary that replaces global operands in current frame.
Ignored if `obj` is not a string.
Returns
-------
out : matrix
Returns a matrix object, which is a specialized 2-D array.
See Also
--------
matrix
Examples
--------
>>> A = np.mat('1 1; 1 1')
>>> B = np.mat('2 2; 2 2')
>>> C = np.mat('3 4; 5 6')
>>> D = np.mat('7 8; 9 0')
All the following expressions construct the same block matrix:
>>> np.bmat([[A, B], [C, D]])
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
>>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]])
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
>>> np.bmat('A,B; C,D')
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
"""
if isinstance(obj, str):
if gdict is None:
# get previous frame
frame = sys._getframe().f_back
glob_dict = frame.f_globals
loc_dict = frame.f_locals
else:
glob_dict = gdict
loc_dict = ldict
return matrix(_from_string(obj, glob_dict, loc_dict))
if isinstance(obj, (tuple, list)):
# [[A,B],[C,D]]
arr_rows = []
for row in obj:
if isinstance(row, N.ndarray): # not 2-d
return matrix(concatenate(obj, axis=-1))
else:
arr_rows.append(concatenate(row, axis=-1))
return matrix(concatenate(arr_rows, axis=0))
if isinstance(obj, N.ndarray):
return matrix(obj)
mat = asmatrix
| mit | -4,329,534,605,734,874,600 | 26.38769 | 89 | 0.471222 | false |
bloyl/mne-python | mne/__init__.py | 1 | 5917 | """MNE software for MEG and EEG data analysis."""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.devN' where N is an integer.
#
from ._version import __version__
# have to import verbose first since it's needed by many things
from .utils import (set_log_level, set_log_file, verbose, set_config,
get_config, get_config_path, set_cache_dir,
set_memmap_min_size, grand_average, sys_info, open_docs)
from .io.pick import (pick_types, pick_channels,
pick_channels_regexp, pick_channels_forward,
pick_types_forward, pick_channels_cov,
pick_channels_evoked, pick_info,
channel_type, channel_indices_by_type)
from .io.base import concatenate_raws
from .io.meas_info import create_info, Info
from .io.proj import Projection
from .io.kit import read_epochs_kit
from .io.eeglab import read_epochs_eeglab
from .io.reference import (set_eeg_reference, set_bipolar_reference,
add_reference_channels)
from .io.what import what
from .bem import (make_sphere_model, make_bem_model, make_bem_solution,
read_bem_surfaces, write_bem_surfaces, write_head_bem,
read_bem_solution, write_bem_solution)
from .cov import (read_cov, write_cov, Covariance, compute_raw_covariance,
compute_covariance, whiten_evoked, make_ad_hoc_cov)
from .event import (read_events, write_events, find_events, merge_events,
pick_events, make_fixed_length_events, concatenate_events,
find_stim_steps, AcqParserFIF)
from .forward import (read_forward_solution, apply_forward, apply_forward_raw,
average_forward_solutions, Forward,
write_forward_solution, make_forward_solution,
convert_forward_solution, make_field_map,
make_forward_dipole, use_coil_def)
from .source_estimate import (read_source_estimate,
SourceEstimate, VectorSourceEstimate,
VolSourceEstimate, VolVectorSourceEstimate,
MixedSourceEstimate, MixedVectorSourceEstimate,
grade_to_tris,
spatial_src_adjacency,
spatial_tris_adjacency,
spatial_dist_adjacency,
spatial_inter_hemi_adjacency,
spatio_temporal_src_adjacency,
spatio_temporal_tris_adjacency,
spatio_temporal_dist_adjacency,
extract_label_time_course, stc_near_sensors)
from .surface import (read_surface, write_surface, decimate_surface, read_tri,
get_head_surf, get_meg_helmet_surf, dig_mri_distances,
marching_cubes, voxel_neighbors)
from .morph_map import read_morph_map
from .morph import (SourceMorph, read_source_morph, grade_to_vertices,
compute_source_morph)
from .source_space import (read_source_spaces, vertex_to_mni,
head_to_mni, head_to_mri, read_talxfm,
write_source_spaces, setup_source_space,
setup_volume_source_space, SourceSpaces,
add_source_space_distances, morph_source_spaces,
get_volume_labels_from_aseg,
get_volume_labels_from_src, read_freesurfer_lut)
from .annotations import (Annotations, read_annotations, annotations_from_events,
events_from_annotations)
from .epochs import (BaseEpochs, Epochs, EpochsArray, read_epochs,
concatenate_epochs, make_fixed_length_epochs)
from .evoked import (Evoked, EvokedArray, read_evokeds, write_evokeds,
combine_evoked)
from .label import (read_label, label_sign_flip,
write_label, stc_to_label, grow_labels, Label, split_label,
BiHemiLabel, read_labels_from_annot, write_labels_to_annot,
random_parcellation, morph_labels, labels_to_stc)
from .misc import parse_config, read_reject_parameters
from .coreg import (create_default_subject, scale_bem, scale_mri, scale_labels,
scale_source_space)
from .transforms import (read_trans, write_trans,
transform_surface_to, Transform)
from .proj import (read_proj, write_proj, compute_proj_epochs,
compute_proj_evoked, compute_proj_raw, sensitivity_map)
from .dipole import read_dipole, Dipole, DipoleFixed, fit_dipole
from .channels import (equalize_channels, rename_channels, find_layout,
read_vectorview_selection)
from .report import Report, open_report
from .io import read_epochs_fieldtrip, read_evoked_fieldtrip, read_evokeds_mff
from .rank import compute_rank
from . import beamformer
from . import channels
from . import chpi
from . import commands
from . import connectivity
from . import coreg
from . import cuda
from . import datasets
from . import dipole
from . import epochs
from . import event
from . import externals
from . import io
from . import filter
from . import gui
from . import inverse_sparse
from . import minimum_norm
from . import preprocessing
from . import simulation
from . import stats
from . import surface
from . import time_frequency
from . import viz
from . import decoding
from . import export
# initialize logging
set_log_level(None, False)
set_log_file()
| bsd-3-clause | -6,323,858,588,769,140,000 | 44.515385 | 81 | 0.630218 | false |
Septima/qgis-qlrbrowser | src/QlrBrowser/mysettings/qgissettingmanager/types/bool.py | 1 | 3112 | #-----------------------------------------------------------
#
# QGIS setting manager is a python module to easily manage read/write
# settings and set/get corresponding widgets.
#
# Copyright : (C) 2013 Denis Rouzaud
# Email : [email protected]
#
#-----------------------------------------------------------
#
# licensed under the terms of GNU GPL 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#---------------------------------------------------------------------
from PyQt5.QtWidgets import QCheckBox
from qgis.core import QgsProject
from ..setting import Setting
from ..setting_widget import SettingWidget
from ..setting_manager import Debug
class Bool(Setting):
def __init__(self, name, scope, default_value, options={}):
Setting.__init__(self, name, scope, default_value, bool, QgsProject.instance().readBoolEntry, QgsProject.instance().writeEntryBool, options)
def check(self, value):
if type(value) != bool:
raise NameError("Setting %s must be a boolean." % self.name)
def config_widget(self, widget):
if type(widget) == QCheckBox:
return CheckBoxBoolWidget(self, widget, self.options)
elif hasattr(widget, "isCheckable") and widget.isCheckable():
return CheckableBoolWidget(self, widget, self.options)
else:
print(type(widget))
raise NameError("SettingManager does not handle %s widgets for booleans at the moment (setting: %s)" %
(type(widget), self.name))
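    # --- Hedged usage sketch (added note, not part of the original file) ---
    # Roughly how this setting type is used from a SettingManager subclass;
    # "Scope.Project" and the surrounding manager API are assumptions about the
    # rest of qgissettingmanager, not verified here:
    #
    #   show_paths = Bool("show_paths", Scope.Project, False)
    #   widget = QCheckBox("Show paths")
    #   bound = show_paths.config_widget(widget)   # returns a CheckBoxBoolWidget
    #   bound.set_widget_value(True)               # pushes the value into the checkbox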
class CheckBoxBoolWidget(SettingWidget):
def __init__(self, setting, widget, options):
signal = widget.stateChanged
SettingWidget.__init__(self, setting, widget, options, signal)
def set_widget_value(self, value):
if Debug:
print("Bool: set_widget_value: {0}{1}".format(value, self.setting.name))
self.widget.setChecked(value)
def widget_value(self):
return self.widget.isChecked()
class CheckableBoolWidget(SettingWidget):
def __init__(self, setting, widget, options):
signal = widget.clicked
SettingWidget.__init__(self, setting, widget, options, signal)
def set_widget_value(self, value):
self.widget.setChecked(value)
def widget_value(self):
return self.widget.isChecked()
def widget_test(self, value):
print('cannot test checkable groupbox at the moment')
return False | gpl-2.0 | 4,159,583,462,942,297,000 | 36.506024 | 148 | 0.643959 | false |
vhernandez/pygtksheet | examples/complex_test.py | 1 | 11754 | import sys
sys.path += ['/usr/local/lib/python2.6/dist-packages/gtk-2.0']
import gtk
from gtk import gdk
import pango
import gtksheet
from bordercombo import BorderCombo
#from gtkextra import BorderCombo
#import gtkextra
class TestSheet(gtksheet.Sheet):
def __init__(self):
gtksheet.Sheet.__init__(self, 20, 20, "Test")
colormap = gdk.colormap_get_system()
self.default_bg_color = colormap.alloc_color("light yellow")
self.default_fg_color = colormap.alloc_color("black")
self.set_background(self.default_bg_color)
self.set_grid(colormap.alloc_color("light blue"))
for column in xrange(self.get_columns_count()):
name = chr(ord("A") + column)
self.column_button_add_label(column, name)
self.set_column_title(column, name)
self.default_font = self.style.font_desc
class TestWindow(gtk.Window):
def __init__(self):
gtk.Window.__init__(self)
status_box = gtk.HBox(spacing=1)
status_box.set_border_width(0)
self.location = gtk.Label("")
(width, height) = self.location.size_request()
self.location.set_size_request(160, height)
status_box.pack_start(self.location, False)
self.entry = gtk.Entry()
self.entry.connect("changed", self._show_sheet_entry_cb)
status_box.pack_start(self.entry)
t = gtk.Toolbar()
ttips = gtk.Tooltips()
def add_widget_to_toolbar(widget, separator=True, tooltip=None):
ti = gtk.ToolItem()
ti.add(widget)
if tooltip is not None:
ti.set_tooltip(ttips, tooltip)
t.insert(ti, -1)
if separator:
t.insert(gtk.SeparatorToolItem(), -1)
fontbutton = gtk.FontButton()
fontbutton.connect("font-set", self._font_changed_cb)
add_widget_to_toolbar(fontbutton,
tooltip="Change the font of the selected cells");
self.fontbutton = fontbutton
items = \
(("justleft", None,
"Justify selected cells to the left",
gtk.STOCK_JUSTIFY_LEFT, self._justification_cb,
gtk.JUSTIFY_LEFT),
("justcenter", None,
"Justify selected cells to the center",
gtk.STOCK_JUSTIFY_CENTER, self._justification_cb,
gtk.JUSTIFY_CENTER),
("justright", None,
"Justify selected cells to the right",
gtk.STOCK_JUSTIFY_RIGHT, self._justification_cb,
gtk.JUSTIFY_RIGHT))
for name, label, tooltip, stock_id, cb, cb_params in items:
ti = gtk.Action(name, label, tooltip, stock_id)
ti.connect("activate", cb, cb_params)
t.insert(ti.create_tool_item(), -1)
bordercombo = BorderCombo()
bordercombo.connect("changed", self._border_changed_cb)
add_widget_to_toolbar(bordercombo,
tooltip="Change the border of the selected cells")
colormap = gdk.colormap_get_system()
colorbtn = gtk.ColorButton(colormap.alloc_color("black"))
colorbtn.connect("color-set", self._color_changed_cb, "f")
add_widget_to_toolbar(colorbtn, separator=False,
tooltip="Change the foreground color of the selected cells")
self.fgcolorbtn = colorbtn
colorbtn = gtk.ColorButton(colormap.alloc_color("light yellow"))
colorbtn.connect("color-set", self._color_changed_cb, "b")
add_widget_to_toolbar(colorbtn,
tooltip="Change the background color of the selected cells");
self.bgcolorbtn = colorbtn
self.sheet = TestSheet()
self.sheet.connect("activate", self._activate_sheet_cell_cb)
self.sheet.get_entry().connect("changed", self._show_entry_cb)
self.sheet.connect("changed", self._sheet_changed_cb)
ws = gtk.ScrolledWindow()
ws.add(self.sheet)
fd = self.sheet.default_font
fontbutton.set_font_name(fd.to_string())
vbox = gtk.VBox()
vbox.pack_start(t, False, False, 0)
vbox.pack_start(status_box, False, False, 0)
vbox.pack_start(ws, True, True, 0)
self.add(vbox)
self.set_size_request(500,400)
self.show_all()
def _sheet_changed_cb(self, sheet, row, column):
print "Sheet change at row: %d, column: %d" % (row, column)
def _show_sheet_entry_cb(self, entry):
if not entry.flags() & gtk.HAS_FOCUS:
return
sheet_entry = self.sheet.get_entry()
text = entry.get_text()
sheet_entry.set_text(text)
def _show_entry_cb(self, sheet_entry, *args):
if not sheet_entry.flags() & gtk.HAS_FOCUS:
return
text = sheet_entry.get_text()
self.entry.set_text(text)
def _activate_sheet_cell_cb(self, sheet, row, column):
title = sheet.get_column_title(column)
if title:
cell = " %s:%d " % (title, row)
else:
cell = " ROW: %d COLUMN: %d " % (row, column)
self.location.set_text(cell)
# Set attributes
attributes = sheet.get_attributes(row, column)
if attributes:
fd = attributes.font_desc if attributes.font_desc else self.sheet.default_font
fgcolor = attributes.foreground
bgcolor = attributes.background
else:
fd = self.sheet.default_font
fgcolor = self.sheet.default_fg_color
bgcolor = self.sheet.default_bg_color
self.fontbutton.set_font_name(fd.to_string())
self.fgcolorbtn.set_color(fgcolor)
self.bgcolorbtn.set_color(bgcolor)
# Set entry text
sheet_entry = sheet.get_entry()
self.entry.props.max_length = sheet_entry.props.max_length
text = sheet.cell_get_text(row, column)
if text:
self.entry.set_text(text)
else:
self.entry.set_text("")
print self.sheet.props.active_cell
def _font_changed_cb(self, widget):
r = self.sheet.props.selected_range
fd = pango.FontDescription(widget.get_font_name())
self.sheet.range_set_font(r, fd)
def _justification_cb(self, widget, data=None):
if data is None:
return
r = self.sheet.props.selected_range
if r:
self.sheet.range_set_justification(r, data)
def _border_changed_cb(self, widget):
border = widget.get_active()
range = self.sheet.props.selected_range
border_width = 3
self.sheet.range_set_border(range, 0, 0)
if border == 1:
border_mask = gtksheet.SHEET_TOP_BORDER
range.rowi = range.row0
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 2:
border_mask = gtksheet.SHEET_BOTTOM_BORDER
range.row0 = range.rowi
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 3:
border_mask = gtksheet.SHEET_RIGHT_BORDER
range.col0 = range.coli
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 4:
border_mask = gtksheet.SHEET_LEFT_BORDER
range.coli = range.col0
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 5:
if range.col0 == range.coli:
border_mask = gtksheet.SHEET_LEFT_BORDER | gtksheet.SHEET_RIGHT_BORDER
self.sheet.range_set_border(range, border_mask, border_width)
else:
border_mask = gtksheet.SHEET_LEFT_BORDER
auxcol = range.coli
range.coli = range.col0
self.sheet.range_set_border(range, border_mask, border_width)
border_mask = gtksheet.SHEET_RIGHT_BORDER
range.col0 = range.coli = auxcol
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 6:
if range.row0 == range.rowi:
border_mask = gtksheet.SHEET_TOP_BORDER | gtksheet.SHEET_BOTTOM_BORDER
self.sheet.range_set_border(range, border_mask, border_width)
else:
border_mask = gtksheet.SHEET_TOP_BORDER
auxrow = range.rowi
range.rowi = range.row0
self.sheet.range_set_border(range, border_mask, border_width)
border_mask = gtksheet.SHEET_BOTTOM_BORDER
range.row0 = range.rowi = auxrow
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 7:
border_mask = gtksheet.SHEET_RIGHT_BORDER | gtksheet.SHEET_LEFT_BORDER
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 8:
border_mask = gtksheet.SHEET_BOTTOM_BORDER | gtksheet.SHEET_TOP_BORDER
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 9:
self.sheet.range_set_border(range, 15, border_width)
for i in xrange(range.row0, range.rowi + 1):
for j in xrange(range.col0, range.coli + 1):
border_mask = 15
                    auxrange = gtksheet.SheetRange(i, j, i, j)  # fixed: the module is imported as gtksheet; there is no bare "sheet" name in scope
if i == range.rowi:
border_mask = border_mask ^ gtksheet.SHEET_BOTTOM_BORDER
if i == range.row0:
border_mask = border_mask ^ gtksheet.SHEET_TOP_BORDER
if j == range.coli:
border_mask = border_mask ^ gtksheet.SHEET_RIGHT_BORDER
if j == range.col0:
border_mask = border_mask ^ gtksheet.SHEET_LEFT_BORDER
if border_mask != 15:
self.sheet.range_set_border(auxrange, border_mask,
border_width)
elif border == 10:
for i in xrange(range.row0, range.rowi + 1):
for j in xrange(range.col0, range.coli + 1):
border_mask = 0
auxrange = gtksheet.SheetRange(i, j, i, j)
if i == range.rowi:
border_mask = border_mask | gtksheet.SHEET_BOTTOM_BORDER
if i == range.row0:
border_mask = border_mask | gtksheet.SHEET_TOP_BORDER
if j == range.coli:
border_mask = border_mask | gtksheet.SHEET_RIGHT_BORDER
if j == range.col0:
border_mask = border_mask | gtksheet.SHEET_LEFT_BORDER
if border_mask != 0:
self.sheet.range_set_border(auxrange, border_mask,
border_width)
elif border == 11:
border_mask = 15
self.sheet.range_set_border(range, border_mask, border_width)
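    # Added note (inferred from the branches above, not from BorderCombo's
    # documentation, so the index meanings are assumptions): 0 clears borders,
    # 1 top edge, 2 bottom edge, 3 right edge, 4 left edge, 5 left+right edges,
    # 6 top+bottom edges, 7 vertical borders on every selected cell,
    # 8 horizontal borders on every selected cell, 9 inner grid without the
    # outline, 10 outline only, 11 full grid (outline plus inner lines).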
def _color_changed_cb(self, widget, data=None):
# Bug in GtkSheet?: the color must be allocated with the system's
# colormap, else it is ignored
if data is None:
return
color = widget.get_color()
_range = self.sheet.props.selected_range
if data == "f":
self.sheet.range_set_foreground(_range, color)
else:
self.sheet.range_set_background(_range, color)
def main():
w = TestWindow()
w.connect("delete-event", lambda x,y: gtk.main_quit())
gtk.main()
if __name__=='__main__':
main()
| gpl-2.0 | 9,136,060,946,695,285,000 | 39.253425 | 90 | 0.561766 | false |
nash-x/hws | nova/huawei/scheduler/filters/disk_filter.py | 1 | 2145 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler.filters import disk_filter
from nova.huawei import utils as h_utils
LOG = logging.getLogger(__name__)
class HuaweiDiskFilter(disk_filter.DiskFilter):
"""Disk Filter with over subscription flag."""
def host_passes(self, host_state, filter_properties):
"""Filter based on disk usage."""
#deep copy a filter properties to avoid changing
filter_properties_tmp = copy.deepcopy(filter_properties)
context = filter_properties_tmp['context']
instance = filter_properties_tmp['request_spec']['instance_properties']
if h_utils.is_boot_from_volume(context, instance):
# just process local disk(ephemeral and swap), so set
# root_gb to zero
filter_properties_tmp.get('instance_type')['root_gb'] = 0
# if the request disk size is zero, we should return true.
# In negative free disk size condition, the instance booted volume
# is not create successfully.
instance_type = filter_properties.get('instance_type')
requested_disk = (1024 * (instance_type['ephemeral_gb']) +
instance_type['swap'])
if requested_disk == 0:
return True
return super(HuaweiDiskFilter, self).host_passes(host_state,
filter_properties_tmp)
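    # Illustrative note (added, not part of the upstream filter): for a flavor
    # with root_gb=20, ephemeral_gb=2 and swap=512 (MB), a boot-from-volume
    # request ignores the 20 GB root disk, so
    #     requested_disk = 1024 * 2 + 512 = 2560 MB
    # and only that ephemeral/swap footprint is checked against the host; when
    # that footprint is zero the filter accepts the host immediately.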
| apache-2.0 | -566,471,726,499,216,700 | 39.471698 | 79 | 0.660606 | false |
pglomski/shopnotes | drill_speed_chart.py | 1 | 2778 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''Produce a custom twist drill plot'''
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
# set some rcParams
mpl.rcParams['font.weight'] = 'bold'
mpl.rcParams['xtick.major.pad'] = 10
mpl.rcParams['xtick.direction'] = 'inout'
mpl.rcParams['xtick.labelsize'] = 26
mpl.rcParams['ytick.direction'] = 'inout'
mpl.rcParams['ytick.labelsize'] = 20
# define the constants for our chart
materials = [
('Acrylic' , 650 , 'c' , '-' ) ,
('Aluminum' , 300 , 'b' , '-' ) ,
('Brass' , 200 , 'g' , '-' ) ,
('LC Steel' , 110 , 'k' , '-' ) ,
('Wood' , 100 , 'brown' , '-' ) ,
('MC Steel' , 80 , 'darkgray' , '-' ) ,
('HC Steel' , 60 , 'lightgray' , '-' ) ,
('Stainless' , 50 , 'purple' , '-' ) ,
]
drill_speeds = [250, 340, 390, 510, 600, 650, 990, 1550, 1620, 1900, 2620, 3100] #rpm
speed_lims = (200., 4000.) # rpm
max_in = 1. # in.
incr = 1./16. # in.
im_sz = 25. # in.
ratio = 8.5/11.
fig = plt.figure(figsize=(im_sz,ratio * im_sz), dpi=600)
fig.patch.set_alpha(0)
# generate a vector of drill bit diameter
x = np.array([float(i) * incr for i in range(1,int(max_in/incr) + 1)]) # in.
# calculate the drill speed curve for each material type and plot the curve
for name, speed, color, linestyle in materials:
plt.loglog(x, 12/np.pi/x*speed, label=name, linewidth=5, color=color, linestyle=linestyle)
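# Worked example (added note, not in the original script): the curve above is
# rpm = (cutting speed in surface ft/min) * 12 / (pi * bit diameter in inches),
# so aluminum at 300 SFM with a 1/4 in. bit gives
#     12 * 300 / (pi * 0.25) ~= 4584 rpm,
# which lies above the 4000 rpm axis limit, so that part of the curve runs off
# the top of the chart.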
ax = plt.gca()
# adjust the axis tick locators to match drill press speeds
ax.yaxis.set_major_locator(mpl.ticker.FixedLocator(drill_speeds))
ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%4d'))
ax.yaxis.set_minor_locator(mpl.ticker.NullLocator())
ax.set_ylim(speed_lims)
# set the drill diameter locators and format the ticks with LaTeX
ax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(base=incr))
ax.xaxis.set_minor_locator(mpl.ticker.NullLocator())
ax.set_xlim((incr, max_in))
ticks = ['0', r'$$\frac{1}{16}$$' , r'$$\frac{1}{8}$$' , r'$$\frac{3}{16}$$' , r'$$\frac{1}{4}$$' ,
r'$$\frac{5}{16}$$' , r'$$\frac{3}{8}$$' , r'$$\frac{7}{16}$$' , r'$$\frac{1}{2}$$' ,
r'$$\frac{9}{16}$$' , r'$$\frac{5}{8}$$' , r'$$\frac{11}{16}$$' , r'$$\frac{3}{4}$$' ,
r'$$\frac{13}{16}$$' , r'$$\frac{7}{8}$$' , r'$$\frac{15}{16}$$' , r'$$1$$' ]
ax.xaxis.set_ticklabels(ticks)
# Add the Texts
plt.xlabel('Bit Diameter (in.)', fontsize=26)
plt.ylabel('Drill Speed (rpm)' , fontsize=26)
plt.title('Twist Drill Speeds' , fontsize=50)
plt.legend(ncol=2, loc=3, fontsize=40)
plt.grid('on')
plt.savefig('drill_speed_chart.png')
| agpl-3.0 | 6,025,454,394,184,277,000 | 35.077922 | 102 | 0.569114 | false |
robertnishihara/ray | python/ray/dashboard/dashboard.py | 1 | 37612 | try:
import aiohttp.web
except ImportError:
print("The dashboard requires aiohttp to run.")
import sys
sys.exit(1)
import argparse
import copy
import datetime
import errno
import json
import logging
import os
import platform
import threading
import time
import traceback
import yaml
import uuid
import grpc
from google.protobuf.json_format import MessageToDict
import ray
import ray.ray_constants as ray_constants
from ray.core.generated import node_manager_pb2
from ray.core.generated import node_manager_pb2_grpc
from ray.core.generated import reporter_pb2
from ray.core.generated import reporter_pb2_grpc
from ray.core.generated import core_worker_pb2
from ray.core.generated import core_worker_pb2_grpc
from ray.dashboard.interface import BaseDashboardController
from ray.dashboard.interface import BaseDashboardRouteHandler
from ray.dashboard.memory import construct_memory_table, MemoryTable, \
GroupByType, SortingType
from ray.dashboard.metrics_exporter.client import Exporter
from ray.dashboard.metrics_exporter.client import MetricsExportClient
from ray.dashboard.node_stats import NodeStats
from ray.dashboard.util import to_unix_time
from ray.metrics_agent import PrometheusServiceDiscoveryWriter
try:
from ray.tune import Analysis
from tensorboard import program
except ImportError:
Analysis = None
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
async def json_response(is_dev, result=None, error=None,
ts=None) -> aiohttp.web.Response:
if ts is None:
ts = datetime.datetime.utcnow()
headers = None
if is_dev:
headers = {"Access-Control-Allow-Origin": "*"}
return aiohttp.web.json_response(
{
"result": result,
"timestamp": to_unix_time(ts),
"error": error,
},
headers=headers)
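# --- Hedged usage sketch (added note, not part of the upstream module) ---
# json_response is awaited from an aiohttp handler; the handler name and
# payload below are illustrative assumptions, not endpoints defined elsewhere:
#
#   async def health(req) -> aiohttp.web.Response:
#       return await json_response(is_dev=False, result={"ok": True})
#
# With is_dev=True the CORS header "Access-Control-Allow-Origin: *" is added,
# which is useful when the frontend is served separately during development.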
class DashboardController(BaseDashboardController):
def __init__(self, redis_address, redis_password):
self.node_stats = NodeStats(redis_address, redis_password)
self.raylet_stats = RayletStats(
redis_address, redis_password=redis_password)
if Analysis is not None:
self.tune_stats = TuneCollector(2.0)
self.memory_table = MemoryTable([])
def _construct_raylet_info(self):
D = self.raylet_stats.get_raylet_stats()
workers_info_by_node = {
data["nodeId"]: data.get("workersStats")
for data in D.values()
}
infeasible_tasks = sum(
(data.get("infeasibleTasks", []) for data in D.values()), [])
# ready_tasks are used to render tasks that are not schedulable
# due to resource limitations.
# (e.g., Actor requires 2 GPUs but there is only 1 gpu available).
ready_tasks = sum((data.get("readyTasks", []) for data in D.values()),
[])
actor_groups = self.node_stats.get_actors(
workers_info_by_node, infeasible_tasks, ready_tasks)
plasma_stats = {}
# HTTP call to metrics port for each node in nodes/
used_views = ("object_store_num_local_objects",
"object_store_available_memory",
"object_store_used_memory")
for address, data in D.items():
# process view data
views = [
view for view in data.get("viewData", [])
if view.get("viewName") in used_views
]
node_plasma_stats = {}
for view in views:
view_name = view["viewName"]
view_measures = view["measures"]
if view_measures:
view_data = view_measures[0].get("doubleValue", .0)
else:
view_data = .0
node_plasma_stats[view_name] = view_data
plasma_stats[address] = node_plasma_stats
return {
"nodes": D,
"actorGroups": actor_groups,
"plasmaStats": plasma_stats
}
def get_ray_config(self):
try:
config_path = os.path.expanduser("~/ray_bootstrap_config.yaml")
with open(config_path) as f:
cfg = yaml.safe_load(f)
except Exception:
error = "No config"
return error, None
D = {
"min_workers": cfg["min_workers"],
"max_workers": cfg["max_workers"],
"initial_workers": cfg["initial_workers"],
"autoscaling_mode": cfg["autoscaling_mode"],
"idle_timeout_minutes": cfg["idle_timeout_minutes"],
}
try:
D["head_type"] = cfg["head_node"]["InstanceType"]
except KeyError:
D["head_type"] = "unknown"
try:
D["worker_type"] = cfg["worker_nodes"]["InstanceType"]
except KeyError:
D["worker_type"] = "unknown"
return None, D
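    # Illustrative note (added, not upstream): ~/ray_bootstrap_config.yaml is
    # the cluster launcher config, so the keys read above correspond to
    # entries such as
    #   min_workers: 0
    #   max_workers: 10
    #   initial_workers: 0
    #   autoscaling_mode: default
    #   idle_timeout_minutes: 5
    #   head_node: {InstanceType: m5.large}
    #   worker_nodes: {InstanceType: m5.large}
    # The concrete values are placeholders, not defaults taken from Ray.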
def get_node_info(self):
return self.node_stats.get_node_stats()
def get_raylet_info(self):
return self._construct_raylet_info()
def get_memory_table_info(self,
group_by=GroupByType.NODE_ADDRESS,
sort_by=SortingType.OBJECT_SIZE) -> MemoryTable:
# Collecting memory info adds big overhead to the cluster.
# This must be collected only when it is necessary.
self.raylet_stats.include_memory_info = True
D = self.raylet_stats.get_raylet_stats()
workers_info_by_node = {
data["nodeId"]: data.get("workersStats")
for data in D.values()
}
self.memory_table = construct_memory_table(
workers_info_by_node, group_by=group_by, sort_by=sort_by)
return self.memory_table
def stop_collecting_memory_table_info(self):
self.raylet_stats.include_memory_info = False
def tune_info(self):
if Analysis is not None:
D = self.tune_stats.get_stats()
else:
D = {}
return D
def tune_availability(self):
if Analysis is not None:
D = self.tune_stats.get_availability()
else:
D = {"available": False, "trials_available": False}
return D
def set_tune_experiment(self, experiment):
if Analysis is not None:
return self.tune_stats.set_experiment(experiment)
return "Tune Not Enabled", None
def enable_tune_tensorboard(self):
if Analysis is not None:
self.tune_stats.enable_tensorboard()
def launch_profiling(self, node_id, pid, duration):
profiling_id = self.raylet_stats.launch_profiling(
node_id=node_id, pid=pid, duration=duration)
return profiling_id
def check_profiling_status(self, profiling_id):
return self.raylet_stats.check_profiling_status(profiling_id)
def get_profiling_info(self, profiling_id):
return self.raylet_stats.get_profiling_info(profiling_id)
def kill_actor(self, actor_id, ip_address, port):
return self.raylet_stats.kill_actor(actor_id, ip_address, port)
def get_logs(self, hostname, pid):
return self.node_stats.get_logs(hostname, pid)
def get_errors(self, hostname, pid):
return self.node_stats.get_errors(hostname, pid)
def start_collecting_metrics(self):
self.node_stats.start()
self.raylet_stats.start()
if Analysis is not None:
self.tune_stats.start()
class DashboardRouteHandler(BaseDashboardRouteHandler):
def __init__(self, dashboard_controller: DashboardController,
is_dev=False):
self.dashboard_controller = dashboard_controller
self.is_dev = is_dev
def forbidden(self) -> aiohttp.web.Response:
return aiohttp.web.Response(status=403, text="403 Forbidden")
async def get_forbidden(self, _) -> aiohttp.web.Response:
return self.forbidden()
async def get_index(self, req) -> aiohttp.web.Response:
return aiohttp.web.FileResponse(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"client/build/index.html"))
async def get_favicon(self, req) -> aiohttp.web.Response:
return aiohttp.web.FileResponse(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"client/build/favicon.ico"))
async def ray_config(self, req) -> aiohttp.web.Response:
error, result = self.dashboard_controller.get_ray_config()
if error:
return await json_response(self.is_dev, error=error)
return await json_response(self.is_dev, result=result)
async def node_info(self, req) -> aiohttp.web.Response:
now = datetime.datetime.utcnow()
D = self.dashboard_controller.get_node_info()
return await json_response(self.is_dev, result=D, ts=now)
async def raylet_info(self, req) -> aiohttp.web.Response:
result = self.dashboard_controller.get_raylet_info()
return await json_response(self.is_dev, result=result)
async def memory_table_info(self, req) -> aiohttp.web.Response:
group_by = req.query.get("group_by")
sort_by = req.query.get("sort_by")
kwargs = {}
try:
if group_by:
kwargs["group_by"] = GroupByType(group_by)
if sort_by:
kwargs["sort_by"] = SortingType(sort_by)
except ValueError as e:
return aiohttp.web.HTTPBadRequest(reason=str(e))
memory_table = self.dashboard_controller.get_memory_table_info(
**kwargs)
return await json_response(self.is_dev, result=memory_table.__dict__())
async def stop_collecting_memory_table_info(self,
req) -> aiohttp.web.Response:
self.dashboard_controller.stop_collecting_memory_table_info()
return await json_response(self.is_dev, result={})
async def tune_info(self, req) -> aiohttp.web.Response:
result = self.dashboard_controller.tune_info()
return await json_response(self.is_dev, result=result)
async def tune_availability(self, req) -> aiohttp.web.Response:
result = self.dashboard_controller.tune_availability()
return await json_response(self.is_dev, result=result)
async def set_tune_experiment(self, req) -> aiohttp.web.Response:
data = await req.json()
error, result = self.dashboard_controller.set_tune_experiment(
data["experiment"])
if error:
return await json_response(self.is_dev, error=error)
return await json_response(self.is_dev, result=result)
async def enable_tune_tensorboard(self, req) -> aiohttp.web.Response:
self.dashboard_controller.enable_tune_tensorboard()
return await json_response(self.is_dev, result={})
async def launch_profiling(self, req) -> aiohttp.web.Response:
node_id = req.query.get("node_id")
pid = int(req.query.get("pid"))
duration = int(req.query.get("duration"))
profiling_id = self.dashboard_controller.launch_profiling(
node_id, pid, duration)
return await json_response(self.is_dev, result=str(profiling_id))
async def check_profiling_status(self, req) -> aiohttp.web.Response:
profiling_id = req.query.get("profiling_id")
status = self.dashboard_controller.check_profiling_status(profiling_id)
return await json_response(self.is_dev, result=status)
async def get_profiling_info(self, req) -> aiohttp.web.Response:
profiling_id = req.query.get("profiling_id")
profiling_info = self.dashboard_controller.get_profiling_info(
profiling_id)
return aiohttp.web.json_response(profiling_info)
async def kill_actor(self, req) -> aiohttp.web.Response:
actor_id = req.query.get("actor_id")
ip_address = req.query.get("ip_address")
port = req.query.get("port")
return await json_response(
self.is_dev,
self.dashboard_controller.kill_actor(actor_id, ip_address, port))
async def logs(self, req) -> aiohttp.web.Response:
hostname = req.query.get("hostname")
pid = req.query.get("pid")
result = self.dashboard_controller.get_logs(hostname, pid)
return await json_response(self.is_dev, result=result)
async def errors(self, req) -> aiohttp.web.Response:
hostname = req.query.get("hostname")
pid = req.query.get("pid")
result = self.dashboard_controller.get_errors(hostname, pid)
return await json_response(self.is_dev, result=result)
class MetricsExportHandler:
def __init__(self,
dashboard_controller: DashboardController,
metrics_export_client: MetricsExportClient,
dashboard_id,
is_dev=False):
assert metrics_export_client is not None
self.metrics_export_client = metrics_export_client
self.dashboard_controller = dashboard_controller
self.is_dev = is_dev
async def enable_export_metrics(self, req) -> aiohttp.web.Response:
if self.metrics_export_client.enabled:
return await json_response(
self.is_dev, result={"url": None}, error="Already enabled")
succeed, error = self.metrics_export_client.start_exporting_metrics()
error_msg = "Failed to enable it. Error: {}".format(error)
if not succeed:
return await json_response(
self.is_dev, result={"url": None}, error=error_msg)
url = self.metrics_export_client.dashboard_url
return await json_response(self.is_dev, result={"url": url})
async def get_dashboard_address(self, req) -> aiohttp.web.Response:
if not self.metrics_export_client.enabled:
return await json_response(
self.is_dev,
result={"url": None},
error="Metrics exporting is not enabled.")
url = self.metrics_export_client.dashboard_url
return await json_response(self.is_dev, result={"url": url})
async def redirect_to_dashboard(self, req) -> aiohttp.web.Response:
if not self.metrics_export_client.enabled:
return await json_response(
self.is_dev,
result={"url": None},
error="You should enable metrics export to use this endpoint.")
raise aiohttp.web.HTTPFound(self.metrics_export_client.dashboard_url)
def setup_metrics_export_routes(app: aiohttp.web.Application,
handler: MetricsExportHandler):
"""Routes that require dynamically changing class attributes."""
app.router.add_get("/api/metrics/enable", handler.enable_export_metrics)
app.router.add_get("/api/metrics/url", handler.get_dashboard_address)
app.router.add_get("/metrics/redirect", handler.redirect_to_dashboard)
def setup_static_dir(app):
build_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "client/build")
if not os.path.isdir(build_dir):
raise OSError(
errno.ENOENT, "Dashboard build directory not found. If installing "
"from source, please follow the additional steps "
"required to build the dashboard"
"(cd python/ray/dashboard/client "
"&& npm ci "
"&& npm run build)", build_dir)
static_dir = os.path.join(build_dir, "static")
app.router.add_static("/static", static_dir)
return build_dir
def setup_speedscope_dir(app, build_dir):
speedscope_dir = os.path.join(build_dir, "speedscope-1.5.3")
app.router.add_static("/speedscope", speedscope_dir)
def setup_dashboard_route(app: aiohttp.web.Application,
handler: BaseDashboardRouteHandler,
index=None,
favicon=None,
ray_config=None,
node_info=None,
raylet_info=None,
tune_info=None,
tune_availability=None,
launch_profiling=None,
check_profiling_status=None,
get_profiling_info=None,
kill_actor=None,
logs=None,
errors=None,
memory_table=None,
stop_memory_table=None):
def add_get_route(route, handler_func):
if route is not None:
app.router.add_get(route, handler_func)
add_get_route(index, handler.get_index)
add_get_route(favicon, handler.get_favicon)
add_get_route(ray_config, handler.ray_config)
add_get_route(node_info, handler.node_info)
add_get_route(raylet_info, handler.raylet_info)
add_get_route(tune_info, handler.tune_info)
add_get_route(tune_availability, handler.tune_availability)
add_get_route(launch_profiling, handler.launch_profiling)
add_get_route(check_profiling_status, handler.check_profiling_status)
add_get_route(get_profiling_info, handler.get_profiling_info)
add_get_route(kill_actor, handler.kill_actor)
add_get_route(logs, handler.logs)
add_get_route(errors, handler.errors)
add_get_route(memory_table, handler.memory_table_info)
add_get_route(stop_memory_table, handler.stop_collecting_memory_table_info)
class Dashboard:
"""A dashboard process for monitoring Ray nodes.
This dashboard is made up of a REST API which collates data published by
Reporter processes on nodes into a json structure, and a webserver
which polls said API for display purposes.
Args:
host(str): Host address of dashboard aiohttp server.
port(str): Port number of dashboard aiohttp server.
redis_address(str): GCS address of a Ray cluster
temp_dir (str): The temporary directory used for log files and
information for this Ray session.
redis_passord(str): Redis password to access GCS
metrics_export_address(str): The address users host their dashboard.
"""
def __init__(self,
host,
port,
redis_address,
temp_dir,
redis_password=None,
metrics_export_address=None):
self.host = host
self.port = port
self.redis_client = ray.services.create_redis_client(
redis_address, password=redis_password)
self.temp_dir = temp_dir
self.dashboard_id = str(uuid.uuid4())
self.dashboard_controller = DashboardController(
redis_address, redis_password)
self.service_discovery = PrometheusServiceDiscoveryWriter(
redis_address, redis_password, temp_dir)
# Setting the environment variable RAY_DASHBOARD_DEV=1 disables some
# security checks in the dashboard server to ease development while
# using the React dev server. Specifically, when this option is set, we
# allow cross-origin requests to be made.
self.is_dev = os.environ.get("RAY_DASHBOARD_DEV") == "1"
self.app = aiohttp.web.Application()
route_handler = DashboardRouteHandler(
self.dashboard_controller, is_dev=self.is_dev)
# Setup Metrics exporting service if necessary.
self.metrics_export_address = metrics_export_address
if self.metrics_export_address:
self._setup_metrics_export()
# Setup Dashboard Routes
build_dir = setup_static_dir(self.app)
setup_speedscope_dir(self.app, build_dir)
setup_dashboard_route(
self.app,
route_handler,
index="/",
favicon="/favicon.ico",
ray_config="/api/ray_config",
node_info="/api/node_info",
raylet_info="/api/raylet_info",
tune_info="/api/tune_info",
tune_availability="/api/tune_availability",
launch_profiling="/api/launch_profiling",
check_profiling_status="/api/check_profiling_status",
get_profiling_info="/api/get_profiling_info",
kill_actor="/api/kill_actor",
logs="/api/logs",
errors="/api/errors",
memory_table="/api/memory_table",
stop_memory_table="/api/stop_memory_table")
self.app.router.add_get("/{_}", route_handler.get_forbidden)
self.app.router.add_post("/api/set_tune_experiment",
route_handler.set_tune_experiment)
self.app.router.add_post("/api/enable_tune_tensorboard",
route_handler.enable_tune_tensorboard)
def _setup_metrics_export(self):
exporter = Exporter(self.dashboard_id, self.metrics_export_address,
self.dashboard_controller)
self.metrics_export_client = MetricsExportClient(
self.metrics_export_address, self.dashboard_controller,
self.dashboard_id, exporter)
# Setup endpoints
metrics_export_handler = MetricsExportHandler(
self.dashboard_controller,
self.metrics_export_client,
self.dashboard_id,
is_dev=self.is_dev)
setup_metrics_export_routes(self.app, metrics_export_handler)
def _start_exporting_metrics(self):
result, error = self.metrics_export_client.start_exporting_metrics()
if not result and error:
url = ray.services.get_webui_url_from_redis(self.redis_client)
error += (" Please reenable the metrics export by going to "
"the url: {}/api/metrics/enable".format(url))
ray.utils.push_error_to_driver_through_redis(
self.redis_client, "metrics export failed", error)
def log_dashboard_url(self):
url = ray.services.get_webui_url_from_redis(self.redis_client)
if url is None:
raise ValueError("WebUI URL is not present in GCS.")
with open(os.path.join(self.temp_dir, "dashboard_url"), "w") as f:
f.write(url)
logger.info("Dashboard running on {}".format(url))
def run(self):
self.log_dashboard_url()
self.dashboard_controller.start_collecting_metrics()
self.service_discovery.start()
if self.metrics_export_address:
self._start_exporting_metrics()
aiohttp.web.run_app(self.app, host=self.host, port=self.port)
class RayletStats(threading.Thread):
def __init__(self, redis_address, redis_password=None):
self.nodes_lock = threading.Lock()
self.nodes = []
self.stubs = {}
self.reporter_stubs = {}
self.redis_client = ray.services.create_redis_client(
redis_address, password=redis_password)
self._raylet_stats_lock = threading.Lock()
self._raylet_stats = {}
self._profiling_stats = {}
self._update_nodes()
self.include_memory_info = False
super().__init__()
def _update_nodes(self):
with self.nodes_lock:
self.nodes = ray.nodes()
node_ids = [node["NodeID"] for node in self.nodes]
# First remove node connections of disconnected nodes.
for node_id in self.stubs.keys():
if node_id not in node_ids:
stub = self.stubs.pop(node_id)
stub.close()
reporter_stub = self.reporter_stubs.pop(node_id)
reporter_stub.close()
# Now add node connections of new nodes.
for node in self.nodes:
node_id = node["NodeID"]
if node_id not in self.stubs:
node_ip = node["NodeManagerAddress"]
channel = grpc.insecure_channel("{}:{}".format(
node_ip, node["NodeManagerPort"]))
stub = node_manager_pb2_grpc.NodeManagerServiceStub(
channel)
self.stubs[node_id] = stub
# Block wait until the reporter for the node starts.
while True:
reporter_port = self.redis_client.get(
"REPORTER_PORT:{}".format(node_ip))
if reporter_port:
break
reporter_channel = grpc.insecure_channel("{}:{}".format(
node_ip, int(reporter_port)))
reporter_stub = reporter_pb2_grpc.ReporterServiceStub(
reporter_channel)
self.reporter_stubs[node_id] = reporter_stub
assert len(self.stubs) == len(
self.reporter_stubs), (self.stubs.keys(),
self.reporter_stubs.keys())
def get_raylet_stats(self):
with self._raylet_stats_lock:
return copy.deepcopy(self._raylet_stats)
def launch_profiling(self, node_id, pid, duration):
profiling_id = str(uuid.uuid4())
def _callback(reply_future):
reply = reply_future.result()
with self._raylet_stats_lock:
self._profiling_stats[profiling_id] = reply
reporter_stub = self.reporter_stubs[node_id]
reply_future = reporter_stub.GetProfilingStats.future(
reporter_pb2.GetProfilingStatsRequest(pid=pid, duration=duration))
reply_future.add_done_callback(_callback)
return profiling_id
def check_profiling_status(self, profiling_id):
with self._raylet_stats_lock:
is_present = profiling_id in self._profiling_stats
if not is_present:
return {"status": "pending"}
reply = self._profiling_stats[profiling_id]
if reply.std_err:
return {"status": "error", "error": reply.std_err}
else:
return {"status": "finished"}
def get_profiling_info(self, profiling_id):
with self._raylet_stats_lock:
profiling_stats = self._profiling_stats.get(profiling_id)
assert profiling_stats, "profiling not finished"
return json.loads(profiling_stats.profiling_stats)
def kill_actor(self, actor_id, ip_address, port):
channel = grpc.insecure_channel("{}:{}".format(ip_address, int(port)))
stub = core_worker_pb2_grpc.CoreWorkerServiceStub(channel)
def _callback(reply_future):
_ = reply_future.result()
reply_future = stub.KillActor.future(
core_worker_pb2.KillActorRequest(
intended_actor_id=ray.utils.hex_to_binary(actor_id)))
reply_future.add_done_callback(_callback)
return {}
def run(self):
counter = 0
while True:
time.sleep(1.0)
replies = {}
try:
for node in self.nodes:
node_id = node["NodeID"]
stub = self.stubs[node_id]
reply = stub.GetNodeStats(
node_manager_pb2.GetNodeStatsRequest(
include_memory_info=self.include_memory_info),
timeout=2)
reply_dict = MessageToDict(reply)
reply_dict["nodeId"] = node_id
replies[node["NodeManagerAddress"]] = reply_dict
with self._raylet_stats_lock:
for address, reply_dict in replies.items():
self._raylet_stats[address] = reply_dict
except Exception:
logger.exception(traceback.format_exc())
finally:
counter += 1
# From time to time, check if new nodes have joined the cluster
# and update self.nodes
if counter % 10:
self._update_nodes()
class TuneCollector(threading.Thread):
"""Initialize collector worker thread.
Args
logdir (str): Directory path to save the status information of
jobs and trials.
reload_interval (float): Interval(in s) of space between loading
data from logs
"""
def __init__(self, reload_interval):
self._logdir = None
self._trial_records = {}
self._data_lock = threading.Lock()
self._reload_interval = reload_interval
self._trials_available = False
self._tensor_board_dir = ""
self._enable_tensor_board = False
self._errors = {}
super().__init__()
def get_stats(self):
with self._data_lock:
tensor_board_info = {
"tensorboard_current": self._logdir == self._tensor_board_dir,
"tensorboard_enabled": self._tensor_board_dir != ""
}
return {
"trial_records": copy.deepcopy(self._trial_records),
"errors": copy.deepcopy(self._errors),
"tensorboard": tensor_board_info
}
def set_experiment(self, experiment):
with self._data_lock:
if os.path.isdir(os.path.expanduser(experiment)):
self._logdir = os.path.expanduser(experiment)
return None, {"experiment": self._logdir}
else:
return "Not a Valid Directory", None
def enable_tensorboard(self):
with self._data_lock:
if not self._tensor_board_dir:
tb = program.TensorBoard()
tb.configure(argv=[None, "--logdir", str(self._logdir)])
tb.launch()
self._tensor_board_dir = self._logdir
def get_availability(self):
with self._data_lock:
return {
"available": True,
"trials_available": self._trials_available
}
def run(self):
while True:
with self._data_lock:
self.collect()
time.sleep(self._reload_interval)
def collect_errors(self, df):
sub_dirs = os.listdir(self._logdir)
trial_names = filter(
lambda d: os.path.isdir(os.path.join(self._logdir, d)), sub_dirs)
for trial in trial_names:
error_path = os.path.join(self._logdir, trial, "error.txt")
if os.path.isfile(error_path):
self._trials_available = True
with open(error_path) as f:
text = f.read()
self._errors[str(trial)] = {
"text": text,
"job_id": os.path.basename(self._logdir),
"trial_id": "No Trial ID"
}
other_data = df[df["logdir"].str.contains(trial)]
if len(other_data) > 0:
trial_id = other_data["trial_id"].values[0]
self._errors[str(trial)]["trial_id"] = str(trial_id)
if str(trial_id) in self._trial_records.keys():
self._trial_records[str(trial_id)]["error"] = text
self._trial_records[str(trial_id)][
"status"] = "ERROR"
def collect(self):
"""
Collects and cleans data on the running Tune experiment from the
Tune logs so that users can see this information in the front-end
client
"""
self._trial_records = {}
self._errors = {}
if not self._logdir:
return
# search through all the sub_directories in log directory
analysis = Analysis(str(self._logdir))
df = analysis.dataframe(metric="episode_reward_mean", mode="max")
if len(df) == 0 or "trial_id" not in df.columns:
return
self._trials_available = True
# make sure that data will convert to JSON without error
df["trial_id_key"] = df["trial_id"].astype(str)
df = df.fillna(0)
trial_ids = df["trial_id"]
for i, value in df["trial_id"].iteritems():
if type(value) != str and type(value) != int:
trial_ids[i] = int(value)
df["trial_id"] = trial_ids
# convert df to python dict
df = df.set_index("trial_id_key")
trial_data = df.to_dict(orient="index")
# clean data and update class attribute
if len(trial_data) > 0:
trial_data = self.clean_trials(trial_data)
self._trial_records.update(trial_data)
self.collect_errors(df)
def clean_trials(self, trial_details):
first_trial = trial_details[list(trial_details.keys())[0]]
config_keys = []
float_keys = []
metric_keys = []
# list of static attributes for trial
default_names = [
"logdir", "time_this_iter_s", "done", "episodes_total",
"training_iteration", "timestamp", "timesteps_total",
"experiment_id", "date", "timestamp", "time_total_s", "pid",
"hostname", "node_ip", "time_since_restore",
"timesteps_since_restore", "iterations_since_restore",
"experiment_tag", "trial_id"
]
# filter attributes into floats, metrics, and config variables
for key, value in first_trial.items():
if isinstance(value, float):
float_keys.append(key)
if str(key).startswith("config/"):
config_keys.append(key)
elif key not in default_names:
metric_keys.append(key)
# clean data into a form that front-end client can handle
for trial, details in trial_details.items():
ts = os.path.getctime(details["logdir"])
formatted_time = datetime.datetime.fromtimestamp(ts).strftime(
"%Y-%m-%d %H:%M:%S")
details["start_time"] = formatted_time
details["params"] = {}
details["metrics"] = {}
# round all floats
for key in float_keys:
details[key] = round(details[key], 12)
# group together config attributes
for key in config_keys:
new_name = key[7:]
details["params"][new_name] = details[key]
details.pop(key)
# group together metric attributes
for key in metric_keys:
details["metrics"][key] = details[key]
details.pop(key)
if details["done"]:
details["status"] = "TERMINATED"
else:
details["status"] = "RUNNING"
details.pop("done")
details["job_id"] = os.path.basename(self._logdir)
details["error"] = "No Error"
return trial_details
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=("Parse Redis server for the "
"dashboard to connect to."))
parser.add_argument(
"--host",
required=True,
type=str,
help="The host to use for the HTTP server.")
parser.add_argument(
"--port",
required=True,
type=int,
help="The port to use for the HTTP server.")
parser.add_argument(
"--redis-address",
required=True,
type=str,
help="The address to use for Redis.")
parser.add_argument(
"--redis-password",
required=False,
type=str,
default=None,
help="the password to use for Redis")
parser.add_argument(
"--logging-level",
required=False,
type=str,
default=ray_constants.LOGGER_LEVEL,
choices=ray_constants.LOGGER_LEVEL_CHOICES,
help=ray_constants.LOGGER_LEVEL_HELP)
parser.add_argument(
"--logging-format",
required=False,
type=str,
default=ray_constants.LOGGER_FORMAT,
help=ray_constants.LOGGER_FORMAT_HELP)
parser.add_argument(
"--temp-dir",
required=False,
type=str,
default=None,
help="Specify the path of the temporary directory use by Ray process.")
args = parser.parse_args()
ray.utils.setup_logger(args.logging_level, args.logging_format)
# TODO(sang): Add a URL validation.
metrics_export_address = os.environ.get("METRICS_EXPORT_ADDRESS")
try:
dashboard = Dashboard(
args.host,
args.port,
args.redis_address,
args.temp_dir,
redis_password=args.redis_password,
metrics_export_address=metrics_export_address)
dashboard.run()
except Exception as e:
# Something went wrong, so push an error to all drivers.
redis_client = ray.services.create_redis_client(
args.redis_address, password=args.redis_password)
traceback_str = ray.utils.format_error_message(traceback.format_exc())
message = ("The dashboard on node {} failed with the following "
"error:\n{}".format(platform.node(), traceback_str))
ray.utils.push_error_to_driver_through_redis(
redis_client, ray_constants.DASHBOARD_DIED_ERROR, message)
if isinstance(e, OSError) and e.errno == errno.ENOENT:
logger.warning(message)
else:
raise e
| apache-2.0 | -6,620,654,547,215,299,000 | 37.616016 | 79 | 0.58476 | false |
crmorse/weewx-waterflow | bin/weedb/mysql.py | 1 | 9153 | #
# Copyright (c) 2012 Tom Keffer <[email protected]>
#
# See the file LICENSE.txt for your full rights.
#
# $Revision$
# $Author$
# $Date$
#
"""Driver for the MySQL database"""
import decimal
import MySQLdb
import _mysql_exceptions
from weeutil.weeutil import to_bool
import weedb
def connect(host='localhost', user='', password='', database='', driver='', **kwargs):
"""Connect to the specified database"""
return Connection(host=host, user=user, password=password, database=database, **kwargs)
def create(host='localhost', user='', password='', database='', driver='', **kwargs):
"""Create the specified database. If it already exists,
an exception of type weedb.DatabaseExists will be thrown."""
# Open up a connection w/o specifying the database.
try:
connect = MySQLdb.connect(host = host,
user = user,
passwd = password, **kwargs)
cursor = connect.cursor()
# An exception will get thrown if the database already exists.
try:
# Now create the database.
cursor.execute("CREATE DATABASE %s" % (database,))
except _mysql_exceptions.ProgrammingError:
# The database already exists. Change the type of exception.
raise weedb.DatabaseExists("Database %s already exists" % (database,))
finally:
cursor.close()
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
def drop(host='localhost', user='', password='', database='', driver='', **kwargs):
"""Drop (delete) the specified database."""
# Open up a connection
try:
connect = MySQLdb.connect(host = host,
user = user,
passwd = password, **kwargs)
cursor = connect.cursor()
try:
cursor.execute("DROP DATABASE %s" % database)
except _mysql_exceptions.OperationalError:
raise weedb.NoDatabase("""Attempt to drop non-existent database %s""" % (database,))
finally:
cursor.close()
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
class Connection(weedb.Connection):
"""A wrapper around a MySQL connection object."""
def __init__(self, host='localhost', user='', password='', database='', **kwargs):
"""Initialize an instance of Connection.
Parameters:
host: IP or hostname with the mysql database (required)
user: User name (required)
password: The password for the username (required)
database: The database to be used. (required)
kwargs: Any extra arguments you may wish to pass on to MySQL (optional)
If the operation fails, an exception of type weedb.OperationalError will be raised.
"""
try:
connection = MySQLdb.connect(host=host, user=user, passwd=password, db=database, **kwargs)
except _mysql_exceptions.OperationalError, e:
# The MySQL driver does not include the database in the
# exception information. Tack it on, in case it might be useful.
raise weedb.OperationalError(str(e) + " while opening database '%s'" % (database,))
weedb.Connection.__init__(self, connection, database, 'mysql')
# Allowing threads other than the main thread to see any transactions
# seems to require an isolation level of READ UNCOMMITTED.
self.query("SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED")
def cursor(self):
"""Return a cursor object."""
# The implementation of the MySQLdb cursor is lame enough that we are
# obliged to include a wrapper around it:
return Cursor(self)
def tables(self):
"""Returns a list of tables in the database."""
table_list = list()
try:
# Get a cursor directly from MySQL
cursor = self.connection.cursor()
cursor.execute("""SHOW TABLES;""")
while True:
row = cursor.fetchone()
if row is None: break
# Extract the table name. In case it's in unicode, convert to a regular string.
table_list.append(str(row[0]))
finally:
cursor.close()
return table_list
def genSchemaOf(self, table):
"""Return a summary of the schema of the specified table.
If the table does not exist, an exception of type weedb.OperationalError is raised."""
try:
# Get a cursor directly from MySQL:
cursor = self.connection.cursor()
# MySQL throws an exception if you try to show the columns of a
# non-existing table
try:
cursor.execute("""SHOW COLUMNS IN %s;""" % table)
except _mysql_exceptions.ProgrammingError, e:
# Table does not exist. Change the exception type:
raise weedb.OperationalError(e)
irow = 0
while True:
row = cursor.fetchone()
if row is None: break
# Append this column to the list of columns.
colname = str(row[0])
if row[1].upper()=='DOUBLE':
coltype = 'REAL'
elif row[1].upper().startswith('INT'):
coltype = 'INTEGER'
elif row[1].upper().startswith('CHAR'):
coltype = 'STR'
else:
coltype = str(row[1]).upper()
is_primary = True if row[3] == 'PRI' else False
yield (irow, colname, coltype, to_bool(row[2]), row[4], is_primary)
irow += 1
finally:
cursor.close()
def columnsOf(self, table):
"""Return a list of columns in the specified table.
If the table does not exist, an exception of type weedb.OperationalError is raised."""
column_list = [row[1] for row in self.genSchemaOf(table)]
return column_list
def begin(self):
"""Begin a transaction."""
self.query("START TRANSACTION")
def commit(self):
try:
weedb.Connection.commit(self)
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
def rollback(self):
try:
weedb.Connection.rollback(self)
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
def query(self, *args, **kwargs):
try:
self.connection.query(*args, **kwargs)
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
class Cursor(object):
"""A wrapper around the MySQLdb cursor object"""
def __init__(self, connection):
"""Initialize a Cursor from a connection.
connection: An instance of db.mysql.Connection"""
# Get the MySQLdb cursor and store it internally:
self.cursor = connection.connection.cursor()
def execute(self, sql_string, sql_tuple=() ):
"""Execute a SQL statement on the MySQL server.
sql_string: A SQL statement to be executed. It should use ? as
a placeholder.
sql_tuple: A tuple with the values to be used in the placeholders."""
# MySQL uses '%s' as placeholders, so replace the ?'s with %s
mysql_string = sql_string.replace('?','%s')
try:
# Convert sql_tuple to a plain old tuple, just in case it actually
# derives from tuple, but overrides the string conversion (as is the
# case with a TimeSpan object):
self.cursor.execute(mysql_string, tuple(sql_tuple))
except (_mysql_exceptions.OperationalError, _mysql_exceptions.ProgrammingError), e:
raise weedb.OperationalError(e)
return self
def fetchone(self):
# Get a result from the MySQL cursor, then run it through the massage
# filter below
return massage(self.cursor.fetchone())
def close(self):
try:
self.cursor.close()
del self.cursor
except:
pass
#
# Supplying functions __iter__ and next allows the cursor to be used as an iterator.
#
def __iter__(self):
return self
def next(self):
result = self.fetchone()
if result is None:
raise StopIteration
return result
#
# This is a utility function for converting a result set that might contain
# longs or decimal.Decimals (which MySQLdb uses) to something containing just ints.
#
def massage(seq):
# Return the massaged sequence if it exists, otherwise, return None
if seq is not None:
return [int(i) if isinstance(i, long) or isinstance(i,decimal.Decimal) else i for i in seq]
| gpl-3.0 | 4,721,289,113,010,788,000 | 36.979253 | 102 | 0.579919 | false |
papaloizouc/peacehack | peacehack/theapp/migrations/0001_initial.py | 1 | 5492 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='CrazyObject',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ActionGeo_ADM1Code', models.CharField(max_length=10, null=True, blank=True)),
('ActionGeo_CountryCode', models.CharField(max_length=4, null=True, blank=True)),
('ActionGeo_FeatureID', models.CharField(max_length=4, null=True, blank=True)),
('ActionGeo_FullName', models.CharField(max_length=200, null=True, blank=True)),
('ActionGeo_Lat', models.CharField(max_length=4, null=True, blank=True)),
('ActionGeo_Long', models.TextField(null=True, blank=True)),
('ActionGeo_Type', models.TextField(null=True, blank=True)),
('Actor1Code', models.TextField(null=True, blank=True)),
('Actor1CountryCode', models.TextField(null=True, blank=True)),
('Actor1EthnicCode', models.TextField(null=True, blank=True)),
('Actor1Geo_ADM1Code', models.TextField(null=True, blank=True)),
('Actor1Geo_CountryCode', models.IntegerField(null=True, blank=True)),
('Actor1Geo_FeatureID', models.IntegerField(null=True, blank=True)),
('Actor1Geo_FullName', models.TextField(null=True, blank=True)),
('Actor1Geo_Lat', models.TextField(null=True, blank=True)),
('Actor1Geo_Long', models.TextField(null=True, blank=True)),
('Actor1Geo_Type', models.IntegerField(null=True, blank=True)),
('Actor1KnownGroupCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Name', models.TextField(null=True, blank=True)),
('Actor1Religion1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Religion2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Type1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Type2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Type3Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2CountryCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor2EthnicCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Geo_ADM1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Geo_CountryCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Geo_FeatureID', models.IntegerField(null=True, blank=True)),
('Actor2Geo_FullName', models.TextField(null=True, blank=True)),
('Actor2Geo_Lat', models.TextField(null=True, blank=True)),
('Actor2Geo_Long', models.TextField(null=True, blank=True)),
('Actor2Geo_Type', models.IntegerField(null=True, blank=True)),
('Actor2KnownGroupCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Name', models.TextField(null=True, blank=True)),
('Actor2Religion1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Religion2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Type1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Type2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Type3Code', models.CharField(max_length=4, null=True, blank=True)),
('AvgTone', models.TextField(null=True, blank=True)),
('DATEADDED', models.IntegerField(null=True, blank=True)),
('EventBaseCode', models.IntegerField(null=True, blank=True)),
('EventCode', models.IntegerField(null=True, blank=True)),
('EventRootCode', models.IntegerField(null=True, blank=True)),
('FractionDate', models.TextField(null=True, blank=True)),
('GLOBALEVENTID', models.IntegerField(null=True, blank=True)),
('GoldsteinScale', models.TextField(null=True, blank=True)),
('IsRootEvent', models.IntegerField(null=True, blank=True)),
('MonthYear', models.IntegerField(null=True, blank=True)),
('NumArticles', models.IntegerField(null=True, blank=True)),
('NumMentions', models.IntegerField(null=True, blank=True)),
('NumSources', models.IntegerField(null=True, blank=True)),
('QuadClass', models.IntegerField(null=True, blank=True)),
('SOURCEURL', models.TextField(null=True, blank=True)),
('SQLDATE', models.IntegerField(null=True, blank=True)),
('Year', models.IntegerField(null=True, blank=True)),
('Day', models.IntegerField(null=True, blank=True)),
('Month', models.IntegerField(null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
]
| gpl-2.0 | 5,515,570,037,667,942,000 | 65.97561 | 114 | 0.599417 | false |
ashishtilokani/Cloaking-Detection-Tool | googleBot/googleBot/spiders/scrape2.py | 1 | 1236 | from scrapy.selector import HtmlXPathSelector
from scrapy.spider import Spider
import html2text
import re
import os.path
class scrape(Spider):
name = "googleBot2"
start_urls = []
with open('/home/ashish/Desktop/CloakingDetectionTool/url.txt','r') as f:
for line in f:
l=line.replace("/", "_")
try:
f=open('/home/ashish/Desktop/CloakingDetectionTool/c2/'+ l + '.txt','r')
f.close()
except:
start_urls.append(line)
def parse(self, response):
regex = re.compile('[^A-Za-z0-9_]')
#First parameter is the replacement, second parameter is your input string
d={}
l=(response.url).replace("/", "_")
f=open('/home/ashish/Desktop/CloakingDetectionTool/c2/'+ l + '.txt','w')
terms=[]
terms = (response.body).split()
c=0
for word in terms:
word=regex.sub('', word)
if word not in d:
d[word]=1
f.write(word)
f.write(' ')
c=1
if c==0: #empty
f.write(' ')
f.write('\n')
f.close()
| mit | -6,094,454,040,259,034,000 | 29.146341 | 88 | 0.486246 | false |
mprinc/McMap | src/scripts/CSN_Archive/check_object_names.py | 1 | 4677 | #!/usr/bin/env python
# Copyright (c) 2015, Scott D. Peckham
#------------------------------------------------------
# S.D. Peckham
# July 9, 2015
#
# Tool to extract the object part of every CSDMS Standard
# Variable Name and generate a list of objects that
# includes those as well as all parent objects.
#
# Example of use at a Unix prompt:
#
# % ./check_object_names.py CSN_VarNames_v0.82.txt
#------------------------------------------------------
#
# Functions:
# check_objects()
#
#------------------------------------------------------
import os.path
import sys
#------------------------------------------------------
def check_objects( in_file='CSN_VarNames_v0.82.txt' ):
#--------------------------------------------------
# Open input file that contains copied names table
#--------------------------------------------------
try:
in_unit = open( in_file, 'r' )
except:
print 'SORRY: Could not open TXT file named:'
print ' ' + in_file
#-------------------------
# Open new CSV text file
#-------------------------
## pos = in_file.rfind('.')
## prefix = in_file[0:pos]
## out_file = prefix + '.ttl'
out_file = 'All_Object_Names.txt'
#-------------------------------------------
OUT_EXISTS = os.path.exists( out_file )
if (OUT_EXISTS):
print 'SORRY, A text file with the name'
print ' ' + out_file
print ' already exists.'
return
out_unit = open( out_file, 'w' )
#---------------------------
# Parse all variable names
#---------------------------
n_objects = 0
object_list1 = list()
object_list2 = list()
while (True):
#------------------------------
# Read data line from in_file
#------------------------------
line = in_unit.readline()
if (line == ''):
break
#--------------------------------------------------
# Write object and quantity fullnames to TTL file
#--------------------------------------------------
line = line.strip() # (strip leading/trailing white space)
main_parts = line.split('__')
object_fullname = main_parts[0]
# quantity_fullname = main_parts[1]
#------------------------------------
# Append object name to object_list
#------------------------------------
object_list1.append( object_fullname )
object_list2.append( object_fullname )
#------------------------------------------------
# Append all parent object names to object_list
#------------------------------------------------
object_name = object_fullname
while (True):
pos = object_name.rfind('_')
if (pos < 0):
break
object_name = object_name[:pos]
object_list2.append( object_name )
#---------------------------------------------
# Create sorted lists of unique object names
# Not fastest method, but simple.
#---------------------------------------------
old_list = sorted( set(object_list1) )
new_list = sorted( set(object_list2) )
n_objects1 = len( old_list )
n_objects2 = len( new_list )
#--------------------------------------------
# Write complete object list to output file
#--------------------------------------------
for k in xrange( n_objects2 ):
out_unit.write( new_list[k] + '\n' )
#----------------------
# Close the input file
#----------------------
in_unit.close()
#----------------------------
# Close the TXT output file
#----------------------------
out_unit.close()
print 'Finished checking all object names.'
print 'Number of old object names =', n_objects1, '.'
print 'Number of new object names =', n_objects2, '.'
print ' '
# check_objects()
#------------------------------------------------------
if (__name__ == "__main__"):
#-----------------------------------------------------
# Note: First arg in sys.argv is the command itself.
#-----------------------------------------------------
n_args = len(sys.argv)
if (n_args < 2):
print 'ERROR: This tool requires an input'
print ' text file argument.'
print 'sys.argv =', sys.argv
print ' '
elif (n_args == 2):
check_objects( sys.argv[1] )
else:
print 'ERROR: Invalid number of arguments.'
#-----------------------------------------------------------------------
| mit | -1,015,613,779,617,027,700 | 32.407143 | 94 | 0.383579 | false |
PrFalken/exaproxy | lib/exaproxy/icap/response.py | 1 | 2403 |
class ICAPResponse (object):
def __init__ (self, version, code, status, headers, icap_header, http_header):
self.version = version
self.code = code
self.status = status
self.headers = headers
icap_len = len(icap_header)
http_len = len(http_header)
icap_end = icap_len
if http_header:
http_len_string = '%x\n' % http_len
http_string = http_len_string + http_header + '0\n'
http_offset = icap_end + len(http_len_string)
http_end = http_offset + http_len
else:
http_string = http_header
http_offset = icap_end
http_end = icap_end
self.response_view = memoryview(icap_header + http_string)
self.icap_view = self.response_view[:icap_end]
self.http_view = self.response_view[http_offset:http_end]
@property
def response_string (self):
return self.response_view.tobytes()
@property
def icap_header (self):
return self.icap_view.tobytes()
@property
def http_header (self):
return self.http_view.tobytes()
@property
def pragma (self):
return self.headers.get('pragma', {})
@property
def is_permit (self):
return False
@property
def is_modify (self):
return False
@property
def is_content (self):
return False
@property
def is_intercept (self):
return False
class ICAPRequestModification (ICAPResponse):
def __init__ (self, version, code, status, headers, icap_header, http_header, intercept_header=None):
ICAPResponse.__init__(self, version, code, status, headers, icap_header, http_header)
self.intercept_header = intercept_header
@property
def is_permit (self):
return self.code == 304
@property
def is_modify (self):
return self.code == 200 and self.intercept_header is None
@property
def is_intercept (self):
return self.code == 200 and self.intercept_header is not None
class ICAPResponseModification (ICAPResponse):
@property
def is_content (self):
return self.code == 200
class ICAPResponseFactory:
def __init__ (self, configuration):
self.configuration = configuration
def create (self, version, code, status, headers, icap_header, request_header, response_header, intercept_header=None):
if response_header:
response = ICAPResponseModification(version, code, status, headers, icap_header, response_header)
else:
response = ICAPRequestModification(version, code, status, headers, icap_header, request_header, intercept_header=intercept_header)
return response
| bsd-2-clause | -2,532,343,003,033,045,000 | 23.520408 | 133 | 0.714524 | false |
burntcustard/DeskBot-Zero | neural-net/keras/neuralNet.py | 1 | 4088 | '''
How to run:
$ source ~/Documents/tensorflow/bin/activate
$ cd Documents/DeskBot-Zero/neural-net/keras
$ python neuralNet.py
Heavily based on:
https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py
'''
import os
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import deskBotData
batch_size = 100
epochs = 100
data_augmentation = True
num_predictions = 20
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'keras_deskbot_distance_trained_model.h5'
# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test), num_classes = deskBotData.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print(num_classes, '(potential) classes')
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# initiate RMSprop optimizer
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
if not data_augmentation:
print('Not using data augmentation.')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True)
else:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
# This should be the main differnce between rotation and distance training:
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0, # randomly shift images horizontally (fraction of width)
height_shift_range=0, # randomly shift images vertically (fraction of height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# Compute quantities required for feature-wise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train,
batch_size=batch_size),
epochs=epochs,
validation_data=(x_test, y_test),
workers=4)
# Save model and weights
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
| mit | -4,739,964,433,974,083,000 | 33.940171 | 86 | 0.687622 | false |
yoe/veyepar | dj/scripts/enc.py | 1 | 23477 | #!/usr/bin/python
"""
assembles raw cuts into final, titles, tweaks audio, encodes to format for upload.
"""
import re
import os
import sys
import subprocess
import xml.etree.ElementTree
from mk_mlt import mk_mlt
import pprint
from process import process
from main.models import Client, Show, Location, Episode, Raw_File, Cut_List
class enc(process):
ready_state = 2
def mk_title_svg(self, raw_svg, texts):
"""
Make a title slide by filling in a pre-made svg with name/authors.
return: svg
"""
tree = xml.etree.ElementTree.XMLID(raw_svg)
for key in texts:
if self.options.verbose:
print("looking for:", key)
# tollerate template where tokens have been removed
if key in tree[1]:
if key == "license":
# CC license image
if self.options.verbose:
print("found in svg:", tree[1][key])
print("replacing with:", texts[key])
t = tree[1][key]
# import code; code.interact(local=locals())
if texts[key] is None:
# del(tree[1][key])
# print tree[1].has_key(key)
tree[1][key].clear()
else:
t.set('{http://www.w3.org/1999/xlink}href', texts[key])
elif key == "date":
if self.options.verbose:
print("found in svg:", tree[1][key].text)
print("replacing with:", re.split(',',texts[key])[0]) # .encode()
tree[1][key].text = re.split(',',texts[key])[0]
else:
if self.options.verbose:
print("found in svg:", tree[1][key].text)
print("replacing with:", texts[key]) # .encode()
tree[1][key].text = texts[key]
# cooked_svg = xml.etree.ElementTree.tostring(tree[0])
# print "testing...", "license" in cooked_svg
if 'presenternames' in tree[1]:
# some people like to add spiffy text near the presenter name(s)
if texts['authors']:
# prefix = u"Featuring" if "," in texts['authors'] else "By"
# tree[1]['presenternames'].text=u"%s %s" % (prefix,texts['authors'])
tree[1]['presenternames'].text = texts['authors']
else:
# remove the text (there is a placholder to make editing sane)
tree[1]['presenternames'].text = ""
cooked_svg = xml.etree.ElementTree.tostring(tree[0]).decode('ascii')
return cooked_svg
def get_title_text(self, episode):
# lets try putting (stuff) on a new line
title = episode.name
authors = episode.authors
if episode.show.slug == 'write_docs_na_2016':
title = title.upper()
authors = authors.upper()
if False and episode.show.slug != 'pygotham_2015' and len(title) > 80: # crazy long titles need all the lines
title2 = ''
elif ": " in title: # the space keeps 9:00 from breaking
pos = title.index(":") + 1
title, title2 = title[:pos], title[pos:].strip()
elif " - " in title:
# error if there is more than 1.
title, title2 = title.split(' - ')
elif " -- " in title:
# error if there is more than 1.
title, title2 = title.split(' -- ')
elif " (" in title:
pos = title.index(" (")
# +1 skip space in " ("
title, title2 = title[:pos], title[pos + 1:]
elif " using " in title:
pos = title.index(" using ")
title, title2 = title[:pos], title[pos + 1:]
elif ";" in title:
pos = title.index(";") + 1
title, title2 = title[:pos], title[pos:].strip()
elif "? " in title: # ?(space) to not break on 'can you?'
pos = title.index("?") + 1
title, title2 = title[:pos], title[pos:].strip()
elif ". " in title:
pos = title.index(". ") + 1
title, title2 = title[:pos], title[pos:].strip()
else:
title2 = ""
if episode.license:
license = "cc/{}.svg".format(episode.license.lower())
else:
license = None
if episode.tags:
tags = episode.tags.split(',')
tag1 = tags[0]
else:
tags = []
tag1 = ''
"""
# split authors over two objects
# breaking on comma, not space.
if ',' in authors:
authors = authors.split(', ')
author2 = ', '.join(authors[1:])
authors = authors[0].strip()
else:
author2 = ''
"""
author2 = ''
date = episode.start.strftime("%B %-d, %Y")
# DebConf style
# date = episode.start.strftime("%Y-%m-%-d")
texts = {
'client': episode.show.client.name,
'show': episode.show.name,
'title': title,
'title2': title2,
'tag1': tag1,
'authors': authors,
'author2': author2,
'presentertitle': "",
'twitter_id': episode.twitter_id,
'date': date,
'time': episode.start.strftime("%H:%M"),
'license': license,
'room': episode.location.name,
}
return texts
def svg2png(self, svg_name, png_name, episode):
"""
Make a title slide png file.
melt uses librsvg which doesn't support flow,
wich is needed for long titles, so render it to a .png using inkscape
"""
# create png file
# inkscape does not return an error code on failure
# so clean up previous run and
# check for the existance of a new png
if os.path.exists(png_name):
os.remove(png_name)
cmd = ["inkscape", svg_name,
"--export-png", png_name,
# "--export-width", "720",
]
ret = self.run_cmds(episode, [cmd])
ret = os.path.exists(png_name)
# if self.options.verbose: print cooked_svg
if self.options.verbose:
print(png_name)
if not ret:
print("svg:", svg_name)
png_name = None
return png_name
def mk_title(self, episode):
# make a title slide
# if we find titles/custom/(slug).svg, use that
# else make one from the tempalte
custom_svg_name = os.path.join( "..",
"custom", "titles", episode.slug + ".svg")
if self.options.verbose: print("custom:", custom_svg_name)
abs_path = os.path.join( self.show_dir, "tmp", custom_svg_name )
if os.path.exists(abs_path):
# cooked_svg_name = custom_svg_name
cooked_svg_name = abs_path
else:
svg_name = episode.show.client.title_svg
print(svg_name)
template = os.path.join(
os.path.split(os.path.abspath(__file__))[0],
"bling",
svg_name)
raw_svg = open(template).read()
# happy_filename = episode.slug.encode('utf-8')
happy_filename = episode.slug
# happy_filename = ''.join([c for c in happy_filename if c.isalpha()])
# title_base = os.path.join(self.show_dir, "titles", happy_filename)
title_base = os.path.join("..", "titles", happy_filename)
texts = self.get_title_text(episode)
cooked_svg = self.mk_title_svg(raw_svg, texts)
# save svg to a file
# strip 'broken' chars because inkscape can't handle the truth
# output_base=''.join([ c for c in output_base if c.isalpha()])
# output_base=''.join([ c for c in output_base if ord(c)<128])
# output_base=output_base.encode('utf-8','ignore')
cooked_svg_name = os.path.join(
self.show_dir, "titles", '{}.svg'.format(episode.slug))
open(cooked_svg_name, 'w').write(cooked_svg)
png_name = os.path.join( "..",
"titles", '{}.png'.format(episode.slug))
abs_path = os.path.join( self.show_dir, "tmp", png_name )
title_img = self.svg2png(cooked_svg_name, abs_path, episode)
if title_img is None:
print("missing title png")
return False
return png_name
def get_params(self, episode, rfs, cls):
"""
assemble a dict of params to send to mk_mlt
mlt template, title screen image,
filter parameters (currently just audio)
and cutlist+raw filenames
"""
def get_title(episode):
# if we find show_dir/custom/titles/(slug).svg, use that
# else make one from the tempalte
custom_png_name = os.path.join(
self.show_dir, "custom", "titles", episode.slug + ".png")
print("custom:", custom_png_name)
if os.path.exists(custom_png_name):
title_img = custom_png_name
else:
title_img = self.mk_title(episode)
return title_img
def get_foot(episode):
credits_img = episode.show.client.credits
credits_pathname = os.path.join("..", "assets", credits_img )
return credits_pathname
def get_clips(rfs, ep):
"""
return list of possible input files
this may get the files and store them localy.
start/end segments are under get_cuts.
ps. this is not used for encoding,
just shows in ShotCut for easy dragging onto the timeline.
"""
clips = []
for rf in rfs:
clip = {'id': rf.id }
# if rf.filename.startswith('\\'):
# rawpathname = rf.filename
# else:
raw_pathname = os.path.join( "../dv",
rf.location.slug, rf.filename)
# self.episode_dir, rf.filename)
# check for missing input file
# typically due to incorrect fs mount
abs_path = os.path.join(
self.show_dir, "tmp", raw_pathname)
if not os.path.exists(abs_path):
print(( 'raw_pathname not found: "{}"'.format(
abs_path)))
return False
clip['filename']=raw_pathname
# trim start/end based on episode start/end
if rf.start < ep.start < rf.end:
# if the ep start falls durring this clip,
# trim it
d = ep.start - rf.start
clip['in']="00:00:{}".format(d.total_seconds())
else:
clip['in']=None
# if "mkv" in rf.filename:
# import code; code.interact(local=locals())
if rf.start < ep.end < rf.end:
# if the ep end falls durring this clip,
d = ep.end - rf.start
clip['out']="00:00:{}".format(d.total_seconds())
else:
clip['out']=None
pprint.pprint(clip)
clips.append(clip)
return clips
def get_cuts(cls):
"""
gets the list of cuts.
input file, start, end, filters
ps, does not reference the clips above.
"""
def hms_to_clock(hms):
"""
Converts what media players show h:m:s
to the mlt time format h:m:s.s
for more on this:
http://mltframework.blogspot.com/2012/04/time-properties.html
"""
if not hms:
return None
if ":" not in hms:
hms = "0:" + hms
if "." not in hms:
hms = hms + ".0"
return hms
cuts = []
for cl in cls:
cut = {}
cut['id'] = cl.id
rawpathname = os.path.join( "../dv",
cl.raw_file.location.slug, cl.raw_file.filename)
# self.episode_dir, cl.raw_file.filename)
# print(rawpathname)
cut['filename'] = rawpathname
# set start/end on the clips if they are set in the db
# else None
cut['in']=hms_to_clock(cl.start)
cut['out']=hms_to_clock(cl.end)
cut['length'] = cl.duration()
if cl.episode.channelcopy:
cut['channelcopy'] = cl.episode.channelcopy
else:
cut['channelcopy']='01'
if cl.episode.normalise:
cut['normalize'] = cl.episode.normalise
else:
cut['normalize']='-12.0'
cut['video_delay']='0.0'
cuts.append(cut)
return cuts
params = {}
params['title_img'] = get_title(episode)
params['foot_img'] = get_foot(episode)
params['clips'] = get_clips(rfs, episode)
params['cuts'] = get_cuts(cls)
return params
def enc_all(self, mlt_pathname, episode):
def enc_one(ext):
out_pathname = os.path.join(
self.show_dir, ext, "%s.%s" % (episode.slug, ext))
if ext == 'webm':
parms = {
'dv_format': self.options.dv_format,
'mlt': mlt_pathname,
'out': out_pathname,
'threads': self.options.threads,
'test': '',
}
# cmds=["melt %s -profile dv_ntsc -consumer avformat:%s progress=1 acodec=libvorbis ab=128k ar=44100 vcodec=libvpx minrate=0 b=600k aspect=@4/3 maxrate=1800k g=120 qmax=42 qmin=10"% (mlt_pathname,out_pathname,)]
cmds = [
"melt -profile %(dv_format)s %(mlt)s force_aspect_ratio=@64/45 -consumer avformat:%(out)s progress=1 threads=0 ab=256k vb=2000k quality=good deadline=good deinterlace=1 deinterlace_method=yadif" % parms]
if ext == 'flv':
cmds = [
"melt %(mlt)s -progress -profile %(dv_format)s -consumer avformat:%(out)s progressive=1 acodec=libfaac ab=96k ar=44100 vcodec=libx264 b=110k vpre=/usr/share/ffmpeg/libx264-hq.ffpreset" % parms]
if ext == 'flac':
# 16kHz/mono
cmds = ["melt -verbose -progress %s -consumer avformat:%s ar=16000" %
(mlt_pathname, out_pathname)]
if ext == 'mp3':
cmds = ["melt -verbose -progress %s -consumer avformat:%s" %
(mlt_pathname, out_pathname)]
if ext == 'mp4':
# High Quality Master 720x480 NTSC
parms = {
'dv_format': self.options.dv_format,
'mlt': mlt_pathname,
'out': out_pathname,
'threads': self.options.threads,
'test': '',
}
cmd = "melt -verbose -progress "\
"-profile %(dv_format)s %(mlt)s "\
"-consumer avformat:%(out)s "\
"threads=%(threads)s "\
"progressive=1 "\
"strict=-2 "\
"properties=x264-high "\
"ab=256k "\
% parms
cmd = cmd.split()
# 2 pass causes no video track, so dumping this.
# need to figure out how to switch between good and fast
if False:
cmds = [cmd + ['pass=1'],
cmd + ['pass=2']]
if True: # even faster!
cmds[0].append('fastfirstpass=1')
else:
cmds = [cmd]
# cmds.append( ["qt-faststart", tmp_pathname, out_pathname] )
if self.options.rm_temp:
cmds.append(["rm", tmp_pathname])
if ext == 'm4v':
# iPhone
tmp_pathname = os.path.join(
self.tmp_dir, "%s.%s" % (episode.slug, ext))
# combine settings from 2 files
ffpreset = open(
'/usr/share/ffmpeg/libx264-default.ffpreset').read().split('\n')
ffpreset.extend(
open('/usr/share/ffmpeg/libx264-ipod640.ffpreset').read().split('\n'))
ffpreset = [i for i in ffpreset if i]
cmd = "melt %(mlt)s -progress -profile %(dv_format)s -consumer avformat:%(tmp)s s=432x320 aspect=@4/3 progressive=1 acodec=libfaac ar=44100 ab=128k vcodec=libx264 b=70k" % parms
cmd = cmd.split()
cmd.extend(ffpreset)
cmds = [cmd]
cmds.append(["qt-faststart", tmp_pathname, out_pathname])
if self.options.rm_temp:
cmds.append(["rm", tmp_pathname])
if ext == 'dv':
out_pathname = os.path.join(
self.tmp_dir, "%s.%s" % (episode.slug, ext))
cmds = ["melt -verbose -progress %s -consumer avformat:%s pix_fmt=yuv411p progressive=1" %
(mlt_pathname, out_pathname)]
if ext == 'ogv':
# melt/ffmpeg ogv encoder is loopy,
# so make a .dv and pass it to ffmpeg2theora
ret = enc_one("dv")
if ret:
dv_pathname = os.path.join(
self.tmp_dir, "%s.dv" % (episode.slug,))
cmds = [
"ffmpeg2theora --videoquality 5 -V 600 --audioquality 5 --channels 1 %s -o %s" % (dv_pathname, out_pathname)]
if self.options.rm_temp:
cmds.append(["rm", dv_pathname])
else:
return ret
# run encoder:
if self.options.noencode:
print("sorce files generated, skipping encode.")
if self.options.melt:
self.run_cmd(['melt', mlt_pathname])
ret = False
else:
ret = self.run_cmds(episode, cmds, )
if ret and not os.path.exists(out_pathname):
print("melt returned %ret, but no output: %s" % \
(ret, out_pathname))
ret = False
return ret
ret = True
# create all the formats for uploading
for ext in self.options.upload_formats:
print("encoding to %s" % (ext,))
ret = enc_one(ext) and ret
"""
if self.options.enc_script:
cmd = [self.options.enc_script,
self.show_dir, episode.slug]
ret = ret and self.run_cmds(episode, [cmd])
"""
return ret
def dv2theora(self, episode, dv_path_name, cls, rfs):
"""
Not used any more.
transcode dv to ogv
"""
oggpathname = os.path.join(
self.show_dir, "ogv", "%s.ogv" % episode.slug)
# cmd="ffmpeg2theora --videoquality 5 -V 600 --audioquality 5 --speedlevel 0 --optimize --keyint 256 --channels 1".split()
cmd = "ffmpeg2theora --videoquality 5 -V 600 --audioquality 5 --keyint 256 --channels 1".split()
cmd += ['--output', oggpathname]
cmd += [dv_path_name]
return cmd
def process_ep(self, episode):
ret = False
cls = Cut_List.objects.filter(
episode=episode, apply=True).order_by('sequence')
if cls:
# get list of raw footage for this episode
rfs = Raw_File.objects. \
filter(cut_list__episode=episode).\
exclude(trash=True).distinct()
# get a .mlt file for this episode (mlt_pathname)
# look for custom/slug.mlt and just use it,
# else build one from client.template_mlt
mlt_pathname = os.path.join(
self.show_dir, "custom",
"{}.mlt".format(episode.slug))
if os.path.exists(mlt_pathname):
print(("found custom/slug.mlt:\n{}".format( mlt_pathname )))
ret = True
else:
template_mlt = episode.show.client.template_mlt
mlt_pathname = os.path.join(self.show_dir,
"mlt", "%s.mlt" % episode.slug)
params = self.get_params(episode, rfs, cls )
pprint.pprint(params)
print((2, mlt_pathname))
ret = mk_mlt( template_mlt, mlt_pathname, params )
if not ret:
episode.state = 0
episode.comment += "\nenc.py mlt = self.mkmlt_1 failed.\n"
episode.save()
return False
# do the final encoding:
# using melt
ret = self.enc_all(mlt_pathname, episode)
if self.options.load_temp and self.options.rm_temp:
cmds = []
for rf in rfs:
dst_path = os.path.join(
self.tmp_dir, episode.slug, os.path.dirname(rf.filename))
rawpathname = os.path.join(
self.tmp_dir, episode.slug, rf.filename)
cmds.append(['rm', rawpathname])
cmds.append(['rmdir', dst_path])
dst_path = os.path.join(self.tmp_dir, episode.slug)
cmds.append(['rmdir', dst_path])
self.run_cmds(episode, cmds)
else:
err_msg = "No cutlist found."
episode.state = 0
episode.comment += "\nenc error: %s\n" % (err_msg,)
episode.save()
print(err_msg)
return False
if self.options.test:
ret = False
# save the episode so the test suite can get the slug
self.episode = episode
return ret
def add_more_options(self, parser):
parser.add_option('--enc-script',
help='encode shell script')
parser.add_option('--noencode', action="store_true",
help="don't encode, just make svg, png, mlt")
parser.add_option('--melt', action="store_true",
help="call melt slug.melt (only w/noencode)")
parser.add_option('--load-temp', action="store_true",
help='copy .dv to temp files')
parser.add_option('--rm-temp',
help='remove large temp files')
parser.add_option('--threads',
help='thread parameter passed to encoder')
def add_more_option_defaults(self, parser):
parser.set_defaults(threads=0)
if __name__ == '__main__':
p = enc()
p.main()
| mit | 2,883,326,822,299,327,000 | 35.06298 | 226 | 0.481407 | false |
GoodCloud/johnny-cache | johnny/backends/memcached.py | 1 | 1842 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Infinite caching memcached class. Caches forever when passed a timeout
of 0. For Django >= 1.3, this module also provides ``MemcachedCache`` and
``PyLibMCCache``, which use the backends of their respective analogs in
django's default backend modules.
"""
from django.core.cache.backends import memcached
from django.utils.encoding import smart_str
import django
class CacheClass(memcached.CacheClass):
"""By checking ``timeout is None`` rather than ``not timeout``, this
cache class allows for non-expiring cache writes on certain backends,
notably memcached."""
def _get_memcache_timeout(self, timeout=None):
if timeout == 0: return 0 #2591999
return super(CacheClass, self)._get_memcache_timeout(timeout)
if django.VERSION[:2] > (1, 2):
class MemcachedCache(memcached.MemcachedCache):
"""Infinitely Caching version of django's MemcachedCache backend."""
def _get_memcache_timeout(self, timeout=None):
if timeout == 0: return 0 #2591999
return super(MemcachedCache, self)._get_memcache_timeout(timeout)
class PyLibMCCache(memcached.PyLibMCCache):
"""PyLibMCCache version that interprets 0 to mean, roughly, 30 days.
This is because `pylibmc interprets 0 to mean literally zero seconds
<http://sendapatch.se/projects/pylibmc/misc.html#differences-from-python-memcached>`_
rather than "infinity" as memcached itself does. The maximum timeout
memcached allows before treating the timeout as a timestamp is just
under 30 days."""
def _get_memcache_timeout(self, timeout=None):
# pylibmc doesn't like our definition of 0
if timeout == 0: return 2591999
return super(PyLibMCCache, self)._get_memcache_timeout(timeout)
| mit | -7,832,698,847,422,093,000 | 46.230769 | 93 | 0.701412 | false |
freifunk-darmstadt/tools | update-telemetry.py | 1 | 8987 | #!/usr/bin/env python3
import psutil
import os
import json
import re
import itertools
from contextlib import contextmanager
import pprint
import time
import socket
import subprocess
import logging
logger = logging.getLogger(__name__)
def pairwise(iterable):
"s -> (s0,s1), (s2,s3), (s4, s5), ..."
a = iter(iterable)
return zip(a, a)
@contextmanager
def get_socket(host, port):
sock = socket.socket()
sock.settimeout(1)
sock.connect((host, port))
yield sock
sock.close()
@contextmanager
def get_unix_socket(filename):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(1)
sock.connect(filename)
yield sock
sock.close()
def write_to_graphite(data, prefix='freifunk', hostname=socket.gethostname()):
if '.' in hostname:
hostname = hostname.split('.')[0]
now = time.time()
with get_socket('stats.darmstadt.freifunk.net', 2013) as s:
for key, value in data.items():
line = "%s.%s.%s %s %s\n" % (prefix, hostname, key, value, now)
s.sendall(line.encode('latin-1'))
def write_to_node_collector(filename, data, patterns, prefix='freifunk'):
patterns = [re.compile(exp) for exp in patterns]
print(data)
updates = []
for metric, value in data.items():
for pattern in patterns:
m = pattern.match(metric)
if m:
groups = m.groupdict()
if all(key in groups for key in ['key']):
updates.append([groups, value])
break
content = []
for update, value in updates:
key = update['key'].replace('.', '_')
sub_key = update.pop('sub_key', None)
if prefix:
key = '{}_{}'.format(prefix, key)
if sub_key:
key += '_' + sub_key
params =update.copy()
params.pop('key')
params = ','.join(['{}={}'.format(k, v) for k, v in params.items()])
params = '{%s}' % (params)
content.append('{key}{params} {value}'.format(key=key, params=params, value=value))
with open(filename, 'w') as fh:
fh.write('\n'.join(content))
def read_from_fastd_socket(filename):
with get_unix_socket(filename) as client:
try:
strings = []
while True:
s = client.recv(8096)
if not s:
break
strings.append(s.decode('utf-8'))
data = json.loads(''.join(strings))
#pprint.pprint(data['statistics'])
online_peers = len([None for name, d in data['peers'].items() if d['connection']])
return {
'peers.count': len(data['peers']),
'peers.online': online_peers,
'rx.packets': data['statistics']['rx']['packets'],
'rx.bytes': data['statistics']['rx']['bytes'],
'rx.reordered.bytes': data['statistics']['rx_reordered']['bytes'],
'rx.reordered.packets': data['statistics']['rx_reordered']['packets'],
'tx.bytes': data['statistics']['tx']['bytes'],
'tx.packets': data['statistics']['tx']['packets'],
'tx.dropped.bytes': data['statistics']['tx_dropped']['bytes'],
'tx.dropped.packets': data['statistics']['tx_dropped']['packets'],
}
except Exception as e:
print(e)
return {}
def get_fastd_process_stats():
for proc in psutil.process_iter():
if proc.name() == 'fastd':
# 11905: 00000000000000000000000001000000:0035 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 0 0 4469598 2 ffff880519be5100 0
drop_count = 0
for proto in ['udp', 'udp6']:
with open('/proc/{}/net/{}'.format(proc.pid, proto), 'r') as fh:
for line in (line.strip() for line in fh.read().split('\n')):
if not line:
continue
if line.startswith('sl'):
continue
parts = line.split(' ')
drop_count += int(parts[-1])
return drop_count
return None
def get_neighbour_table_states(family=socket.AF_INET6):
if family is socket.AF_INET:
family = '-4'
elif family is socket.AF_INET6:
family = '-6'
else:
return
response = subprocess.check_output(
['/bin/ip', family, 'neigh', 'show', 'nud', 'all']
).decode()
states = {'PERMANENT': 0, 'NOARP': 0, 'REACHABLE': 0, 'STALE': 0, 'NONE': 0,
'INCOMPLETE': 0, 'DELAY': 0, 'PROBE': 0, 'FAILED': 0}
for neigh_entry in response.split('\n'):
if not neigh_entry:
continue
state = neigh_entry.split()[-1]
if state not in states:
continue
states[state] += 1
return states
def main():
fastd_sockets = (
('0', '/run/fastd-ffda-vpn.sock'),
('1', '/run/fastd-ffda-vpn1.sock'),
)
device_name_mapping = {
'freifunk': 'ffda-br',
'bat0': 'ffda-bat',
'mesh-vpn': 'ffda-vpn'
}
device_whitelist = [
'eth0',
'ffda-vpn',
'ffda-vpn-1280',
'ffda-vpn-1312',
'ffda-bat',
'ffda-br',
'ffda-transport',
'services',
]
fields = [
'bytes', 'packets', 'errs', 'drop', 'fifo',
'frame', 'compressed', 'multicast',
]
field_format = '(?P<{direction}_{field}>\d+)'
pattern = re.compile(
'^\s*(?P<device_name>[\w-]+):\s+' + '\s+'.join(
itertools.chain.from_iterable((field_format.format(direction=direction, field=field)
for field in fields) for direction in ['rx', 'tx'])
)
)
update = {}
with open('/proc/net/dev') as fh:
lines = fh.readlines()
for line in lines:
m = pattern.match(line)
if m:
groupdict = m.groupdict()
device_name = groupdict.pop('device_name')
device_name = device_name_mapping.get(device_name, device_name)
if device_name in device_whitelist or device_name.endswith('-vpn') or \
device_name.endswith('-bat') or \
device_name.endswith('-br') or \
device_name.endswith('-transport'):
for key, value in groupdict.items():
direction, metric = key.split('_')
update['%s.%s.%s' % (device_name, direction, metric)] = value
with open('/proc/loadavg', 'r') as fh:
line = fh.read()
values = line.split(' ', 3)
update['load.15'] = values[0]
update['load.5'] = values[1]
update['load.1'] = values[2]
for key in ['count', 'max']:
try:
with open('/proc/sys/net/netfilter/nf_conntrack_%s' % key, 'r') as fh:
update['netfilter.%s' % key] = fh.read().strip()
except IOError as e:
pass
with open('/proc/net/snmp6', 'r') as fh:
for line in fh.readlines():
key, value = line.split(' ', 1)
value = value.strip()
update['ipv6.%s' % key] = value
with open('/proc/net/snmp', 'r') as fh:
for heading, values in pairwise(fh.readlines()):
section, headings = heading.split(':')
headings = headings.strip().split(' ')
_, values = values.split(':')
values = values.strip().split(' ')
for key, value in zip(headings, values):
update['ipv4.%s.%s' % (section, key)] = value
for af, prefix in [(socket.AF_INET, 'ipv4.Neigh'),
(socket.AF_INET6, 'ipv6.Neigh')]:
for state, count in get_neighbour_table_states(af).items():
update['{0}.{1}'.format(prefix, state.lower())] = count
with open('/proc/stat', 'r') as fh:
for line in fh.readlines():
key, value = line.split(' ', 1)
if key == 'ctxt':
update['context_switches'] = value.strip()
break
for name, filename in fastd_sockets:
if not os.path.exists(filename):
continue
data = read_from_fastd_socket(filename)
if len(data) > 0:
update.update({'fastd.%s.%s' % (name, key): value for (key, value) in data.items()})
fastd_drops = get_fastd_process_stats()
if fastd_drops:
update['fastd.drops'] = fastd_drops
#pprint.pprint(update)
write_to_graphite(update)
write_to_node_collector('/dev/shm/telemetry.prom', update, patterns=[
# '^(?P<interface>[^.]+)\.(?P<key>(rx|tx).+)',
'^(?P<key>fastd)\.(?P<fast_instance>.+)\.(?P<sub_key>.+)',
# '^(?P<key>load)\.(?P<period>\d+)'
], prefix='ffda_')
if __name__ == "__main__":
main()
| agpl-3.0 | 6,942,988,969,528,593,000 | 31.327338 | 182 | 0.516858 | false |
maferelo/saleor | saleor/account/migrations/0001_initial.py | 3 | 19366 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("auth", "0006_require_contenttypes_0002")]
replaces = [("userprofile", "0001_initial")]
operations = [
migrations.CreateModel(
name="User",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"is_superuser",
models.BooleanField(
default=False,
help_text=(
"Designates that this user has all permissions "
"without explicitly assigning them."
),
verbose_name="superuser status",
),
),
("email", models.EmailField(unique=True, max_length=254)),
(
"is_staff",
models.BooleanField(default=False, verbose_name="staff status"),
),
(
"is_active",
models.BooleanField(default=False, verbose_name="active"),
),
(
"password",
models.CharField(
verbose_name="password", max_length=128, editable=False
),
),
(
"date_joined",
models.DateTimeField(
default=django.utils.timezone.now,
verbose_name="date joined",
editable=False,
),
),
(
"last_login",
models.DateTimeField(
default=django.utils.timezone.now,
verbose_name="last login",
editable=False,
),
),
],
options={"db_table": "userprofile_user", "abstract": False},
),
migrations.CreateModel(
name="Address",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"first_name",
models.CharField(max_length=256, verbose_name="first name"),
),
(
"last_name",
models.CharField(max_length=256, verbose_name="last name"),
),
(
"company_name",
models.CharField(
max_length=256,
verbose_name="company or organization",
blank=True,
),
),
(
"street_address_1",
models.CharField(max_length=256, verbose_name="address"),
),
(
"street_address_2",
models.CharField(
max_length=256, verbose_name="address", blank=True
),
),
("city", models.CharField(max_length=256, verbose_name="city")),
(
"postal_code",
models.CharField(max_length=20, verbose_name="postal code"),
),
(
"country",
models.CharField(
max_length=2,
verbose_name="country",
choices=[
("AF", "Afghanistan"),
("AX", "\xc5land Islands"),
("AL", "Albania"),
("DZ", "Algeria"),
("AS", "American Samoa"),
("AD", "Andorra"),
("AO", "Angola"),
("AI", "Anguilla"),
("AQ", "Antarctica"),
("AG", "Antigua And Barbuda"),
("AR", "Argentina"),
("AM", "Armenia"),
("AW", "Aruba"),
("AU", "Australia"),
("AT", "Austria"),
("AZ", "Azerbaijan"),
("BS", "Bahamas"),
("BH", "Bahrain"),
("BD", "Bangladesh"),
("BB", "Barbados"),
("BY", "Belarus"),
("BE", "Belgium"),
("BZ", "Belize"),
("BJ", "Benin"),
("BM", "Bermuda"),
("BT", "Bhutan"),
("BO", "Bolivia"),
("BQ", "Bonaire, Saint Eustatius And Saba"),
("BA", "Bosnia And Herzegovina"),
("BW", "Botswana"),
("BV", "Bouvet Island"),
("BR", "Brazil"),
("IO", "British Indian Ocean Territory"),
("BN", "Brunei Darussalam"),
("BG", "Bulgaria"),
("BF", "Burkina Faso"),
("BI", "Burundi"),
("KH", "Cambodia"),
("CM", "Cameroon"),
("CA", "Canada"),
("CV", "Cape Verde"),
("KY", "Cayman Islands"),
("CF", "Central African Republic"),
("TD", "Chad"),
("CL", "Chile"),
("CN", "China"),
("CX", "Christmas Island"),
("CC", "Cocos (Keeling) Islands"),
("CO", "Colombia"),
("KM", "Comoros"),
("CG", "Congo"),
("CD", "Congo, The Democratic Republic of the"),
("CK", "Cook Islands"),
("CR", "Costa Rica"),
("CI", "C\xf4te D'Ivoire"),
("HR", "Croatia"),
("CU", "Cuba"),
("CW", "Cura\xe7o"),
("CY", "Cyprus"),
("CZ", "Czech Republic"),
("DK", "Denmark"),
("DJ", "Djibouti"),
("DM", "Dominica"),
("DO", "Dominican Republic"),
("EC", "Ecuador"),
("EG", "Egypt"),
("SV", "El Salvador"),
("GQ", "Equatorial Guinea"),
("ER", "Eritrea"),
("EE", "Estonia"),
("ET", "Ethiopia"),
("FK", "Falkland Islands (Malvinas)"),
("FO", "Faroe Islands"),
("FJ", "Fiji"),
("FI", "Finland"),
("FR", "France"),
("GF", "French Guiana"),
("PF", "French Polynesia"),
("TF", "French Southern Territories"),
("GA", "Gabon"),
("GM", "Gambia"),
("GE", "Georgia"),
("DE", "Germany"),
("GH", "Ghana"),
("GI", "Gibraltar"),
("GR", "Greece"),
("GL", "Greenland"),
("GD", "Grenada"),
("GP", "Guadeloupe"),
("GU", "Guam"),
("GT", "Guatemala"),
("GG", "Guernsey"),
("GN", "Guinea"),
("GW", "Guinea-Bissau"),
("GY", "Guyana"),
("HT", "Haiti"),
("HM", "Heard Island And Mcdonald Islands"),
("VA", "Holy See (Vatican City State)"),
("HN", "Honduras"),
("HK", "Hong Kong"),
("HU", "Hungary"),
("IS", "Iceland"),
("IN", "India"),
("ID", "Indonesia"),
("IR", "Iran, Islamic Republic of"),
("IQ", "Iraq"),
("IE", "Ireland"),
("IM", "Isle of Man"),
("IL", "Israel"),
("IT", "Italy"),
("JM", "Jamaica"),
("JP", "Japan"),
("JE", "Jersey"),
("JO", "Jordan"),
("KZ", "Kazakhstan"),
("KE", "Kenya"),
("KI", "Kiribati"),
("KP", "Korea, Democratic People's Republic of"),
("KR", "Korea, Republic of"),
("KW", "Kuwait"),
("KG", "Kyrgyzstan"),
("LA", "Lao People's Democratic Republic"),
("LV", "Latvia"),
("LB", "Lebanon"),
("LS", "Lesotho"),
("LR", "Liberia"),
("LY", "Libya"),
("LI", "Liechtenstein"),
("LT", "Lithuania"),
("LU", "Luxembourg"),
("MO", "Macao"),
("MK", "Macedonia, The Former Yugoslav Republic of"),
("MG", "Madagascar"),
("MW", "Malawi"),
("MY", "Malaysia"),
("MV", "Maldives"),
("ML", "Mali"),
("MT", "Malta"),
("MH", "Marshall Islands"),
("MQ", "Martinique"),
("MR", "Mauritania"),
("MU", "Mauritius"),
("YT", "Mayotte"),
("MX", "Mexico"),
("FM", "Micronesia, Federated States of"),
("MD", "Moldova, Republic of"),
("MC", "Monaco"),
("MN", "Mongolia"),
("ME", "Montenegro"),
("MS", "Montserrat"),
("MA", "Morocco"),
("MZ", "Mozambique"),
("MM", "Myanmar"),
("NA", "Namibia"),
("NR", "Nauru"),
("NP", "Nepal"),
("NL", "Netherlands"),
("NC", "New Caledonia"),
("NZ", "New Zealand"),
("NI", "Nicaragua"),
("NE", "Niger"),
("NG", "Nigeria"),
("NU", "Niue"),
("NF", "Norfolk Island"),
("MP", "Northern Mariana Islands"),
("NO", "Norway"),
("OM", "Oman"),
("PK", "Pakistan"),
("PW", "Palau"),
("PS", "Palestinian Territory, Occupied"),
("PA", "Panama"),
("PG", "Papua New Guinea"),
("PY", "Paraguay"),
("PE", "Peru"),
("PH", "Philippines"),
("PN", "Pitcairn"),
("PL", "Poland"),
("PT", "Portugal"),
("PR", "Puerto Rico"),
("QA", "Qatar"),
("RE", "R\xe9union"),
("RO", "Romania"),
("RU", "Russian Federation"),
("RW", "Rwanda"),
("BL", "Saint Barth\xe9lemy"),
("SH", "Saint Helena, Ascension And Tristan Da Cunha"),
("KN", "Saint Kitts And Nevis"),
("LC", "Saint Lucia"),
("MF", "Saint Martin (French Part)"),
("PM", "Saint Pierre And Miquelon"),
("VC", "Saint Vincent And the Grenadines"),
("WS", "Samoa"),
("SM", "San Marino"),
("ST", "Sao Tome And Principe"),
("SA", "Saudi Arabia"),
("SN", "Senegal"),
("RS", "Serbia"),
("SC", "Seychelles"),
("SL", "Sierra Leone"),
("SG", "Singapore"),
("SX", "Sint Maarten (Dutch Part)"),
("SK", "Slovakia"),
("SI", "Slovenia"),
("SB", "Solomon Islands"),
("SO", "Somalia"),
("ZA", "South Africa"),
("GS", "South Georgia and the South Sandwich Islands"),
("ES", "Spain"),
("LK", "Sri Lanka"),
("SD", "Sudan"),
("SR", "Suriname"),
("SJ", "Svalbard and Jan Mayen"),
("SZ", "Swaziland"),
("SE", "Sweden"),
("CH", "Switzerland"),
("SY", "Syria"),
("TW", "Taiwan"),
("TJ", "Tajikistan"),
("TZ", "Tanzania"),
("TH", "Thailand"),
("TL", "Timor-Leste"),
("TG", "Togo"),
("TK", "Tokelau"),
("TO", "Tonga"),
("TT", "Trinidad And Tobago"),
("TN", "Tunisia"),
("TR", "Turkey"),
("TM", "Turkmenistan"),
("TC", "Turks And Caicos Islands"),
("TV", "Tuvalu"),
("UG", "Uganda"),
("UA", "Ukraine"),
("AE", "United Arab Emirates"),
("GB", "United Kingdom"),
("US", "United States"),
("UM", "United States Minor Outlying Islands"),
("UY", "Uruguay"),
("UZ", "Uzbekistan"),
("VU", "Vanuatu"),
("VE", "Venezuela"),
("VN", "Viet Nam"),
("VG", "Virgin Islands, British"),
("VI", "Virgin Islands, U.S."),
("WF", "Wallis And Futuna"),
("EH", "Western Sahara"),
("YE", "Yemen"),
("ZM", "Zambia"),
("ZW", "Zimbabwe"),
],
),
),
(
"country_area",
models.CharField(
max_length=128, verbose_name="state or province", blank=True
),
),
(
"phone",
models.CharField(
max_length=30, verbose_name="phone number", blank=True
),
),
],
options={"db_table": "userprofile_address"},
),
migrations.AddField(
model_name="user",
name="addresses",
field=models.ManyToManyField(to="account.Address"),
),
migrations.AddField(
model_name="user",
name="default_billing_address",
field=models.ForeignKey(
related_name="+",
on_delete=django.db.models.deletion.SET_NULL,
verbose_name="default billing address",
blank=True,
to="account.Address",
null=True,
),
),
migrations.AddField(
model_name="user",
name="default_shipping_address",
field=models.ForeignKey(
related_name="+",
on_delete=django.db.models.deletion.SET_NULL,
verbose_name="default shipping address",
blank=True,
to="account.Address",
null=True,
),
),
migrations.AddField(
model_name="user",
name="groups",
field=models.ManyToManyField(
related_query_name="user",
related_name="user_set",
to="auth.Group",
blank=True,
help_text=(
"The groups this user belongs to. "
"A user will get all permissions granted to each of their groups."
),
verbose_name="groups",
),
),
migrations.AddField(
model_name="user",
name="user_permissions",
field=models.ManyToManyField(
related_query_name="user",
related_name="user_set",
to="auth.Permission",
blank=True,
help_text="Specific permissions for this user.",
verbose_name="user permissions",
),
),
]
| bsd-3-clause | 2,425,811,758,527,500,300 | 42.617117 | 86 | 0.291748 | false |
ag-sc/QALD | 4/scripts/Evaluation.py | 1 | 26257 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.dom.minidom as dom
import xml.dom
from decimal import *
import sys
import os
import datetime
#from Numeric import *
# Dokument erzeugen
implement = xml.dom.getDOMImplementation()
###################Globale Variablen###################
task=None
choosen_tag={}
system_time=0
filename_out_html = None
filename_out_txt = None
system_name=None
configuration=None
testing=False
###################Funktionen##########################
def set_system_name(name):
global system_name
system_name=name
def set_configuration(name):
global configuration
configuration=name
def _ausgabe_(ausgabe):
print ausgabe
def set_filename_txt_out(time):
global filename_out_txt
filename_out_txt="upload/out"+str(time)+".txt"
def set_filename_out(time):
global filename_out_html
filename_out_html="upload/out"+str(time)+".html"
def _knoten_auslesen(knoten):
try:
string = knoten.firstChild.data.strip().encode("utf-8")
# print "knoten_auslesen: "+string
return string
except:
# print "Unexpected error:", sys.exc_info()[0]
pass
#def _knoten_auslesen(knoten):
# return eval("%s('%s')" % (knoten.getAttribute("typ"),
# knoten.firstChild.data.strip()))
def lade_musterloesung(dateiname):
d = {}
global choosen_tag
#baum = dom.parse(dateiname.encode( "utf-8" ))
baum = dom.parse(dateiname)
zaehler=1
for eintrag in baum.firstChild.childNodes:
if eintrag.nodeName == "question":
id=(eintrag.attributes["id"]).value
question_text = query = None
answer=[]
for knoten in eintrag.childNodes:
if knoten.nodeName == "text" or knoten.nodeName == "string":
if (knoten.attributes["lang"]).value == "en":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "de":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "es":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "it":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "fr":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "nl":
question_text = _knoten_auslesen(knoten)
# elif knoten.nodeName == "query":
# query=knoten.firstChild.data.strip()
if knoten.nodeName=="answers":
answer_elem_1=[]
for knoten_answer in knoten.childNodes:
#here i have to check for optional.
if knoten_answer.nodeName=="answer":
answer_elem=[]
for knoten_answer1 in knoten_answer.childNodes:
for id_loesung,tag_loesung in choosen_tag.iteritems():
if(id==id_loesung):
###########################
#
#
# In QALD3 only uri/boolean/number and date are allowed, so string is "turned off"
#
#
###########################
if knoten_answer1.nodeName == "string" and choosen_tag[id]=="string":
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
if knoten_answer1.nodeName == "boolean" and choosen_tag[id]=="boolean":
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
if knoten_answer1.nodeName == "number"and choosen_tag[id]=="number":
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
if knoten_answer1.nodeName == "date" and choosen_tag[id]=="date":
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
if knoten_answer1.nodeName == "uri" and choosen_tag[id]=="uri":
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
answer_elem_1.append(answer_elem)
answer.append(answer_elem_1)
# print(answer)
d[id] = [query,question_text,answer]
# print str(d)
return d
def bearbeite_baum(dateiname):
#setze Zeielnumbrueche, damit der Parser spaeter besser mit dem Dokument zurecht kommt
fobj = open(dateiname, "r")
string=""
for line1 in fobj:
line=str(line1)
line=line.replace('<question','\n<question')
#line=line.replace('<string>','\n<string>')
line=line.replace('</string>','</string>\n')
line=line.replace('</keywords>','</keywords>\n')
line=line.replace('</query>','</query>\n')
line=line.replace('<answers>','<answers>\n')
line=line.replace('<answer>','<answer>\n')
line=line.replace('</answer>','</answer>\n')
line=line.replace('</answers>','</answers>\n')
line=line.replace('</uri>','</uri>\n')
line=line.replace('</boolean>','</boolean>\n')
line=line.replace('</number>','</number>\n')
line=line.replace('</date>','</date>\n')
#line=line.replace('&','&')
string+=line
fobj.close()
# print string
fobj = open(dateiname, "w")
fobj.write(string)
fobj.close()
def lade_baum(dateiname):
d = {}
bearbeite_baum(dateiname)
global choosen_tag
global testing
# print "after bearbeite baum"
baum = dom.parse(dateiname.encode( "utf-8" ))
zaehler=1
# print "after parsing baum"
for eintrag in baum.firstChild.childNodes:
if(zaehler==1):
knoten_id=((eintrag.parentNode).attributes["id"]).value
zaehler=2
# print "after 1"
if eintrag.nodeName == "question":
# print "in question"
id=(eintrag.attributes["id"]).value
# print "id: "+str(id)
question_text = query = None
answer=[]
for knoten in eintrag.childNodes: #
# print "in for knoten in eintrag.childNodes: "
if knoten.nodeName == "text" or knoten.nodeName == "string":
if (knoten.attributes["lang"]).value == "en":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "de":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "es":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "it":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "fr":
question_text = _knoten_auslesen(knoten)
elif (knoten.attributes["lang"]).value == "nl":
question_text = _knoten_auslesen(knoten)
# print str(question_txt)
# elif knoten.nodeName == "query":
# query=knoten.firstChild.data.strip()
elif knoten.nodeName=="answers":
try:
answer_elem_1=[]
for knoten_answer in knoten.childNodes:
if knoten_answer.nodeName=="answer":
answer_elem=[]
###########################
#
#
# In QALD3 only uri/boolean/number and date are allowed, so string is "turned off"
#
#
###########################
mehr_als_ein_typ=False
eins=zwei=None
eins=((knoten_answer.childNodes).item(1)).nodeName
if((knoten_answer.childNodes).item(3)):
zwei=((knoten_answer.childNodes).item(3)).nodeName
else:
zwei= None
if(eins==zwei or zwei==None):
mehr_als_ein_typ=False
choosen_tag[id]=((knoten_answer.childNodes).item(1)).nodeName
else:
mehr_als_ein_typ=True
#choosen_tag[id]="string"
choosen_tag[id]="uri"
for knoten_answer1 in knoten_answer.childNodes:
if(knoten_answer1.nodeName!="#text"):
if knoten_answer1.nodeName == "string" and mehr_als_ein_typ==False:
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
if knoten_answer1.nodeName == "boolean" and mehr_als_ein_typ==False:
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
if knoten_answer1.nodeName == "number" and mehr_als_ein_typ==False:
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
if knoten_answer1.nodeName == "date" and mehr_als_ein_typ==False:
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
if knoten_answer1.nodeName == "uri" and mehr_als_ein_typ==False:
try:
answer_elem.append(knoten_answer1.firstChild.data.strip())
except Exception:
answer_elem.append(" ")
#if knoten_answer1.nodeName == choosen_tag[id] and mehr_als_ein_typ==True:
# try:
# answer_elem.append(knoten_answer1.firstChild.data.strip())
# except Exception:
# answer_elem.append(" ")
answer_elem_1.append(answer_elem)
except Exception as inst:
error= "<!doctype html> <html> <head> <title>ERROR</title></head> <body> <p>"+str(type(inst))+"</p><p>"+str(inst.args)+"</p><p>"+str(inst)+"</p><p>"+id+"</p><p>PLEASE CHECK YOUR XML FILE</p></body></html>"
outfile=open(filename_out_html,"w")
# _ausgabe_(filename_out_html)
outfile.write(error)
outfile.close()
choosen_tag[id]="string"
answer_elem_1.append("ERROR IN FILE")
# print "Unexpected error:", sys.exc_info()[0]
# print "9"
answer.append(answer_elem_1)
d[question_text] = [query,id,answer]
# print str(d)
return d
def sortedDictValues2(adict):
keys = adict.keys()
keys.sort()
return [dict[key] for key in keys]
def _evaluation(loesung, musterloesung, task):
anzahl_bearbeiteter_fragen=0
anzahl_korrekter_antworten=0
anzahl_falscher_antworten=0
falsche_antworten=[]
anzahl_bearbeiteter_fragen=len(loesung)
bewertung_ausgabe={}
#number_answers_goldstandard = 0
number_answers_user = 0
#for question_text, query_loesung in musterloesung.iteritems():
# gold_loesung1=query_loesung[2]
# gold_loesung=gold_loesung1[0]
# number_answer_goldstandard += len(gold_loesung)
for question_text, query_loesung in loesung.iteritems():
anzahl_falscher_frageelemente=anzahl_richtiger_frageelemente=0
R=P=F=0
# print question_text
# print
# print str(query_loesung[2])
answer_loesung1=query_loesung[2]
answer_loesung=answer_loesung1[0]
number_answers_user += len(answer_loesung)
loesung_id=query_loesung[1]
answer_musterloesung1=musterloesung[loesung_id]
answer_musterloesung2=answer_musterloesung1[2]
answer_musterloesung=answer_musterloesung2[0]
#print "user: "+str(answer_loesung)
#print "gold: "+str(answer_musterloesung)
if len(answer_musterloesung) == len(answer_loesung) and len(answer_loesung) == 0:
bewertung_ausgabe[loesung_id]=[question_text,str(1.0),str(1.0),str(1.0)]
anzahl_korrekter_antworten+=1
elif(len(answer_loesung)==0):
# anzahl_falscher_fragen+=1
anzahl_falscher_antworten+=1
falsche_antworten.append(loesung_id)
R=P=F=0
bewertung_ausgabe[loesung_id]=[question_text,str(R),str(P),str(F)]
else:
if(len(answer_musterloesung)>len(answer_loesung)):
anzahl_falscher_antworten+=1
anzahl_falscher_frageelemente+=(len(answer_musterloesung)-len(answer_loesung))
falsche_antworten.append(loesung_id)
for i in range(0,len(answer_loesung)):
for j in range(0,len(answer_musterloesung)):
if(answer_loesung[i]==answer_musterloesung[j]):
anzahl_richtiger_frageelemente+=1
break
if(anzahl_richtiger_frageelemente==0):
R=F=P=0
else:
R1=Decimal(anzahl_richtiger_frageelemente)
R2=Decimal(len(answer_musterloesung))
R=round((R1/R2),5)
P1=R1
P2=Decimal(len(answer_loesung))
P=round((P1/P2),5)
F=round(((2*P*R)/(R+P)),5)
bewertung_ausgabe[loesung_id]=[question_text,str(R),str(P),str(F)]
else:
for i in range(0,len(answer_loesung)):
for j in range(0,len(answer_musterloesung)):
if(answer_loesung[i]==answer_musterloesung[j]):
anzahl_richtiger_frageelemente+=1
break
if(anzahl_richtiger_frageelemente==len(answer_loesung)):
anzahl_korrekter_antworten+=1
else:
anzahl_falscher_antworten+=1
falsche_antworten.append(loesung_id)
if(anzahl_richtiger_frageelemente==0):
R=F=P=0
else:
R1=Decimal(anzahl_richtiger_frageelemente)
R2=Decimal(len(answer_musterloesung))
R=round((R1/R2),5)
P1=R1
P2=Decimal(len(answer_loesung))
P=round((P1/P2),5)
F=round(((2*P*R)/(R+P)),5)
bewertung_ausgabe[loesung_id]=[question_text,str(R),str(P),str(F)]
if(anzahl_korrekter_antworten==0):
fmeasure=recall=precision=0
else:
wert1=Decimal(anzahl_korrekter_antworten)
wert2=Decimal(anzahl_bearbeiteter_fragen)
recall=round(((wert1/len(musterloesung))),5)
precision=round(((wert1/wert2)),5)
fmeasure=round(((2*recall*precision)/(recall+precision)),5)
recall=str(recall)
precision=str(precision)
fmeasure=str(fmeasure)
number_correct_user_answers = anzahl_bearbeiteter_fragen
anzahl_bearbeiteter_fragen=str(anzahl_bearbeiteter_fragen)
anzahl_korrekter_antworten=str(anzahl_korrekter_antworten)
anzahl_falscher_antworten=str(anzahl_falscher_antworten)
############################################################################################
# #
#Recall = Overall numbers of correct answers / overall number of goldstandard answers #
#Precision = Overall numbers of correct answers / overall number of all answers(given xml)
#F-Measure = (2*Recall*Precision)/(Recall+Precision)
# #
############################################################################################
global_precision=0.0
global_recall=0.0
global_fmeasure=0.0
for id,value in bewertung_ausgabe.iteritems():
tmp = id +";"
x = value[0]
x = x.decode("ascii","ignore")
tmp += x +";"
tmp += str(value[2])+";"
tmp += str(value[1])+";"
tmp += str(value[3])+";"
#print"tmp: "+ tmp
#tmp = (id+";"+str(value[0])+";"+str(value[2])+";"+str(value[1])+";"+str(value[3])+"\n").encode("utf-8")
string = "qald-4_"
if task == 1: string += "multilingual"
if task == 2: string += "biomedical"
if task == 3: string += "hybrid"
string += tmp
global_precision += float(value[2])
global_recall += float(value[1])
if global_recall == 0.0 or global_precision == 0.0:
global_precision = str(0)
global_recall = str(0)
global_fmeasure = str(0)
else:
global_precision = global_precision/len(musterloesung)
global_recall = global_recall/len(musterloesung)
global_fmeasure=str((2*global_recall*global_precision)/(global_precision + global_recall))
global_precision = str(global_precision)
global_recall = str(global_recall)
write_html(string,anzahl_falscher_antworten,anzahl_korrekter_antworten,anzahl_bearbeiteter_fragen,global_fmeasure,global_precision,global_recall,bewertung_ausgabe,falsche_antworten)
def write_txt(anzahl_falscher_antworten,anzahl_korrekter_antworten,anzahl_bearbeiteter_fragen,fmeasure,precision,recall,bewertung_ausgabe,falsche_antworten):
#global system_name, configuration
bla=""
bla=system_name+";"+configuration+"\n"
globale_uebersicht_txt= anzahl_bearbeiteter_fragen+";"+anzahl_korrekter_antworten+";"+anzahl_falscher_antworten+";"+recall+";"+precision+";"+fmeasure+"\n"
string=""
for id,answer in bewertung_ausgabe.iteritems():
question = answer[0]
question = question.decode("ascii","ignore")
string += id+";"+question+";"+answer[1]+";"+answer[2]+";"+answer[3]+"\n"
outfile=open(filename_out_txt,"w")
outfile.write(bla+globale_uebersicht_txt+string)
outfile.close()
_ausgabe_(filename_out_txt)
def write_html(string,anzahl_falscher_antworten,anzahl_korrekter_antworten,anzahl_bearbeiteter_fragen,fmeasure,precision,recall,bewertung_ausgabe,falsche_antworten):
tabelle3="<table class=\"eval\" border=\"1\"><tr><th>Failed questions (IDs)</th></tr>"
string_question ="<tr>"
for i in range(0,len(falsche_antworten)):
string_question+="<td>"+str(falsche_antworten[i])+"</td></tr>"
end_tabelle3="</table>"
start_table= "<!doctype html> <html> <head> <title>Evaluation of "+string+"</title></head> <body> <p>Evaluation</p><p>Skript Version 5.5</p>"
space="<p></p><p></p><p></p><p></p><p></p>"
tabelle1="<table class=\"eval\" border=\"1\"><tr><th>ID</th><th>Question</th><th>Recall</th><th>Precision</th><th>F-Measure</th></tr>"
tabelle2="<table class=\"eval\" border=\"1\"><tr><th>Number of constructed Queries</th><th>Number of correct Answers</th><th>Number of wrong Answers</th><th>Global Recall</th><th>Global Precision</th><th>Global F-Measure</th></tr>"
inhalt_tabelle2="<tr><td>"+anzahl_bearbeiteter_fragen+"</td><td>"+anzahl_korrekter_antworten+"</td><td>"+anzahl_falscher_antworten+"</td><td>"+recall+"</td><td>"+precision+"</td><td>"+fmeasure+"</td></tr>"
end_tabelle2="</table>"
end_tabelle1="</table>"
ende="</body> </html>"
string=""
for id,answer in bewertung_ausgabe.iteritems():
question = answer[0]
question = question.decode("ascii","ignore")
string_bla="<tr><td>"+id+"</td><td>"+question+"</td><td>"+answer[1]+"</td><td>"+answer[2]+"</td><td>"+answer[3]+"</td></tr>"
string+=string_bla
outfile=open(filename_out_html,"w")
outfile.write(start_table+space+tabelle2+inhalt_tabelle2+end_tabelle2+space+tabelle1+string+end_tabelle1+space+tabelle3+string_question+end_tabelle3+ende)
outfile.close()
_ausgabe_(filename_out_html)
################### MAIN ##################################################
def main():
global system_time, testing, task
system_time = datetime.datetime.now()
set_filename_out(system_time)
set_filename_txt_out(system_time)
#print system_time
#print filename_out_html
# Train or Test
if sys.argv[2] == "test":
testing = True
else:
testing = False
# Task
task = sys.argv[3]
# Set gold standard
gold = '../data/qald-4_'
if task == '1': gold += 'multilingual'
elif task == '2': gold += 'biomedical'
elif task == '3': gold += 'hybrid'
if testing: gold += '_test'
else: gold += '_train'
gold += '_withanswers.xml'
import urllib
dateiname=sys.argv[1]
if (len(sys.argv)>=6):
set_system_name(sys.argv[4])
set_configuration(sys.argv[5])
else:
set_system_name("None")
set_configuration("None")
loesung=None
try:
loesung=lade_baum(dateiname)
except Exception as inst:
error= "<!doctype html> <html> <head> <title>ERROR</title></head> <body> <p>"+str(type(inst))+"</p><p>"+str(inst.args)+"</p><p>"+str(inst)+"</p><p>PLEASE CHECK YOUR XML FILE</p></body></html>"
outfile=open(filename_out_html,"w")
outfile.write(error)
outfile.close()
_ausgabe_(filename_out_html)
# print "Unexpected error:", sys.exc_info()[0]
# print "8"
gstandard_importet=True
try:
musterloesung=lade_musterloesung(urllib.urlopen(gold))
except Exception as inst:
error= "<!doctype html> <html> <head> <title>ERROR</title></head> <body> <p>"+str(type(inst))+"</p><p>"+str(inst.args)+"</p><p>"+str(inst)+"</p></body></html>"
write_error(error)
# print "Unexpected error:", sys.exc_info()[0]
# print "7"
else:
_evaluation(loesung,musterloesung,task)
# print "Unexpected error:", sys.exc_info()[0]
# print "6"
def write_error(error):
global filename_out_html
outfile=open(filename_out_html,"w")
outfile.write(error)
outfile.close()
_ausgabe_(filename_out_html)
if __name__ == "__main__":
main()
| mit | -724,464,349,123,768,700 | 42.688852 | 235 | 0.473778 | false |
Re4son/Kali-Pi | Menus/menu_pause.py | 1 | 1785 | #!/usr/bin/env python
import pygame, os, sys, subprocess, time
import RPi.GPIO as GPIO
from pygame.locals import *
from subprocess import *
if "TFT" in os.environ and os.environ["TFT"] == "0":
# No TFT screen
SCREEN=0
pass
elif "TFT" in os.environ and os.environ["TFT"] == "2":
# TFT screen with mouse
SCREEN=2
os.environ["SDL_FBDEV"] = "/dev/fb1"
elif "TFT" in os.environ and os.environ["TFT"] == "3":
# HDMI touchscreen
SCREEN=3
os.environ["SDL_FBDEV"] = "/dev/fb0"
os.environ["SDL_MOUSEDEV"] = "/dev/input/touchscreen"
os.environ["SDL_MOUSEDRV"] = "TSLIB"
elif "TFT" in os.environ and os.environ["TFT"] == "4":
# Raspberry Pi 7" touchscreen
SCREEN=4
from ft5406 import Touchscreen
os.environ["SDL_FBDEV"] = "/dev/fb0"
ts = Touchscreen()
else:
# TFT touchscreen
SCREEN=1
os.environ["SDL_FBDEV"] = "/dev/fb1"
os.environ["SDL_MOUSEDEV"] = "/dev/input/touchscreen"
os.environ["SDL_MOUSEDRV"] = "TSLIB"
# Initialize pygame modules individually (to avoid ALSA errors) and hide mouse
pygame.font.init()
pygame.display.init()
pygame.mouse.set_visible(0)
# Initialise GPIO
GPIO.setwarnings(False)
#While loop to manage touch screen inputs
state = [False for x in range(10)]
while 1:
if SCREEN==4:
for touch in ts.poll():
if state[touch.slot] != touch.valid:
if touch.valid:
sys.exit()
else:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
sys.exit()
#Debug:
#ensure there is always a safe way to end the program if the touch screen fails
##if event.type == KEYDOWN:
## if event.key == K_ESCAPE:
## sys.exit()
time.sleep(0.4)
| gpl-3.0 | 3,545,909,162,319,380,500 | 28.262295 | 87 | 0.614566 | false |
foobarbazblarg/stayclean | stayclean-2016-april/serve-signups-with-flask.py | 1 | 8086 | #!/usr/bin/python
import subprocess
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
# Each day after you post a signup post, copy its 6-character ID to this array.
signupPageSubmissionIds = [ '4bvb7i', '4c1crs', '4c5lvg', '4ca9ff', '4cf91t', '4ckta7', '4cp4ir' ]
flaskport = 8883
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
o.refresh(force=True)
return redditSession
def getSubmissionsForRedditSession(redditSession):
submissions = [redditSession.get_submission(submission_id=submissionId) for submissionId in signupPageSubmissionIds]
for submission in submissions:
submission.replace_more_comments(limit=None, threshold=0)
return submissions
def getCommentsForSubmissions(submissions):
comments = []
for submission in submissions:
comments += praw.helpers.flatten_tree(submission.comments)
return comments
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
@app.route('/moderatesignups.html')
def moderatesignups():
global commentHashesAndComments
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submissions = getSubmissionsForRedditSession(redditSession)
flat_comments = getCommentsForSubmissions(submissions)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
for submission in submissions:
stringio.write(submission.title)
stringio.write("<br>\n")
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="Copy display-during-signup.py stdout to clipboard">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.permalink)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
# if ParticipantCollection().participantNamed(authorName).isStillIn:
# stringio.write(' <small><font color="green">(in)</font></small>')
# else:
# stringio.write(' <small><font color="red">(out)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">')
# stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
# stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
# stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Signup':
print "signup - " + username
subprocess.call(['./signup.py', username])
comment.upvote()
retireCommentHash(commentHash)
# if actionToTake == 'Signup and checkin':
# print "signup and checkin - " + username
# subprocess.call(['./signup-and-checkin.sh', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Relapse':
# print "relapse - " + username
# subprocess.call(['./relapse.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Reinstate':
# print "reinstate - " + username
# subprocess.call(['./reinstate.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
return Response("hello", mimetype='text/html')
@app.route('/copydisplayduringsignuptoclipboard.html', methods=["POST"])
def copydisplayduringsignuptoclipboard():
print "TODO: Copy display to clipboard"
subprocess.call(['./display-during-signup.py'])
return Response("hello", mimetype='text/html')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
| mit | 8,488,130,493,691,722,000 | 40.255102 | 129 | 0.67351 | false |
zhibolau/webApp | www/models.py | 1 | 1589 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Zhibo Liu'
import time,uuid
from transwarp.db import next_id # 直接from Import会出错 必须在那个目录下建立__init__.py 文件!!!!!!!!
from transwarp.orm import Model, StringField, BooleanField, FloatField, TextField
class User(Model):
__table__ = 'users'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
email = StringField(updatable=False, ddl='varchar(50)')
password = StringField(ddl='varchar(50)')
admin = BooleanField()
name = StringField(ddl='varchar(50)')
image = StringField(ddl='varchar(500)')
created_at = FloatField(updatable=False, default=time.time)
class Blog(Model):
__table__ = 'blogs'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
user_id = StringField(updatable=False, ddl='varchar(50)')
user_name = StringField(ddl='varchar(50)')
user_image = StringField(ddl='varchar(500)')
name = StringField(ddl='varchar(50)')
summary = StringField(ddl='varchar(200)')
content = TextField()
created_at = FloatField(updatable=False, default=time.time)
class Comment(Model):
__table__ = 'comments'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
blog_id = StringField(updatable=False, ddl='varchar(50)')
user_id = StringField(updatable=False, ddl='varchar(50)')
user_name = StringField(ddl='varchar(50)')
user_image = StringField(ddl='varchar(500)')
content = TextField()
created_at = FloatField(updatable=False, default=time.time)
| gpl-2.0 | -8,860,960,697,945,917,000 | 35.162791 | 86 | 0.67717 | false |
thomaserlang/XenBackup | src/xenbackup/XenAPI.py | 1 | 9750 | # Copyright (c) Citrix Systems, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# --------------------------------------------------------------------
# Parts of this file are based upon xmlrpclib.py, the XML-RPC client
# interface included in the Python distribution.
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
import gettext
import xmlrpclib
import httplib
import socket
import sys
import ssl
translation = gettext.translation('xen-xm', fallback = True)
API_VERSION_1_1 = '1.1'
API_VERSION_1_2 = '1.2'
class Failure(Exception):
def __init__(self, details):
self.details = details
def __str__(self):
try:
return str(self.details)
except Exception, exn:
import sys
print >>sys.stderr, exn
return "Xen-API failure: %s" % str(self.details)
def _details_map(self):
return dict([(str(i), self.details[i])
for i in range(len(self.details))])
# Just a "constant" that we use to decide whether to retry the RPC
_RECONNECT_AND_RETRY = object()
class UDSHTTPConnection(httplib.HTTPConnection):
"""HTTPConnection subclass to allow HTTP over Unix domain sockets. """
def connect(self):
path = self.host.replace("_", "/")
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(path)
class UDSHTTP(httplib.HTTP):
_connection_class = UDSHTTPConnection
class UDSTransport(xmlrpclib.Transport):
def __init__(self, use_datetime=0):
self._use_datetime = use_datetime
self._extra_headers=[]
def add_extra_header(self, key, value):
self._extra_headers += [ (key,value) ]
def make_connection(self, host):
# Python 2.4 compatibility
if sys.version_info[0] <= 2 and sys.version_info[1] < 6:
return UDSHTTP(host)
else:
return UDSHTTPConnection(host)
def send_request(self, connection, handler, request_body):
connection.putrequest("POST", handler)
for key, value in self._extra_headers:
connection.putheader(key, value)
class Session(xmlrpclib.ServerProxy):
"""A server proxy and session manager for communicating with xapi using
the Xen-API.
Example:
session = Session('http://localhost/')
session.login_with_password('me', 'mypassword')
session.xenapi.VM.start(vm_uuid)
session.xenapi.session.logout()
"""
def __init__(self, uri, transport=None, encoding=None, verbose=0,
allow_none=1):
try:
xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding,
verbose, allow_none, context=ssl._create_unverified_context())
except AttributeError:
xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding, verbose, allow_none)
self.transport = transport
self._session = None
self.last_login_method = None
self.last_login_params = None
self.API_version = API_VERSION_1_1
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
self._login(methodname, params)
return None
elif methodname == 'logout' or methodname == 'session.logout':
self._logout()
return None
else:
retry_count = 0
while retry_count < 3:
full_params = (self._session,) + params
result = _parse_result(getattr(self, methodname)(*full_params))
if result is _RECONNECT_AND_RETRY:
retry_count += 1
if self.last_login_method:
self._login(self.last_login_method,
self.last_login_params)
else:
raise xmlrpclib.Fault(401, 'You must log in')
else:
return result
raise xmlrpclib.Fault(
500, 'Tried 3 times to get a valid session, but failed')
def _login(self, method, params):
result = _parse_result(getattr(self, 'session.%s' % method)(*params))
if result is _RECONNECT_AND_RETRY:
raise xmlrpclib.Fault(
500, 'Received SESSION_INVALID when logging in')
self._session = result
self.last_login_method = method
self.last_login_params = params
self.API_version = self._get_api_version()
def _logout(self):
try:
if self.last_login_method.startswith("slave_local"):
return _parse_result(self.session.local_logout(self._session))
else:
return _parse_result(self.session.logout(self._session))
finally:
self._session = None
self.last_login_method = None
self.last_login_params = None
self.API_version = API_VERSION_1_1
def _get_api_version(self):
pool = self.xenapi.pool.get_all()[0]
host = self.xenapi.pool.get_master(pool)
major = self.xenapi.host.get_API_version_major(host)
minor = self.xenapi.host.get_API_version_minor(host)
return "%s.%s"%(major,minor)
def __getattr__(self, name):
if name == 'handle':
return self._session
elif name == 'xenapi':
return _Dispatcher(self.API_version, self.xenapi_request, None)
elif name.startswith('login') or name.startswith('slave_local'):
return lambda *params: self._login(name, params)
else:
return xmlrpclib.ServerProxy.__getattr__(self, name)
def xapi_local():
return Session("http://_var_xapi_xapi/", transport=UDSTransport())
def _parse_result(result):
if type(result) != dict or 'Status' not in result:
raise xmlrpclib.Fault(500, 'Missing Status in response from server' + result)
if result['Status'] == 'Success':
if 'Value' in result:
return result['Value']
else:
raise xmlrpclib.Fault(500,
'Missing Value in response from server')
else:
if 'ErrorDescription' in result:
if result['ErrorDescription'][0] == 'SESSION_INVALID':
return _RECONNECT_AND_RETRY
else:
raise Failure(result['ErrorDescription'])
else:
raise xmlrpclib.Fault(
500, 'Missing ErrorDescription in response from server')
# Based upon _Method from xmlrpclib.
class _Dispatcher:
def __init__(self, API_version, send, name):
self.__API_version = API_version
self.__send = send
self.__name = name
def __repr__(self):
if self.__name:
return '<XenAPI._Dispatcher for %s>' % self.__name
else:
return '<XenAPI._Dispatcher>'
def __getattr__(self, name):
if self.__name is None:
return _Dispatcher(self.__API_version, self.__send, name)
else:
return _Dispatcher(self.__API_version, self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
| mit | -7,467,321,875,946,531,000 | 38.156627 | 101 | 0.628205 | false |
volkandkaya/trader | trader/joins/migrations/0005_auto__add_unique_join_email_ref_id.py | 1 | 1346 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'Join', fields ['email', 'ref_id']
db.create_unique(u'joins_join', ['email', 'ref_id'])
def backwards(self, orm):
# Removing unique constraint on 'Join', fields ['email', 'ref_id']
db.delete_unique(u'joins_join', ['email', 'ref_id'])
models = {
u'joins.join': {
'Meta': {'unique_together': "(('email', 'ref_id'),)", 'object_name': 'Join'},
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'default': "'ABC'", 'max_length': '123'}),
'ref_id': ('django.db.models.fields.CharField', [], {'default': "'ABC'", 'max_length': '123'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['joins'] | mit | 6,737,420,251,535,101,000 | 41.09375 | 114 | 0.572065 | false |
mohseniaref/PySAR-1 | pysar/pysarApp.py | 1 | 21751 | #! /usr/bin/env python
###############################################################################
#
# Project: PySAR
# Purpose: Python Module for InSAR Time-series Analysis
# Author: Heresh Fattahi
# Created: July 2013
# Modified: Yunjun Zhang, Feb 2015
###############################################################################
# Copyright (c) 2013, Heresh Fattahi
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import glob
import time
import _readfile as readfile
import h5py
import subprocess
from pysar._pysar_utilities import check_variable_name
def radar_Or_geo(igramFile):
h5file=h5py.File(igramFile,'r')
igramList=h5file['interferograms'].keys()
if 'X_FIRST' in h5file['interferograms'][igramList[0]].attrs.keys():
rdr_geo='geo'
else:
rdr_geo='radar'
h5file.close()
return rdr_geo
def Usage():
print '''
*******************************************************
*******************************************************
*******************************************************
*******************************************************
********* OOOOO OOOOO O OOOO *********
********* O O O O O O O O O *********
********* OOOOO OOO OOOOO OOOOO OOOO *********
********* O O O O O O O *********
********* O OOO OOOOO O O O O *********
********* *********
*******************************************************
*******************************************************
*******************************************************
*******************************************************
A Python Module for InSAR time-series analysis.
PySAR v1.0 July 2013, InSAR Lab, RSMAS, University of Miami
usage:
pysarApp.py TEMPLATEFILE
example:
pysarApp.py /nethome/hfattahi/SanAndreasT356EnvD.template
pysarApp.py $TE/SanAndreasT356EnvD.template
*******************************************************
Template file options:
pysar.inputdata=/scratch/hfattahi/PROCESS/SanAndreasT356EnvD/DONE/IFG*/filt*0*c10.unw
pysar.CorFiles = /scratch/hfattahi/PROCESS/SanAndreasT356EnvD/DONE/IFG*/filt*0*.cor
pysar.wraped = /scratch/hfattahi/PROCESS/SanAndreasT356EnvD/DONE/IFG*/filt*0*.int
pysar.geomap = /scratch/hfattahi/PROCESS/SanAndreasT356EnvD/GEO/geomap_12/geomap_8rlks.trans
pysar.dem = /scratch/hfattahi/PROCESS/SanAndreasT356EnvD/DONE/IFG_20050102_20070809/radar_8lks.hgt
pysar.subset.yx = 1800:2000,700:800
pysar.seed.ll=31.5, 67 or pysar.seed.yx=257 , 151
pysar.unwrap_error = yes [no]
pysar.tropospheric_delay = yes ['no']
pysar.tropospheric_delay.method = pyaps ['height-correlation']
pysar.Numerical_Weather_Model = ECMWF ['MERRA', 'NARR']
pysar.acquisition_time = 00:00 ['06:00', '12:00', '18:00']
pysar.topo_error = yes [no]
  pysar.orbit_error = yes [no]
pysar.orbit_error.method = plane ['quadratic', 'plane', 'quardatic_range', 'quadratic_azimiuth', 'plane_range', 'plane_azimuth','baselineCor','BaseTropCor']
pysar.mask=yes
pysar.mask.threshold = 0.7
pysar.geocode = yes
*******************************************************
'''
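# A minimal template sketch (illustrative only -- the paths and values below
# are hypothetical, not taken from a real project).  Rlooks_unw is also read
# by this script, so a usable template needs it as well:
#
#   Rlooks_unw               = 8
#   pysar.inputdata          = /data/MyTrackT123/DONE/IFG*/filt*0*c10.unw
#   pysar.CorFiles           = /data/MyTrackT123/DONE/IFG*/filt*0*.cor
#   pysar.geomap             = /data/MyTrackT123/GEO/geomap_12/geomap_8rlks.trans
#   pysar.dem                = /data/MyTrackT123/DONE/IFG_20050102_20070809/radar_8lks.hgt
#   pysar.seed.yx            = 257, 151
#   pysar.unwrap_error       = yes
#   pysar.tropospheric_delay = no
#   pysar.topo_error         = yes
#   pysar.orbit_error        = yes
#   pysar.orbit_error.method = plane
#   pysar.mask               = yes
#   pysar.mask.threshold     = 0.7
#   pysar.geocode            = yes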
#########################################
def main(argv):
try:
templateFile = argv[1]
except:
Usage();sys.exit(1)
projectName = os.path.basename(templateFile.partition('.')[0])
try:
tssarProjectDir = os.getenv('TSSARDIR') +'/'+projectName
except:
tssarProjectDir = os.getenv('SCRATCHDIR') + '/' + projectName + "/TSSAR" # FA 7/2015: adopted for new directory structure
print "QQ " + tssarProjectDir
if not os.path.isdir(tssarProjectDir): os.mkdir(tssarProjectDir)
os.chdir(tssarProjectDir)
igramFile = 'LoadedData.h5'
Modified_igramFile = 'Modified_LoadedData.h5'
if os.path.isfile(Modified_igramFile):
print Modified_igramFile + ' already exists.'
igramFile=Modified_igramFile
template = readfile.read_template(templateFile)
Rlooks = template['Rlooks_unw']
#########################################
# Loading interferograms
#########################################
print '******************************************'
print''
if os.path.isfile(igramFile):
print igramFile + ' already exists.'
else:
loadCmd='load_data.py ' + templateFile
print loadCmd
os.system(loadCmd)
# copyDemCmd='copy_dem_trans.py ' + templateFile
# print copyDemCmd
# os.system(copyDemCmd)
print''
print '******************************************'
#########################################
# Check the subset
#########################################
try:
subset= template['pysar.subset.yx'].split(',')
print subset
print subset[0]
subsetOutName='subset_'+igramFile
subsetCmd='subset.py -f '+ igramFile + ' -y '+subset[0]+' -x '+subset[1] + ' -o ' + subsetOutName
print '*****************************************'
print 'Subset the area ...'
print subsetCmd
os.system(subsetCmd)
igramFile=subsetOutName
print '*****************************************'
except:
print '*****************************************'
print 'No Subset selected. Processing the whole area'
print '*****************************************'
#########################################
#Referencing all interferograms to the same pixel
#########################################
rdr_or_geo=radar_Or_geo(igramFile)
print '******************************************'
print''
if os.path.isfile('Seeded_'+igramFile):
igramFile = 'Seeded_'+igramFile
print igramFile + ' already exists.'
else:
      print 'referencing all interferograms to the same pixel.'
if 'pysar.seed.ll' in template.keys():
         'Checking the lat/lon reference point'
lat= template['pysar.seed.ll'].split(',')[0]
lon= template['pysar.seed.ll'].split(',')[1]
seedCmd= 'SeedData.py -f ' + igramFile + ' -l ' +lat+ ' -L '+lon
elif 'pysar.seed.yx' in template.keys():
'Checking y/x reference point'
y= template['pysar.seed.yx'].split(',')[0]
x= template['pysar.seed.yx'].split(',')[1]
seedCmd= 'seed_data.py -f ' + igramFile + ' -y ' +y+ ' -x '+x
else:
seedCmd= 'seed_data.py -f ' + igramFile
igramFile = 'Seeded_'+igramFile
print seedCmd
os.system(seedCmd)
print''
print '******************************************'
############################################
#unwrapping error correction based on the
# consistency of triplets of interferograms
############################################
print '******************************************'
print''
try:
template['pysar.unwrap_error']
if template['pysar.unwrap_error'] in ('y','yes','Yes','YES'):
print 'unwrapping error correction might take a while depending on the size of your data set! '
unwCmd='unwrap_error.py '+igramFile
os.system(unwCmd)
igramFile=igramFile.split('.')[0]+'_unwCor.h5'
else:
print 'No unwrapping error correction.'
except:
print 'No unwrapping error correction.'
print''
print '******************************************'
#########################################
# inversion of interferograms
########################################
print '******************************************'
print''
if os.path.isfile(igramFile.split('.')[0]+'_unwCor.h5'):
igramFile = igramFile.split('.')[0]+'_unwCor.h5'
print igramFile + ' exists.'
if os.path.isfile('timeseries.h5'):
print 'timeseries.h5 already exists, inversion is not needed.'
else:
invertCmd = 'igram_inversion.py '+ igramFile
print invertCmd
os.system(invertCmd)
timeseriesFile='timeseries.h5'
print''
print '******************************************'
##############################################
#temporal coherence:
#A parameter to evaluate the consistency of
# timeseries with the interferograms
##############################################
print '******************************************'
print''
# if os.path.isfile('temporal_coherence.h5'):
# print 'temporal_coherence.h5 already exists.'
# else:
# tempcohCmd='temporal_coherence.py '+igramFile+' '+timeseriesFile
# print tempcohCmd
# os.system(tempcohCmd)
tempcohCmd='temporal_coherence.py '+igramFile+' '+timeseriesFile
print tempcohCmd
os.system(tempcohCmd)
print''
print '******************************************'
##############################################
#update Mask based on temporal coherence
# add by Yunjun Feb 15, 2015
##############################################
print '******************************************'
print''
try:
template['pysar.mask']
if template['pysar.mask'] in ('yes','Yes','YES','y'):
print 'Updating mask according to temporal coherence'
cohT=template['pysar.mask.threshold']
maskCmd='generate_mask.py -f temporal_coherence.h5 -m '+ cohT +' -M 1.0 -o Mask.h5'
print maskCmd
os.system(maskCmd)
else:
print 'No update for mask.'
except:
print 'No update for mask.'
print''
print '******************************************'
##############################################
# Generate incident angle
# add by Yunjun Feb 15, 2015
##############################################
print '******************************************'
print''
inciCmd='incidence_angle.py -f timeseries.h5'
print inciCmd
os.system(inciCmd)
print''
print '******************************************'
##############################################
#If Satellite is Envisat and if Coordinate
#system is radar then LOD correction
##############################################
print '******************************************'
print''
h5file=h5py.File(timeseriesFile,'r')
if rdr_or_geo =='radar':
if h5file['timeseries'].attrs['PLATFORM']=='ENVISAT':
LODcmd='lod.py '+timeseriesFile
print LODcmd
os.system(LODcmd)
timeseriesFile=timeseriesFile.split('.')[0]+'_LODcor.h5'
print''
print '******************************************'
##############################################
# Tropospheric Correction
##############################################
print '******************************************'
print''
try:
if (template['pysar.tropospheric_delay'] in ('y','yes','Yes','YES')) and template['pysar.orbit_error.method']=='BaseTropCor':
print '''
+++++++++++++++++++++++++++++++++++++++++++++++++++
WARNING:
Orbital error correction was BaseTropCor.
Tropospheric correction was already applied simultaneous with baseline error correction.
Tropospheric correction can not be applied again.
      To apply the tropospheric correction separately from baseline error correction, choose one of the other orbital error correction options.
+++++++++++++++++++++++++++++++++++++++++++++++++++
'''
template['pysar.tropospheric_delay']='no'
except:
print 'Checking the tropospheric delay correction ...'
if template['pysar.tropospheric_delay'] in ('y','yes','Yes','YES'):
# demFile='radar_'+Rlooks+'rlks.hgt'
demFile=template['pysar.dem']
demFile=check_variable_name(demFile)
# print 'DEM file: '+demFile
if not os.path.isfile(demFile):
print '++++++++++++++++++++++++++++++++++++++++++++++'
print 'Error:'
print 'DEM (radar_*rlks.hgt file) was not found!'
print 'Continue without tropospheric correction ...'
print '++++++++++++++++++++++++++++++++++++++++++++++'
else:
if template['pysar.tropospheric_delay.method'] in ['height-correlation','height_correlation','Height-Correlation','Height_Correlation']:
print 'tropospheric delay correction with height-correlation approach'
try:
polyOrder=template['pysar.trop.polyOrder']
except:
               print 'Default polynomial order for tropospheric correction = 1'
polyOrder='1'
            cmdTrop='tropospheric_correction.py'+ ' -f '+ timeseriesFile + ' -d '+ demFile + ' -p '+ polyOrder
os.system(cmdTrop)
timeseriesFile=timeseriesFile.split('.')[0]+'_tropCor.h5'
elif template['pysar.tropospheric_delay.method']=='pyaps':
print 'Atmospheric correction using Numerical Weather Models (using PyAPS software)'
print 'reading DEM, source of NWM and acquisition time from template file'
source_of_NWM=template['pysar.Numerical_Weather_Model']
print 'Numerical Weather Model: '+source_of_NWM
acquisition_time=template['pysar.acquisition_time']
print 'acquisition time: '+acquisition_time
# cmdTrop = ["tropcor_pyaps.py -f ",timeseriesFile," -d ",demFile," -s ",source_of_NWM," -h ",acquisition_time," -i incidence_angle.h5"]
cmdTrop = 'tropcor_pyaps.py -f '+timeseriesFile+ ' -d '+ demFile +' -s ' + source_of_NWM + ' -h '+ acquisition_time + ' -i incidence_angle.h5'
print cmdTrop
os.system(cmdTrop)
# subprocess.Popen(cmdTrop).wait()
timeseriesFile=timeseriesFile.split('.')[0]+'_'+source_of_NWM+'.h5'
else:
print 'Atmospheric correction method not recognized.'
else:
print 'No atmospheric delay correction.'
print''
print '******************************************'
##############################################
#topographic residuals
##############################################
print '******************************************'
print''
try:
template['pysar.topo_error']
if template['pysar.topo_error'] in ('yes','Yes','YES','y'):
print 'Correcting topographic residuals'
topoCmd='dem_error.py '+ timeseriesFile +' '+ igramFile
print topoCmd
os.system(topoCmd)
timeseriesFile=timeseriesFile.split('.')[0]+'_demCor.h5'
else:
print 'No correction for topographic residuals.'
except:
print 'No correction for topographic residuals.'
print''
print '******************************************'
##############################################
#Orbit correction
##############################################
print '******************************************'
print''
try:
template['pysar.orbit_error']
if template['pysar.orbit_error'] in ('yes','Yes','YES','y'):
try:
orbit_error_method=template['pysar.orbit_error.method']
print 'orbit error correction method : '+orbit_error_method
if orbit_error_method in ['quadratic', 'plane', 'quardatic_range', 'quadratic_azimiuth', 'plane_range', 'plane_azimuth']:
orbitCmd='remove_plane.py '+timeseriesFile+' '+template['pysar.orbit_error.method'] #+ ' Mask.h5'
timeseriesFile=timeseriesFile.split('.')[0]+'_'+template['pysar.orbit_error.method']+'.h5'
print orbitCmd
os.system(orbitCmd)
elif orbit_error_method == 'baselineCor':
orbitCmd='baseline_error.py ' +timeseriesFile #+ ' Mask.h5'
print orbitCmd
try:
h5file=h5py.File(timeseriesFile,'r')
daz=float(h5file['timeseries'].attrs['AZIMUTH_PIXEL_SIZE'])
os.system(orbitCmd)
timeseriesFile=timeseriesFile.split('.')[0]+'_'+template['pysar.orbit_error.method']+'.h5'
except:
print 'WARNING!'
print 'Skipping orbital error correction.'
print 'baselineCor method can only be applied in radar coordinate'
elif orbit_error_method =='BaseTropCor':
demfile=template['pysar.dem']
demfile=check_variable_name(demfile)
try:
polyOrder=template['pysar.trop.polyOrder']
except:
                  print 'Default polynomial order for tropospheric correction = 1'
                  polyOrder='1'
try:
h5file=h5py.File(timeseriesFile,'r')
daz=float(h5file['timeseries'].attrs['AZIMUTH_PIXEL_SIZE'])
                  orbitCmd='baseline_trop.py '+timeseriesFile+' '+ demfile +' '+ polyOrder +' range_and_azimuth'
print 'Joint estimation of Baseline error and tropospheric delay [height-correlation approach]'
print orbitCmd
os.system(orbitCmd)
timeseriesFile=timeseriesFile.split('.')[0]+'_'+template['pysar.orbit_error.method']+'.h5'
except:
print 'WARNING!'
print 'Skipping orbital error correction.'
print 'baselineCor method can only be applied in radar coordinate'
else:
print '+++++++++++++++++++++++++++++++++++++++++++++++++++++++'
print 'WARNING!'
print 'Orbital error correction method was not recognized!'
print 'Possible options are:'
print 'quadratic, plane, quardatic_range, quadratic_azimiuth, plane_range, plane_azimuth,baselineCor,BaseTropCor'
print 'Continue without orbital errors correction...'
print '+++++++++++++++++++++++++++++++++++++++++++++++++++++++'
except:
print 'No orbital errors correction.'
else:
print 'No orbital errors correction.'
except:
print 'No orbital errors correction.'
print''
print '******************************************'
#############################################
#Velocity and rmse maps
#############################################
print '******************************************'
print''
velCmd='timeseries2velocity.py '+timeseriesFile
print velCmd
os.system(velCmd)
print''
print '******************************************'
#############################################
#Masking the velocity based on the temporal
#coherence or rmse if it's specified
#############################################
print '******************************************'
print''
try:
template['pysar.mask']
if template['pysar.mask'] in ('yes','Yes','YES','y'):
try:
template['pysar.mask.threshold']
maskCmd='masking.py -f velocity.h5 -m temporal_coherence.h5 -t '+template['pysar.mask.threshold']
print 'Masking the velocity file using the temporal coherence with the threshold of '+template['pysar.mask.threshold']
except:
maskCmd='Masking.py -f velocity.h5 -m temporal_coherence.h5 -t 0.7'
print 'Masking the velocity file using the temporal coherence with the threshold of 0.7'
os.system(maskCmd)
# rmCmd='rm velocity.h5'
# os.system(rmCmd)
# mvCmd='mv velocity_masked.h5 velocity.h5'
# os.system(mvCmd)
else:
print 'No masking applied'
except:
print 'No masking applied'
print''
print '******************************************'
############################################
#Geocoding
############################################
print '******************************************'
print''
try:
template['pysar.geocode']
if template['pysar.geocode'] in ('y','yes','Yes','YES'):
geomapFile='geomap_'+Rlooks+'rlks.trans'
# geoCmd = 'geocode.py '+timeseriesFile+' '+geomapFile
# print geoCmd
# os.system(geoCmd)
geoCmd = 'geocode.py velocity.h5 '+geomapFile
print geoCmd
os.system(geoCmd)
geoCmd = 'geocode.py Mask.h5 '+geomapFile
print geoCmd
os.system(geoCmd)
# maskCmd = 'Masking.py -f geo_'+timeseriesFile+' -m geo_Mask.h5'
# print maskCmd
# os.system(maskCmd)
maskCmd = 'masking.py -f geo_velocity.h5 -m geo_Mask.h5'
print maskCmd
os.system(maskCmd)
else:
print 'No geocoding applied'
except:
print 'No geocoding applied'
print''
print '******************************************'
#############################################
# PySAR v1.0 #
#############################################
print''
print '###############################################'
print ''
print 'End of PySAR processing.'
print ''
print '################################################'
if __name__ == '__main__':
main(sys.argv[:])
| mit | 6,423,405,613,165,199,000 | 37.841071 | 157 | 0.507379 | false |
qbuat/rootpy | rootpy/tree/model.py | 1 | 5162 | # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
import inspect
from cStringIO import StringIO
import types
import ROOT
from .. import log; log = log[__name__]
from .treetypes import Column
from .treebuffer import TreeBuffer
__all__ = [
'TreeModel',
]
class TreeModelMeta(type):
"""
Metaclass for all TreeModels
Addition/subtraction of TreeModels is handled
as set union and difference of class attributes
"""
def __new__(cls, name, bases, dct):
for attr, value in dct.items():
TreeModelMeta.checkattr(attr, value)
return type.__new__(cls, name, bases, dct)
def __add__(cls, other):
return type('_'.join([cls.__name__, other.__name__]),
(cls, other), {})
def __iadd__(cls, other):
return cls.__add__(other)
def __sub__(cls, other):
attrs = dict(set(cls.get_attrs()).difference(set(other.get_attrs())))
return type('_'.join([cls.__name__, other.__name__]),
(TreeModel,), attrs)
def __isub__(cls, other):
return cls.__sub__(other)
def __setattr__(cls, attr, value):
TreeModelMeta.checkattr(attr, value)
type.__setattr__(cls, attr, value)
@classmethod
def checkattr(metacls, attr, value):
"""
Only allow class attributes that are instances of
rootpy.types.Column, ROOT.TObject, or ROOT.ObjectProxy
"""
if not isinstance(value, (
types.MethodType,
types.FunctionType,
classmethod,
staticmethod,
property)):
if attr in dir(type('dummy', (object,), {})) + \
['__metaclass__']:
return
if attr.startswith('_'):
raise SyntaxError(
"TreeModel attribute `{0}` "
"must not start with `_`".format(attr))
if not inspect.isclass(value):
if not isinstance(value, Column):
raise TypeError(
"TreeModel attribute `{0}` "
"must be an instance of "
"`rootpy.tree.treetypes.Column`".format(attr))
return
if not issubclass(value, (ROOT.TObject, ROOT.ObjectProxy)):
raise TypeError(
"TreeModel attribute `{0}` must inherit "
"from `ROOT.TObject` or `ROOT.ObjectProxy`".format(
attr))
def prefix(cls, name):
"""
Create a new TreeModel where class attribute
names are prefixed with ``name``
"""
attrs = dict([(name + attr, value) for attr, value in cls.get_attrs()])
return TreeModelMeta(
'_'.join([name, cls.__name__]),
(TreeModel,), attrs)
def suffix(cls, name):
"""
Create a new TreeModel where class attribute
names are suffixed with ``name``
"""
attrs = dict([(attr + name, value) for attr, value in cls.get_attrs()])
return TreeModelMeta(
'_'.join([cls.__name__, name]),
(TreeModel,), attrs)
def get_attrs(cls):
"""
Get all class attributes ordered by definition
"""
ignore = dir(type('dummy', (object,), {})) + ['__metaclass__']
attrs = [
item for item in inspect.getmembers(cls) if item[0] not in ignore
and not isinstance(
item[1], (
types.FunctionType,
types.MethodType,
classmethod,
staticmethod,
property))]
# sort by idx and use attribute name to break ties
attrs.sort(key=lambda attr: (getattr(attr[1], 'idx', -1), attr[0]))
return attrs
def to_struct(cls, name=None):
"""
Convert the TreeModel into a compiled C struct
"""
if name is None:
name = cls.__name__
basic_attrs = dict([(attr_name, value)
for attr_name, value in cls.get_attrs()
if isinstance(value, Column)])
if not basic_attrs:
return None
src = 'struct {0} {{'.format(name)
for attr_name, value in basic_attrs.items():
src += '{0} {1};'.format(value.type.typename, attr_name)
src += '};'
if ROOT.gROOT.ProcessLine(src) != 0:
return None
return getattr(ROOT, name, None)
def __repr__(cls):
out = StringIO()
for name, value in cls.get_attrs():
print >> out, '{0} -> {1}'.format(name, value)
return out.getvalue()[:-1]
def __str__(cls):
return repr(cls)
class TreeModel(object):
__metaclass__ = TreeModelMeta
def __new__(cls):
"""
Return a TreeBuffer for this TreeModel
"""
treebuffer = TreeBuffer()
for name, attr in cls.get_attrs():
treebuffer[name] = attr()
return treebuffer
| gpl-3.0 | 1,867,577,259,396,475,100 | 31.465409 | 79 | 0.518016 | false |
jolid/script.module.donnie | lib/donnie/furk.py | 1 | 6496 | import urllib2, urllib, sys, os, re, random, copy
from BeautifulSoup import BeautifulSoup, Tag, NavigableString
import xbmc,xbmcplugin,xbmcgui,xbmcaddon
from t0mm0.common.net import Net
from t0mm0.common.addon import Addon
from scrapers import CommonScraper
net = Net()
try:
import json
except:
# pre-frodo and python 2.4
import simplejson as json
''' ###########################################################
Usage and helper functions
############################################################'''
class FurkServiceSracper(CommonScraper):
def __init__(self, settingsid, DB=None, REG=None):
if DB:
self.DB=DB
if REG:
self.REG=REG
self.addon_id = 'script.module.donnie'
self.service='furk'
self.name = 'furk.net'
self.raiseError = False
self.referrer = 'http://www.furk.net/'
self.base_url = 'https://api.furk.net/api/'
self.user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
self.provides = []
self.settingsid = settingsid
self._loadsettings()
self.settings_addon = self.addon
def _getShows(self, silent=False):
self.log('Do Nothing here')
def _getRecentShows(self, silent=False):
self.log('Do Nothing here')
def _getEpisodes(self, showid, show, url, pDialog, percent, silent):
self.log('Do Nothing here')
def _getMovies(self, silent=False):
self.log('Do Nothing here')
def _getRecentMovies(self, silent):
self.log('Do Nothing here')
def _setKey(self, api_key):
xbmcaddon.Addon(id='script.module.donnie').setSetting('furk-apikey', api_key)
def _getKey(self):
api_key = xbmcaddon.Addon(id='script.module.donnie').getSetting('furk-apikey')
if api_key == '':
return None
return api_key
def cleanQuery(self, query):
self.log('Cleaning furk search string')
cleaned = query
if re.search('\\(\\d\\d\\d\\d\\)$', cleaned):
cleaned = cleaned[0:len(cleaned)-7]
cleaned = cleaned.replace(":", '')
cleaned = cleaned.replace("'", '')
cleaned = cleaned.replace("-", ' ')
cleaned = cleaned.replace("_", ' ')
print cleaned
return cleaned
def _login(self):
api_key = self._getKey()
if api_key:
self.log('Using cached api key')
return api_key
loginurl = "%slogin/login" % self.base_url
login = self.getSetting('furk-username')
password = self.getSetting('furk-password')
post_dict = {"login": login, "pwd": password}
cookiejar = os.path.join(self.cookie_path,'furk.lwp')
try:
response = net.http_POST(loginurl, post_dict).content
data = json.loads(response)
status = data['status']
api_key = data['api_key']
self._setKey(api_key)
self.log("Furk response: %s", response)
if status=="ok":
net.save_cookies(cookiejar)
else:
print 'Furk Account: login failed'
return api_key
except Exception, e:
print '**** Furk Error: %s' % e
pass
def _getStreams(self, episodeid=None, movieid=None):
api_key = self._login()
query = ""
if episodeid:
row = self.DB.query("SELECT rw_shows.showname, season, episode FROM rw_episodes JOIN rw_shows ON rw_shows.showid=rw_episodes.showid WHERE episodeid=?", [episodeid])
name = row[0].replace("'", "")
if re.search('\\(\\d\\d\\d\\d\\)$', row[0]):
name = name[0:len(name)-7]
season = row[1].zfill(2)
episode = row[2].zfill(2)
query = str("%s S%sE%s" % (name, season, episode))
elif movieid:
row = self.DB.query("SELECT movie, year FROM rw_movies WHERE imdb=? LIMIT 1", [movieid])
movie = self.cleanQuery(row[0])
query = "%s %s" %(movie, row[1])
streams = []
url = "%splugins/metasearch" % self.base_url
params = {"type": "video", "filter": "cached", "api_key": api_key, "q": query}
pagedata = net.http_POST(url, params).content
if pagedata=='':
return False
data = json.loads(pagedata)
try:
files = data['files']
for f in files:
if f['type'] == 'video':
raw_url = f['id']
name = f['name']
size = int(f['size']) / (1024 * 1024)
if size > 2000:
size = size / 1024
unit = 'GB'
else :
unit = 'MB'
self.getStreamByPriority('Furk - %s ([COLOR blue]%s %s[/COLOR])' %(name, size, unit), self.service + '://' + raw_url)
except Exception, e:
self.log("********Donnie Error: %s, %s" % (self.service, e))
self.DB.commit()
def getStreamByPriority(self, link, stream):
self.log(link)
host = 'furk.net'
SQL = "INSERT INTO rw_stream_list(stream, url, priority, machineid) " \
"SELECT ?, ?, priority, ? " \
"FROM rw_providers " \
"WHERE mirror=? and provider=?"
self.DB.execute(SQL, [link, stream, self.REG.getSetting('machine-id'), host, self.service])
def _getServicePriority(self, link):
self.log(link)
host = 'furk.net'
row = self.DB.query("SELECT priority FROM rw_providers WHERE mirror=? and provider=?", [host, self.service])
return row[0]
def _resolveStream(self, stream):
raw_url = stream.replace(self.service + '://', '')
resolved_url = ''
t_files = []
t_options = []
sdialog = xbmcgui.Dialog()
api_key = self._getKey()
params = {"type": "video", "id": raw_url, "api_key": api_key, 't_files': 1}
url = "%sfile/get" % self.base_url
pagedata = net.http_POST(url, params).content
if pagedata=='':
return False
#print pagedata
data = json.loads(str(pagedata))
try:
files = data['files'][0]['t_files']
for f in files:
if re.search('^video/', f['ct']):
size = int(f['size']) / (1024 * 1024)
if size > 2000:
size = size / 1024
unit = 'GB'
else :
unit = 'MB'
t_files.append("%s ([COLOR blue]%s %s[/COLOR])" %(f['name'], size, unit))
t_options.append(f['url_dl'])
file_select = sdialog.select('Select Furk Stream', t_files)
if file_select < 0:
return resolved_url
resolved_url = str(t_options[file_select])
except Exception, e:
self.log("********Donnie Error: %s, %s" % (self.service, e))
self.log("Furk retruned: %s", resolved_url, level=0)
return resolved_url
    def _resolveIMDB(self, uri): # Often needed if a site's movie index does not include imdb links but the movie page does
imdb = ''
print uri
pagedata = self.getURL(uri, append_base_url=True)
if pagedata=='':
return
imdb = re.search('http://www.imdb.com/title/(.+?)/', pagedata).group(1)
return imdb
def whichHost(self, host): #Sometimes needed
table = { 'Watch Blah' : 'blah.com',
'Watch Blah2' : 'blah2.com',
}
try:
host_url = table[host]
return host_url
except:
return 'Unknown'
| gpl-2.0 | 5,660,529,019,860,364,000 | 28.935484 | 167 | 0.622229 | false |
twitterdev/twitter-leaderboard | services/migrations/0001_initial.py | 1 | 1248 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-28 08:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timezone', models.CharField(blank=True, max_length=100, null=True)),
('curator_auth_token', models.CharField(blank=True, max_length=40, null=True)),
('twitter_id', models.CharField(blank=True, max_length=25, null=True)),
('twitter_access_token', models.CharField(blank=True, max_length=75, null=True)),
('twitter_access_token_secret', models.CharField(blank=True, max_length=75, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
]
| mit | -8,509,788,362,144,956,000 | 39.258065 | 145 | 0.63141 | false |
jayrumi/walmart-reviews | setup.py | 1 | 1197 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'walmart-reviews',
version = '1.2.0.dev1',
packages = find_packages(),
requires = ['python (>= 3.5)'],
#install_requires = ['random', 'requests', 'lxml', 'datetime', 'time'],
description = 'Parsing reviews from Walmart.com without using API',
long_description = long_description, #'A package for parsing reviews and all information about reviewers from walmart.com for specific item. For more information read README.rst', #open('README.rst').read(),
author = 'Yauheni Rumiantsau',
author_email = '[email protected]',
url = 'https://github.com/jayrumi/walmart-reviews',
#download_url = '',
license = 'MIT License',
keywords = 'walmart parsing',
classifiers = [
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| mit | 8,552,747,381,099,799,000 | 34.205882 | 211 | 0.634085 | false |
harrylewis/python-uinames | uinames/models.py | 1 | 1384 | from utils import PropertyUnavailable
class People(object):
"""
A collection of people, represented by the Person class.
"""
def __init__(self, json=None):
self._json = json or {}
self.data = [Person(identity) for identity in self._json]
def __str__(self):
return self.__unicode__()
def __unicode__(self):
return "<People instance: {} Persons>".format(len(self.data))
class Person(object):
"""
A representation of a person identity, generated from the UINames API.
"""
def __init__(self, json=None):
self._json = json or {}
def __getattr__(self, item):
try:
obj = self._json[item]
# determine if string or dict
if isinstance(obj, str) or isinstance(obj, unicode):
return obj.encode("utf-8")
return obj
except KeyError:
raise PropertyUnavailable(
"Property '{}' is does not exist or is not available for this "
"Person.".format(item))
def __str__(self):
return self.__unicode__()
def __unicode__(self):
return "<Person instance: {} {} from {}>".format(self.name,
self.surname,
self.region)
if __name__ == "__main__":
pass
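    # A hedged, self-contained example (not part of the package): build the
    # models from a hand-written payload shaped like a UINames API response.
    # The names below are made up for illustration.
    sample_json = [
        {"name": "Jane", "surname": "Doe", "region": "Canada", "gender": "female"},
    ]
    people = People(sample_json)
    print(people)          # <People instance: 1 Persons>
    print(people.data[0])  # <Person instance: Jane Doe from Canada>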
| mit | 8,602,743,123,588,186,000 | 26.68 | 79 | 0.510116 | false |
digwanderlust/pants | tests/python/pants_test/base/test_payload_field.py | 1 | 10723 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from hashlib import sha1
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.jar_dependency import IvyArtifact, JarDependency
from pants.backend.python.python_requirement import PythonRequirement
from pants.base.payload import Payload
from pants.base.payload_field import (ExcludesField, FileField, FingerprintedField,
FingerprintedMixin, JarsField, PrimitiveField,
PythonRequirementsField, SourcesField, TargetListField)
from pants_test.base_test import BaseTest
class PayloadTest(BaseTest):
def test_excludes_field(self):
empty = ExcludesField()
empty_fp = empty.fingerprint()
self.assertEqual(empty_fp, empty.fingerprint())
normal = ExcludesField([Exclude('com', 'foozle'), Exclude('org')])
normal_fp = normal.fingerprint()
self.assertEqual(normal_fp, normal.fingerprint())
normal_dup = ExcludesField([Exclude('com', 'foozle'), Exclude('org')])
self.assertEqual(normal_fp, normal_dup.fingerprint())
self.assertNotEqual(empty_fp, normal_fp)
def test_jars_field_order(self):
jar1 = JarDependency('com', 'foo', '1.0.0')
jar2 = JarDependency('org', 'baz')
self.assertNotEqual(
JarsField([jar1, jar2]).fingerprint(),
JarsField([jar2, jar1]).fingerprint(),
)
def test_jars_field_artifacts(self):
jar1 = JarDependency('com', 'foo', '1.0.0').with_artifact('com', 'baz')
jar2 = JarDependency('com', 'foo', '1.0.0')
self.assertNotEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar2]).fingerprint(),
)
def test_jars_field_artifacts_arg(self):
jar1 = JarDependency('com', 'foo', '1.0.0', artifacts=[IvyArtifact('com', 'baz')])
jar2 = JarDependency('com', 'foo', '1.0.0')
self.assertNotEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar2]).fingerprint(),
)
def test_jars_field_artifacts_arg_vs_method(self):
jar1 = JarDependency('com', 'foo', '1.0.0', artifacts=[IvyArtifact('com', 'baz')])
jar2 = JarDependency('com', 'foo', '1.0.0').with_artifact('com', 'baz')
self.assertEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar2]).fingerprint(),
)
  def test_jars_field_multiple_artifacts(self):
jar1 = (JarDependency('com', 'foo', '1.0.0')
.with_artifact('com', 'baz')
.with_artifact('org', 'bat'))
jar2 = (JarDependency('com', 'foo', '1.0.0')
.with_artifact('org', 'bat')
.with_artifact('com', 'baz'))
jar3 = (JarDependency('com', 'foo', '1.0.0')
.with_artifact('org', 'bat'))
jar4 = JarDependency('com', 'foo', '1.0.0')
self.assertEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar2]).fingerprint(),
)
self.assertNotEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar3]).fingerprint(),
)
self.assertNotEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar4]).fingerprint(),
)
self.assertNotEqual(
JarsField([jar3]).fingerprint(),
JarsField([jar4]).fingerprint(),
)
def test_jars_field_artifacts_ordering(self):
"""JarDependencies throw away ordering information about their artifacts in the cache key.
But they do not throw it away in their internal representation! In the future, this should be
fixed: either they should sort them as they are added and keep a canonical representation, or
the order information should be preserved.
"""
jar1 = (JarDependency('com', 'foo', '1.0.0')
.with_artifact('com', 'baz')
.with_artifact('org', 'bat'))
jar2 = (JarDependency('com', 'foo', '1.0.0')
.with_artifact('org', 'bat')
.with_artifact('com', 'baz'))
self.assertEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar2]).fingerprint(),
)
def test_deprecated_jars_field_methods(self):
"""with_sources() and with_docs() are now no-ops. This test shows they don't affect
fingerprinting.
"""
jar1 = (JarDependency('com', 'foo', '1.0.0'))
jar2 = (JarDependency('com', 'foo', '1.0.0')
.with_sources()
.with_docs())
self.assertEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar2]).fingerprint(),
)
def test_jars_field_apidocs(self):
"""apidocs are not properly rolled into the cache key right now. Is this intentional?"""
jar1 = JarDependency('com', 'foo', '1.0.0', apidocs='pantsbuild.github.io')
jar2 = JarDependency('com', 'foo', '1.0.0', apidocs='someother.pantsbuild.github.io')
self.assertEqual(
JarsField([jar1]).fingerprint(),
JarsField([jar2]).fingerprint(),
)
def test_python_requirements_field(self):
req1 = PythonRequirement('foo==1.0')
req2 = PythonRequirement('bar==1.0')
self.assertNotEqual(
PythonRequirementsField([req1]).fingerprint(),
PythonRequirementsField([req2]).fingerprint(),
)
def test_python_requirements_field_version_filter(self):
"""version_filter is a lambda and can't be hashed properly.
Since in practice this is only ever used to differentiate between py3k and py2, it should use
a tuple of strings or even just a flag instead.
"""
req1 = PythonRequirement('foo==1.0', version_filter=lambda py, pl: False)
req2 = PythonRequirement('foo==1.0')
self.assertEqual(
PythonRequirementsField([req1]).fingerprint(),
PythonRequirementsField([req2]).fingerprint(),
)
def test_primitive_field(self):
self.assertEqual(
PrimitiveField({'foo': 'bar'}).fingerprint(),
PrimitiveField({'foo': 'bar'}).fingerprint(),
)
self.assertEqual(
PrimitiveField(['foo', 'bar']).fingerprint(),
PrimitiveField(('foo', 'bar')).fingerprint(),
)
self.assertEqual(
PrimitiveField(['foo', 'bar']).fingerprint(),
PrimitiveField(('foo', 'bar')).fingerprint(),
)
self.assertEqual(
PrimitiveField('foo').fingerprint(),
PrimitiveField(b'foo').fingerprint(),
)
self.assertNotEqual(
PrimitiveField('foo').fingerprint(),
PrimitiveField('bar').fingerprint(),
)
  def test_excludes_field_fingerprint(self):
self.assertEqual(
ExcludesField([Exclude('com', 'foo')]).fingerprint(),
ExcludesField([Exclude('com', 'foo')]).fingerprint(),
)
self.assertEqual(
ExcludesField([]).fingerprint(),
ExcludesField().fingerprint(),
)
self.assertNotEqual(
ExcludesField([Exclude('com', 'foo')]).fingerprint(),
ExcludesField([Exclude('com')]).fingerprint(),
)
self.assertNotEqual(
ExcludesField([Exclude('com', 'foo'), Exclude('org', 'bar')]).fingerprint(),
ExcludesField([Exclude('org', 'bar'), Exclude('com', 'foo')]).fingerprint(),
)
def test_sources_field(self):
self.create_file('foo/bar/a.txt', 'a_contents')
self.create_file('foo/bar/b.txt', 'b_contents')
self.assertNotEqual(
SourcesField(
sources_rel_path='foo/bar',
sources=['a.txt'],
).fingerprint(),
SourcesField(
sources_rel_path='foo/bar',
sources=['b.txt'],
).fingerprint(),
)
self.assertEqual(
SourcesField(
sources_rel_path='foo/bar',
sources=['a.txt'],
).fingerprint(),
SourcesField(
sources_rel_path='foo/bar',
sources=['a.txt'],
).fingerprint(),
)
self.assertEqual(
SourcesField(
sources_rel_path='foo/bar',
sources=['a.txt'],
).fingerprint(),
SourcesField(
sources_rel_path='foo/bar',
sources=['a.txt'],
).fingerprint(),
)
self.assertEqual(
SourcesField(
sources_rel_path='foo/bar',
sources=['a.txt', 'b.txt'],
).fingerprint(),
SourcesField(
sources_rel_path='foo/bar',
sources=['b.txt', 'a.txt'],
).fingerprint(),
)
fp1 = SourcesField(
sources_rel_path='foo/bar',
sources=['a.txt'],
).fingerprint()
self.create_file('foo/bar/a.txt', 'a_contents_different')
fp2 = SourcesField(
sources_rel_path='foo/bar',
sources=['a.txt'],
).fingerprint()
self.assertNotEqual(fp1, fp2)
def test_fingerprinted_field(self):
class TestValue(FingerprintedMixin):
def __init__(self, test_value):
self.test_value = test_value
def fingerprint(self):
hasher = sha1()
hasher.update(self.test_value)
return hasher.hexdigest()
field1 = TestValue('field1')
field1_same = TestValue('field1')
field2 = TestValue('field2')
self.assertEquals(field1.fingerprint(), field1_same.fingerprint())
self.assertNotEquals(field1.fingerprint(), field2.fingerprint())
fingerprinted_field1 = FingerprintedField(field1)
fingerprinted_field1_same = FingerprintedField(field1_same)
fingerprinted_field2 = FingerprintedField(field2)
self.assertEquals(fingerprinted_field1.fingerprint(), fingerprinted_field1_same.fingerprint())
self.assertNotEquals(fingerprinted_field1.fingerprint(), fingerprinted_field2.fingerprint())
def test_unimplemented_fingerprinted_field(self):
class TestUnimplementedValue(FingerprintedMixin):
pass
with self.assertRaises(NotImplementedError):
FingerprintedField(TestUnimplementedValue()).fingerprint()
def test_file_field(self):
fp1 = FileField(self.create_file('foo/bar.config', contents='blah blah blah')).fingerprint()
fp2 = FileField(self.create_file('foo/bar.config', contents='meow meow meow')).fingerprint()
fp3 = FileField(self.create_file('spam/egg.config', contents='blah blah blah')).fingerprint()
self.assertNotEquals(fp1, fp2)
self.assertNotEquals(fp1, fp3)
self.assertNotEquals(fp2, fp3)
def test_target_list_field(self):
specs = [':t1', ':t2', ':t3']
payloads = [Payload() for i in range(3)]
for i, (s, p) in enumerate(zip(specs, payloads)):
p.add_field('foo', PrimitiveField(i))
self.make_target(s, payload=p)
s1, s2, s3 = specs
context = self.context()
fp1 = TargetListField([s1, s2]).fingerprint_with_context(context)
fp2 = TargetListField([s2, s1]).fingerprint_with_context(context)
fp3 = TargetListField([s1, s3]).fingerprint_with_context(context)
self.assertEquals(fp1, fp2)
self.assertNotEquals(fp1, fp3)
| apache-2.0 | 7,316,399,254,146,659,000 | 32.61442 | 98 | 0.633498 | false |
pinax/pinax-eventlog | pinax/eventlog/migrations/0001_initial.py | 1 | 1443 | # Generated by Django 3.1 on 2020-08-15 10:08
from django.conf import settings
import django.core.serializers.json
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
from ..compat import JSONField
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Log',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(db_index=True, default=django.utils.timezone.now)),
('action', models.CharField(db_index=True, max_length=50)),
('object_id', models.PositiveIntegerField(blank=True, null=True)),
('extra', JSONField(blank=True, encoder=django.core.serializers.json.DjangoJSONEncoder)),
('content_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='contenttypes.contenttype')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-timestamp'],
},
),
]
| mit | 8,742,902,873,296,838,000 | 38 | 152 | 0.634789 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/express_route_circuit_sku.py | 1 | 1525 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitSku(Model):
"""Contains SKU in an ExpressRouteCircuit.
:param name: The name of the SKU.
:type name: str
:param tier: The tier of the SKU. Possible values are 'Standard' and
'Premium'. Possible values include: 'Standard', 'Premium'
:type tier: str or
~azure.mgmt.network.v2017_11_01.models.ExpressRouteCircuitSkuTier
:param family: The family of the SKU. Possible values are: 'UnlimitedData'
and 'MeteredData'. Possible values include: 'UnlimitedData', 'MeteredData'
:type family: str or
~azure.mgmt.network.v2017_11_01.models.ExpressRouteCircuitSkuFamily
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
}
def __init__(self, name=None, tier=None, family=None):
super(ExpressRouteCircuitSku, self).__init__()
self.name = name
self.tier = tier
self.family = family
| mit | -7,039,590,627,522,585,000 | 37.125 | 79 | 0.602623 | false |
bdh1011/wau | venv/lib/python2.7/site-packages/pandas/io/tests/test_sql.py | 1 | 93879 | """SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, the different tested flavors (sqlite3, MySQL, PostgreSQL)
derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback` and `TestMySQLLegacy`)
"""
from __future__ import print_function
import unittest
import sqlite3
import csv
import os
import sys
import nose
import warnings
import numpy as np
from datetime import datetime, date, time
from pandas import DataFrame, Series, Index, MultiIndex, isnull, concat
from pandas import date_range, to_datetime, to_timedelta, Timestamp
import pandas.compat as compat
from pandas.compat import StringIO, range, lrange, string_types
from pandas.core.datetools import format as date_format
import pandas.io.sql as sql
from pandas.io.sql import read_sql_table, read_sql_query
import pandas.util.testing as tm
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': {
'query': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'fields': (
'TextCol', 'DateCol', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
'mysql': {
'query': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'fields': (
'TextCol', 'DateCol', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
'postgresql': {
'query': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
'fields': (
'TextCol', 'DateCol', 'DateColWithTz', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
},
'read_parameters': {
'sqlite': "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
'mysql': 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
'postgresql': 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s'
},
'read_named_parameters': {
'sqlite': """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
'mysql': """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
'postgresql': """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
"""
}
}
class PandasSQLTest(unittest.TestCase):
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def drop_table(self, table_name):
self._get_exec().execute("DROP TABLE IF EXISTS %s" % table_name)
def _get_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
import io
iris_csv_file = os.path.join(tm.get_data_path(), 'iris.csv')
self.drop_table('iris')
self._get_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with io.open(iris_csv_file, mode='r', newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
self.assertTrue(
issubclass(pytype, np.floating), 'Loaded frame has incorrect type')
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
data = [(
'2000-01-03 00:00:00', 0.980268513777, 3.68573087906, -0.364216805298, -1.15973806169),
('2000-01-04 00:00:00', 1.04791624281, -
0.0412318367011, -0.16181208307, 0.212549316967),
('2000-01-05 00:00:00', 0.498580885705,
0.731167677815, -0.537677223318, 1.34627041952),
('2000-01-06 00:00:00', 1.12020151869, 1.56762092543, 0.00364077397681, 0.67525259227)]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(dict(A=[4, 1, 3, 6],
B=['asd', 'gsq', 'ylt', 'jkl'],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=['1990-11-22', '1991-10-26', '1993-11-26', '1995-12-12']))
df['E'] = to_datetime(df['E'])
self.test_frame2 = df
def _load_test3_data(self):
columns = ['index', 'A', 'B']
data = [(
'2000-01-03 00:00:00', 2 ** 31 - 1, -1.987670),
('2000-01-04 00:00:00', -29, -0.0412318367011),
('2000-01-05 00:00:00', 20000, 0.731167677815),
('2000-01-06 00:00:00', -290867, 1.56762092543)]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table('types_test_data')
self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [
{
'TextCol': 'first',
'DateCol': '2000-01-03 00:00:00',
'DateColWithTz': '2000-01-01 00:00:00-08:00',
'IntDateCol': 535852800,
'FloatCol': 10.10,
'IntCol': 1,
'BoolCol': False,
'IntColWithNull': 1,
'BoolColWithNull': False,
},
{
'TextCol': 'first',
'DateCol': '2000-01-04 00:00:00',
'DateColWithTz': '2000-06-01 00:00:00-07:00',
'IntDateCol': 1356998400,
'FloatCol': 10.10,
'IntCol': 1,
'BoolCol': False,
'IntColWithNull': None,
'BoolColWithNull': None,
},
]
for d in data:
self._get_exec().execute(
ins['query'],
[d[field] for field in ins['fields']]
)
def _count_rows(self, table_name):
result = self._get_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS['read_parameters'][self.flavor]
params = ['Iris-setosa', 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS['read_named_parameters'][self.flavor]
params = {'name': 'Iris-setosa', 'length': 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame1')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
# Nuke table
self.drop_table('test_frame1')
def _to_sql_empty(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], 'test_frame1')
def _to_sql_fail(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
self.assertRaises(ValueError, self.pandasSQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.drop_table('test_frame1')
def _to_sql_replace(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replace')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _to_sql_append(self):
# Nuke table just in case
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='append')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _roundtrip(self):
self.drop_table('test_frame_roundtrip')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame_roundtrip')
result = self.pandasSQL.read_query('SELECT * FROM test_frame_roundtrip')
result.set_index('level_0', inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _to_sql_save_index(self):
df = DataFrame.from_records([(1,2.1,'line1'), (2,1.5,'line2')],
columns=['A','B','C'], index=['A'])
self.pandasSQL.to_sql(df, 'test_to_sql_saves_index')
ix_cols = self._get_index_columns('test_to_sql_saves_index')
self.assertEqual(ix_cols, [['A',],])
def _transaction_test(self):
self.pandasSQL.execute("CREATE TABLE test_trans (A INT, B TEXT)")
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
# Make sure when transaction is rolled back, no rows get inserted
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise Exception('error')
except:
# ignore raised exception
pass
res = self.pandasSQL.read_query('SELECT * FROM test_trans')
self.assertEqual(len(res), 0)
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query('SELECT * FROM test_trans')
self.assertEqual(len(res2), 1)
#------------------------------------------------------------------------------
#--- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode (`TestSQLiteFallbackApi`).
These tests are run with sqlite3. Specific tests for the different
sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = 'sqlite'
mode = None
def setUp(self):
self.conn = self.connect()
self._load_iris_data()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_legacy_read_frame(self):
with tm.assert_produces_warning(FutureWarning):
iris_frame = sql.read_frame(
"SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame1', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, flavor='sqlite', if_exists='fail')
self.assertTrue(
sql.has_table('test_frame2', self.conn, flavor='sqlite'), 'Table not written to DB')
self.assertRaises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, flavor='sqlite', if_exists='fail')
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='replace')
self.assertTrue(
sql.has_table('test_frame3', self.conn, flavor='sqlite'),
'Table not written to DB')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame3')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='append')
self.assertTrue(
sql.has_table('test_frame4', self.conn, flavor='sqlite'),
'Table not written to DB')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame4')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, 'test_frame5',
self.conn, flavor='sqlite', index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype='int64'), name='series')
sql.to_sql(s, "test_series", self.conn, flavor='sqlite', index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_to_sql_panel(self):
panel = tm.makePanel()
self.assertRaises(NotImplementedError, sql.to_sql, panel,
'test_panel', self.conn, flavor='sqlite')
def test_legacy_write_frame(self):
        # Assume that functionality is already tested above, so just do a
        # quick check that it basically works
with tm.assert_produces_warning(FutureWarning):
sql.write_frame(self.test_frame1, 'test_frame_legacy', self.conn,
flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame_legacy', self.conn, flavor='sqlite'),
'Table not written to DB')
def test_roundtrip(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip',
con=self.conn, flavor='sqlite')
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index('level_0', inplace=True)
        result.index = result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip', con=self.conn,
index=False, flavor='sqlite', chunksize=2)
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_date_parsing(self):
        # Test date parsing in read_sql_query
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
self.assertFalse(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['DateCol'])
self.assertTrue(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['IntDateCol'])
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_date_and_index(self):
        # Test case where same column appears in parse_dates and index_col
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
index_col='DateCol',
parse_dates=['DateCol', 'IntDateCol'])
self.assertTrue(issubclass(df.index.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(['00:00:01', '00:00:03'], name='foo')).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql('test_timedelta', self.conn)
result = sql.read_sql_query('SELECT * FROM test_timedelta', self.conn)
tm.assert_series_equal(result['foo'], df['foo'].astype('int64'))
def test_complex(self):
df = DataFrame({'a':[1+1j, 2j]})
# Complex data type should raise error
self.assertRaises(ValueError, df.to_sql, 'test_complex', self.conn)
def test_to_sql_index_label(self):
temp_frame = DataFrame({'col1': range(4)})
# no index name, defaults to 'index'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'index')
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'other_label',
"Specified index_label not written to database")
# using the index name
temp_frame.index.name = 'index_name'
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'index_name',
"Index name not written to database")
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'other_label',
"Specified index_label not written to database")
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame({'col1': range(4)},
index=MultiIndex.from_product([('A0', 'A1'), ('B0', 'B1')]))
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'level_0')
self.assertEqual(frame.columns[1], 'level_1')
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['A', 'B'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'],
"Specified index_labels not written to database")
# using the index name
temp_frame.index.names = ['A', 'B']
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'],
"Index names not written to database")
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['C', 'D'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['C', 'D'],
"Specified index_labels not written to database")
# wrong length of index_label
self.assertRaises(ValueError, sql.to_sql, temp_frame,
'test_index_label', self.conn, if_exists='replace',
index_label='C')
def test_multiindex_roundtrip(self):
df = DataFrame.from_records([(1,2.1,'line1'), (2,1.5,'line2')],
columns=['A','B','C'], index=['A','B'])
df.to_sql('test_multiindex_roundtrip', self.conn)
result = sql.read_sql_query('SELECT * FROM test_multiindex_roundtrip',
self.conn, index_col=['A','B'])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn,
if_exists='replace')
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, 'test', 'sqlite',
con=self.conn)
self.assertTrue('CREATE' in create_sql)
def test_get_schema_dtypes(self):
float_frame = DataFrame({'a':[1.1,1.2], 'b':[2.1,2.2]})
dtype = sqlalchemy.Integer if self.mode == 'sqlalchemy' else 'INTEGER'
create_sql = sql.get_schema(float_frame, 'test', 'sqlite',
con=self.conn, dtype={'b':dtype})
self.assertTrue('CREATE' in create_sql)
self.assertTrue('INTEGER' in create_sql)
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list('abcde'))
df.to_sql('test_chunksize', self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
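        # the frame has 22 rows, so chunksize=5 yields four chunks of 5 and a
        # final chunk of 2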
for chunk in sql.read_sql_query("select * from test_chunksize",
self.conn, chunksize=5):
res2 = concat([res2, chunk], ignore_index=True)
self.assertEqual(len(chunk), sizes[i])
i += 1
tm.assert_frame_equal(res1, res2)
        # reading the table in chunks with read_sql_table
if self.mode == 'sqlalchemy':
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn,
chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
self.assertEqual(len(chunk), sizes[i])
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{'person_id': [1, 2, 3],
'person_name': ['John P. Doe', 'Jane Dove', 'John P. Doe']})
df2 = df.copy()
df2['person_name'] = df2['person_name'].astype('category')
df2.to_sql('test_categorical', self.conn, index=False)
res = sql.read_sql_query('SELECT * FROM test_categorical', self.conn)
tm.assert_frame_equal(res, df)
class TestSQLApi(_TestSQLApi):
"""
    Test the public API as it would be used directly.
    Tests for `read_sql_table` are included here, as this is specific to the
sqlalchemy mode.
"""
flavor = 'sqlite'
mode = 'sqlalchemy'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
raise nose.SkipTest('SQLAlchemy not installed')
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
cols = ['A', 'B']
result = sql.read_sql_table('test_frame', self.conn, columns=cols)
self.assertEqual(result.columns.tolist(), cols,
"Columns not correctly selected")
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
result = sql.read_sql_table('test_frame', self.conn, index_col="index")
self.assertEqual(result.index.names, ["index"],
"index_col not correctly set")
result = sql.read_sql_table('test_frame', self.conn, index_col=["A", "B"])
self.assertEqual(result.index.names, ["A", "B"],
"index_col not correctly set")
result = sql.read_sql_table('test_frame', self.conn, index_col=["A", "B"],
columns=["C", "D"])
self.assertEqual(result.index.names, ["A", "B"],
"index_col not correctly set")
        self.assertEqual(result.columns.tolist(), ["C", "D"],
                         "columns not set correctly with index_col")
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql(
"SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table('iris', self.conn)
iris_frame2 = sql.read_sql('iris', self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
            # These reads should not trigger a warning.
sql.read_sql_table('other_table', self.conn)
sql.read_sql_query('SELECT * FROM other_table', self.conn)
# Verify some things
self.assertEqual(len(w), 0, "Warning triggered for other table")
def test_warning_case_insensitive_table_name(self):
# see GH7815.
        # We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql('CaseSensitive', self.conn)
# Verify some things
self.assertEqual(len(w), 0, "Warning triggered for writing a table")
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
        ixs = insp.get_indexes(tbl_name)
ixs = [i['column_names'] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],
utc=True)})
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
self.assertTrue(isinstance(table.table.c['time'].type, sqltypes.DateTime))
class TestSQLiteFallbackApi(_TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = 'sqlite'
mode = 'fallback'
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test if the IO in the database still work if the connection closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn,
flavor="sqlite", index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;",
conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
self.assertRaises(sql.DatabaseError, sql.read_sql, 'iris', self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b ']) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn,
flavor="sqlite", index=False)
def test_get_schema2(self):
        # without providing a connection object (available for backwards compatibility)
create_sql = sql.get_schema(self.test_frame1, 'test', 'sqlite')
self.assertTrue('CREATE' in create_sql)
def test_tquery(self):
with tm.assert_produces_warning(FutureWarning):
iris_results = sql.tquery("SELECT * FROM iris", con=self.conn)
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_uquery(self):
with tm.assert_produces_warning(FutureWarning):
rows = sql.uquery("SELECT * FROM iris LIMIT 1", con=self.conn)
self.assertEqual(rows, -1)
def _get_sqlite_column_type(self, schema, column):
for col in schema.split('\n'):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError('Column %s not found' % (column))
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],
utc=True)})
db = sql.SQLiteDatabase(self.conn, self.flavor)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
self.assertEqual(self._get_sqlite_column_type(schema, 'time'),
"TIMESTAMP")
#------------------------------------------------------------------------------
#--- Database flavor specific tests
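# The _TestSQLAlchemy suite below is run against an in-memory sqlite engine
# (TestSQLiteAlchemy), a MySQL server via pymysql (TestMySQLAlchemy) and a
# PostgreSQL server via psycopg2 (TestPostgreSQLAlchemy); each class skips
# itself when its driver or server is not available.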
class _TestSQLAlchemy(PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
    deviate for each flavor are overridden there.
"""
flavor = None
@classmethod
def setUpClass(cls):
cls.setup_import()
cls.setup_driver()
# test connection
try:
conn = cls.connect()
conn.connect()
except sqlalchemy.exc.OperationalError:
msg = "{0} - can't connect to {1} server".format(cls, cls.flavor)
raise nose.SkipTest(msg)
def setUp(self):
self.setup_connect()
self._load_iris_data()
self._load_raw_sql()
self._load_test1_data()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
raise nose.SkipTest('SQLAlchemy not installed')
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
raise nose.SkipTest("Can't connect to {0} server".format(self.flavor))
def tearDown(self):
raise NotImplementedError()
def test_aread_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
pandasSQL.drop_table('temp_frame')
self.assertFalse(
temp_conn.has_table('temp_frame'), 'Table not deleted from DB')
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=['SepalLength', 'SepalLength'])
tm.equalContents(
iris_frame.columns.values, ['SepalLength', 'SepalLength'])
def test_read_table_absent(self):
self.assertRaises(
ValueError, sql.read_sql_table, "this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
self.assertTrue(issubclass(df.BoolCol.dtype.type, np.bool_),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Bool column with NA values becomes object
self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.object),
"BoolColWithNull loaded with incorrect type")
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={'i64':[2**62]})
df.to_sql('test_bigint', self.conn, index=False)
result = sql.read_sql_table('test_bigint', self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates=['DateCol'])
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_table("types_test_data", self.conn, parse_dates={
'DateCol': {'format': '%Y-%m-%d %H:%M:%S'}})
        self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
                        "DateCol loaded with incorrect type")
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=['IntDateCol'])
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': {'unit': 's'}})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_datetime(self):
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.to_sql('test_datetime', self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table('test_datetime', self.conn)
result = result.drop('index', axis=1)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native
        # datetime type, so the values come back as strings there
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
result = result.drop('index', axis=1)
if self.flavor == 'sqlite':
self.assertTrue(isinstance(result.loc[0, 'A'], string_types))
result['A'] = to_datetime(result['A'])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.loc[1, 'A'] = np.nan
df.to_sql('test_datetime', self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table('test_datetime', self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native
        # datetime type, so the values come back as strings there
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
if self.flavor == 'sqlite':
self.assertTrue(isinstance(result.loc[0, 'A'], string_types))
result['A'] = to_datetime(result['A'], coerce=True)
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql('test_date', self.conn, index=False)
res = read_sql_table('test_date', self.conn)
# comes back as datetime64
tm.assert_series_equal(res['a'], to_datetime(df['a']))
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql('test_time', self.conn, index=False)
res = read_sql_table('test_time', self.conn)
tm.assert_frame_equal(res, df)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2**25 + 1,dtype=np.int32)
s2 = Series(0.0,dtype=np.float32)
df = DataFrame({'s1': s1, 's2': s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({'A':[0, 1, 2], 'B':[0.2, np.nan, 5.6]})
df.to_sql('test_nan', self.conn, index=False)
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({'A':[0, 1, 2], 'B':[np.nan, np.nan, np.nan]})
df.to_sql('test_nan', self.conn, index=False)
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type info from table -> stays None
df['B'] = df['B'].astype('object')
df['B'] = None
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({'A':[0, 1, 2], 'B':['a', 'b', np.nan]})
df.to_sql('test_nan', self.conn, index=False)
# NaNs are coming back as None
df.loc[2, 'B'] = None
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
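    # Index columns are recovered through SQLAlchemy's reflection Inspector:
    # get_indexes() returns one dict per index, and its 'column_names' entry
    # lists the columns each index covers.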
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i['column_names'] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = 'test_get_schema_create_table'
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df)
self.drop_table(tbl)
def test_dtype(self):
cols = ['A', 'B']
data = [(0.8, True),
(0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql('dtype_test', self.conn)
df.to_sql('dtype_test2', self.conn, dtype={'B': sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables['dtype_test2'].columns['B'].type
self.assertTrue(isinstance(sqltype, sqlalchemy.TEXT))
self.assertRaises(ValueError, df.to_sql,
'error', self.conn, dtype={'B': str})
# GH9083
df.to_sql('dtype_test3', self.conn, dtype={'B': sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables['dtype_test3'].columns['B'].type
self.assertTrue(isinstance(sqltype, sqlalchemy.String))
self.assertEqual(sqltype.length, 10)
def test_notnull_dtype(self):
cols = {'Bool': Series([True,None]),
'Date': Series([datetime(2012, 5, 1), None]),
'Int' : Series([1, None], dtype='object'),
'Float': Series([1.1, None])
}
df = DataFrame(cols)
tbl = 'notnull_dtype_test'
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == 'mysql':
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
self.assertTrue(isinstance(col_dict['Bool'].type, my_type))
self.assertTrue(isinstance(col_dict['Date'].type, sqltypes.DateTime))
self.assertTrue(isinstance(col_dict['Int'].type, sqltypes.Integer))
self.assertTrue(isinstance(col_dict['Float'].type, sqltypes.Float))
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame({'f32':Series([V,], dtype='float32'),
'f64':Series([V,], dtype='float64'),
'f64_as_f32':Series([V,], dtype='float64'),
'i32':Series([5,], dtype='int32'),
'i64':Series([5,], dtype='int64'),
})
df.to_sql('test_dtypes', self.conn, index=False, if_exists='replace',
dtype={'f64_as_f32':sqlalchemy.Float(precision=23)})
res = sql.read_sql_table('test_dtypes', self.conn)
# check precision of float64
self.assertEqual(np.round(df['f64'].iloc[0],14),
np.round(res['f64'].iloc[0],14))
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables['test_dtypes'].columns
self.assertEqual(str(col_dict['f32'].type),
str(col_dict['f64_as_f32'].type))
self.assertTrue(isinstance(col_dict['f32'].type, sqltypes.Float))
self.assertTrue(isinstance(col_dict['f64'].type, sqltypes.Float))
self.assertTrue(isinstance(col_dict['i32'].type, sqltypes.Integer))
self.assertTrue(isinstance(col_dict['i64'].type, sqltypes.BigInteger))
class TestSQLiteAlchemy(_TestSQLAlchemy):
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = 'sqlite'
@classmethod
def connect(cls):
return sqlalchemy.create_engine('sqlite:///:memory:')
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def tearDown(self):
# in memory so tables should not be removed explicitly
pass
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
# sqlite has no boolean type, so integer type is returned
self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Non-native Bool column with NA values stays as float
self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating),
"BoolColWithNull loaded with incorrect type")
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
        # IMPORTANT - sqlite has no native date type, so the column should
        # not be parsed here
self.assertFalse(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
def test_bigint_warning(self):
        # test that no warning is raised for BIGINT columns (used to support int64) (GH7433)
df = DataFrame({'a':[1,2]}, dtype='int64')
df.to_sql('test_bigintwarning', self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table('test_bigintwarning', self.conn)
            self.assertEqual(len(w), 0, "Warning triggered for BIGINT column")
class TestMySQLAlchemy(_TestSQLAlchemy):
"""
    Test the sqlalchemy backend against a MySQL database.
"""
flavor = 'mysql'
@classmethod
def connect(cls):
url = 'mysql+{driver}://root@localhost/pandas_nosetest'
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
try:
import pymysql
cls.driver = 'pymysql'
except ImportError:
raise nose.SkipTest('pymysql not installed')
def tearDown(self):
c = self.conn.execute('SHOW TABLES')
for table in c.fetchall():
self.conn.execute('DROP TABLE %s' % table[0])
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
# MySQL has no real BOOL type (it's an alias for TINYINT)
self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Bool column with NA = int column with NA values => becomes float
self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating),
"BoolColWithNull loaded with incorrect type")
def test_read_procedure(self):
        # see GH7324. Although it is more of an API test, it is added to the
# mysql tests as sqlite does not have stored procedures
df = DataFrame({'a': [1, 2, 3], 'b':[0.1, 0.2, 0.3]})
df.to_sql('test_procedure', self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc)
trans.commit()
except:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class TestPostgreSQLAlchemy(_TestSQLAlchemy):
"""
    Test the sqlalchemy backend against a PostgreSQL database.
"""
flavor = 'postgresql'
@classmethod
def connect(cls):
url = 'postgresql+{driver}://postgres@localhost/pandas_nosetest'
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
try:
import psycopg2
cls.driver = 'psycopg2'
except ImportError:
raise nose.SkipTest('psycopg2 not installed')
def tearDown(self):
c = self.conn.execute(
"SELECT table_name FROM information_schema.tables"
" WHERE table_schema = 'public'")
for table in c.fetchall():
self.conn.execute("DROP TABLE %s" % table[0])
def test_schema_support(self):
        # only test this for postgresql (schemas are not supported in mysql/sqlite)
df = DataFrame({'col1':[1, 2], 'col2':[0.1, 0.2], 'col3':['a', 'n']})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
        # write dataframe to different schemas
df.to_sql('test_schema_public', self.conn, index=False)
df.to_sql('test_schema_public_explicit', self.conn, index=False,
schema='public')
df.to_sql('test_schema_other', self.conn, index=False, schema='other')
# read dataframes back in
res1 = sql.read_sql_table('test_schema_public', self.conn)
tm.assert_frame_equal(df, res1)
res2 = sql.read_sql_table('test_schema_public_explicit', self.conn)
tm.assert_frame_equal(df, res2)
res3 = sql.read_sql_table('test_schema_public_explicit', self.conn,
schema='public')
tm.assert_frame_equal(df, res3)
res4 = sql.read_sql_table('test_schema_other', self.conn,
schema='other')
tm.assert_frame_equal(df, res4)
self.assertRaises(ValueError, sql.read_sql_table, 'test_schema_other',
self.conn, schema='public')
## different if_exists options
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe with different if_exists options
df.to_sql('test_schema_other', self.conn, schema='other', index=False)
df.to_sql('test_schema_other', self.conn, schema='other', index=False,
if_exists='replace')
df.to_sql('test_schema_other', self.conn, schema='other', index=False,
if_exists='append')
res = sql.read_sql_table('test_schema_other', self.conn, schema='other')
tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
## specifying schema in user-provided meta
engine2 = self.connect()
meta = sqlalchemy.MetaData(engine2, schema='other')
pdsql = sql.SQLDatabase(engine2, meta=meta)
pdsql.to_sql(df, 'test_schema_other2', index=False)
pdsql.to_sql(df, 'test_schema_other2', index=False, if_exists='replace')
pdsql.to_sql(df, 'test_schema_other2', index=False, if_exists='append')
res1 = sql.read_sql_table('test_schema_other2', self.conn, schema='other')
res2 = pdsql.read_table('test_schema_other2')
tm.assert_frame_equal(res1, res2)
def test_datetime_with_time_zone(self):
        # Test that a date column with time zone information is converted
        # to UTC and returned as np.datetime64 (GH #7139)
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.DateColWithTz.dtype.type, np.datetime64),
"DateColWithTz loaded with incorrect type")
# "2000-01-01 00:00:00-08:00" should convert to "2000-01-01 08:00:00"
self.assertEqual(df.DateColWithTz[0], Timestamp('2000-01-01 08:00:00'))
# "2000-06-01 00:00:00-07:00" should convert to "2000-06-01 07:00:00"
self.assertEqual(df.DateColWithTz[1], Timestamp('2000-06-01 07:00:00'))
#------------------------------------------------------------------------------
#--- Test Sqlite / MySQL fallback
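# Fallback mode: no SQLAlchemy engine -- a raw DBAPI connection is wrapped in
# sql.SQLiteDatabase with flavor 'sqlite' (TestSQLiteFallback) or 'mysql'
# (TestMySQLLegacy), and all statements are issued through that wrapper.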
class TestSQLiteFallback(PandasSQLTest):
"""
Test the fallback mode against an in-memory sqlite database.
"""
flavor = 'sqlite'
@classmethod
def connect(cls):
return sqlite3.connect(':memory:')
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute("DROP TABLE IF EXISTS %s" % table_name)
self.conn.commit()
def setUp(self):
self.conn = self.connect()
self.pandasSQL = sql.SQLiteDatabase(self.conn, 'sqlite')
self._load_iris_data()
self._load_test1_data()
def test_invalid_flavor(self):
self.assertRaises(
NotImplementedError, sql.SQLiteDatabase, self.conn, 'oracle')
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_create_and_drop_table(self):
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
self.pandasSQL.to_sql(temp_frame, 'drop_test_frame')
self.assertTrue(self.pandasSQL.has_table('drop_test_frame'),
'Table not written to DB')
self.pandasSQL.drop_table('drop_test_frame')
self.assertFalse(self.pandasSQL.has_table('drop_test_frame'),
'Table not deleted from DB')
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql('test_date', self.conn, index=False, flavor=self.flavor)
res = read_sql_query('SELECT * FROM test_date', self.conn)
if self.flavor == 'sqlite':
# comes back as strings
tm.assert_frame_equal(res, df.astype(str))
elif self.flavor == 'mysql':
tm.assert_frame_equal(res, df)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
        # test that it raises an error rather than failing silently (GH8341)
if self.flavor == 'sqlite':
self.assertRaises(sqlite3.InterfaceError, sql.to_sql, df,
'test_time', self.conn)
def _get_index_columns(self, tbl_name):
ixs = sql.read_sql_query(
"SELECT * FROM sqlite_master WHERE type = 'index' " +
"AND tbl_name = '%s'" % tbl_name, self.conn)
ix_cols = []
for ix_name in ixs.name:
ix_info = sql.read_sql_query(
"PRAGMA index_info(%s)" % ix_name, self.conn)
ix_cols.append(ix_info.name.tolist())
return ix_cols
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
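    # The helper below parses the output of "PRAGMA table_info(<table>)",
    # whose rows are (cid, name, type, notnull, dflt_value, pk), to recover
    # the declared SQL type of a single column.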
def _get_sqlite_column_type(self, table, column):
recs = self.conn.execute('PRAGMA table_info(%s)' % table)
for cid, name, ctype, not_null, default, pk in recs:
if name == column:
return ctype
raise ValueError('Table %s, column %s not found' % (table, column))
def test_dtype(self):
if self.flavor == 'mysql':
raise nose.SkipTest('Not applicable to MySQL legacy')
cols = ['A', 'B']
data = [(0.8, True),
(0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql('dtype_test', self.conn)
df.to_sql('dtype_test2', self.conn, dtype={'B': 'STRING'})
# sqlite stores Boolean values as INTEGER
self.assertEqual(self._get_sqlite_column_type('dtype_test', 'B'), 'INTEGER')
self.assertEqual(self._get_sqlite_column_type('dtype_test2', 'B'), 'STRING')
self.assertRaises(ValueError, df.to_sql,
'error', self.conn, dtype={'B': bool})
def test_notnull_dtype(self):
if self.flavor == 'mysql':
raise nose.SkipTest('Not applicable to MySQL legacy')
cols = {'Bool': Series([True,None]),
'Date': Series([datetime(2012, 5, 1), None]),
'Int' : Series([1, None], dtype='object'),
'Float': Series([1.1, None])
}
df = DataFrame(cols)
tbl = 'notnull_dtype_test'
df.to_sql(tbl, self.conn)
self.assertEqual(self._get_sqlite_column_type(tbl, 'Bool'), 'INTEGER')
self.assertEqual(self._get_sqlite_column_type(tbl, 'Date'), 'TIMESTAMP')
self.assertEqual(self._get_sqlite_column_type(tbl, 'Int'), 'INTEGER')
self.assertEqual(self._get_sqlite_column_type(tbl, 'Float'), 'REAL')
def test_illegal_names(self):
# For sqlite, these should work fine
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
# Raise error on blank
self.assertRaises(ValueError, df.to_sql, "", self.conn,
flavor=self.flavor)
for ndx, weird_name in enumerate(['test_weird_name]','test_weird_name[',
'test_weird_name`','test_weird_name"', 'test_weird_name\'',
'_b.test_weird_name_01-30', '"_b.test_weird_name_01-30"',
'12345','12345blah']):
df.to_sql(weird_name, self.conn, flavor=self.flavor)
sql.table_exists(weird_name, self.conn)
df2 = DataFrame([[1, 2], [3, 4]], columns=['a', weird_name])
c_tbl = 'test_weird_col_name%d'%ndx
df2.to_sql(c_tbl, self.conn, flavor=self.flavor)
sql.table_exists(c_tbl, self.conn)
class TestMySQLLegacy(TestSQLiteFallback):
"""
Test the legacy mode against a MySQL database.
"""
flavor = 'mysql'
@classmethod
def setUpClass(cls):
cls.setup_driver()
# test connection
try:
cls.connect()
except cls.driver.err.OperationalError:
raise nose.SkipTest("{0} - can't connect to MySQL server".format(cls))
@classmethod
def setup_driver(cls):
try:
import pymysql
cls.driver = pymysql
except ImportError:
raise nose.SkipTest('pymysql not installed')
@classmethod
def connect(cls):
return cls.driver.connect(host='127.0.0.1', user='root', passwd='', db='pandas_nosetest')
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute("DROP TABLE IF EXISTS %s" % table_name)
self.conn.commit()
def _count_rows(self, table_name):
cur = self._get_exec()
cur.execute(
"SELECT count(*) AS count_1 FROM %s" % table_name)
rows = cur.fetchall()
return rows[0][0]
def setUp(self):
try:
self.conn = self.connect()
except self.driver.err.OperationalError:
raise nose.SkipTest("Can't connect to MySQL server")
self.pandasSQL = sql.SQLiteDatabase(self.conn, 'mysql')
self._load_iris_data()
self._load_test1_data()
def tearDown(self):
c = self.conn.cursor()
c.execute('SHOW TABLES')
for table in c.fetchall():
c.execute('DROP TABLE %s' % table[0])
self.conn.commit()
self.conn.close()
def test_a_deprecation(self):
with tm.assert_produces_warning(FutureWarning):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn,
flavor='mysql')
self.assertTrue(
sql.has_table('test_frame1', self.conn, flavor='mysql'),
'Table not written to DB')
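    # MySQL's "SHOW INDEX IN <table>" lists one row per indexed column; the
    # helper below groups rows by Key_name to rebuild the per-index column lists.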
def _get_index_columns(self, tbl_name):
ixs = sql.read_sql_query(
"SHOW INDEX IN %s" % tbl_name, self.conn)
ix_cols = {}
for ix_name, ix_col in zip(ixs.Key_name, ixs.Column_name):
if ix_name not in ix_cols:
ix_cols[ix_name] = []
ix_cols[ix_name].append(ix_col)
return list(ix_cols.values())
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_illegal_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
# These tables and columns should be ok
for ndx, ok_name in enumerate(['99beginswithnumber','12345']):
df.to_sql(ok_name, self.conn, flavor=self.flavor, index=False,
if_exists='replace')
self.conn.cursor().execute("DROP TABLE `%s`" % ok_name)
self.conn.commit()
df2 = DataFrame([[1, 2], [3, 4]], columns=['a', ok_name])
c_tbl = 'test_ok_col_name%d'%ndx
df2.to_sql(c_tbl, self.conn, flavor=self.flavor, index=False,
if_exists='replace')
self.conn.cursor().execute("DROP TABLE `%s`" % c_tbl)
self.conn.commit()
# For MySQL, these should raise ValueError
for ndx, illegal_name in enumerate(['test_illegal_name]','test_illegal_name[',
'test_illegal_name`','test_illegal_name"', 'test_illegal_name\'', '']):
self.assertRaises(ValueError, df.to_sql, illegal_name, self.conn,
flavor=self.flavor, index=False)
df2 = DataFrame([[1, 2], [3, 4]], columns=['a', illegal_name])
c_tbl = 'test_illegal_col_name%d'%ndx
self.assertRaises(ValueError, df2.to_sql, c_tbl,
self.conn, flavor=self.flavor, index=False)
#------------------------------------------------------------------------------
#--- Old tests from 0.13.1 (before refactor using sqlalchemy)
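# These tests drive the deprecated flavor-specific interface directly against
# DBAPI connections: sql.write_frame / sql.read_frame for round trips and
# sql.tquery / sql.uquery for raw statements.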
_formatters = {
datetime: lambda dt: "'%s'" % date_format(dt),
str: lambda x: "'%s'" % x,
np.str_: lambda x: "'%s'" % x,
compat.text_type: lambda x: "'%s'" % x,
compat.binary_type: lambda x: "'%s'" % x,
float: lambda x: "%.8f" % x,
int: lambda x: "%s" % x,
type(None): lambda x: "NULL",
np.float64: lambda x: "%.10f" % x,
bool: lambda x: "'%s'" % x,
}
def format_query(sql, *args):
    """
    Substitute args into the SQL statement, rendering each value as a SQL
    literal according to its Python type (see _formatters above); float NaN
    values are rendered as NULL.
    """
processed_args = []
for arg in args:
if isinstance(arg, float) and isnull(arg):
arg = None
formatter = _formatters[type(arg)]
processed_args.append(formatter(arg))
return sql % tuple(processed_args)
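# For example (values rendered per _formatters; a float NaN becomes NULL):
#   format_query("INSERT INTO test VALUES (%s, %s, %s)", 1.5, None, 'foo')
#   -> "INSERT INTO test VALUES (1.50000000, NULL, 'foo')"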
def _skip_if_no_pymysql():
try:
import pymysql
except ImportError:
raise nose.SkipTest('pymysql not installed, skipping')
class TestXSQLite(tm.TestCase):
def setUp(self):
self.db = sqlite3.connect(':memory:')
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.ix[0, 0] = np.nan
create_sql = sql.get_schema(frame, 'test', 'sqlite')
cur = self.db.cursor()
cur.execute(create_sql)
cur = self.db.cursor()
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
sql.tquery(fmt_sql, cur=cur)
self.db.commit()
result = sql.read_frame("select * from test", con=self.db)
result.index = frame.index
tm.assert_frame_equal(result, frame)
def test_execute(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'sqlite')
cur = self.db.cursor()
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (?, ?, ?, ?)"
row = frame.ix[0]
sql.execute(ins, self.db, params=tuple(row))
self.db.commit()
result = sql.read_frame("select * from test", self.db)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'sqlite')
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
self.assertTrue(tokens[1] == 'DATETIME')
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'sqlite', keys=['A', 'B'],)
lines = create_sql.splitlines()
self.assertTrue('PRIMARY KEY ("A","B")' in create_sql)
cur = self.db.cursor()
cur.execute(create_sql)
def test_execute_fail(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.db.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.db)
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.execute,
'INSERT INTO test VALUES("foo", "bar", 7)',
self.db)
finally:
sys.stdout = sys.__stdout__
def test_execute_closed_connection(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.db.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db)
self.db.close()
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.tquery, "select * from test",
con=self.db)
finally:
sys.stdout = sys.__stdout__
def test_na_roundtrip(self):
pass
def _check_roundtrip(self, frame):
sql.write_frame(frame, name='test_table', con=self.db)
result = sql.read_frame("select * from test_table", self.db)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
expected = frame
tm.assert_frame_equal(result, expected)
frame['txt'] = ['a'] * len(frame)
frame2 = frame.copy()
frame2['Idx'] = Index(lrange(len(frame2))) + 10
sql.write_frame(frame2, name='test_table2', con=self.db)
result = sql.read_frame("select * from test_table2", self.db,
index_col='Idx')
expected = frame.copy()
expected.index = Index(lrange(len(frame2))) + 10
expected.index.name = 'Idx'
tm.assert_frame_equal(expected, result)
def test_tquery(self):
frame = tm.makeTimeDataFrame()
sql.write_frame(frame, name='test_table', con=self.db)
result = sql.tquery("select A from test_table", self.db)
        expected = Series(frame.A.values, frame.index)  # constructed so it has no name
result = Series(result, frame.index)
tm.assert_series_equal(result, expected)
try:
sys.stdout = StringIO()
self.assertRaises(sql.DatabaseError, sql.tquery,
'select * from blah', con=self.db)
self.assertRaises(sql.DatabaseError, sql.tquery,
'select * from blah', con=self.db, retry=True)
finally:
sys.stdout = sys.__stdout__
def test_uquery(self):
frame = tm.makeTimeDataFrame()
sql.write_frame(frame, name='test_table', con=self.db)
stmt = 'INSERT INTO test_table VALUES(2.314, -123.1, 1.234, 2.3)'
self.assertEqual(sql.uquery(stmt, con=self.db), 1)
try:
sys.stdout = StringIO()
self.assertRaises(sql.DatabaseError, sql.tquery,
'insert into blah values (1)', con=self.db)
self.assertRaises(sql.DatabaseError, sql.tquery,
'insert into blah values (1)', con=self.db,
retry=True)
finally:
sys.stdout = sys.__stdout__
def test_keyword_as_column_names(self):
        '''
        Ensure a reserved SQL keyword ('From') can be used as a column name.
        '''
df = DataFrame({'From':np.ones(5)})
sql.write_frame(df, con = self.db, name = 'testkeywords')
def test_onecolumn_of_integer(self):
# GH 3628
# a column_of_integers dataframe should transfer well to sql
        mono_df = DataFrame([1, 2], columns=['c0'])
        sql.write_frame(mono_df, con=self.db, name='mono_df')
        # computing the sum via sql
        con_x = self.db
        the_sum = sum(my_c0[0] for my_c0 in con_x.execute("select * from mono_df"))
        # it should not fail, and gives 3 (Issue #3628)
        self.assertEqual(the_sum, 3)
        result = sql.read_frame("select * from mono_df", con_x)
        tm.assert_frame_equal(result, mono_df)
def test_if_exists(self):
df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
df_if_exists_2 = DataFrame({'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
table_name = 'table_if_exists'
sql_select = "SELECT * FROM %s" % table_name
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
if sql.table_exists(test_table_to_drop, self.db, flavor='sqlite'):
cur = self.db.cursor()
cur.execute("DROP TABLE %s" % test_table_to_drop)
cur.close()
# test if invalid value for if_exists raises appropriate error
self.assertRaises(ValueError,
sql.write_frame,
frame=df_if_exists_1,
con=self.db,
name=table_name,
flavor='sqlite',
if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
flavor='sqlite', if_exists='fail')
self.assertRaises(ValueError,
sql.write_frame,
frame=df_if_exists_1,
con=self.db,
name=table_name,
flavor='sqlite',
if_exists='fail')
# test if_exists='replace'
sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
flavor='sqlite', if_exists='replace')
self.assertEqual(sql.tquery(sql_select, con=self.db),
[(1, 'A'), (2, 'B')])
sql.write_frame(frame=df_if_exists_2, con=self.db, name=table_name,
flavor='sqlite', if_exists='replace')
self.assertEqual(sql.tquery(sql_select, con=self.db),
[(3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
# test if_exists='append'
sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
flavor='sqlite', if_exists='fail')
self.assertEqual(sql.tquery(sql_select, con=self.db),
[(1, 'A'), (2, 'B')])
sql.write_frame(frame=df_if_exists_2, con=self.db, name=table_name,
flavor='sqlite', if_exists='append')
self.assertEqual(sql.tquery(sql_select, con=self.db),
[(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
class TestXMySQL(tm.TestCase):
@classmethod
def setUpClass(cls):
_skip_if_no_pymysql()
# test connection
import pymysql
try:
# Try Travis defaults.
# No real user should allow root access with a blank password.
pymysql.connect(host='localhost', user='root', passwd='',
db='pandas_nosetest')
except:
pass
else:
return
try:
pymysql.connect(read_default_group='pandas')
except pymysql.ProgrammingError as e:
raise nose.SkipTest(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
except pymysql.Error as e:
raise nose.SkipTest(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
def setUp(self):
_skip_if_no_pymysql()
import pymysql
try:
# Try Travis defaults.
# No real user should allow root access with a blank password.
self.db = pymysql.connect(host='localhost', user='root', passwd='',
db='pandas_nosetest')
except:
pass
else:
return
try:
self.db = pymysql.connect(read_default_group='pandas')
except pymysql.ProgrammingError as e:
raise nose.SkipTest(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
except pymysql.Error as e:
raise nose.SkipTest(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
def tearDown(self):
from pymysql.err import Error
try:
self.db.close()
except Error:
pass
def test_basic(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
frame.ix[0, 0] = np.nan
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', 'mysql')
cur = self.db.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
sql.tquery(fmt_sql, cur=cur)
self.db.commit()
result = sql.read_frame("select * from test", con=self.db)
result.index = frame.index
tm.assert_frame_equal(result, frame)
def test_execute(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', 'mysql')
cur = self.db.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
row = frame.ix[0].values.tolist()
sql.execute(ins, self.db, params=tuple(row))
self.db.commit()
result = sql.read_frame("select * from test", self.db)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'mysql')
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
self.assertTrue(tokens[1] == 'DATETIME')
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', 'mysql', keys=['A', 'B'],)
lines = create_sql.splitlines()
self.assertTrue('PRIMARY KEY (`A`,`B`)' in create_sql)
cur = self.db.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
def test_execute_fail(self):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.db.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.db)
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.execute,
'INSERT INTO test VALUES("foo", "bar", 7)',
self.db)
finally:
sys.stdout = sys.__stdout__
def test_execute_closed_connection(self):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.db.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db)
self.db.close()
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.tquery, "select * from test",
con=self.db)
finally:
sys.stdout = sys.__stdout__
def test_na_roundtrip(self):
_skip_if_no_pymysql()
pass
def _check_roundtrip(self, frame):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.db.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.write_frame(frame, name='test_table', con=self.db, flavor='mysql')
result = sql.read_frame("select * from test_table", self.db)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
result.index.name = frame.index.name
expected = frame
tm.assert_frame_equal(result, expected)
frame['txt'] = ['a'] * len(frame)
frame2 = frame.copy()
index = Index(lrange(len(frame2))) + 10
frame2['Idx'] = index
drop_sql = "DROP TABLE IF EXISTS test_table2"
cur = self.db.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.write_frame(frame2, name='test_table2', con=self.db, flavor='mysql')
result = sql.read_frame("select * from test_table2", self.db,
index_col='Idx')
expected = frame.copy()
# HACK! Change this once indexes are handled properly.
expected.index = index
expected.index.names = result.index.names
tm.assert_frame_equal(expected, result)
def test_tquery(self):
try:
import pymysql
except ImportError:
raise nose.SkipTest("no pymysql")
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.db.cursor()
cur.execute(drop_sql)
sql.write_frame(frame, name='test_table', con=self.db, flavor='mysql')
result = sql.tquery("select A from test_table", self.db)
expected = Series(frame.A.values, frame.index) # not to have name
result = Series(result, frame.index)
tm.assert_series_equal(result, expected)
try:
sys.stdout = StringIO()
self.assertRaises(sql.DatabaseError, sql.tquery,
'select * from blah', con=self.db)
self.assertRaises(sql.DatabaseError, sql.tquery,
'select * from blah', con=self.db, retry=True)
finally:
sys.stdout = sys.__stdout__
def test_uquery(self):
try:
import pymysql
except ImportError:
raise nose.SkipTest("no pymysql")
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.db.cursor()
cur.execute(drop_sql)
sql.write_frame(frame, name='test_table', con=self.db, flavor='mysql')
stmt = 'INSERT INTO test_table VALUES(2.314, -123.1, 1.234, 2.3)'
self.assertEqual(sql.uquery(stmt, con=self.db), 1)
try:
sys.stdout = StringIO()
self.assertRaises(sql.DatabaseError, sql.tquery,
'insert into blah values (1)', con=self.db)
self.assertRaises(sql.DatabaseError, sql.tquery,
'insert into blah values (1)', con=self.db,
retry=True)
finally:
sys.stdout = sys.__stdout__
def test_keyword_as_column_names(self):
        '''Ensure a SQL keyword (e.g. "From") works as a column name.'''
_skip_if_no_pymysql()
df = DataFrame({'From':np.ones(5)})
sql.write_frame(df, con = self.db, name = 'testkeywords',
if_exists='replace', flavor='mysql')
def test_if_exists(self):
_skip_if_no_pymysql()
df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
df_if_exists_2 = DataFrame({'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
table_name = 'table_if_exists'
sql_select = "SELECT * FROM %s" % table_name
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
if sql.table_exists(test_table_to_drop, self.db, flavor='mysql'):
cur = self.db.cursor()
cur.execute("DROP TABLE %s" % test_table_to_drop)
cur.close()
# test if invalid value for if_exists raises appropriate error
self.assertRaises(ValueError,
sql.write_frame,
frame=df_if_exists_1,
con=self.db,
name=table_name,
flavor='mysql',
if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
flavor='mysql', if_exists='fail')
self.assertRaises(ValueError,
sql.write_frame,
frame=df_if_exists_1,
con=self.db,
name=table_name,
flavor='mysql',
if_exists='fail')
# test if_exists='replace'
sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
flavor='mysql', if_exists='replace')
self.assertEqual(sql.tquery(sql_select, con=self.db),
[(1, 'A'), (2, 'B')])
sql.write_frame(frame=df_if_exists_2, con=self.db, name=table_name,
flavor='mysql', if_exists='replace')
self.assertEqual(sql.tquery(sql_select, con=self.db),
[(3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
# test if_exists='append'
sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
flavor='mysql', if_exists='fail')
self.assertEqual(sql.tquery(sql_select, con=self.db),
[(1, 'A'), (2, 'B')])
sql.write_frame(frame=df_if_exists_2, con=self.db, name=table_name,
flavor='mysql', if_exists='append')
self.assertEqual(sql.tquery(sql_select, con=self.db),
[(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit | -1,394,526,819,625,852,700 | 36.581665 | 99 | 0.559241 | false |
NitrousPG/forkbot | server_events.py | 1 | 4992 | # LICENSE
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""server message handler"""
# Numerics aggregated from https://www.alien.net.au/irc/irc2numerics.html
class Server_Events:
"""handles events"""
def __init__(self, forkbot):
self.forkbot = forkbot
self.server_msg = ""
self.server_msg_info = ""
self.num_switch = {
#misc
"NOTICE": self.not_implemented,
# start-up
"001": self.rpl_welcome, # welcome information
"002": self.rpl_yourhost, # server information
"004": self.not_implemented, # RPL_MYINFO, TODO
"005": self.not_implemented, # RPL_BOUNCE
# server info
"251": self.not_implemented,
"252": self.not_implemented,
"253": self.not_implemented,
"254": self.not_implemented,
"255": self.not_implemented,
"265": self.not_implemented,
"266": self.not_implemented,
# channel info
"315": self.rpl_synirc_who_end, # end of WHO
"332": self.rpl_topic, # topic of channel
"333": self.not_implemented, # ??
"352": self.rpl_synirc_who, # response to WHO
"353": self.rpl_namreply, # user list
"366": self.rpl_endofnames, # end of user list
# motd
"372": self.motd, # motd start/continuation
"375": self.not_implemented,
"376": self.motd, # end of motd
}
def not_implemented(self):
"""000"""
#placeholder function
pass
def motd(self):
"""372, 376"""
# for right now we do not
# care about the MOTD
pass
def rpl_welcome(self):
"""001"""
self.forkbot.log("welcome recieved")
self.forkbot.hostmask = self.server_msg.split(" ")[-1].replace("\r", "")
self.forkbot.log("hostmask is " + self.forkbot.hostmask)
def rpl_yourhost(self):
"""002"""
pass
def rpl_synirc_who_end(self):
"""find out what this numeric for this is"""
pass
def rpl_topic(self):
"""332"""
channel = self.server_msg_info[-1]
topic = self.server_msg[:-1]
self.forkbot.channel_topics.update({channel: topic})
def rpl_synirc_who(self):
"""who request handler"""
msg_info = self.server_msg_info
host = msg_info[5]
nick = msg_info[7]
self.forkbot.users.hostmask.update({nick: host})
def rpl_namreply(self):
"""353"""
names = self.server_msg.split(" ")[:-1]
channel = self.server_msg_info[-1]
for name in names:
name = "".join([x for x in name if x not in "@+~"])
self.forkbot.users.add_user(channel, name)
for name in names:
op_type = ""
if name.startswith("~"):
op_type = "owner"
elif name.startswith("@"):
op_type = "operator"
elif name.startswith("&"):
op_type = "admin"
elif name.startswith("+"):
op_type = "voice"
if op_type != "":
self.forkbot.users.add_op(channel, "".join(
[x for x in name if x not in "@+&~"]), op_type)
def rpl_endofnames(self):
"""366"""
pass
def process(self, msg_info, msg):
"""processes and delegates the server event to the correct function"""
self.server_msg_info = msg_info
self.server_msg = msg
# find the key given by the server, and
# execute the function that deals with that key
try:
self.num_switch[msg_info[1]]()
except KeyError as ex:
self.forkbot.log(f"Unsupported Numeric: {ex}")
def find_ops(self, nicks):
"""parse the list of nicks given by the server and
register the ops"""
owner, operator, voice, none = [], [], [], []
for nick in nicks:
if nick.startswith("~"):
owner.append(nick[1:])
elif nick.startswith("@"):
operator.append(nick[1:])
elif nick.startswith("+"):
voice.append(nick[1:])
else:
none.append(nick)
ops = {
"owner": owner,
"operator": operator,
"voice": voice,
"none": none
}
return ops
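    # For illustration (nicks are made up): find_ops(['~alice', '@bob', '+carol',
    # 'dave']) returns {'owner': ['alice'], 'operator': ['bob'],
    # 'voice': ['carol'], 'none': ['dave']}.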
| gpl-3.0 | -7,410,502,775,914,562,000 | 29.625767 | 80 | 0.527244 | false |
Pragmatismo/TimelapsePi-EasyControl | webcamcap_show_numpy.py | 1 | 8684 | #!/usr/bin/python
import time
import os
import sys
import pygame
import numpy
from PIL import Image, ImageDraw, ImageChops
print("")
print("")
print(" USE l=3 to take a photo every 3 somethings, try a 1000 or 2")
print(" t to take triggered photos ")
print(" cap=/home/pi/folder/ to set caps path other than current dir")
print(" ")
pi_paper = False #updates pi wall paper, use -nopaper to turn it off.
s_val = "10"
c_val = "2"
g_val = "10"
b_val = "15"
x_dim = 1600
y_dim = 896
additonal_commands = "-d/dev/video1 -w"
try:
cappath = os.getcwd()
cappath += "/"
except:
print(" COULD NOT GET CURRENT DIR SET WITH A FLAG ")
cappath = "./"
print(" COULD NOT GET CURRENT DIR SET WITH A FLAG ")
loc_settings = "./camera_settings.txt"
try:
with open(loc_settings, "r") as f:
for line in f:
s_item = line.split("=")
if s_item[0] == "s_val":
s_val = s_item[1].split("\n")[0]
elif s_item[0] == "c_val":
c_val = s_item[1].split("\n")[0]
elif s_item[0] == "g_val":
g_val = s_item[1].split("\n")[0]
elif s_item[0] == "b_val":
b_val = s_item[1].split("\n")[0]
elif s_item[0] == "x_dim":
x_dim = s_item[1].split("\n")[0]
elif s_item[0] == "y_dim":
y_dim = s_item[1].split("\n")[0]
elif s_item[0] == "additonal_commands":
additonal_commands = s_item[1].split("\n")[0]
except:
print("No config file for camera, using default")
print("Run cam_config.py to create one")
def photo():
# take and save photo
timenow = time.time()
timenow = str(timenow)[0:10]
filename= "cap_"+str(timenow)+".jpg"
#os.system("uvccapture "+additonal_commands+" -S"+s_val+" -C" + c_val + " -G"+ g_val +" -B"+ b_val +" -x"+str(x_dim)+" -y"+str(y_dim)+" -v -t0 -o"+cappath+filename)
cmd = str("uvccapture "+additonal_commands+" -x"+str(x_dim)+" -y"+str(y_dim)+" -v -t0 -o"+cappath+filename)
print("####")
print("####")
print cmd
print("####")
print("####")
os.system(cmd)
print("Image taken and saved to "+cappath+filename)
if pi_paper == True:
os.system("export DISPLAY=:0 && pcmanfm --set-wallpaper "+cappath+filename)
return filename
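# Sketch of the capture command photo() assembles with the default settings
# above (the timestamp in the output filename is illustrative):
#   uvccapture -d/dev/video1 -w -x1600 -y896 -v -t0 -o./cap_1466000000.jpg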
if 'wp' in sys.argv or 'wallpaper' in sys.argv:
pi_paper = True
print(" Going to try changing wall paper")
loop = False
trig = False
for argu in sys.argv[1:]:
try:
thearg = str(argu).split('=')[0]
except:
thearg = str(argu)
if thearg == 'cap' or thearg =='cappath':
cappath = str(argu).split('=')[1]
elif thearg == 'l' or thearg == 'looped':
try:
num = int(str(argu).split('=')[1])
except:
print("No speed supplied, taking every 10")
num = 10
loop = True
elif thearg == 't' or thearg == 'TRIGGERED':
trig = True
print(" Saving files to, " + str(cappath))
pygame.init()
display_width = x_dim
display_height = y_dim
gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('Most recent image')
black = (0,0,0)
white = (255,255,255)
clock = pygame.time.Clock()
crashed = False
import matplotlib.pyplot as plt
def show_pic(imgtaken, x=0,y=0):
gameDisplay.blit(imgtaken, (x,y))
gameDisplay.fill(white)
c_photo = photo()
pil_c_photo = Image.open(c_photo)
numpy_pic = numpy.array(pil_c_photo)
b_photo = photo()
pil_b_photo = Image.open(b_photo)
numpy_pic_b = numpy.array(pil_b_photo)
mask = numpy_pic_b > numpy_pic + 30 #the +30 gets rid of noise
mask2 = numpy_pic_b < numpy_pic - 30
lol = mask + mask2
e_pic = numpy_pic.copy()
num = 0
while not crashed:
for event in pygame.event.get():
if event.type == pygame.QUIT:
crashed = True
timenow = time.time()
e_photo = str(timenow).split(".")[0]
e_photo= "numpy_"+str(timenow)+".jpg"
num = num + 1
b_photo = c_photo
c_photo = photo()
numpy_pic_b = numpy_pic.copy()
pil_c_photo = Image.open(c_photo)
numpy_pic = numpy.array(pil_c_photo)
print numpy_pic.size
#print len(numpy_pic[3])
print "###"
#print numpy_pic[1:,1,1]
#a = np.arange(100)
print "##########"
#numpy_pic[1:500, range(0, len(numpy_pic[2]), 10), 1] = 0
#for x in numpy_pic[1:500, range(0, len(numpy_pic[2])), 1]:
# if x >= 100:
# x = 255
#for x in range(10,170,10):
# mask = numpy_pic < x
# numpy_pic[mask] = 255-x #numpy_pic[mask] + numpy_pic[mask]
#for x in range(200,255,5):
# mask = numpy_pic > x
# numpy_pic[mask] = 0+(x/10) # numpy_pic[mask] / numpy_pic[mask]+(numpy_pic[mask]/numpy_pic[mask])
#print numpy_pic[1:,1,1]
#print numpy_pic.min()
print "###"
#print numpy_pic.shape #Array dimensions
#print numpy_pic.ndim #Number of array dimensions
#print numpy_pic.dtype #Data type of array elements
#print numpy_pic.dtype.name #Name of data type
#print numpy_pic.mean()
#print numpy_pic.max()
#print numpy_pic.min()
#print numpy.info(numpy.ndarray.dtype)
#print numpy_pic.astype(int)
#mask = numpy_pic > numpy_pic_b
#mask = numpy_pic[:, :, 2] > 150
#numpy_pic[mask] = [0, 0, 255]
#lol = numpy_pic +
#mask = numpy_pic_b > numpy_pic + 30 #the +30 gets rid of noise
#mask2 = numpy_pic_b < numpy_pic - 30
margin = 20
maskr = numpy_pic[:, :, 0] < numpy_pic_b[:, :, 0] - margin
maskg = numpy_pic[:, :, 1] < numpy_pic_b[:, :, 1] - margin
maskb = numpy_pic[:, :, 2] < numpy_pic_b[:, :, 2] - margin
maskr2 = numpy_pic[:, :, 0] > numpy_pic_b[:, :, 0] + margin
maskg2 = numpy_pic[:, :, 1] > numpy_pic_b[:, :, 1] + margin
maskb2 = numpy_pic[:, :, 2] > numpy_pic_b[:, :, 2] + margin
#numpy_pic[mask] = [0, 0, 255]
#lol_old = lol
#lol = mask + mask2
#lol = lol + lol_old
persist = 'ohhh'
if persist == 'True':
numpy_pic[maskr] = [255, 0, 0]
numpy_pic[maskg] = [0, 255, 0]
numpy_pic[maskb] = [0, 0, 255]
numpy_pic[maskb2] = [0, 0, 100]
numpy_pic[maskr2] = [100, 0, 0]
numpy_pic[maskg2] = [0, 100, 0]
Image.fromarray(numpy_pic).save(e_photo)
elif persist == 'False':
old_e = e_pic
e_pic = numpy_pic.copy()
e_pic[maskr] = [255, 0, 0]
e_pic[maskg] = [0, 255, 0]
e_pic[maskb] = [0, 0, 255]
e_pic[maskr2] = [100, 0, 0]
e_pic[maskg2] = [0, 100, 0]
e_pic[maskb2] = [0, 0, 100]
show1 = 'waa'
if show1 == '1':
e_pic = ((e_pic/4) - (numpy_pic))*3
e_pic = e_pic / 3 + old_e / 2
elif show1 == 'tripsy':
e_pic = ((e_pic/4) - (numpy_pic))*3
e_pic = e_pic - old_e / 2
elif show1 == 'waa':
e_pic = ((e_pic/4) - (numpy_pic))*3
#e_pic = old_e * 0.8 + e_pic * 0.2
Image.fromarray(e_pic).save(e_photo)
elif persist == 'ohhh':
old_e = e_pic.copy()
mask_b_pic = numpy_pic.copy()
mask_d_pic = numpy_pic.copy()
mask_b_pic[maskr] = [255, 255, 255]
mask_b_pic[maskg] = [255, 255, 255]
mask_b_pic[maskb] = [255, 255, 255]
mask_d_pic[maskr2] = [0, 0, 0]
mask_d_pic[maskg2] = [0, 0, 0]
mask_d_pic[maskb2] = [0, 0, 0]
#e_pic = e_pic/6 + old_e
e_pic = [200, 200, 0]
#e_pic = e_pic/2 - ((mask_d_pic) + (mask_b_pic))
#e_pic = e_pic/2 + ((mask_d_pic) + (mask_b_pic))
#choose one of the following
#e_pic = mask_d_pic #shows when pixel is darker than it was
#e_pic = mask_b_pic #shows when pixel is lighter than prior
e_pic = mask_d_pic - mask_b_pic #black execpt for movement
e_pic = mask_b_pic / (mask_d_pic / 100) #black execpt for movement
#e_pic = mask_d_pic + mask_b_pic #looks odd
Image.fromarray(e_pic).save(e_photo)
#plt.imshow(lol)
#plt.show()
#Image.fromarray(numpy_pic).save(e_photo)
onscreen = pygame.image.load(e_photo)
gameDisplay.blit(onscreen, (0,0))
pygame.display.update()
if trig == True:
print("Waiting for input before taking next image...")
tp = raw_input("press return to take picture; ")
if tp == "q":
print("---bye!")
exit()
clock.tick(20)
if loop == True:
pygame.time.wait(num)
clock.tick(20)
elif trig == False and loop == False:
crashed = True
#while True:
#pygame.time.wait(1000)
#clock.tick(20)
pygame.quit()
quit()
| gpl-2.0 | -2,106,105,906,145,419,300 | 30.23741 | 168 | 0.539843 | false |
mbj4668/pyang | pyang/xpath.py | 1 | 12087 | from . import xpath_lexer
from . import xpath_parser
from .error import err_add
from .util import prefix_to_module, search_data_node, data_node_up
from .syntax import re_identifier
core_functions = {
'last': ([], 'number'),
'position': ([], 'number'),
'count': (['node-set'], 'number'),
'id': (['object'], 'node-set'),
'local-name': (['node-set', '?'], 'string'),
'namespace-uri': (['node-set', '?'], 'string'),
'name': (['node-set', '?'], 'string'),
'string': (['object'], 'string'),
'concat': (['string', 'string', '*'], 'string'),
'starts-with': (['string', 'string'], 'boolean'),
'contains': (['string', 'string'], 'boolean'),
'substring-before': (['string', 'string'], 'string'),
'substring-after': (['string', 'string'], 'string'),
'substring': (['string', 'number', 'number', '?'], 'string'),
'string-length': (['string', '?'], 'number'),
'normalize-space': (['string', '?'], 'string'),
'translate': (['string', 'string', 'string'], 'string'),
'boolean': (['object'], 'boolean'),
'not': (['boolean'], 'boolean'),
'true': ([], 'boolean'),
'false': ([], 'boolean'),
'lang': (['string'], 'boolean'),
'number': (['object'], 'number'),
'sum': (['node-set'], 'number'),
'floor': (['number'], 'number'),
'ceiling': (['number'], 'number'),
'round': (['number'], 'number'),
}
yang_xpath_functions = {
'current': ([], 'node-set')
}
yang_1_1_xpath_functions = {
'bit-is-set': (['node-set', 'string'], 'boolean'),
'enum-value': (['string'], 'number'),
'deref': (['node-set'], 'node-set'),
'derived-from': (['node-set', 'qstring'], 'boolean'),
'derived-from-or-self': (['node-set', 'qstring'], 'boolean'),
're-match': (['string', 'string'], 'boolean'),
}
extra_xpath_functions = {
'deref': (['node-set'], 'node-set'), # pyang extension for 1.0
}
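# Note on the signature tuples above: a trailing '?' marks the last listed
# argument as optional (so 'substring' accepts 2 or 3 arguments) and a trailing
# '*' allows any number of additional arguments (so 'concat' accepts 2 or
# more); the second tuple element is the function's return type.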
def add_extra_xpath_function(name, input_params, output_param):
extra_xpath_functions[name] = (input_params, output_param)
def add_prefix(prefix, s):
"Add `prefix` to all unprefixed names in `s`"
# tokenize the XPath expression
toks = xpath_lexer.scan(s)
# add default prefix to unprefixed names
toks2 = [_add_prefix(prefix, tok) for tok in toks]
# build a string of the patched expression
ls = [x.value for x in toks2]
return ''.join(ls)
def _add_prefix(prefix, tok):
if tok.type == 'name':
m = xpath_lexer.re_ncname.match(tok.value)
if m.group(2) is None:
tok.value = prefix + ':' + tok.value
return tok
## TODO: validate must/when after deviate
# node is the initial context node or None if it is not known
def v_xpath(ctx, stmt, node):
try:
if hasattr(stmt, 'i_xpath') and stmt.i_xpath is not None:
q = stmt.i_xpath
else:
q = xpath_parser.parse(stmt.arg)
stmt.i_xpath = q
chk_xpath_expr(ctx, stmt.i_orig_module, stmt.pos, node, node, q, None)
except xpath_lexer.XPathError as e:
err_add(ctx.errors, stmt.pos, 'XPATH_SYNTAX_ERROR', e.msg)
stmt.i_xpath = None
except SyntaxError as e:
err_add(ctx.errors, stmt.pos, 'XPATH_SYNTAX_ERROR', e.msg)
stmt.i_xpath = None
# mod is the (sub)module where the stmt is defined, which we use to
# resolve prefixes.
def chk_xpath_expr(ctx, mod, pos, initial, node, q, t):
if isinstance(q, list):
chk_xpath_path(ctx, mod, pos, initial, node, q)
elif isinstance(q, tuple):
if q[0] == 'absolute':
chk_xpath_path(ctx, mod, pos, initial, 'root', q[1])
elif q[0] == 'relative':
chk_xpath_path(ctx, mod, pos, initial, node, q[1])
elif q[0] == 'union':
for qa in q[1]:
chk_xpath_path(ctx, mod, pos, initial, node, qa)
elif q[0] == 'comp':
chk_xpath_expr(ctx, mod, pos, initial, node, q[2], None)
chk_xpath_expr(ctx, mod, pos, initial, node, q[3], None)
elif q[0] == 'arith':
chk_xpath_expr(ctx, mod, pos, initial, node, q[2], None)
chk_xpath_expr(ctx, mod, pos, initial, node, q[3], None)
elif q[0] == 'bool':
chk_xpath_expr(ctx, mod, pos, initial, node, q[2], None)
chk_xpath_expr(ctx, mod, pos, initial, node, q[3], None)
elif q[0] == 'negative':
chk_xpath_expr(ctx, mod, pos, initial, node, q[1], None)
elif q[0] == 'function_call':
chk_xpath_function(ctx, mod, pos, initial, node, q[1], q[2])
elif q[0] == 'path_expr':
chk_xpath_expr(ctx, mod, pos, initial, node, q[1], t)
elif q[0] == 'path': # q[1] == 'filter'
chk_xpath_expr(ctx, mod, pos, initial, node, q[2], None)
chk_xpath_expr(ctx, mod, pos, initial, node, q[3], None)
elif q[0] == 'var':
# NOTE: check if the variable is known; currently we don't
# have any variables in YANG xpath expressions
err_add(ctx.errors, pos, 'XPATH_VARIABLE', q[1])
elif q[0] == 'literal':
# kind of hack to detect qnames, and mark the prefixes
# as being used in order to avoid warnings.
s = q[1]
if s[0] == s[-1] and s[0] in ("'", '"'):
s = s[1:-1]
i = s.find(':')
# make sure there is just one : present
# FIXME: more colons should possibly be reported, instead
if i != -1 and s.find(':', i + 1) == -1:
prefix = s[:i]
tag = s[i + 1:]
if (re_identifier.search(prefix) is not None and
re_identifier.search(tag) is not None):
# we don't want to report an error; just mark the
# prefix as being used.
my_errors = []
prefix_to_module(mod, prefix, pos, my_errors)
for pos0, code, arg in my_errors:
if code == 'PREFIX_NOT_DEFINED' and t == 'qstring':
# we know for sure that this is an error
err_add(ctx.errors, pos0,
'PREFIX_NOT_DEFINED', arg)
else:
# this may or may not be an error;
# report a warning
err_add(ctx.errors, pos0,
'WPREFIX_NOT_DEFINED', arg)
def chk_xpath_function(ctx, mod, pos, initial, node, func, args):
signature = None
if func in core_functions:
signature = core_functions[func]
elif func in yang_xpath_functions:
signature = yang_xpath_functions[func]
elif mod.i_version != '1' and func in yang_1_1_xpath_functions:
signature = yang_1_1_xpath_functions[func]
elif ctx.strict and func in extra_xpath_functions:
err_add(ctx.errors, pos, 'STRICT_XPATH_FUNCTION', func)
return None
elif not ctx.strict and func in extra_xpath_functions:
signature = extra_xpath_functions[func]
if signature is None:
err_add(ctx.errors, pos, 'XPATH_FUNCTION', func)
return None
# check that the number of arguments are correct
nexp = len(signature[0])
nargs = len(args)
if nexp == 0:
if nargs != 0:
err_add(ctx.errors, pos, 'XPATH_FUNC_ARGS',
(func, nexp, nargs))
elif signature[0][-1] == '?':
if nargs != (nexp - 1) and nargs != (nexp - 2):
err_add(ctx.errors, pos, 'XPATH_FUNC_ARGS',
(func, "%s-%s" % (nexp - 2, nexp - 1), nargs))
elif signature[0][-1] == '*':
if nargs < (nexp - 1):
err_add(ctx.errors, pos, 'XPATH_FUNC_ARGS',
(func, "at least %s" % (nexp - 1), nargs))
elif nexp != nargs:
err_add(ctx.errors, pos, 'XPATH_FUNC_ARGS',
(func, nexp, nargs))
# FIXME implement checks from check_function()
# check the arguments - FIXME check type
i = 0
args_signature = signature[0][:]
for arg in args:
chk_xpath_expr(ctx, mod, pos, initial, node, arg, args_signature[i])
if args_signature[i] == '*':
args_signature.append('*')
i = i + 1
return signature[1]
def chk_xpath_path(ctx, mod, pos, initial, node, path):
if len(path) == 0:
return
head = path[0]
if head[0] == 'var':
# check if the variable is known as a node-set
# currently we don't have any variables, so this fails
err_add(ctx.errors, pos, 'XPATH_VARIABLE', head[1])
elif head[0] == 'function_call':
func = head[1]
args = head[2]
rettype = chk_xpath_function(ctx, mod, pos, initial, node, func, args)
if rettype is not None:
# known function, check that it returns a node set
if rettype != 'node-set':
err_add(ctx.errors, pos, 'XPATH_NODE_SET_FUNC', func)
if func == 'current':
chk_xpath_path(ctx, mod, pos, initial, initial, path[1:])
elif head[0] == 'step':
axis = head[1]
nodetest = head[2]
preds = head[3]
node1 = None
if axis == 'self':
pass
elif axis == 'child' and nodetest[0] == 'name':
prefix = nodetest[1]
name = nodetest[2]
if prefix is None:
if initial is None:
pmodule = None
elif initial.keyword == 'module':
pmodule = initial
else:
pmodule = initial.i_module
else:
pmodule = prefix_to_module(mod, prefix, pos, ctx.errors)
# if node and initial are None, it means we're checking an XPath
# expression when it is defined in a grouping or augment, i.e.,
# when the full tree is not expanded. in this case we can't check
# the paths
if pmodule is not None and node is not None and initial is not None:
if node == 'root':
children = pmodule.i_children
else:
children = getattr(node, 'i_children', None) or []
child = search_data_node(children, pmodule.i_modulename, name)
if child is None and node == 'root':
err_add(ctx.errors, pos, 'XPATH_NODE_NOT_FOUND2',
(pmodule.i_modulename, name, pmodule.arg))
elif child is None and node.i_module is not None:
err_add(ctx.errors, pos, 'XPATH_NODE_NOT_FOUND1',
(pmodule.i_modulename, name,
node.i_module.i_modulename, node.arg))
elif child is None:
err_add(ctx.errors, pos, 'XPATH_NODE_NOT_FOUND2',
(pmodule.i_modulename, name, node.arg))
elif (getattr(initial, 'i_config', None) is True
and getattr(child, 'i_config', None) is False):
err_add(ctx.errors, pos, 'XPATH_REF_CONFIG_FALSE',
(pmodule.i_modulename, name))
else:
node1 = child
elif axis == 'parent' and nodetest == ('node_type', 'node'):
if node is None:
pass
elif node == 'root':
err_add(ctx.errors, pos, 'XPATH_PATH_TOO_MANY_UP', ())
else:
p = data_node_up(node)
if p is None:
err_add(ctx.errors, pos, 'XPATH_PATH_TOO_MANY_UP', ())
else:
node1 = p
else:
# we can't validate the steps on other axis, but we can validate
# functions etc.
pass
for p in preds:
chk_xpath_expr(ctx, mod, pos, initial, node1, p, None)
chk_xpath_path(ctx, mod, pos, initial, node1, path[1:])
| isc | -2,654,624,637,892,220,000 | 41.410526 | 80 | 0.514933 | false |
simone-campagna/zapper | lib/python/zapper/lock_file.py | 1 | 1913 | #!/usr/bin/env python3
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'Simone Campagna'
import os
import time
import errno
import fcntl
import contextlib
@contextlib.contextmanager
def Lock(filename, mode="r", blocking=True, timeout=10):
# enter
lock_op = fcntl.LOCK_EX
if not blocking:
lock_op += fcntl.LOCK_NB
count = 0
interval = 0.1
if timeout is not None:
count = int(round(timeout/interval, 0))
if count <= 0:
count = 1
with open(filename, mode) as f:
for i in range(count):
try:
#fcntl.fcntl(self.fileno(), lock_op, os.O_NDELAY)
                fcntl.lockf(f.fileno(), lock_op)
                break  # lock acquired; stop retrying
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
if timeout:
time.sleep(interval)
continue
except:
import traceback
traceback.print_exc()
time.sleep(interval)
yield f
#exit
fcntl.lockf(f.fileno(), fcntl.LOCK_UN)
if __name__ == "__main__":
import sys
with Lock('a.lock', 'a') as f_out:
for arg in sys.argv:
f_out.write(arg + '\n')
f_out.flush()
print("sleeping...")
time.sleep(10)
print("done.")
f_out.write("finito!\n")
| apache-2.0 | 7,631,584,786,992,049,000 | 27.132353 | 74 | 0.590173 | false |
LinguList/server | app/settings/sound_classes/asjp/asjp.py | 1 | 3293 | #! /usr/bin/env python
from __future__ import division,print_function
from lingpy.data.derive import compile_model
from scipy.spatial.distance import squareform
from time import sleep
from pickle import dump
asjp = {}
score = open('score','r').read()
score = score.split('\n')
del score[-1]
dicto = {}
for line in score:
lin = line.split('\t')
dicto[lin[0]] = lin[1:]
letters = []
for i in range(len(score)):
score[i] = score[i].split('\t')
letters.append(score[i][0])
del score[i][0]
matrix = []
for i in range(len(score)):
for l in letters:
if i < len(dicto[l]):
matrix.append(float(dicto[l][i]))
matrix = squareform(matrix)
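# squareform expands the condensed vector of pairwise scores collected above
# into a full symmetric matrix, e.g. squareform([1, 2, 3]) gives
# [[0, 1, 2], [1, 0, 3], [2, 3, 0]]; the zero diagonal is overwritten with 40
# further down.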
consonants = ['p'] + letters
consonant_matrix = matrix.copy()
score = open('vows_score','r').read()
score = score.split('\n')
del score[-1]
dicto = {}
for line in score:
lin = line.split('\t')
dicto[lin[0]] = lin[1:]
letters = []
for i in range(len(score)):
score[i] = score[i].split('\t')
letters.append(score[i][0])
del score[i][0]
matrix = []
for i in range(len(score)):
for l in letters:
if i < len(dicto[l]):
matrix.append(float(dicto[l][i]))
matrix = squareform(matrix)
vowel_matrix = matrix.copy()
vowels = ['i'] + letters
for i in range(len(vowel_matrix)):
vowel_matrix[i][i] = 40
for i in range(len(consonant_matrix)):
consonant_matrix[i][i] = 40
for i in range(31):
for j in range(31):
asjp[consonants[i],consonants[j]] = consonant_matrix[i][j]
for i in range(7):
for j in range(7):
asjp[vowels[i],vowels[j]] = vowel_matrix[i][j]
for l in vowels:
asjp[l,'X'] = 0
asjp['X',l] = 0
for l in consonants:
asjp[l,'X'] = 0
asjp['X',l] = 0
asjp['X','X'] = 0
for v in vowels:
for c in consonants:
asjp[v,c] = -20
asjp[c,v] = -20
for key in asjp.keys():
if asjp[key] == 0:
asjp[key] = 0
else:
asjp[key] = int(asjp[key]+0.5)
for v1 in vowels:
for v2 in vowels:
asjp[v1,v2] = int(asjp[v1,v2] * 0.25 + 0.5) + 10
asjp['i','y'] = -2
asjp['y','i'] = -2
asjp['u','w'] = -2
asjp['w','u'] = -2
asjp['u','v'] = -4
asjp['v','u'] = -4
asjp['u','f'] = -6
asjp['f','u'] = -6
keys = []
for keyA,keyB in asjp.keys():
keys.append((keyA,keyB))
for keyA,keyB in keys:
asjp[keyA,'+'] = -20
asjp['+',keyB] = -20
asjp[keyA,'0'] = 0
asjp['0',keyB] = 0
asjp['X','+'] = -5
asjp['+','X'] = -5
asjp['+','+'] = 0 # swaps
asjp['0','0'] = 0 # missing values
asjp['X','0'] = 0
asjp['0','X'] = 0
for i in '0123456':
for j in '0123456':
if i == j:
asjp[i,j] = 10
else:
asjp[i,j] = 5
keys = []
for keyA,keyB in asjp.keys():
keys.append((keyA,keyB))
for keyA,keyB in keys:
for i in '123456':
if keyA not in '123456' and keyB not in '123456':
asjp[keyA,i] = -20
asjp[i,keyB] = -20
asjp[keyA,'_'] = -50
asjp['_',keyB] = -50
asjp['_','_'] = 0
for x in asjp.keys():
asjp[x] = asjp[x] / 4.0
if asjp[x] > 0 and asjp[x] != 10:
asjp[x] += 0.75 * asjp[x]
elif asjp[x] < 0:
asjp[x] += 0.75 * asjp[x]
out = open('scorer.bin','wb')
dump(asjp,out)
out.close()
compile_model('asjp')
print("[i] Compilation of the ASJP model was successful!")
sleep(1)
| gpl-2.0 | 3,162,807,456,613,274,600 | 19.710692 | 66 | 0.54631 | false |
wevoice/wesub | dev_settings_test.py | 1 | 2112 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from dev_settings import *
INSTALLED_APPS += (
'django_nose',
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
CACHE_PREFIX = "testcache"
CACHE_TIMEOUT = 60
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_PLUGINS = ['utils.test_utils.plugin.UnisubsTestPlugin']
CELERY_ALWAYS_EAGER = True
YOUTUBE_CLIENT_ID = 'test-youtube-id'
YOUTUBE_CLIENT_SECRET = 'test-youtube-secret'
YOUTUBE_API_KEY = 'test-youtube-api-key'
API_ALWAYS_USE_FUTURE = True
# Use MD5 password hashing, other algorithms are purposefully slow to increase
# security. Also include the SHA1 hasher since some of the tests use it.
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
)
# Let the nose CaptureLogging plugin handle logging. It doesn't display
# logging at all, except if there's a test failure.
del LOGGING
NOSE_ARGS = ['--logging-filter=test_steps, -remote_connection, '
'-selenium.webdriver.remote.remote_connection',
'--with-xunit', '--logging-level=ERROR',
'--xunit-file=nosetests.xml',
]
try:
from dev_settings_test_local import *
except ImportError:
pass
| agpl-3.0 | -7,629,318,859,687,858,000 | 30.058824 | 78 | 0.69697 | false |
robotican/ric | ric_board/scripts/RiCConfigurator/GUI/SimulationWindow.py | 1 | 5700 | import GUI.MainWindow
__author__ = 'tom'
from PyQt4.QtGui import *
from GUI.Schemes.gazeboGui import Ui_gazebo_gui
from BAL.Interface.DeviceFrame import SERVO, BATTERY, SWITCH, IMU, PPM, GPS, RELAY, URF, CLOSE_LOP_ONE, CLOSE_LOP_TWO, \
OPEN_LOP, DIFF_CLOSE, DIFF_OPEN, EX_DEV, HOKUYO, OPRNNI, USBCAM, DIFF_CLOSE_FOUR, ROBOT_MODEL, SLAM, Keyboard, \
JOYSTICK, SMOOTHER
import rospkg
import pickle
from PyQt4.QtCore import Qt
from lxml.etree import Element, SubElement
class SimulationWindow(QDialog, Ui_gazebo_gui):
def __init__(self, parent=None):
super(SimulationWindow, self).__init__(parent)
self.setupUi(self)
self._devs = []
self.loadButton.clicked.connect(self.loadEvent)
self.launchButton.clicked.connect(self.launchEvent)
self.devList.itemClicked.connect(self.listChangeEvent)
self.loadFile()
self.showSimDetail()
def listChangeEvent(self, item):
dev = self._devs[self.devList.row(item)]
if item.checkState() > 0:
dev[1] = True
else:
dev[1] = False
def loadFile(self):
self._devs = []
pkg = rospkg.RosPack().get_path('ric_board')
fileName = QFileDialog.getOpenFileName(self, self.tr("Open file"), "%s/DATA" % pkg, self.tr("RiC File (*.RIC)"))
if fileName == '': return
devices = pickle.load(open(fileName))[2]
self.arrangeDevices(devices)
def arrangeDevices(self, devices):
for dev in devices:
if dev['type'] in [DIFF_CLOSE, IMU, OPRNNI, HOKUYO, USBCAM, URF]:
self._devs.append([dev, True])
def showSimDetail(self):
for dev in self._devs:
if dev[0]['type'] == OPRNNI:
listItem = QListWidgetItem('OpenniCamera')
else:
listItem = QListWidgetItem(dev[0]['name'])
listItem.setCheckState(Qt.Checked)
self.devList.addItem(listItem)
def clearLst(self):
size = self.devList.count()
for i in xrange(size):
self.devList.takeItem(0)
def loadEvent(self):
self.loadFile()
self.clearLst()
self.showSimDetail()
def launchEvent(self):
root = Element('launch')
SubElement(root, 'arg', {
'name': 'paused',
'default': 'false'
})
SubElement(root, 'arg', {
'name': 'use_sim_time',
'default': 'true'
})
SubElement(root, 'arg', {
'name': 'gui',
'default': 'true'
})
SubElement(root, 'arg', {
'name': 'headless',
'default': 'false'
})
SubElement(root, 'arg', {
'name': 'debug',
'default': 'false'
})
world = SubElement(root, 'include', dict(file='$(find gazebo_ros)/launch/empty_world.launch'))
SubElement(world, 'arg', {
'name': 'debug',
'value': '$(arg debug)'
})
SubElement(world, 'arg', {
'name': 'gui',
'value': '$(arg gui)'
})
SubElement(world, 'arg', {
'name': 'paused',
'value': '$(arg paused)'
})
SubElement(world, 'arg', {
'name': 'use_sim_time',
'value': '$(arg use_sim_time)'
})
SubElement(world, 'arg', {
'name': 'headless',
'value': '$(arg headless)'
})
SubElement(root, 'param', {
'name': 'robot_description',
'command': "$(find xacro)/xacro.py '$(find ric_gazebo)/robots/komodo/komodo.xacro' ns:='init' color_name:='Grey'"
})
haveCam = 'false'
haveOpenNi = 'false'
haveLaser = 'false'
haveUrf = 'false'
haveDiff = 'false'
haveImu = 'false'
for dev in self._devs:
if dev[1]:
if dev[0]['type'] == DIFF_CLOSE: haveDiff = 'true'
if dev[0]['type'] == IMU: haveImu = 'true'
if dev[0]['type'] == OPRNNI: haveOpenNi = 'true'
if dev[0]['type'] == HOKUYO: haveLaser = 'true'
if dev[0]['type'] == USBCAM: haveCam = 'true'
if dev[0]['type'] == URF: haveUrf = 'true'
amount = self.numberOfRobotsSpinBox.value()
for i in xrange(amount):
robotFile = SubElement(root, 'include', {'file': '$(find ric_gazebo)/launch/spawn_komodo.launch'})
SubElement(robotFile, 'arg', dict(name='name', value='komodo_%d' % (i + 1)))
SubElement(robotFile, 'arg', dict(name='color', value='White'))
SubElement(robotFile, 'arg', dict(name='x', value='0.0'))
SubElement(robotFile, 'arg', dict(name='y', value='%d.0' % i))
SubElement(robotFile, 'arg', dict(name='z', value='0.1'))
SubElement(robotFile, 'arg', dict(name='R', value='0.0'))
SubElement(robotFile, 'arg', dict(name='P', value='0.0'))
SubElement(robotFile, 'arg', dict(name='Y', value='0.0'))
SubElement(robotFile, 'arg', dict(name='arm_camera', value='true'))
SubElement(robotFile, 'arg', dict(name='front_camera', value=haveCam))
SubElement(robotFile, 'arg', dict(name='isDiff', value=haveDiff))
SubElement(robotFile, 'arg', dict(name='depth_camera', value=haveOpenNi))
SubElement(robotFile, 'arg', dict(name='laser_scanner', value=haveLaser))
SubElement(robotFile, 'arg', dict(name='urf', value=haveUrf))
SubElement(robotFile, 'arg', dict(name='imu', value=haveImu))
open('/home/tom/test.launch', 'w').write(GUI.MainWindow.prettify(root))
| bsd-3-clause | -2,762,979,084,064,864,000 | 35.538462 | 126 | 0.542632 | false |
andrey-yemelyanov/competitive-programming | cp-book/ch1/adhoc/chess/10849_MoveTheBishop.py | 1 | 1456 | # Problem name: 10849 Move the bishop
# Problem url: https://uva.onlinejudge.org/external/108/10849.pdf
# Author: Andrey Yemelyanov
import sys
import math
WHITE, BLACK = 0, 1
INFINITY = -1
def main():
n_test_cases = int(sys.stdin.readline().strip())
for i in range(n_test_cases):
sys.stdin.readline()
n_tests = int(sys.stdin.readline().strip())
N = int(sys.stdin.readline().strip())
for j in range(n_tests):
from_row, from_col, to_row, to_col = [int(t) for t in sys.stdin.readline().split()]
n_moves = count_bishop_moves(from_row, from_col, to_row, to_col)
if n_moves == INFINITY:
print("no move")
else:
print(n_moves)
def count_bishop_moves(from_row, from_col, to_row, to_col):
if from_row == to_row and from_col == to_col:
return 0;
elif square_color(from_row, from_col) != square_color(to_row, to_col):
return INFINITY
elif on_the_same_diagonal(from_row, from_col, to_row, to_col):
return 1
else:
return 2
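# Worked examples: (1,1) to (4,4) share a diagonal, so 1 move; (1,1) to (3,5)
# share a colour but no diagonal, so 2 moves; (1,1) to (1,2) are different
# colours, so "no move"; identical squares need 0 moves.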
def on_the_same_diagonal(row1, col1, row2, col2):
return abs(row1 - row2) == abs(col1 - col2)
def square_color(row, col):
if row % 2 == 0:
if col % 2 == 0:
return WHITE
else:
return BLACK
else:
if col % 2 == 0:
return BLACK
else:
return WHITE
if __name__=="__main__":
main()
| mit | -4,838,296,833,532,812,000 | 27 | 95 | 0.56044 | false |
zfrxiaxia/Code-zfr | 计蒜客/比赛/2015/light.py | 1 | 1148 | # -*- coding: utf-8 -*-
"""
Created on Sat May 28 19:17:42 2016
@author: AtoZ
"""
N,M = raw_input().split()
N,M = int(N),int(M)
while(1):
i0 = 0
while(i0<2**(M*N)):
bit = [0]*M*N
t_bit = bin(i0)
lt_bit = len(t_bit)
for j0 in range(lt_bit-2):
bit[j0] = int(t_bit[j0+2])
i0 +=1
print bit
"""
N,M = raw_input().split()
N,M = int(N),int(M)
L0 = []
L = []
time = 0
def change(h,l):
global time
time += 1
if h == N-1:
pass
else:
L[h*M+l] = not L[h*M+l]
L[(h+1)*M+l] = not L[(h+1)*M+l]
if l != 0:
L[(h+1)*M+l-1] = not L[(h+1)*M+l-1]
if l != M-1:
L[(h+1)*M+l+1] = not L[(h+1)*M+l+1]
if h != N-2:
L[(h+2)*M+l] = not L[(h+2)*M+l]
while(1):
try:
temp = raw_input()
L0.append(temp.split())
except:
break
for i in range(N):
for j in range(M):
L.append(int(L0[i][j]))
LL = [1]*N*M
j = 0
while(j<N):
for i in range(M):
ii = i+j*M
if L[ii]==0:
change(j,i)
j += 1
if L==LL:
print time
else:
print "no solution"
""" | gpl-3.0 | 7,432,343,685,599,265,000 | 16.953125 | 47 | 0.419861 | false |
linearb/mojo | rasppi/rasp.py | 1 | 3195 | #!/usr/bin/python
"""Sound an alarm if a raspberry pi hasn't been heard from lately
To set an alarm for pi named 'pi', create a file in mmdata/pulse.d named pi.alarm
"""
import os.path
import time
pulse="/home/mojotronadmin/mmdata/pulse.d/"
logfile="/home/mojotronadmin/mmdata/incoming.log"
maxinterval = 15*60 # how many seconds without contact before sounding first alarm
alarm_once = False # if True then only sound alarm once, then disable it
snooze = True # if True then delay before re-sounding alarm
snoozedelay = 120*60 # in seconds
should_sendsms = True # send an sms on alarm
alarm_smsnumber = "NEEDED"
should_sendemail = False # send an email on alarm
alarm_emailaddress = "[email protected]"
from twilio.rest import TwilioRestClient
def sendsms(tonumber, message):
account_sid = "NEEDED"
auth_token = "NEEDED"
client = TwilioRestClient(account_sid, auth_token)
twilio_number = "NEEDED"
reply = client.messages.create(to=tonumber, from_=twilio_number, body=message)
import commands
def sendemail(toaddress, message):
cmd = "echo '' | mail -s '{}' {}".format(message, toaddress)
(status, output) = commands.getstatusoutput(cmd)
# should catch error if status is not 0
def alarm(pi_name):
message = pi_name + " is down."
if should_sendsms:
sendsms(alarm_smsnumber, message)
if should_sendemail:
sendemail(alarm_emailaddress, message)
# If alarm file '[piname].alarm' does not exist, the alarm for that pi is disabled.
# If that file is empty, the alarm goes off if maxdelay seconds have passed since last heard from pi.
# If it contains an integer the snooze is enabled. That sets the alarm to go off if maxdelay seconds
# have passed since last alarm. If the alarm file contains anything else, the alarm is disabled.
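# Hypothetical example for a pi named "kitchen":
#   pulse.d/kitchen        "1466000000"  (last-contact epoch written by the pi)
#   pulse.d/kitchen.alarm  ""            (empty: fire once maxinterval has passed
#                                         since last contact)
#   pulse.d/kitchen.alarm  "1466007200"  (digits: used as the reference time
#                                         instead, e.g. after a snooze)
#   pulse.d/kitchen.alarm  "disabled"    (anything non-numeric disables the alarm)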
def main():
alarmfilelist = [x for x in os.listdir(pulse) if x.endswith(".alarm")]
for filename in alarmfilelist:
# get information about last time this pi contacted us
last_timestamp = "0"
pi_filename = filename[:-6]
if os.path.exists(pulse + pi_filename):
with open(pulse + pi_filename, 'r') as f:
last_timestamp = f.readline().rstrip()
# if there is an alarm file, sound alarm if haven't heard from pi recently
with open(pulse + filename, 'r+') as f:
timestamp = f.readline().rstrip()
if timestamp == "":
timestamp = last_timestamp
if timestamp.isdigit():
now = time.time()
if now - int(timestamp) > maxinterval:
alarm(pi_filename)
if alarm_once:
# only send alarm once, so disable alarm now
f.seek(0)
f.write("disabled\n")
f.truncate()
elif snooze:
# reset alarm time to snoozedelay seconds in future
f.seek(0)
f.write(str(int(now + snoozedelay)) + "\n")
f.truncate()
if __name__ == "__main__":
main()
| mit | -2,364,741,625,820,722,000 | 37.035714 | 101 | 0.610329 | false |
garrettr/securedrop | securedrop/journalist_app/col.py | 1 | 3264 | # -*- coding: utf-8 -*-
from flask import (Blueprint, redirect, url_for, render_template, flash,
request, abort, send_file, current_app)
from flask_babel import gettext
from sqlalchemy.orm.exc import NoResultFound
import crypto_util
import store
from db import db_session, Submission
from journalist_app.forms import ReplyForm
from journalist_app.utils import (make_star_true, make_star_false, get_source,
delete_collection, col_download_unread,
col_download_all, col_star, col_un_star,
col_delete)
def make_blueprint(config):
view = Blueprint('col', __name__)
@view.route('/add_star/<filesystem_id>', methods=('POST',))
def add_star(filesystem_id):
make_star_true(filesystem_id)
db_session.commit()
return redirect(url_for('main.index'))
@view.route("/remove_star/<filesystem_id>", methods=('POST',))
def remove_star(filesystem_id):
make_star_false(filesystem_id)
db_session.commit()
return redirect(url_for('main.index'))
@view.route('/<filesystem_id>')
def col(filesystem_id):
form = ReplyForm()
source = get_source(filesystem_id)
source.has_key = crypto_util.getkey(filesystem_id)
return render_template("col.html", filesystem_id=filesystem_id,
source=source, form=form)
@view.route('/delete/<filesystem_id>', methods=('POST',))
def delete_single(filesystem_id):
"""deleting a single collection from its /col page"""
source = get_source(filesystem_id)
delete_collection(filesystem_id)
flash(gettext("{source_name}'s collection deleted")
.format(source_name=source.journalist_designation),
"notification")
return redirect(url_for('main.index'))
@view.route('/process', methods=('POST',))
def process():
actions = {'download-unread': col_download_unread,
'download-all': col_download_all, 'star': col_star,
'un-star': col_un_star, 'delete': col_delete}
if 'cols_selected' not in request.form:
flash(gettext('No collections selected.'), 'error')
return redirect(url_for('main.index'))
# getlist is cgi.FieldStorage.getlist
cols_selected = request.form.getlist('cols_selected')
action = request.form['action']
if action not in actions:
return abort(500)
method = actions[action]
return method(cols_selected)
@view.route('/<filesystem_id>/<fn>')
def download_single_submission(filesystem_id, fn):
"""Sends a client the contents of a single submission."""
if '..' in fn or fn.startswith('/'):
abort(404)
try:
Submission.query.filter(
Submission.filename == fn).one().downloaded = True
db_session.commit()
except NoResultFound as e:
current_app.logger.error(
"Could not mark " + fn + " as downloaded: %s" % (e,))
return send_file(store.path(filesystem_id, fn),
mimetype="application/pgp-encrypted")
return view
| agpl-3.0 | -8,452,638,703,044,574,000 | 36.090909 | 78 | 0.594975 | false |
Haizs/NEU-mathe | FILEtoCSV.py | 1 | 1799 | import os.path
import csv
import re
headers = ['Id', 'KnowledgeId', 'Type', 'Src', 'Name', 'Count']
def doSubject(subject):
idCount = 0
rows = []
for root, dirs, files in os.walk('ChoiceSource/' + subject):
for name in files:
if (name != '.DS_Store'):
idCount += 1
qType = 1 if ('Easy' in root) else 2 if ('Averge' in root) else 3
rows.append([idCount, int(re.findall(r'(?<=/)\d+', root)[0]), qType, re.findall(r'/.*', root)[0] + '/',
os.path.splitext(name)[0],6])
with open(subject + '.csv', 'w') as f:
f_csv = csv.writer(f)
f_csv.writerow(headers)
f_csv.writerows(rows)
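# Illustrative walk (path is made up): a file 'ChoiceSource/概率统计_GL/Easy/3/q1.png'
# becomes the row [id, 3, 1, '/概率统计_GL/Easy/3/', 'q1', 6]; KnowledgeId is the
# first run of digits after a '/', and Type is 1 for Easy, 2 for Averge, else 3.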
if __name__ == '__main__':
doSubject('高等数学_GS')
doSubject('复变函数_FB')
doSubject('概率统计_GL')
rows = []
idCount = 0
for root, dirs, files in os.walk('ChoiceSource/线性代数_XD'):
for name in files:
if (name != '.DS_Store'):
idCount += 1
if ('Easy' in root):
rows.append(
[idCount, int(re.findall(r'(?<=chapter)\d', root)[0]), 1, re.findall(r'/.*', root)[0] + '/',
os.path.splitext(name)[0], 6])
elif ('Hard' in root):
rows.append(
[idCount, int(re.findall(r'(?<=chapter)\d', root)[0]), 3, re.findall(r'/.*', root)[0] + '/',
os.path.splitext(name)[0], 6])
else:
rows.append([idCount, 8, 2, re.findall(r'/.*', root)[0] + '/', os.path.splitext(name)[0], 5])
with open('线性代数_XD.csv', 'w') as f:
f_csv = csv.writer(f)
f_csv.writerow(headers)
f_csv.writerows(rows)
| gpl-3.0 | -2,962,789,084,758,935,600 | 35.645833 | 119 | 0.466742 | false |
obulpathi/poppy | poppy/manager/base/driver.py | 1 | 2204 | # Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class ManagerDriverBase(object):
"""Base class for driver manager."""
def __init__(self, conf, storage, providers, dns, distributed_task,
notification):
self._conf = conf
self._storage = storage
self._providers = providers
self._dns = dns
self._distributed_task = distributed_task
self._notification = notification
@property
def conf(self):
"""conf
:returns conf
"""
return self._conf
@property
def storage(self):
"""storage
:returns storage
"""
return self._storage
@property
def providers(self):
"""providers
:returns providers
"""
return self._providers
@property
def dns(self):
return self._dns
@property
def distributed_task(self):
return self._distributed_task
@property
def notification(self):
return self._notification
@abc.abstractproperty
def services_controller(self):
"""Returns the driver's services controller
:raises NotImplementedError
"""
raise NotImplementedError
@abc.abstractproperty
def flavors_controller(self):
"""Returns the driver's flavors controller
:raises NotImplementedError
"""
raise NotImplementedError
@abc.abstractproperty
def health_controller(self):
"""Returns the driver's health controller
:raises NotImplementedError
"""
raise NotImplementedError
| apache-2.0 | 6,152,679,558,350,396,000 | 23.21978 | 71 | 0.640653 | false |
cheery/essence | essence3/renderer/patch9.py | 1 | 2518 | import pygame
from texture import Texture
def borders(surface):
width, height = surface.get_size()
y0 = 0
y1 = 0
x0 = 0
x1 = 0
i = 0
while i < height:
r,g,b,a = surface.get_at((0,i))
if a > 0:
y0 = i
break
i += 1
while i < height:
r,g,b,a = surface.get_at((0,i))
if a == 0:
y1 = i
break
i += 1
i = 0
while i < width:
r,g,b,a = surface.get_at((i,0))
if a > 0:
x0 = i
break
i += 1
while i < width:
r,g,b,a = surface.get_at((i,0))
if a == 0:
x1 = i
break
i += 1
return [1, x0, x1, width], [1, y0, y1, height]
class Patch9(object):
def __init__(self, texture, (xc, yc)):
self.texture = texture
self.coords = xc, yc
self.width = texture.width - 1
self.height = texture.height - 1
self.padding = xc[1]-xc[0], yc[1]-yc[0], xc[3]-xc[2], yc[3]-yc[2]
@classmethod
def load(cls, atlas, path):
surface = pygame.image.load(path)
width, height = surface.get_size()
data = pygame.image.tostring(surface, "RGBA", 0)
texture = atlas.add_rgba_string(width, height, data)
coords = borders(surface)
return cls(texture, coords)
def __call__(self, emit, (left, top, width, height), color=None):
texture = self.texture
color = color or texture.atlas.white
# c_x = float(color.x+2) / color.atlas.width
# c_y = float(color.y+2) / color.atlas.height
s0 = float(texture.x) / texture.atlas.width
t0 = float(texture.y) / texture.atlas.height
s1 = float(texture.width) / texture.atlas.width
t1 = float(texture.height) / texture.atlas.height
sn = s1 / texture.width
tn = t1 / texture.height
x_cs, y_cs = self.coords
xs = (left, left+self.padding[0], left+width-self.padding[2], left+width)
ys = (top, top +self.padding[1], top+height-self.padding[3], top+height)
for i in range(9):
x = i % 3
y = i / 3
emit(xs[x+0], ys[y+0], x_cs[x+0]*sn + s0, y_cs[y+0]*tn + t0, color.s, color.t)
emit(xs[x+1], ys[y+0], x_cs[x+1]*sn + s0, y_cs[y+0]*tn + t0, color.s, color.t)
emit(xs[x+1], ys[y+1], x_cs[x+1]*sn + s0, y_cs[y+1]*tn + t0, color.s, color.t)
emit(xs[x+0], ys[y+1], x_cs[x+0]*sn + s0, y_cs[y+1]*tn + t0, color.s, color.t)
| gpl-3.0 | -2,208,417,184,195,690,200 | 32.573333 | 90 | 0.503971 | false |
amitay/samba | source4/scripting/python/samba/samdb.py | 1 | 31787 | # Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <[email protected]> 2007-2010
# Copyright (C) Matthias Dieter Wallnoefer 2009
#
# Based on the original in EJS:
# Copyright (C) Andrew Tridgell <[email protected]> 2005
# Copyright (C) Giampaolo Lauria <[email protected]> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Convenience functions for using the SAM."""
import samba
import ldb
import time
import base64
import os
from samba import dsdb
from samba.ndr import ndr_unpack, ndr_pack
from samba.dcerpc import drsblobs, misc
from samba.common import normalise_int32
__docformat__ = "restructuredText"
class SamDB(samba.Ldb):
"""The SAM database."""
hash_oid_name = {}
def __init__(self, url=None, lp=None, modules_dir=None, session_info=None,
credentials=None, flags=0, options=None, global_schema=True,
auto_connect=True, am_rodc=None):
self.lp = lp
if not auto_connect:
url = None
elif url is None and lp is not None:
url = lp.samdb_url()
self.url = url
super(SamDB, self).__init__(url=url, lp=lp, modules_dir=modules_dir,
session_info=session_info, credentials=credentials, flags=flags,
options=options)
if global_schema:
dsdb._dsdb_set_global_schema(self)
if am_rodc is not None:
dsdb._dsdb_set_am_rodc(self, am_rodc)
def connect(self, url=None, flags=0, options=None):
'''connect to the database'''
if self.lp is not None and not os.path.exists(url):
url = self.lp.private_path(url)
self.url = url
super(SamDB, self).connect(url=url, flags=flags,
options=options)
def am_rodc(self):
'''return True if we are an RODC'''
return dsdb._am_rodc(self)
def am_pdc(self):
'''return True if we are an PDC emulator'''
return dsdb._am_pdc(self)
def domain_dn(self):
'''return the domain DN'''
return str(self.get_default_basedn())
def disable_account(self, search_filter):
"""Disables an account
:param search_filter: LDAP filter to find the user (eg
samccountname=name)
"""
flags = samba.dsdb.UF_ACCOUNTDISABLE
self.toggle_userAccountFlags(search_filter, flags, on=True)
def enable_account(self, search_filter):
"""Enables an account
:param search_filter: LDAP filter to find the user (eg
samccountname=name)
"""
flags = samba.dsdb.UF_ACCOUNTDISABLE | samba.dsdb.UF_PASSWD_NOTREQD
self.toggle_userAccountFlags(search_filter, flags, on=False)
def toggle_userAccountFlags(self, search_filter, flags, flags_str=None,
on=True, strict=False):
"""Toggle_userAccountFlags
:param search_filter: LDAP filter to find the user (eg
samccountname=name)
:param flags: samba.dsdb.UF_* flags
:param on: on=True (default) => set, on=False => unset
:param strict: strict=False (default) ignore if no action is needed
strict=True raises an Exception if...
"""
res = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=search_filter, attrs=["userAccountControl"])
if len(res) == 0:
raise Exception("Unable to find account where '%s'" % search_filter)
assert(len(res) == 1)
account_dn = res[0].dn
old_uac = int(res[0]["userAccountControl"][0])
if on:
if strict and (old_uac & flags):
error = "Account flag(s) '%s' already set" % flags_str
raise Exception(error)
new_uac = old_uac | flags
else:
if strict and not (old_uac & flags):
error = "Account flag(s) '%s' already unset" % flags_str
raise Exception(error)
new_uac = old_uac & ~flags
if old_uac == new_uac:
return
mod = """
dn: %s
changetype: modify
delete: userAccountControl
userAccountControl: %u
add: userAccountControl
userAccountControl: %u
""" % (account_dn, old_uac, new_uac)
self.modify_ldif(mod)
def force_password_change_at_next_login(self, search_filter):
"""Forces a password change at next login
:param search_filter: LDAP filter to find the user (eg
samccountname=name)
"""
res = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=search_filter, attrs=[])
if len(res) == 0:
raise Exception('Unable to find user "%s"' % search_filter)
assert(len(res) == 1)
user_dn = res[0].dn
mod = """
dn: %s
changetype: modify
replace: pwdLastSet
pwdLastSet: 0
""" % (user_dn)
self.modify_ldif(mod)
def newgroup(self, groupname, groupou=None, grouptype=None,
description=None, mailaddress=None, notes=None, sd=None):
"""Adds a new group with additional parameters
:param groupname: Name of the new group
:param grouptype: Type of the new group
:param description: Description of the new group
:param mailaddress: Email address of the new group
:param notes: Notes of the new group
:param sd: security descriptor of the object
"""
group_dn = "CN=%s,%s,%s" % (groupname, (groupou or "CN=Users"), self.domain_dn())
# The new user record. Note the reliance on the SAMLDB module which
# fills in the default informations
ldbmessage = {"dn": group_dn,
"sAMAccountName": groupname,
"objectClass": "group"}
if grouptype is not None:
ldbmessage["groupType"] = normalise_int32(grouptype)
if description is not None:
ldbmessage["description"] = description
if mailaddress is not None:
ldbmessage["mail"] = mailaddress
if notes is not None:
ldbmessage["info"] = notes
if sd is not None:
ldbmessage["nTSecurityDescriptor"] = ndr_pack(sd)
self.add(ldbmessage)
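    # Illustrative call (group name made up; assumes the usual dsdb group-type
    # constant is wanted):
    #   samdb.newgroup("engineers", description="Engineering staff",
    #                  grouptype=str(dsdb.GTYPE_SECURITY_GLOBAL_GROUP))
    # which lands at CN=engineers,CN=Users,<domain DN>.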
def deletegroup(self, groupname):
"""Deletes a group
:param groupname: Name of the target group
"""
groupfilter = "(&(sAMAccountName=%s)(objectCategory=%s,%s))" % (ldb.binary_encode(groupname), "CN=Group,CN=Schema,CN=Configuration", self.domain_dn())
self.transaction_start()
try:
targetgroup = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=groupfilter, attrs=[])
if len(targetgroup) == 0:
raise Exception('Unable to find group "%s"' % groupname)
assert(len(targetgroup) == 1)
self.delete(targetgroup[0].dn)
except:
self.transaction_cancel()
raise
else:
self.transaction_commit()
def add_remove_group_members(self, groupname, listofmembers,
add_members_operation=True):
"""Adds or removes group members
:param groupname: Name of the target group
:param listofmembers: Comma-separated list of group members
:param add_members_operation: Defines if its an add or remove
operation
"""
groupfilter = "(&(sAMAccountName=%s)(objectCategory=%s,%s))" % (
ldb.binary_encode(groupname), "CN=Group,CN=Schema,CN=Configuration", self.domain_dn())
groupmembers = listofmembers.split(',')
self.transaction_start()
try:
targetgroup = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=groupfilter, attrs=['member'])
if len(targetgroup) == 0:
raise Exception('Unable to find group "%s"' % groupname)
assert(len(targetgroup) == 1)
modified = False
addtargettogroup = """
dn: %s
changetype: modify
""" % (str(targetgroup[0].dn))
for member in groupmembers:
targetmember = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression="(|(sAMAccountName=%s)(CN=%s))" % (
ldb.binary_encode(member), ldb.binary_encode(member)), attrs=[])
if len(targetmember) != 1:
continue
if add_members_operation is True and (targetgroup[0].get('member') is None or str(targetmember[0].dn) not in targetgroup[0]['member']):
modified = True
addtargettogroup += """add: member
member: %s
""" % (str(targetmember[0].dn))
elif add_members_operation is False and (targetgroup[0].get('member') is not None and str(targetmember[0].dn) in targetgroup[0]['member']):
modified = True
addtargettogroup += """delete: member
member: %s
""" % (str(targetmember[0].dn))
if modified is True:
self.modify_ldif(addtargettogroup)
except:
self.transaction_cancel()
raise
else:
self.transaction_commit()
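# Illustrative sketch (group and member names are placeholders): members are
# passed as the comma-separated string expected by the method above.
#
#   samdb.add_remove_group_members("engineering", "alice,bob",
#                                  add_members_operation=True)
#   samdb.add_remove_group_members("engineering", "bob",
#                                  add_members_operation=False)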
def newuser(self, username, password,
force_password_change_at_next_login_req=False,
useusernameascn=False, userou=None, surname=None, givenname=None,
initials=None, profilepath=None, scriptpath=None, homedrive=None,
homedirectory=None, jobtitle=None, department=None, company=None,
description=None, mailaddress=None, internetaddress=None,
telephonenumber=None, physicaldeliveryoffice=None, sd=None,
setpassword=True):
"""Adds a new user with additional parameters
:param username: Name of the new user
:param password: Password for the new user
:param force_password_change_at_next_login_req: Force password change
:param useusernameascn: Use username as cn rather than firstname +
initials + lastname
:param userou: Object container (without domainDN postfix) for new user
:param surname: Surname of the new user
:param givenname: First name of the new user
:param initials: Initials of the new user
:param profilepath: Profile path of the new user
:param scriptpath: Logon script path of the new user
:param homedrive: Home drive of the new user
:param homedirectory: Home directory of the new user
:param jobtitle: Job title of the new user
:param department: Department of the new user
:param company: Company of the new user
:param description: Description of the new user
:param mailaddress: Email address of the new user
:param internetaddress: Home page of the new user
:param telephonenumber: Phone number of the new user
:param physicaldeliveryoffice: Office location of the new user
:param sd: security descriptor of the object
:param setpassword: optionally disable password reset
"""
displayname = ""
if givenname is not None:
displayname += givenname
if initials is not None:
displayname += ' %s.' % initials
if surname is not None:
displayname += ' %s' % surname
cn = username
if useusernameascn is None and displayname != "":
cn = displayname
user_dn = "CN=%s,%s,%s" % (cn, (userou or "CN=Users"), self.domain_dn())
dnsdomain = ldb.Dn(self, self.domain_dn()).canonical_str().replace("/", "")
user_principal_name = "%s@%s" % (username, dnsdomain)
# The new user record. Note the reliance on the SAMLDB module which
# fills in the default information
ldbmessage = {"dn": user_dn,
"sAMAccountName": username,
"userPrincipalName": user_principal_name,
"objectClass": "user"}
if surname is not None:
ldbmessage["sn"] = surname
if givenname is not None:
ldbmessage["givenName"] = givenname
if displayname != "":
ldbmessage["displayName"] = displayname
ldbmessage["name"] = displayname
if initials is not None:
ldbmessage["initials"] = '%s.' % initials
if profilepath is not None:
ldbmessage["profilePath"] = profilepath
if scriptpath is not None:
ldbmessage["scriptPath"] = scriptpath
if homedrive is not None:
ldbmessage["homeDrive"] = homedrive
if homedirectory is not None:
ldbmessage["homeDirectory"] = homedirectory
if jobtitle is not None:
ldbmessage["title"] = jobtitle
if department is not None:
ldbmessage["department"] = department
if company is not None:
ldbmessage["company"] = company
if description is not None:
ldbmessage["description"] = description
if mailaddress is not None:
ldbmessage["mail"] = mailaddress
if internetaddress is not None:
ldbmessage["wWWHomePage"] = internetaddress
if telephonenumber is not None:
ldbmessage["telephoneNumber"] = telephonenumber
if physicaldeliveryoffice is not None:
ldbmessage["physicalDeliveryOfficeName"] = physicaldeliveryoffice
if sd is not None:
ldbmessage["nTSecurityDescriptor"] = ndr_pack(sd)
self.transaction_start()
try:
self.add(ldbmessage)
# Sets the password for it
if setpassword:
self.setpassword("(samAccountName=%s)" % ldb.binary_encode(username), password,
force_password_change_at_next_login_req)
except:
self.transaction_cancel()
raise
else:
self.transaction_commit()
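# Illustrative sketch (all attribute values are placeholders): creating a user
# who must change the password at first logon.
#
#   samdb.newuser("alice", "Passw0rd!",
#                 force_password_change_at_next_login_req=True,
#                 givenname="Alice", surname="Liddell",
#                 mailaddress="alice@example.com")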
def deleteuser(self, username):
"""Deletes a user
:param username: Name of the target user
"""
filter = "(&(sAMAccountName=%s)(objectCategory=%s,%s))" % (ldb.binary_encode(username), "CN=Person,CN=Schema,CN=Configuration", self.domain_dn())
self.transaction_start()
try:
target = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=filter, attrs=[])
if len(target) == 0:
raise Exception('Unable to find user "%s"' % username)
assert(len(target) == 1)
self.delete(target[0].dn)
except:
self.transaction_cancel()
raise
else:
self.transaction_commit()
def setpassword(self, search_filter, password,
force_change_at_next_login=False, username=None):
"""Sets the password for a user
:param search_filter: LDAP filter to find the user (eg
samaccountname=name)
:param password: Password for the user
:param force_change_at_next_login: Force password change
"""
self.transaction_start()
try:
res = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=search_filter, attrs=[])
if len(res) == 0:
raise Exception('Unable to find user "%s"' % (username or search_filter))
if len(res) > 1:
raise Exception('Matched %u users with filter "%s", expected only one' % (len(res), search_filter))
user_dn = res[0].dn
setpw = """
dn: %s
changetype: modify
replace: unicodePwd
unicodePwd:: %s
""" % (user_dn, base64.b64encode(("\"" + password + "\"").encode('utf-16-le')))
self.modify_ldif(setpw)
if force_change_at_next_login:
self.force_password_change_at_next_login(
"(distinguishedName=" + str(user_dn) + ")")
# modify the userAccountControl to remove the disabled bit
self.enable_account(search_filter)
except:
self.transaction_cancel()
raise
else:
self.transaction_commit()
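# Illustrative sketch (filter and password are placeholders): resetting a
# password by sAMAccountName; note that the method also re-enables the
# account via enable_account() above.
#
#   samdb.setpassword("(sAMAccountName=alice)", "N3w-Passw0rd",
#                     force_change_at_next_login=True, username="alice")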
def setexpiry(self, search_filter, expiry_seconds, no_expiry_req=False):
"""Sets the account expiry for a user
:param search_filter: LDAP filter to find the user (eg
samaccountname=name)
:param expiry_seconds: expiry time from now in seconds
:param no_expiry_req: if set, then don't expire password
"""
self.transaction_start()
try:
res = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=search_filter,
attrs=["userAccountControl", "accountExpires"])
if len(res) == 0:
raise Exception('Unable to find user "%s"' % search_filter)
assert(len(res) == 1)
user_dn = res[0].dn
userAccountControl = int(res[0]["userAccountControl"][0])
accountExpires = int(res[0]["accountExpires"][0])
if no_expiry_req:
userAccountControl = userAccountControl | 0x10000    # set UF_DONT_EXPIRE_PASSWD
accountExpires = 0
else:
userAccountControl = userAccountControl & ~0x10000   # clear UF_DONT_EXPIRE_PASSWD
accountExpires = samba.unix2nttime(expiry_seconds + int(time.time()))
setexp = """
dn: %s
changetype: modify
replace: userAccountControl
userAccountControl: %u
replace: accountExpires
accountExpires: %u
""" % (user_dn, userAccountControl, accountExpires)
self.modify_ldif(setexp)
except:
self.transaction_cancel()
raise
else:
self.transaction_commit()
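# Illustrative sketch (filter and interval are placeholders): expiring an
# account 30 days from now, or flagging it to never expire.
#
#   samdb.setexpiry("(sAMAccountName=alice)", 30 * 24 * 3600)
#   samdb.setexpiry("(sAMAccountName=svc-backup)", 0, no_expiry_req=True)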
def set_domain_sid(self, sid):
"""Change the domain SID used by this LDB.
:param sid: The new domain sid to use.
"""
dsdb._samdb_set_domain_sid(self, sid)
def get_domain_sid(self):
"""Read the domain SID used by this LDB. """
return dsdb._samdb_get_domain_sid(self)
domain_sid = property(get_domain_sid, set_domain_sid,
"SID for the domain")
def set_invocation_id(self, invocation_id):
"""Set the invocation id for this SamDB handle.
:param invocation_id: GUID of the invocation id.
"""
dsdb._dsdb_set_ntds_invocation_id(self, invocation_id)
def get_invocation_id(self):
"""Get the invocation_id id"""
return dsdb._samdb_ntds_invocation_id(self)
invocation_id = property(get_invocation_id, set_invocation_id,
"Invocation ID GUID")
def get_oid_from_attid(self, attid):
return dsdb._dsdb_get_oid_from_attid(self, attid)
def get_attid_from_lDAPDisplayName(self, ldap_display_name,
is_schema_nc=False):
'''return the attribute ID for an LDAP attribute as an integer, as found in DRSUAPI'''
return dsdb._dsdb_get_attid_from_lDAPDisplayName(self,
ldap_display_name, is_schema_nc)
def get_syntax_oid_from_lDAPDisplayName(self, ldap_display_name):
'''return the syntax OID for an LDAP attribute as a string'''
return dsdb._dsdb_get_syntax_oid_from_lDAPDisplayName(self, ldap_display_name)
def get_systemFlags_from_lDAPDisplayName(self, ldap_display_name):
'''return the systemFlags for an LDAP attribute as an integer'''
return dsdb._dsdb_get_systemFlags_from_lDAPDisplayName(self, ldap_display_name)
def get_linkId_from_lDAPDisplayName(self, ldap_display_name):
'''return the linkID for an LDAP attribute as an integer'''
return dsdb._dsdb_get_linkId_from_lDAPDisplayName(self, ldap_display_name)
def get_lDAPDisplayName_by_attid(self, attid):
'''return the lDAPDisplayName from an integer DRS attribute ID'''
return dsdb._dsdb_get_lDAPDisplayName_by_attid(self, attid)
def get_backlink_from_lDAPDisplayName(self, ldap_display_name):
'''return the attribute name of the corresponding backlink from the name
of a forward link attribute. If there is no backlink return None'''
return dsdb._dsdb_get_backlink_from_lDAPDisplayName(self, ldap_display_name)
def set_ntds_settings_dn(self, ntds_settings_dn):
"""Set the NTDS Settings DN, as would be returned on the dsServiceName
rootDSE attribute.
This allows the DN to be set before the database fully exists
:param ntds_settings_dn: The new DN to use
"""
dsdb._samdb_set_ntds_settings_dn(self, ntds_settings_dn)
def get_ntds_GUID(self):
"""Get the NTDS objectGUID"""
return dsdb._samdb_ntds_objectGUID(self)
def server_site_name(self):
"""Get the server site name"""
return dsdb._samdb_server_site_name(self)
def host_dns_name(self):
"""return the DNS name of this host"""
res = self.search(base='', scope=ldb.SCOPE_BASE, attrs=['dNSHostName'])
return res[0]['dNSHostName'][0]
def domain_dns_name(self):
"""return the DNS name of the domain root"""
domain_dn = self.get_default_basedn()
return domain_dn.canonical_str().split('/')[0]
def forest_dns_name(self):
"""return the DNS name of the forest root"""
forest_dn = self.get_root_basedn()
return forest_dn.canonical_str().split('/')[0]
def load_partition_usn(self, base_dn):
return dsdb._dsdb_load_partition_usn(self, base_dn)
def set_schema(self, schema):
self.set_schema_from_ldb(schema.ldb)
def set_schema_from_ldb(self, ldb_conn):
dsdb._dsdb_set_schema_from_ldb(self, ldb_conn)
def dsdb_DsReplicaAttribute(self, ldb, ldap_display_name, ldif_elements):
'''convert a list of attribute values to a DRSUAPI DsReplicaAttribute'''
return dsdb._dsdb_DsReplicaAttribute(ldb, ldap_display_name, ldif_elements)
def dsdb_normalise_attributes(self, ldb, ldap_display_name, ldif_elements):
'''normalise a list of attribute values'''
return dsdb._dsdb_normalise_attributes(ldb, ldap_display_name, ldif_elements)
def get_attribute_from_attid(self, attid):
""" Get from an attid the associated attribute
:param attid: The attribute id for searched attribute
:return: The name of the attribute associated with this id
"""
if len(self.hash_oid_name.keys()) == 0:
self._populate_oid_attid()
return self.hash_oid_name.get(self.get_oid_from_attid(attid))
def _populate_oid_attid(self):
"""Populate the hash hash_oid_name.
This hash contains the oid of the attribute as a key and
its display name as a value
"""
self.hash_oid_name = {}
res = self.search(expression="objectClass=attributeSchema",
controls=["search_options:1:2"],
attrs=["attributeID",
"lDAPDisplayName"])
if len(res) > 0:
for e in res:
strDisplay = str(e.get("lDAPDisplayName"))
self.hash_oid_name[str(e.get("attributeID"))] = strDisplay
def get_attribute_replmetadata_version(self, dn, att):
"""Get the version field trom the replPropertyMetaData for
the given field
:param dn: The DN of the object on which we want to get the version
:param att: The name of the attribute
:return: The value of the version field in the replPropertyMetaData
for the given attribute. None if the attribute is not replicated
"""
res = self.search(expression="distinguishedName=%s" % dn,
scope=ldb.SCOPE_SUBTREE,
controls=["search_options:1:2"],
attrs=["replPropertyMetaData"])
if len(res) == 0:
return None
repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
str(res[0]["replPropertyMetaData"]))
ctr = repl.ctr
if len(self.hash_oid_name.keys()) == 0:
self._populate_oid_attid()
for o in ctr.array:
# Look up the attribute name for this metadata entry
att_oid = self.get_oid_from_attid(o.attid)
if self.hash_oid_name.has_key(att_oid) and\
att.lower() == self.hash_oid_name[att_oid].lower():
return o.version
return None
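# Illustrative sketch (DN and attribute name are placeholders): reading the
# replication metadata version of one attribute on an object.
#
#   dn = "CN=alice,CN=Users,%s" % samdb.domain_dn()
#   version = samdb.get_attribute_replmetadata_version(dn, "description")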
def set_attribute_replmetadata_version(self, dn, att, value,
addifnotexist=False):
res = self.search(expression="distinguishedName=%s" % dn,
scope=ldb.SCOPE_SUBTREE,
controls=["search_options:1:2"],
attrs=["replPropertyMetaData"])
if len(res) == 0:
return None
repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
str(res[0]["replPropertyMetaData"]))
ctr = repl.ctr
now = samba.unix2nttime(int(time.time()))
found = False
if len(self.hash_oid_name.keys()) == 0:
self._populate_oid_attid()
for o in ctr.array:
# Look up the attribute name for this metadata entry
att_oid = self.get_oid_from_attid(o.attid)
if self.hash_oid_name.has_key(att_oid) and\
att.lower() == self.hash_oid_name[att_oid].lower():
found = True
seq = self.sequence_number(ldb.SEQ_NEXT)
o.version = value
o.originating_change_time = now
o.originating_invocation_id = misc.GUID(self.get_invocation_id())
o.originating_usn = seq
o.local_usn = seq
if not found and addifnotexist and len(ctr.array) > 0:
o2 = drsblobs.replPropertyMetaData1()
o2.attid = 589914
att_oid = self.get_oid_from_attid(o2.attid)
seq = self.sequence_number(ldb.SEQ_NEXT)
o2.version = value
o2.originating_change_time = now
o2.originating_invocation_id = misc.GUID(self.get_invocation_id())
o2.originating_usn = seq
o2.local_usn = seq
found = True
tab = ctr.array
tab.append(o2)
ctr.count = ctr.count + 1
ctr.array = tab
if found:
replBlob = ndr_pack(repl)
msg = ldb.Message()
msg.dn = res[0].dn
msg["replPropertyMetaData"] = ldb.MessageElement(replBlob,
ldb.FLAG_MOD_REPLACE,
"replPropertyMetaData")
self.modify(msg, ["local_oid:1.3.6.1.4.1.7165.4.3.14:0"])
def write_prefixes_from_schema(self):
dsdb._dsdb_write_prefixes_from_schema_to_ldb(self)
def get_partitions_dn(self):
return dsdb._dsdb_get_partitions_dn(self)
def set_minPwdAge(self, value):
m = ldb.Message()
m.dn = ldb.Dn(self, self.domain_dn())
m["minPwdAge"] = ldb.MessageElement(value, ldb.FLAG_MOD_REPLACE, "minPwdAge")
self.modify(m)
def get_minPwdAge(self):
res = self.search(self.domain_dn(), scope=ldb.SCOPE_BASE, attrs=["minPwdAge"])
if len(res) == 0:
return None
elif not "minPwdAge" in res[0]:
return None
else:
return res[0]["minPwdAge"][0]
def set_minPwdLength(self, value):
m = ldb.Message()
m.dn = ldb.Dn(self, self.domain_dn())
m["minPwdLength"] = ldb.MessageElement(value, ldb.FLAG_MOD_REPLACE, "minPwdLength")
self.modify(m)
def get_minPwdLength(self):
res = self.search(self.domain_dn(), scope=ldb.SCOPE_BASE, attrs=["minPwdLength"])
if len(res) == 0:
return None
elif not "minPwdLength" in res[0]:
return None
else:
return res[0]["minPwdLength"][0]
def set_pwdProperties(self, value):
m = ldb.Message()
m.dn = ldb.Dn(self, self.domain_dn())
m["pwdProperties"] = ldb.MessageElement(value, ldb.FLAG_MOD_REPLACE, "pwdProperties")
self.modify(m)
def get_pwdProperties(self):
res = self.search(self.domain_dn(), scope=ldb.SCOPE_BASE, attrs=["pwdProperties"])
if len(res) == 0:
return None
elif not "pwdProperties" in res[0]:
return None
else:
return res[0]["pwdProperties"][0]
def set_dsheuristics(self, dsheuristics):
m = ldb.Message()
m.dn = ldb.Dn(self, "CN=Directory Service,CN=Windows NT,CN=Services,%s"
% self.get_config_basedn().get_linearized())
if dsheuristics is not None:
m["dSHeuristics"] = ldb.MessageElement(dsheuristics,
ldb.FLAG_MOD_REPLACE, "dSHeuristics")
else:
m["dSHeuristics"] = ldb.MessageElement([], ldb.FLAG_MOD_DELETE,
"dSHeuristics")
self.modify(m)
def get_dsheuristics(self):
res = self.search("CN=Directory Service,CN=Windows NT,CN=Services,%s"
% self.get_config_basedn().get_linearized(),
scope=ldb.SCOPE_BASE, attrs=["dSHeuristics"])
if len(res) == 0:
dsheuristics = None
elif "dSHeuristics" in res[0]:
dsheuristics = res[0]["dSHeuristics"][0]
else:
dsheuristics = None
return dsheuristics
def create_ou(self, ou_dn, description=None, name=None, sd=None):
"""Creates an organizationalUnit object
:param ou_dn: dn of the new object
:param description: description attribute
:param name: name attribute
:param sd: security descriptor of the object, can be
an SDDL string or security.descriptor type
"""
m = {"dn": ou_dn,
"objectClass": "organizationalUnit"}
if description:
m["description"] = description
if name:
m["name"] = name
if sd:
m["nTSecurityDescriptor"] = ndr_pack(sd)
self.add(m)
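# Illustrative sketch (OU name and description are placeholders):
#
#   samdb.create_ou("OU=Laptops,%s" % samdb.domain_dn(),
#                   description="Domain-joined laptops")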
def sequence_number(self, seq_type):
"""Returns the value of the sequence number according to the requested type
:param seq_type: type of sequence number
"""
self.transaction_start()
try:
seq = super(SamDB, self).sequence_number(seq_type)
except:
self.transaction_cancel()
raise
else:
self.transaction_commit()
return seq
def get_dsServiceName(self):
'''get the NTDS DN from the rootDSE'''
res = self.search(base="", scope=ldb.SCOPE_BASE, attrs=["dsServiceName"])
return res[0]["dsServiceName"][0]
def get_serverName(self):
'''get the server DN from the rootDSE'''
res = self.search(base="", scope=ldb.SCOPE_BASE, attrs=["serverName"])
return res[0]["serverName"][0]
| gpl-3.0 | -3,569,424,283,100,842,500 | 36.091015 | 158 | 0.588542 | false |
eayunstack/python-neutronclient | neutronclient/shell.py | 1 | 41584 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Command-line interface to the Neutron APIs
"""
from __future__ import print_function
import argparse
import inspect
import itertools
import logging
import os
import sys
from keystoneauth1 import session
import os_client_config
from oslo_utils import encodeutils
from cliff import app
from cliff import command
from cliff import commandmanager
from neutronclient._i18n import _
from neutronclient.common import clientmanager
from neutronclient.common import exceptions as exc
from neutronclient.common import extension as client_extension
from neutronclient.common import utils
from neutronclient.neutron.v2_0 import address_scope
from neutronclient.neutron.v2_0 import agent
from neutronclient.neutron.v2_0 import agentscheduler
from neutronclient.neutron.v2_0 import auto_allocated_topology
from neutronclient.neutron.v2_0 import availability_zone
from neutronclient.neutron.v2_0.bgp import dragentscheduler as bgp_drsched
from neutronclient.neutron.v2_0.bgp import peer as bgp_peer
from neutronclient.neutron.v2_0.bgp import speaker as bgp_speaker
from neutronclient.neutron.v2_0 import extension
from neutronclient.neutron.v2_0.flavor import flavor
from neutronclient.neutron.v2_0.flavor import flavor_profile
from neutronclient.neutron.v2_0 import floatingip
from neutronclient.neutron.v2_0.fw import firewall
from neutronclient.neutron.v2_0.fw import firewallpolicy
from neutronclient.neutron.v2_0.fw import firewallrule
from neutronclient.neutron.v2_0.lb import healthmonitor as lb_healthmonitor
from neutronclient.neutron.v2_0.lb import member as lb_member
from neutronclient.neutron.v2_0.lb import pool as lb_pool
from neutronclient.neutron.v2_0.lb.v2 import healthmonitor as lbaas_healthmon
from neutronclient.neutron.v2_0.lb.v2 import l7policy as lbaas_l7policy
from neutronclient.neutron.v2_0.lb.v2 import l7rule as lbaas_l7rule
from neutronclient.neutron.v2_0.lb.v2 import listener as lbaas_listener
from neutronclient.neutron.v2_0.lb.v2 import loadbalancer as lbaas_loadbalancer
from neutronclient.neutron.v2_0.lb.v2 import member as lbaas_member
from neutronclient.neutron.v2_0.lb.v2 import pool as lbaas_pool
from neutronclient.neutron.v2_0.lb import vip as lb_vip
from neutronclient.neutron.v2_0 import metering
from neutronclient.neutron.v2_0 import network
from neutronclient.neutron.v2_0 import network_ip_availability
from neutronclient.neutron.v2_0 import port
from neutronclient.neutron.v2_0 import purge
from neutronclient.neutron.v2_0.qos import bandwidth_limit_rule
from neutronclient.neutron.v2_0.qos import dscp_marking_rule
from neutronclient.neutron.v2_0.qos import policy as qos_policy
from neutronclient.neutron.v2_0.qos import rule as qos_rule
from neutronclient.neutron.v2_0 import quota
from neutronclient.neutron.v2_0 import rbac
from neutronclient.neutron.v2_0 import router
from neutronclient.neutron.v2_0 import securitygroup
from neutronclient.neutron.v2_0 import servicetype
from neutronclient.neutron.v2_0 import subnet
from neutronclient.neutron.v2_0 import subnetpool
from neutronclient.neutron.v2_0 import tag
from neutronclient.neutron.v2_0.vpn import endpoint_group
from neutronclient.neutron.v2_0.vpn import ikepolicy
from neutronclient.neutron.v2_0.vpn import ipsec_site_connection
from neutronclient.neutron.v2_0.vpn import ipsecpolicy
from neutronclient.neutron.v2_0.vpn import vpnservice
from neutronclient.version import __version__
VERSION = '2.0'
NEUTRON_API_VERSION = '2.0'
def run_command(cmd, cmd_parser, sub_argv):
_argv = sub_argv
index = -1
values_specs = []
if '--' in sub_argv:
index = sub_argv.index('--')
_argv = sub_argv[:index]
values_specs = sub_argv[index:]
known_args, _values_specs = cmd_parser.parse_known_args(_argv)
if(isinstance(cmd, subnet.CreateSubnet) and not known_args.cidr):
cidr = get_first_valid_cidr(_values_specs)
if cidr:
known_args.cidr = cidr
_values_specs.remove(cidr)
cmd.values_specs = (index == -1 and _values_specs or values_specs)
return cmd.run(known_args)
def get_first_valid_cidr(value_specs):
# Bug 1442771, argparse does not allow optional positional parameter
# to be separated from previous positional parameter.
# When cidr was separated from network, the value will not be able
# to be parsed into known_args, but saved to _values_specs instead.
for value in value_specs:
if utils.is_valid_cidr(value):
return value
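# Illustrative sketch (the argument lists are made-up examples): the helper
# returns the first CIDR-looking token from the unparsed leftovers, so
# "subnet-create net1 10.0.0.0/24" still resolves its cidr argument.
#
#   get_first_valid_cidr(['net1', '10.0.0.0/24'])   # -> '10.0.0.0/24'
#   get_first_valid_cidr(['--name', 'sub1'])        # -> None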
def env(*_vars, **kwargs):
"""Search for the first defined of possibly many env vars.
Returns the first environment variable defined in vars, or
returns the default defined in kwargs.
"""
for v in _vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '')
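# Illustrative sketch (results depend on the caller's environment): the same
# lookup order used by the option defaults below.
#
#   env('OS_AUTH_URL')                         # '' when the variable is unset
#   env('OS_ENDPOINT_TYPE', default='public')  # falls back to 'public'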
def check_non_negative_int(value):
try:
value = int(value)
except ValueError:
raise argparse.ArgumentTypeError(_("invalid int value: %r") % value)
if value < 0:
raise argparse.ArgumentTypeError(_("input value %d is negative") %
value)
return value
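# Illustrative sketch: the validator is intended as an argparse "type="
# callback, as the --retries option below uses it.
#
#   check_non_negative_int('3')    # -> 3
#   check_non_negative_int('-1')   # raises argparse.ArgumentTypeError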
class BashCompletionCommand(command.Command):
"""Prints all of the commands and options for bash-completion."""
def take_action(self, parsed_args):
pass
COMMAND_V2 = {
'bash-completion': BashCompletionCommand,
'net-list': network.ListNetwork,
'net-external-list': network.ListExternalNetwork,
'net-show': network.ShowNetwork,
'net-create': network.CreateNetwork,
'net-delete': network.DeleteNetwork,
'net-update': network.UpdateNetwork,
'subnet-list': subnet.ListSubnet,
'subnet-show': subnet.ShowSubnet,
'subnet-create': subnet.CreateSubnet,
'subnet-delete': subnet.DeleteSubnet,
'subnet-update': subnet.UpdateSubnet,
'subnetpool-list': subnetpool.ListSubnetPool,
'subnetpool-show': subnetpool.ShowSubnetPool,
'subnetpool-create': subnetpool.CreateSubnetPool,
'subnetpool-delete': subnetpool.DeleteSubnetPool,
'subnetpool-update': subnetpool.UpdateSubnetPool,
'port-list': port.ListPort,
'port-show': port.ShowPort,
'port-create': port.CreatePort,
'port-delete': port.DeletePort,
'port-update': port.UpdatePort,
'purge': purge.Purge,
'quota-list': quota.ListQuota,
'quota-show': quota.ShowQuota,
'quota-default-show': quota.ShowQuotaDefault,
'quota-delete': quota.DeleteQuota,
'quota-update': quota.UpdateQuota,
'ext-list': extension.ListExt,
'ext-show': extension.ShowExt,
'router-list': router.ListRouter,
'router-port-list': port.ListRouterPort,
'router-show': router.ShowRouter,
'router-create': router.CreateRouter,
'router-delete': router.DeleteRouter,
'router-update': router.UpdateRouter,
'router-interface-add': router.AddInterfaceRouter,
'router-interface-delete': router.RemoveInterfaceRouter,
'router-gateway-set': router.SetGatewayRouter,
'router-gateway-clear': router.RemoveGatewayRouter,
'floatingip-list': floatingip.ListFloatingIP,
'floatingip-show': floatingip.ShowFloatingIP,
'floatingip-create': floatingip.CreateFloatingIP,
'floatingip-delete': floatingip.DeleteFloatingIP,
'floatingip-associate': floatingip.AssociateFloatingIP,
'floatingip-disassociate': floatingip.DisassociateFloatingIP,
'security-group-list': securitygroup.ListSecurityGroup,
'security-group-show': securitygroup.ShowSecurityGroup,
'security-group-create': securitygroup.CreateSecurityGroup,
'security-group-delete': securitygroup.DeleteSecurityGroup,
'security-group-update': securitygroup.UpdateSecurityGroup,
'security-group-rule-list': securitygroup.ListSecurityGroupRule,
'security-group-rule-show': securitygroup.ShowSecurityGroupRule,
'security-group-rule-create': securitygroup.CreateSecurityGroupRule,
'security-group-rule-delete': securitygroup.DeleteSecurityGroupRule,
'lbaas-loadbalancer-list': lbaas_loadbalancer.ListLoadBalancer,
'lbaas-loadbalancer-show': lbaas_loadbalancer.ShowLoadBalancer,
'lbaas-loadbalancer-create': lbaas_loadbalancer.CreateLoadBalancer,
'lbaas-loadbalancer-update': lbaas_loadbalancer.UpdateLoadBalancer,
'lbaas-loadbalancer-delete': lbaas_loadbalancer.DeleteLoadBalancer,
'lbaas-loadbalancer-stats': lbaas_loadbalancer.RetrieveLoadBalancerStats,
'lbaas-loadbalancer-status': lbaas_loadbalancer.RetrieveLoadBalancerStatus,
'lbaas-listener-list': lbaas_listener.ListListener,
'lbaas-listener-show': lbaas_listener.ShowListener,
'lbaas-listener-create': lbaas_listener.CreateListener,
'lbaas-listener-update': lbaas_listener.UpdateListener,
'lbaas-listener-delete': lbaas_listener.DeleteListener,
'lbaas-l7policy-list': lbaas_l7policy.ListL7Policy,
'lbaas-l7policy-show': lbaas_l7policy.ShowL7Policy,
'lbaas-l7policy-create': lbaas_l7policy.CreateL7Policy,
'lbaas-l7policy-update': lbaas_l7policy.UpdateL7Policy,
'lbaas-l7policy-delete': lbaas_l7policy.DeleteL7Policy,
'lbaas-l7rule-list': lbaas_l7rule.ListL7Rule,
'lbaas-l7rule-show': lbaas_l7rule.ShowL7Rule,
'lbaas-l7rule-create': lbaas_l7rule.CreateL7Rule,
'lbaas-l7rule-update': lbaas_l7rule.UpdateL7Rule,
'lbaas-l7rule-delete': lbaas_l7rule.DeleteL7Rule,
'lbaas-pool-list': lbaas_pool.ListPool,
'lbaas-pool-show': lbaas_pool.ShowPool,
'lbaas-pool-create': lbaas_pool.CreatePool,
'lbaas-pool-update': lbaas_pool.UpdatePool,
'lbaas-pool-delete': lbaas_pool.DeletePool,
'lbaas-healthmonitor-list': lbaas_healthmon.ListHealthMonitor,
'lbaas-healthmonitor-show': lbaas_healthmon.ShowHealthMonitor,
'lbaas-healthmonitor-create': lbaas_healthmon.CreateHealthMonitor,
'lbaas-healthmonitor-update': lbaas_healthmon.UpdateHealthMonitor,
'lbaas-healthmonitor-delete': lbaas_healthmon.DeleteHealthMonitor,
'lbaas-member-list': lbaas_member.ListMember,
'lbaas-member-show': lbaas_member.ShowMember,
'lbaas-member-create': lbaas_member.CreateMember,
'lbaas-member-update': lbaas_member.UpdateMember,
'lbaas-member-delete': lbaas_member.DeleteMember,
'lb-vip-list': lb_vip.ListVip,
'lb-vip-show': lb_vip.ShowVip,
'lb-vip-create': lb_vip.CreateVip,
'lb-vip-update': lb_vip.UpdateVip,
'lb-vip-delete': lb_vip.DeleteVip,
'lb-pool-list': lb_pool.ListPool,
'lb-pool-show': lb_pool.ShowPool,
'lb-pool-create': lb_pool.CreatePool,
'lb-pool-update': lb_pool.UpdatePool,
'lb-pool-delete': lb_pool.DeletePool,
'lb-pool-stats': lb_pool.RetrievePoolStats,
'lb-member-list': lb_member.ListMember,
'lb-member-show': lb_member.ShowMember,
'lb-member-create': lb_member.CreateMember,
'lb-member-update': lb_member.UpdateMember,
'lb-member-delete': lb_member.DeleteMember,
'lb-healthmonitor-list': lb_healthmonitor.ListHealthMonitor,
'lb-healthmonitor-show': lb_healthmonitor.ShowHealthMonitor,
'lb-healthmonitor-create': lb_healthmonitor.CreateHealthMonitor,
'lb-healthmonitor-update': lb_healthmonitor.UpdateHealthMonitor,
'lb-healthmonitor-delete': lb_healthmonitor.DeleteHealthMonitor,
'lb-healthmonitor-associate': lb_healthmonitor.AssociateHealthMonitor,
'lb-healthmonitor-disassociate': (
lb_healthmonitor.DisassociateHealthMonitor
),
'agent-list': agent.ListAgent,
'agent-show': agent.ShowAgent,
'agent-delete': agent.DeleteAgent,
'agent-update': agent.UpdateAgent,
'dhcp-agent-network-add': agentscheduler.AddNetworkToDhcpAgent,
'dhcp-agent-network-remove': agentscheduler.RemoveNetworkFromDhcpAgent,
'net-list-on-dhcp-agent': agentscheduler.ListNetworksOnDhcpAgent,
'dhcp-agent-list-hosting-net': agentscheduler.ListDhcpAgentsHostingNetwork,
'l3-agent-router-add': agentscheduler.AddRouterToL3Agent,
'l3-agent-router-remove': agentscheduler.RemoveRouterFromL3Agent,
'router-list-on-l3-agent': agentscheduler.ListRoutersOnL3Agent,
'l3-agent-list-hosting-router': agentscheduler.ListL3AgentsHostingRouter,
'lb-pool-list-on-agent': agentscheduler.ListPoolsOnLbaasAgent,
'lb-agent-hosting-pool': agentscheduler.GetLbaasAgentHostingPool,
'lbaas-loadbalancer-list-on-agent':
agentscheduler.ListLoadBalancersOnLbaasAgent,
'lbaas-agent-hosting-loadbalancer':
agentscheduler.GetLbaasAgentHostingLoadBalancer,
'service-provider-list': servicetype.ListServiceProvider,
'firewall-rule-list': firewallrule.ListFirewallRule,
'firewall-rule-show': firewallrule.ShowFirewallRule,
'firewall-rule-create': firewallrule.CreateFirewallRule,
'firewall-rule-update': firewallrule.UpdateFirewallRule,
'firewall-rule-delete': firewallrule.DeleteFirewallRule,
'firewall-policy-list': firewallpolicy.ListFirewallPolicy,
'firewall-policy-show': firewallpolicy.ShowFirewallPolicy,
'firewall-policy-create': firewallpolicy.CreateFirewallPolicy,
'firewall-policy-update': firewallpolicy.UpdateFirewallPolicy,
'firewall-policy-delete': firewallpolicy.DeleteFirewallPolicy,
'firewall-policy-insert-rule': firewallpolicy.FirewallPolicyInsertRule,
'firewall-policy-remove-rule': firewallpolicy.FirewallPolicyRemoveRule,
'firewall-list': firewall.ListFirewall,
'firewall-show': firewall.ShowFirewall,
'firewall-create': firewall.CreateFirewall,
'firewall-update': firewall.UpdateFirewall,
'firewall-delete': firewall.DeleteFirewall,
'ipsec-site-connection-list': (
ipsec_site_connection.ListIPsecSiteConnection
),
'ipsec-site-connection-show': (
ipsec_site_connection.ShowIPsecSiteConnection
),
'ipsec-site-connection-create': (
ipsec_site_connection.CreateIPsecSiteConnection
),
'ipsec-site-connection-update': (
ipsec_site_connection.UpdateIPsecSiteConnection
),
'ipsec-site-connection-delete': (
ipsec_site_connection.DeleteIPsecSiteConnection
),
'vpn-endpoint-group-list': endpoint_group.ListEndpointGroup,
'vpn-endpoint-group-show': endpoint_group.ShowEndpointGroup,
'vpn-endpoint-group-create': endpoint_group.CreateEndpointGroup,
'vpn-endpoint-group-update': endpoint_group.UpdateEndpointGroup,
'vpn-endpoint-group-delete': endpoint_group.DeleteEndpointGroup,
'vpn-service-list': vpnservice.ListVPNService,
'vpn-service-show': vpnservice.ShowVPNService,
'vpn-service-create': vpnservice.CreateVPNService,
'vpn-service-update': vpnservice.UpdateVPNService,
'vpn-service-delete': vpnservice.DeleteVPNService,
'vpn-ipsecpolicy-list': ipsecpolicy.ListIPsecPolicy,
'vpn-ipsecpolicy-show': ipsecpolicy.ShowIPsecPolicy,
'vpn-ipsecpolicy-create': ipsecpolicy.CreateIPsecPolicy,
'vpn-ipsecpolicy-update': ipsecpolicy.UpdateIPsecPolicy,
'vpn-ipsecpolicy-delete': ipsecpolicy.DeleteIPsecPolicy,
'vpn-ikepolicy-list': ikepolicy.ListIKEPolicy,
'vpn-ikepolicy-show': ikepolicy.ShowIKEPolicy,
'vpn-ikepolicy-create': ikepolicy.CreateIKEPolicy,
'vpn-ikepolicy-update': ikepolicy.UpdateIKEPolicy,
'vpn-ikepolicy-delete': ikepolicy.DeleteIKEPolicy,
'meter-label-create': metering.CreateMeteringLabel,
'meter-label-list': metering.ListMeteringLabel,
'meter-label-show': metering.ShowMeteringLabel,
'meter-label-delete': metering.DeleteMeteringLabel,
'meter-label-rule-create': metering.CreateMeteringLabelRule,
'meter-label-rule-list': metering.ListMeteringLabelRule,
'meter-label-rule-show': metering.ShowMeteringLabelRule,
'meter-label-rule-delete': metering.DeleteMeteringLabelRule,
'rbac-create': rbac.CreateRBACPolicy,
'rbac-update': rbac.UpdateRBACPolicy,
'rbac-list': rbac.ListRBACPolicy,
'rbac-show': rbac.ShowRBACPolicy,
'rbac-delete': rbac.DeleteRBACPolicy,
'address-scope-list': address_scope.ListAddressScope,
'address-scope-show': address_scope.ShowAddressScope,
'address-scope-create': address_scope.CreateAddressScope,
'address-scope-delete': address_scope.DeleteAddressScope,
'address-scope-update': address_scope.UpdateAddressScope,
'qos-policy-list': qos_policy.ListQoSPolicy,
'qos-policy-show': qos_policy.ShowQoSPolicy,
'qos-policy-create': qos_policy.CreateQoSPolicy,
'qos-policy-update': qos_policy.UpdateQoSPolicy,
'qos-policy-delete': qos_policy.DeleteQoSPolicy,
'qos-bandwidth-limit-rule-create': (
bandwidth_limit_rule.CreateQoSBandwidthLimitRule
),
'qos-bandwidth-limit-rule-show': (
bandwidth_limit_rule.ShowQoSBandwidthLimitRule
),
'qos-bandwidth-limit-rule-list': (
bandwidth_limit_rule.ListQoSBandwidthLimitRules
),
'qos-bandwidth-limit-rule-update': (
bandwidth_limit_rule.UpdateQoSBandwidthLimitRule
),
'qos-bandwidth-limit-rule-delete': (
bandwidth_limit_rule.DeleteQoSBandwidthLimitRule
),
'qos-dscp-marking-rule-create': (
dscp_marking_rule.CreateQoSDscpMarkingRule
),
'qos-dscp-marking-rule-show': (
dscp_marking_rule.ShowQoSDscpMarkingRule
),
'qos-dscp-marking-rule-list': (
dscp_marking_rule.ListQoSDscpMarkingRules
),
'qos-dscp-marking-rule-update': (
dscp_marking_rule.UpdateQoSDscpMarkingRule
),
'qos-dscp-marking-rule-delete': (
dscp_marking_rule.DeleteQoSDscpMarkingRule
),
'qos-available-rule-types': qos_rule.ListQoSRuleTypes,
'flavor-list': flavor.ListFlavor,
'flavor-show': flavor.ShowFlavor,
'flavor-create': flavor.CreateFlavor,
'flavor-delete': flavor.DeleteFlavor,
'flavor-update': flavor.UpdateFlavor,
'flavor-associate': flavor.AssociateFlavor,
'flavor-disassociate': flavor.DisassociateFlavor,
'flavor-profile-list': flavor_profile.ListFlavorProfile,
'flavor-profile-show': flavor_profile.ShowFlavorProfile,
'flavor-profile-create': flavor_profile.CreateFlavorProfile,
'flavor-profile-delete': flavor_profile.DeleteFlavorProfile,
'flavor-profile-update': flavor_profile.UpdateFlavorProfile,
'availability-zone-list': availability_zone.ListAvailabilityZone,
'auto-allocated-topology-show': (
auto_allocated_topology.ShowAutoAllocatedTopology),
'bgp-dragent-speaker-add': (
bgp_drsched.AddBGPSpeakerToDRAgent
),
'bgp-dragent-speaker-remove': (
bgp_drsched.RemoveBGPSpeakerFromDRAgent
),
'bgp-speaker-list-on-dragent': (
bgp_drsched.ListBGPSpeakersOnDRAgent
),
'bgp-dragent-list-hosting-speaker': (
bgp_drsched.ListDRAgentsHostingBGPSpeaker
),
'bgp-speaker-list': bgp_speaker.ListSpeakers,
'bgp-speaker-advertiseroute-list': (
bgp_speaker.ListRoutesAdvertisedBySpeaker
),
'bgp-speaker-show': bgp_speaker.ShowSpeaker,
'bgp-speaker-create': bgp_speaker.CreateSpeaker,
'bgp-speaker-update': bgp_speaker.UpdateSpeaker,
'bgp-speaker-delete': bgp_speaker.DeleteSpeaker,
'bgp-speaker-peer-add': bgp_speaker.AddPeerToSpeaker,
'bgp-speaker-peer-remove': bgp_speaker.RemovePeerFromSpeaker,
'bgp-speaker-network-add': bgp_speaker.AddNetworkToSpeaker,
'bgp-speaker-network-remove': bgp_speaker.RemoveNetworkFromSpeaker,
'bgp-peer-list': bgp_peer.ListPeers,
'bgp-peer-show': bgp_peer.ShowPeer,
'bgp-peer-create': bgp_peer.CreatePeer,
'bgp-peer-update': bgp_peer.UpdatePeer,
'bgp-peer-delete': bgp_peer.DeletePeer,
'net-ip-availability-list': network_ip_availability.ListIpAvailability,
'net-ip-availability-show': network_ip_availability.ShowIpAvailability,
'tag-add': tag.AddTag,
'tag-replace': tag.ReplaceTag,
'tag-remove': tag.RemoveTag,
}
COMMANDS = {'2.0': COMMAND_V2}
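# Illustrative sketch of how the table above is consumed: NeutronShell
# registers every entry with cliff's CommandManager, so each CLI name maps
# directly to a command class.
#
#   shell = NeutronShell(NEUTRON_API_VERSION)
#   shell.commands['2.0']['net-list']    # -> network.ListNetwork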
class HelpAction(argparse.Action):
"""Print help message including sub-commands
Provide a custom action so the -h and --help options
to the main app will print a list of the commands.
The commands are determined by checking the CommandManager
instance, passed in as the "default" value for the action.
"""
def __call__(self, parser, namespace, values, option_string=None):
outputs = []
max_len = 0
app = self.default
parser.print_help(app.stdout)
app.stdout.write(_('\nCommands for API v%s:\n') % app.api_version)
command_manager = app.command_manager
for name, ep in sorted(command_manager):
factory = ep.load()
cmd = factory(self, None)
one_liner = cmd.get_description().split('\n')[0]
outputs.append((name, one_liner))
max_len = max(len(name), max_len)
for (name, one_liner) in outputs:
app.stdout.write(' %s %s\n' % (name.ljust(max_len), one_liner))
sys.exit(0)
class NeutronShell(app.App):
# verbose logging levels
WARNING_LEVEL = 0
INFO_LEVEL = 1
DEBUG_LEVEL = 2
CONSOLE_MESSAGE_FORMAT = '%(message)s'
DEBUG_MESSAGE_FORMAT = '%(levelname)s: %(name)s %(message)s'
log = logging.getLogger(__name__)
def __init__(self, apiversion):
super(NeutronShell, self).__init__(
description=__doc__.strip(),
version=VERSION,
command_manager=commandmanager.CommandManager('neutron.cli'), )
self.commands = COMMANDS
for k, v in self.commands[apiversion].items():
self.command_manager.add_command(k, v)
self._register_extensions(VERSION)
# Pop the 'complete' command so it does not clutter the output of 'neutron help'.
self.command_manager.commands.pop('complete')
# This is instantiated in initialize_app() only when using
# password flow auth
self.auth_client = None
self.api_version = apiversion
def build_option_parser(self, description, version):
"""Return an argparse option parser for this application.
Subclasses may override this method to extend
the parser with more global options.
:param description: full description of the application
:paramtype description: str
:param version: version number for the application
:paramtype version: str
"""
parser = argparse.ArgumentParser(
description=description,
add_help=False, )
parser.add_argument(
'--version',
action='version',
version=__version__, )
parser.add_argument(
'-v', '--verbose', '--debug',
action='count',
dest='verbose_level',
default=self.DEFAULT_VERBOSE_LEVEL,
help=_('Increase verbosity of output and show tracebacks on'
' errors. You can repeat this option.'))
parser.add_argument(
'-q', '--quiet',
action='store_const',
dest='verbose_level',
const=0,
help=_('Suppress output except warnings and errors.'))
parser.add_argument(
'-h', '--help',
action=HelpAction,
nargs=0,
default=self, # tricky
help=_("Show this help message and exit."))
parser.add_argument(
'-r', '--retries',
metavar="NUM",
type=check_non_negative_int,
default=0,
help=_("How many times the request to the Neutron server should "
"be retried if it fails."))
# FIXME(bklei): this method should come from keystoneauth1
self._append_global_identity_args(parser)
return parser
def _append_global_identity_args(self, parser):
# FIXME(bklei): these are global identity (Keystone) arguments which
# should be consistent and shared by all service clients. Therefore,
# they should be provided by keystoneauth1. We will need to
# refactor this code once this functionality is available in
# keystoneauth1.
#
# Note: At that time we'll need to decide if we can just abandon
# the deprecated args (--service-type and --endpoint-type).
parser.add_argument(
'--os-service-type', metavar='<os-service-type>',
default=env('OS_NETWORK_SERVICE_TYPE', default='network'),
help=_('Defaults to env[OS_NETWORK_SERVICE_TYPE] or network.'))
parser.add_argument(
'--os-endpoint-type', metavar='<os-endpoint-type>',
default=env('OS_ENDPOINT_TYPE', default='public'),
help=_('Defaults to env[OS_ENDPOINT_TYPE] or public.'))
# FIXME(bklei): --service-type is deprecated but kept in for
# backward compatibility.
parser.add_argument(
'--service-type', metavar='<service-type>',
default=env('OS_NETWORK_SERVICE_TYPE', default='network'),
help=_('DEPRECATED! Use --os-service-type.'))
# FIXME(bklei): --endpoint-type is deprecated but kept in for
# backward compatibility.
parser.add_argument(
'--endpoint-type', metavar='<endpoint-type>',
default=env('OS_ENDPOINT_TYPE', default='public'),
help=_('DEPRECATED! Use --os-endpoint-type.'))
parser.add_argument(
'--os-auth-strategy', metavar='<auth-strategy>',
default=env('OS_AUTH_STRATEGY', default='keystone'),
help=_('DEPRECATED! Only keystone is supported.'))
parser.add_argument(
'--os_auth_strategy',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-cloud', metavar='<cloud>',
default=env('OS_CLOUD', default=None),
help=_('Defaults to env[OS_CLOUD].'))
parser.add_argument(
'--os-auth-url', metavar='<auth-url>',
default=env('OS_AUTH_URL'),
help=_('Authentication URL, defaults to env[OS_AUTH_URL].'))
parser.add_argument(
'--os_auth_url',
help=argparse.SUPPRESS)
project_name_group = parser.add_mutually_exclusive_group()
project_name_group.add_argument(
'--os-tenant-name', metavar='<auth-tenant-name>',
default=env('OS_TENANT_NAME'),
help=_('Authentication tenant name, defaults to '
'env[OS_TENANT_NAME].'))
project_name_group.add_argument(
'--os-project-name',
metavar='<auth-project-name>',
default=utils.env('OS_PROJECT_NAME'),
help=_('Another way to specify tenant name. '
'This option is mutually exclusive with '
' --os-tenant-name. '
'Defaults to env[OS_PROJECT_NAME].'))
parser.add_argument(
'--os_tenant_name',
help=argparse.SUPPRESS)
project_id_group = parser.add_mutually_exclusive_group()
project_id_group.add_argument(
'--os-tenant-id', metavar='<auth-tenant-id>',
default=env('OS_TENANT_ID'),
help=_('Authentication tenant ID, defaults to '
'env[OS_TENANT_ID].'))
project_id_group.add_argument(
'--os-project-id',
metavar='<auth-project-id>',
default=utils.env('OS_PROJECT_ID'),
help=_('Another way to specify tenant ID. '
'This option is mutually exclusive with '
' --os-tenant-id. '
'Defaults to env[OS_PROJECT_ID].'))
parser.add_argument(
'--os-username', metavar='<auth-username>',
default=utils.env('OS_USERNAME'),
help=_('Authentication username, defaults to env[OS_USERNAME].'))
parser.add_argument(
'--os_username',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-user-id', metavar='<auth-user-id>',
default=env('OS_USER_ID'),
help=_('Authentication user ID (Env: OS_USER_ID)'))
parser.add_argument(
'--os_user_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-user-domain-id',
metavar='<auth-user-domain-id>',
default=utils.env('OS_USER_DOMAIN_ID'),
help=_('OpenStack user domain ID. '
'Defaults to env[OS_USER_DOMAIN_ID].'))
parser.add_argument(
'--os_user_domain_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-user-domain-name',
metavar='<auth-user-domain-name>',
default=utils.env('OS_USER_DOMAIN_NAME'),
help=_('OpenStack user domain name. '
'Defaults to env[OS_USER_DOMAIN_NAME].'))
parser.add_argument(
'--os_user_domain_name',
help=argparse.SUPPRESS)
parser.add_argument(
'--os_project_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--os_project_name',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-project-domain-id',
metavar='<auth-project-domain-id>',
default=utils.env('OS_PROJECT_DOMAIN_ID'),
help=_('Defaults to env[OS_PROJECT_DOMAIN_ID].'))
parser.add_argument(
'--os-project-domain-name',
metavar='<auth-project-domain-name>',
default=utils.env('OS_PROJECT_DOMAIN_NAME'),
help=_('Defaults to env[OS_PROJECT_DOMAIN_NAME].'))
parser.add_argument(
'--os-cert',
metavar='<certificate>',
default=utils.env('OS_CERT'),
help=_("Path of certificate file to use in SSL "
"connection. This file can optionally be "
"prepended with the private key. Defaults "
"to env[OS_CERT]."))
parser.add_argument(
'--os-cacert',
metavar='<ca-certificate>',
default=env('OS_CACERT', default=None),
help=_("Specify a CA bundle file to use in "
"verifying a TLS (https) server certificate. "
"Defaults to env[OS_CACERT]."))
parser.add_argument(
'--os-key',
metavar='<key>',
default=utils.env('OS_KEY'),
help=_("Path of client key to use in SSL "
"connection. This option is not necessary "
"if your key is prepended to your certificate "
"file. Defaults to env[OS_KEY]."))
parser.add_argument(
'--os-password', metavar='<auth-password>',
default=utils.env('OS_PASSWORD'),
help=_('Authentication password, defaults to env[OS_PASSWORD].'))
parser.add_argument(
'--os_password',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-region-name', metavar='<auth-region-name>',
default=env('OS_REGION_NAME'),
help=_('Authentication region name, defaults to '
'env[OS_REGION_NAME].'))
parser.add_argument(
'--os_region_name',
help=argparse.SUPPRESS)
parser.add_argument(
'--os-token', metavar='<token>',
default=env('OS_TOKEN'),
help=_('Authentication token, defaults to env[OS_TOKEN].'))
parser.add_argument(
'--os_token',
help=argparse.SUPPRESS)
parser.add_argument(
'--http-timeout', metavar='<seconds>',
default=env('OS_NETWORK_TIMEOUT', default=None), type=float,
help=_('Timeout in seconds to wait for an HTTP response. Defaults '
'to env[OS_NETWORK_TIMEOUT] or None if not specified.'))
parser.add_argument(
'--os-url', metavar='<url>',
default=env('OS_URL'),
help=_('Defaults to env[OS_URL].'))
parser.add_argument(
'--os_url',
help=argparse.SUPPRESS)
parser.add_argument(
'--insecure',
action='store_true',
default=env('NEUTRONCLIENT_INSECURE', default=False),
help=_("Explicitly allow neutronclient to perform \"insecure\" "
"SSL (https) requests. The server's certificate will "
"not be verified against any certificate authorities. "
"This option should be used with caution."))
def _bash_completion(self):
"""Prints all of the commands and options for bash-completion."""
commands = set()
options = set()
for option, _action in self.parser._option_string_actions.items():
options.add(option)
for _name, _command in self.command_manager:
commands.add(_name)
cmd_factory = _command.load()
cmd = cmd_factory(self, None)
cmd_parser = cmd.get_parser('')
for option, _action in cmd_parser._option_string_actions.items():
options.add(option)
print(' '.join(commands | options))
def _register_extensions(self, version):
for name, module in itertools.chain(
client_extension._discover_via_entry_points()):
self._extend_shell_commands(name, module, version)
def _extend_shell_commands(self, name, module, version):
classes = inspect.getmembers(module, inspect.isclass)
for cls_name, cls in classes:
if (issubclass(cls, client_extension.NeutronClientExtension) and
hasattr(cls, 'shell_command')):
cmd = cls.shell_command
if hasattr(cls, 'versions'):
if version not in cls.versions:
continue
try:
name_prefix = "[%s]" % name
cls.__doc__ = ("%s %s" % (name_prefix, cls.__doc__) if
cls.__doc__ else name_prefix)
self.command_manager.add_command(cmd, cls)
self.commands[version][cmd] = cls
except TypeError:
pass
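# Illustrative sketch (class and command names are hypothetical): an extension
# module discovered via entry points only needs a NeutronClientExtension
# subclass with a shell_command attribute (and, optionally, a versions list)
# to be registered by the loop above.
#
#   class FooList(client_extension.NeutronClientExtension):
#       shell_command = 'foo-list'
#       versions = ['2.0']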
def run(self, argv):
"""Equivalent to the main program for the application.
:param argv: input arguments and options
:paramtype argv: list of str
"""
try:
index = 0
command_pos = -1
help_pos = -1
help_command_pos = -1
for arg in argv:
if arg == 'bash-completion' and help_command_pos == -1:
self._bash_completion()
return 0
if arg in self.commands[self.api_version]:
if command_pos == -1:
command_pos = index
elif arg in ('-h', '--help'):
if help_pos == -1:
help_pos = index
elif arg == 'help':
if help_command_pos == -1:
help_command_pos = index
index = index + 1
if command_pos > -1 and help_pos > command_pos:
argv = ['help', argv[command_pos]]
if help_command_pos > -1 and command_pos == -1:
argv[help_command_pos] = '--help'
self.options, remainder = self.parser.parse_known_args(argv)
self.configure_logging()
self.interactive_mode = not remainder
self.initialize_app(remainder)
except Exception as err:
if self.options.verbose_level >= self.DEBUG_LEVEL:
self.log.exception(err)
raise
else:
self.log.error(err)
return 1
if self.interactive_mode:
_argv = [sys.argv[0]]
sys.argv = _argv
return self.interact()
return self.run_subcommand(remainder)
def run_subcommand(self, argv):
subcommand = self.command_manager.find_command(argv)
cmd_factory, cmd_name, sub_argv = subcommand
cmd = cmd_factory(self, self.options)
try:
self.prepare_to_run_command(cmd)
full_name = (cmd_name
if self.interactive_mode
else ' '.join([self.NAME, cmd_name])
)
cmd_parser = cmd.get_parser(full_name)
return run_command(cmd, cmd_parser, sub_argv)
except SystemExit:
print(_("Try 'neutron help %s' for more information.") %
cmd_name, file=sys.stderr)
raise
except Exception as e:
if self.options.verbose_level >= self.DEBUG_LEVEL:
self.log.exception("%s", e)
raise
self.log.error("%s", e)
return 1
def authenticate_user(self):
"""Confirm user authentication
Make sure the user has provided all of the authentication
info we need.
"""
cloud_config = os_client_config.OpenStackConfig().get_one_cloud(
cloud=self.options.os_cloud, argparse=self.options,
network_api_version=self.api_version,
verify=not self.options.insecure)
verify, cert = cloud_config.get_requests_verify_args()
# TODO(singhj): Remove dependency on HTTPClient
# for the case of token-endpoint authentication
# When using token-endpoint authentication legacy
# HTTPClient will be used, otherwise SessionClient
# will be used.
if self.options.os_token and self.options.os_url:
auth = None
auth_session = None
else:
auth = cloud_config.get_auth()
auth_session = session.Session(
auth=auth, verify=verify, cert=cert,
timeout=self.options.http_timeout)
interface = self.options.os_endpoint_type or self.endpoint_type
if interface.endswith('URL'):
interface = interface[:-3]
self.client_manager = clientmanager.ClientManager(
retries=self.options.retries,
raise_errors=False,
session=auth_session,
url=self.options.os_url,
token=self.options.os_token,
region_name=cloud_config.get_region_name(),
api_version=cloud_config.get_api_version('network'),
service_type=cloud_config.get_service_type('network'),
service_name=cloud_config.get_service_name('network'),
endpoint_type=interface,
auth=auth,
insecure=not verify,
log_credentials=True)
return
def initialize_app(self, argv):
"""Global app init bits:
* set up API versions
* validate authentication info
"""
super(NeutronShell, self).initialize_app(argv)
self.api_version = {'network': self.api_version}
# If the user is not asking for help, make sure they
# have given us auth.
cmd_name = None
if argv:
cmd_info = self.command_manager.find_command(argv)
cmd_factory, cmd_name, sub_argv = cmd_info
if self.interactive_mode or cmd_name != 'help':
self.authenticate_user()
def configure_logging(self):
"""Create logging handlers for any log output."""
root_logger = logging.getLogger('')
# Set up logging to a file
root_logger.setLevel(logging.DEBUG)
# Send higher-level messages to the console via stderr
console = logging.StreamHandler(self.stderr)
console_level = {self.WARNING_LEVEL: logging.WARNING,
self.INFO_LEVEL: logging.INFO,
self.DEBUG_LEVEL: logging.DEBUG,
}.get(self.options.verbose_level, logging.DEBUG)
# The default log level is INFO; in that case, set the console
# log level to WARNING to avoid displaying noisy messages.
# This is equivalent to using "--quiet".
if console_level == logging.INFO:
console.setLevel(logging.WARNING)
else:
console.setLevel(console_level)
if logging.DEBUG == console_level:
formatter = logging.Formatter(self.DEBUG_MESSAGE_FORMAT)
else:
formatter = logging.Formatter(self.CONSOLE_MESSAGE_FORMAT)
logging.getLogger('iso8601.iso8601').setLevel(logging.WARNING)
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
console.setFormatter(formatter)
root_logger.addHandler(console)
return
def main(argv=sys.argv[1:]):
try:
return NeutronShell(NEUTRON_API_VERSION).run(
list(map(encodeutils.safe_decode, argv)))
except KeyboardInterrupt:
print(_("... terminating neutron client"), file=sys.stderr)
return 130
except exc.NeutronClientException:
return 1
except Exception as e:
print(e)
return 1
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| apache-2.0 | 2,772,146,299,183,681,000 | 40.459621 | 79 | 0.645897 | false |
mwrlabs/veripy | contrib/rfc3633/dr/renew_message.py | 1 | 1596 | from contrib.rfc3315.constants import *
from contrib.rfc3633.dhcpv6_pd import DHCPv6PDHelper
from scapy.all import *
from veripy.assertions import *
class RenewMessageTestCase(DHCPv6PDHelper):
"""
Requesting Router Initiated: Renew Message
Verify that a device can properly interoperate while using DHCPv6-PD
@private
Source: IPv6 Ready DHCPv6 Interoperability Test Suite (Section 4.2)
"""
def run(self):
prefix, p = self.do_dhcpv6_pd_handshake_as_client(self.target(1), self.node(1))
self.logger.info("Acquired the prefix %s from the DR (T1=%d)." % (prefix, p[DHCP6OptIA_PD].T1))
for i in range(0, 2):
self.ui.wait(p[DHCP6OptIA_PD].T1)
self.node(1).clear_received()
self.logger.info("Sending a DHCPv6 Renew message...")
self.node(1).send(
IPv6(src=str(self.node(1).link_local_ip()), dst=str(AllDHCPv6RelayAgentsAndServers))/
UDP(sport=DHCPv6SourcePort, dport=DHCPv6DestPort)/
self.build_dhcpv6_pd_renew(p, self.target(1), self.node(1)))
self.logger.info("Checking for a DHCPv6 Reply message...")
r1 = self.node(1).received(src=self.target(1).link_local_ip(), type=DHCP6_Reply)
assertEqual(1, len(r1), "expected to receive a DHCPv6 Reply message")
assertHasLayer(DHCP6OptIA_PD, r1[0], "expected the DHCPv6 Reply to contain an IA for Prefix Delegation")
assertHasLayer(DHCP6OptIAPrefix, r1[0], "expected the DHCPv6 Reply to contain an IA Prefix")
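# Possible follow-up check (illustrative only, not part of the original test):
# compare the prefix in the Reply's IA Prefix option with the one delegated in
# the initial exchange.
#
#   assertEqual(prefix, r1[0][DHCP6OptIAPrefix].prefix,
#               "expected the Reply to carry the originally delegated prefix")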
| gpl-3.0 | 802,280,280,625,709,000 | 41.026316 | 116 | 0.644737 | false |
fr34kyn01535/PyForum | server.py | 1 | 2451 | # coding:utf-8
import os.path
import cherrypy
import sys
if sys.version[0] == '2':
reload(sys)
sys.setdefaultencoding("utf-8")
from app import themen,diskussionen,beitraege,login,logout,administration,templates
def error_page(status, message, traceback, version):
return templates.RenderTemplate("error.html",title="Error",status=status,message=message,traceback=traceback,version=version);
cherrypy.config.update({'error_page.default': error_page})
cherrypy.config.update({'error_page.401': error_page})
cherrypy.config.update({'error_page.402': error_page})
cherrypy.config.update({'error_page.403': error_page})
cherrypy.config.update({'error_page.404': error_page})
cherrypy.config.update({'error_page.500': error_page})
def main():
cherrypy.Application.currentDir_s = os.path.dirname(os.path.abspath(__file__))
cherrypy.config.update({
'server.socket_host': '0.0.0.0',
'server.socket_port': 8082,
})
cherrypy.engine.autoreload.unsubscribe()
cherrypy.engine.timeout_monitor.unsubscribe()
dynamic = {'/': {
'tools.encode.on': True,
'tools.encode.encoding': 'utf-8',
'tools.sessions.on': True,
'request.dispatch': cherrypy.dispatch.MethodDispatcher()
}};
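# Illustrative sketch (class and template names are hypothetical): with
# cherrypy's MethodDispatcher each mounted handler exposes HTTP verbs as
# methods.
#
#   class Request:
#       exposed = True
#       def GET(self, *args, **kwargs):
#           return templates.RenderTemplate("index.html", title="Start")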
cherrypy.tree.mount(themen.Request(), '/', dynamic)
cherrypy.tree.mount(diskussionen.Request(), '/diskussionen', dynamic)
cherrypy.tree.mount(beitraege.Request(), '/beitraege', dynamic)
cherrypy.tree.mount(login.Request(), '/login', dynamic)
cherrypy.tree.mount(logout.Request(), '/logout', dynamic)
cherrypy.tree.mount(administration.Request(), '/administration', dynamic)
cherrypy.tree.mount(None, '/js', {'/': {
'tools.gzip.on' : True,
'tools.staticdir.on' : True,
'tools.staticdir.dir' : os.path.join(cherrypy.Application.currentDir_s, 'js'),
'tools.expires.on' : True,
'tools.expires.secs' : 0
}})
cherrypy.tree.mount(None, '/css', {'/': {
'tools.gzip.on' : True,
'tools.staticdir.on' : True,
'tools.staticdir.dir' : os.path.join(cherrypy.Application.currentDir_s, 'css'),
'tools.expires.on' : True,
'tools.expires.secs' : 0
}})
cherrypy.tree.mount(None, '/fonts', {'/': {
'tools.gzip.on' : True,
'tools.staticdir.on' : True,
'tools.staticdir.dir' : os.path.join(cherrypy.Application.currentDir_s, 'fonts'),
'tools.expires.on' : True,
'tools.expires.secs' : 0
}})
cherrypy.engine.start()
cherrypy.engine.block()
if __name__ == '__main__':
main()
| gpl-2.0 | 7,957,577,721,202,830,000 | 30.423077 | 130 | 0.684619 | false |
CauldronDevelopmentLLC/buildbot | buildbot/status/web/slaves.py | 1 | 6579 |
import time, urllib
from twisted.python import log
from twisted.web import html
from twisted.web.util import Redirect
from buildbot.status.web.base import HtmlResource, abbreviate_age, OneLineMixin, path_to_slave
from buildbot import version, util
# /buildslaves/$slavename
class OneBuildSlaveResource(HtmlResource, OneLineMixin):
addSlash = False
def __init__(self, slavename):
HtmlResource.__init__(self)
self.slavename = slavename
def getTitle(self, req):
return "Buildbot: %s" % html.escape(self.slavename)
    def getChild(self, path, req):
        if path == "shutdown":
            s = self.getStatus(req)
            slave = s.getSlave(self.slavename)
            slave.setGraceful(True)
            return Redirect(path_to_slave(req, slave))
        # Any other child path falls back to the default resource lookup
        # instead of implicitly returning None.
        return HtmlResource.getChild(self, path, req)
def body(self, req):
s = self.getStatus(req)
slave = s.getSlave(self.slavename)
my_builders = []
for bname in s.getBuilderNames():
b = s.getBuilder(bname)
for bs in b.getSlaves():
slavename = bs.getName()
if bs.getName() == self.slavename:
my_builders.append(b)
# Current builds
current_builds = []
for b in my_builders:
for cb in b.getCurrentBuilds():
if cb.getSlavename() == self.slavename:
current_builds.append(cb)
data = []
projectName = s.getProjectName()
data.append("<a href=\"%s\">%s</a>\n" % (self.path_to_root(req), projectName))
data.append("<h1>Build Slave: %s</h1>\n" % html.escape(self.slavename))
shutdown_url = req.childLink("shutdown")
if not slave.isConnected():
data.append("<h2>NOT CONNECTED</h2>\n")
elif not slave.getGraceful():
data.append('''<form method="POST" action="%s">
<input type="submit" value="Gracefully Shutdown">
</form>''' % shutdown_url)
else:
data.append("Gracefully shutting down...\n")
if current_builds:
data.append("<h2>Currently building:</h2>\n")
data.append("<ul>\n")
for build in current_builds:
data.append("<li>%s</li>\n" % self.make_line(req, build, True))
data.append("</ul>\n")
else:
data.append("<h2>no current builds</h2>\n")
# Recent builds
data.append("<h2>Recent builds:</h2>\n")
data.append("<ul>\n")
n = 0
try:
max_builds = int(req.args.get('builds')[0])
except:
max_builds = 10
for build in s.generateFinishedBuilds(builders=[b.getName() for b in my_builders]):
if build.getSlavename() == self.slavename:
n += 1
data.append("<li>%s</li>\n" % self.make_line(req, build, True))
if n > max_builds:
break
data.append("</ul>\n")
projectURL = s.getProjectURL()
projectName = s.getProjectName()
data.append('<hr /><div class="footer">\n')
welcomeurl = self.path_to_root(req) + "index.html"
data.append("[<a href=\"%s\">welcome</a>]\n" % welcomeurl)
data.append("<br />\n")
data.append('<a href="http://buildbot.sourceforge.net/">Buildbot</a>')
data.append("-%s " % version)
if projectName:
data.append("working for the ")
if projectURL:
data.append("<a href=\"%s\">%s</a> project." % (projectURL,
projectName))
else:
data.append("%s project." % projectName)
data.append("<br />\n")
data.append("Page built: " +
time.strftime("%a %d %b %Y %H:%M:%S",
time.localtime(util.now()))
+ "\n")
data.append("</div>\n")
return "".join(data)
# /buildslaves
class BuildSlavesResource(HtmlResource):
title = "BuildSlaves"
addSlash = True
def body(self, req):
s = self.getStatus(req)
data = ""
data += "<h1>Build Slaves</h1>\n"
used_by_builder = {}
for bname in s.getBuilderNames():
b = s.getBuilder(bname)
for bs in b.getSlaves():
slavename = bs.getName()
if slavename not in used_by_builder:
used_by_builder[slavename] = []
used_by_builder[slavename].append(bname)
data += "<ol>\n"
for name in util.naturalSort(s.getSlaveNames()):
slave = s.getSlave(name)
slave_status = s.botmaster.slaves[name].slave_status
isBusy = len(slave_status.getRunningBuilds())
data += " <li><a href=\"%s\">%s</a>:\n" % (req.childLink(urllib.quote(name,'')), name)
data += " <ul>\n"
builder_links = ['<a href="%s">%s</a>'
% (req.childLink("../builders/%s" % bname),bname)
for bname in used_by_builder.get(name, [])]
if builder_links:
data += (" <li>Used by Builders: %s</li>\n" %
", ".join(builder_links))
else:
data += " <li>Not used by any Builders</li>\n"
if slave.isConnected():
data += " <li>Slave is currently connected</li>\n"
admin = slave.getAdmin()
if admin:
# munge it to avoid feeding the spambot harvesters
admin = admin.replace("@", " -at- ")
data += " <li>Admin: %s</li>\n" % admin
last = slave.lastMessageReceived()
if last:
lt = time.strftime("%Y-%b-%d %H:%M:%S",
time.localtime(last))
age = abbreviate_age(time.time() - last)
data += " <li>Last heard from: %s " % age
data += '<font size="-1">(%s)</font>' % lt
data += "</li>\n"
if isBusy:
data += "<li>Slave is currently building.</li>"
else:
data += "<li>Slave is idle.</li>"
else:
data += " <li><b>Slave is NOT currently connected</b></li>\n"
data += " </ul>\n"
data += " </li>\n"
data += "\n"
data += "</ol>\n"
return data
def getChild(self, path, req):
return OneBuildSlaveResource(path)
| gpl-2.0 | 1,440,367,016,623,047,700 | 35.348066 | 98 | 0.495212 | false |
chop-dbhi/varify-data-warehouse | vdw/assessments/migrations/0005_copy_categories.py | 1 | 21123 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.core.management import call_command
class Migration(DataMigration):
def forwards(self, orm):
call_command("loaddata", "assessment_categories.json")
for assessment in orm.Assessment.objects.all():
if assessment.category:
ac = orm.AssessmentCategory.objects.get(pk=assessment.category.id)
assessment.assessment_category = ac
assessment.save()
def backwards(self, orm):
call_command("loaddata", "categories.json")
for assessment in orm.Assessment.objects.all():
if assessment.assessment_category:
c = orm.Category.objects.get(pk=assessment.assessment_category.id)
assessment.category = c
assessment.save()
models = {
'assessments.assessment': {
'Meta': {'object_name': 'Assessment', 'db_table': "'assessment'"},
'assessment_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessments.AssessmentCategory']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessments.Category']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'evidence_details': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'father_result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'father'", 'to': "orm['assessments.ParentalResult']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mother_result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mother'", 'to': "orm['assessments.ParentalResult']"}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pathogenicity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessments.Pathogenicity']"}),
'sample_result': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Result']"}),
'sanger_requested': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sanger_result': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessments.SangerResult']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'assessments.assessmentcategory': {
'Meta': {'object_name': 'AssessmentCategory', 'db_table': "'assessment_category'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'assessments.category': {
'Meta': {'object_name': 'Category', 'db_table': "'category'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'assessments.parentalresult': {
'Meta': {'object_name': 'ParentalResult', 'db_table': "'parental_result'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'assessments.pathogenicity': {
'Meta': {'object_name': 'Pathogenicity', 'db_table': "'pathogenicity'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'assessments.sangerresult': {
'Meta': {'object_name': 'SangerResult', 'db_table': "'sanger_result'"},
'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'genome.chromosome': {
'Meta': {'ordering': "['order']", 'object_name': 'Chromosome', 'db_table': "'chromosome'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'})
},
'genome.genotype': {
'Meta': {'object_name': 'Genotype', 'db_table': "'genotype'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '3'})
},
'literature.pubmed': {
'Meta': {'object_name': 'PubMed', 'db_table': "'pubmed'"},
'pmid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'})
},
'phenotypes.phenotype': {
'Meta': {'object_name': 'Phenotype', 'db_table': "'phenotype'"},
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['literature.PubMed']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hpo_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1000'})
},
'samples.batch': {
'Meta': {'ordering': "('project', 'label')", 'unique_together': "(('project', 'name'),)", 'object_name': 'Batch', 'db_table': "'batch'"},
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batches'", 'to': "orm['samples.Project']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'samples.person': {
'Meta': {'object_name': 'Person', 'db_table': "'person'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mrn': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'proband': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'relations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['samples.Person']", 'through': "orm['samples.Relation']", 'symmetrical': 'False'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
'samples.project': {
'Meta': {'unique_together': "(('name',),)", 'object_name': 'Project', 'db_table': "'project'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'samples.relation': {
'Meta': {'ordering': "('person', '-generation')", 'object_name': 'Relation', 'db_table': "'relation'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'generation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'family'", 'to': "orm['samples.Person']"}),
'relative': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'relative_of'", 'to': "orm['samples.Person']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'samples.result': {
'Meta': {'unique_together': "(('sample', 'variant'),)", 'object_name': 'Result', 'db_table': "'sample_result'"},
'base_counts': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'baseq_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'coverage_alt': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'coverage_ref': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'downsampling': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'fisher_strand': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'genotype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Genotype']", 'null': 'True', 'blank': 'True'}),
'genotype_quality': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'haplotype_score': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'homopolymer_run': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_dbsnp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mq': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mq0': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mq_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phred_scaled_likelihood': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'quality': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'quality_by_depth': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'raw_read_depth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'read_depth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'read_pos_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sample': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results'", 'to': "orm['samples.Sample']"}),
'spanning_deletions': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'strand_bias': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.Variant']"})
},
'samples.sample': {
'Meta': {'ordering': "('project', 'batch', 'label')", 'unique_together': "(('batch', 'name', 'version'),)", 'object_name': 'Sample', 'db_table': "'sample'"},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'samples'", 'to': "orm['samples.Batch']"}),
'bio_sample': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'samples'", 'null': 'True', 'to': "orm['samples.Person']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'samples'", 'to': "orm['samples.Project']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version': ('django.db.models.fields.IntegerField', [], {})
},
'variants.variant': {
'Meta': {'unique_together': "(('chr', 'pos', 'ref', 'alt'),)", 'object_name': 'Variant', 'db_table': "'variant'"},
'alt': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['literature.PubMed']", 'db_table': "'variant_pubmed'", 'symmetrical': 'False'}),
'chr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Chromosome']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'liftover': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'phenotypes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['phenotypes.Phenotype']", 'through': "orm['variants.VariantPhenotype']", 'symmetrical': 'False'}),
'pos': ('django.db.models.fields.IntegerField', [], {}),
'ref': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'rsid': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.VariantType']", 'null': 'True'})
},
'variants.variantphenotype': {
'Meta': {'object_name': 'VariantPhenotype', 'db_table': "'variant_phenotype'"},
'hgmd_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phenotype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['phenotypes.Phenotype']"}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variant_phenotypes'", 'to': "orm['variants.Variant']"})
},
'variants.varianttype': {
'Meta': {'ordering': "['order']", 'object_name': 'VariantType', 'db_table': "'variant_type'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '20'})
}
}
complete_apps = ['assessments']
symmetrical = True
| bsd-2-clause | 3,351,375,113,810,633,000 | 80.555985 | 192 | 0.546561 | false |
watchtower/asynctest | asynctest/__init__.py | 1 | 2295 | import functools
class TestStatus: # FIXME should really be an Enum
pending = -1
failure = 0
success = 1
class Test:
def __init__(self, func, description):
self.func = func
self.description = description
self.status = TestStatus.pending
self._callback = None
self.manager = None
def callback(self, f):
self._callback = f
return f
def success(self):
if self.status == TestStatus.pending:
self.status = TestStatus.success
self.manager._test_complete(self)
def failure(self):
if self.status == TestStatus.pending:
self.status = TestStatus.failure
self.manager._test_complete(self)
def succeed_if(self, condition):
if condition:
self.success()
else:
self.failure()
def __call__(self):
if self.func is not None:
self.func()
if self._callback:
self._callback()
class test:
def __init__(self, description):
self.description = description
def __call__(self, f):
return Test(f, self.description)
class TestManager:
def __init__(self, tests):
self.tests = tests
self.test_status = []
if any(not isinstance(i, Test) for i in self.tests):
raise TypeError("Non-test passed to TestManager")
for t in self.tests:
t.manager = self
    def add_test(self, t):
        if not isinstance(t, Test):
            raise TypeError("Non-test passed to TestManager")
        t.manager = self
        self.tests.append(t)
def _all_tests_complete(self):
print("{} tests complete.".format(len(self.tests)))
success = len([t for t in self.tests if t.status])
self.successes = success
print("There were {} successes, {} failures.".format(success, len(self.tests) - success))
def _test_complete(self, t):
self.test_status.append((t.description, t.status))
print("{}: {}".format(t.description, "success" if t.status else "failure"))
if len(self.test_status) == len(self.tests):
self._all_tests_complete()
def run_all(self):
for t in self.tests:
t()
return sum([t.status == TestStatus.failure for t in self.tests])
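# --- Illustrative usage (added sketch, not part of the original module). ---
# The demo tests and descriptions below are made up for the example; the API
# calls (test, Test.callback, succeed_if, failure, TestManager.run_all) are the
# ones defined above. Callbacks resolve immediately here for simplicity.
if __name__ == "__main__":
    @test("truth is truthy")
    def check_truth():
        pass

    @check_truth.callback
    def after_check_truth():
        check_truth.succeed_if(1 + 1 == 2)

    @test("always fails")
    def check_failure():
        pass

    @check_failure.callback
    def after_check_failure():
        check_failure.failure()

    manager = TestManager([check_truth, check_failure])
    # run_all() returns the number of failures, which doubles as an exit code.
    raise SystemExit(manager.run_all())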
| mit | 2,558,572,069,989,067,300 | 27.6875 | 97 | 0.578214 | false |
dahiro/shotgun-replica | shotgun_replica/python/tests/shotgun_replica_tests/sync/shotgun_to_local/test_link.py | 1 | 10024 | # -*- coding: utf-8 -*-
'''
Created on 25.06.2012
@author: bach
'''
from shotgun_replica.connectors import DatabaseModificator, PostgresEntityType
from shotgun_replica.sync.shotgun_to_local import EventProcessor
from shotgun_replica.entities import InOut
from tests_elefant import testProjectID, testShotID, testTaskID, testTaskID_2
from shotgun_api3 import shotgun
import unittest
from shotgun_replica import config, factories
from shotgun_replica.utilities import debug, entityNaming
import pprint
from shotgun_replica._entity_mgmt import _ShotgunEntity
from shotgun_replica.sync import shotgun_to_local
NEWVALUE = "rdy"
OLDVALUE = "wtg"
class Test( unittest.TestCase ):
def setUp( self ):
self.sg = shotgun.Shotgun( config.SHOTGUN_URL,
config.SHOTGUN_SYNC_SKRIPT,
config.SHOTGUN_SYNC_KEY )
self.src = DatabaseModificator()
self.ep = EventProcessor( self.src, self.sg )
self.deleteEntities = []
self.shotgun2local = shotgun_to_local.EventSpooler()
def tearDown( self ):
for entry in self.deleteEntities:
if type( entry ) == dict:
self.sg.delete( entry["type"],
entry["id"] )
elif isinstance( entry, PostgresEntityType ):
self.sg.delete( entry.type,
entry.remote_id )
elif isinstance( entry, _ShotgunEntity ):
self.sg.delete( entry.getType(),
entry.getRemoteID() )
else:
raise Exception( "%s not handled" % type( entry ) )
self.assertTrue( self.shotgun2local.connectAndRun(), "synch not successful" )
def _getNewEvents( self ):
newevents = self.sg.find( "EventLogEntry",
filters = [['id', 'greater_than', self.lastID]],
fields = ['id', 'event_type', 'attribute_name', 'meta', 'entity'],
order = [{'column':'id', 'direction':'asc'}],
filter_operator = 'all',
limit = 100 )
debug.debug( newevents )
return newevents
def testAddOutput( self ):
lastevent = self.sg.find(
"EventLogEntry",
filters = [],
fields = ['id', 'event_type', 'attribute_name', 'meta', 'entity'],
order = [{'column':'id', 'direction':'desc'}],
filter_operator = 'all',
limit = 1
)[0]
self.lastID = lastevent["id"]
data = {
"project": {"type": "Project",
"id": testProjectID
},
"code": "newoutput",
"sg_link": {"type": "Task",
"id": testTaskID
},
}
newOutputDict = self.sg.create( InOut().getType(), data, [] )
self.deleteEntities.append( newOutputDict )
newevents = self._getNewEvents()
self.assertEqual( newevents[0]["event_type"], "Shotgun_CustomEntity02_New", "event not as expected" )
self.assertEqual( newevents[1]["event_type"], "Shotgun_CustomEntity02_Change", "event not as expected" )
self.assertEqual( newevents[2]["event_type"], "Shotgun_CustomEntity02_Change", "event not as expected" )
self.assertEqual( newevents[3]["event_type"], "Shotgun_CustomEntity02_Change", "event not as expected" )
self._processEvents( newevents )
newOutput = factories.getObject( "CustomEntity02", remote_id = newOutputDict["id"] )
changedData = {
'sg_sink_tasks': [
{
"type": "Task",
"id": testTaskID
},
{
"type": "Task",
"id": testTaskID_2
},
]
}
self.sg.update( newOutputDict["type"], newOutputDict["id"], changedData )
newevents = self._getNewEvents()
self.assertEqual( len( newevents ), 5 )
self.assertEqual( newevents[0]["event_type"], "Shotgun_CustomEntity02_sg_sink_tasks_Connection_New" )
self.assertEqual( newevents[1]["event_type"], "Shotgun_CustomEntity02_sg_sink_tasks_Connection_New" )
self.assertEqual( newevents[2]["event_type"], "Shotgun_CustomEntity02_Change" )
self.assertEqual( newevents[3]["event_type"], "Shotgun_Task_Change" )
self.assertEqual( newevents[4]["event_type"], "Shotgun_Task_Change" )
self._processEvents( newevents )
# check if Connection-Entities are available
filters = "custom_entity02=%s and task=ANY(%s)"
taskSgObj1 = PostgresEntityType( "Task", remote_id = testTaskID )
taskSgObj2 = PostgresEntityType( "Task", remote_id = testTaskID_2 )
outputSgObj = PostgresEntityType( newOutputDict["type"], remote_id = newOutputDict["id"] )
filterValues = [ outputSgObj, [ taskSgObj1, taskSgObj2 ] ]
connObjs = factories.getObjects( "CustomEntity02_sg_sink_tasks_Connection", filters, filterValues )
self.assertEqual( len( connObjs ), 2, "no conn-objs: %s" % pprint.pformat( connObjs, indent = 2 ) )
# check if return attribute of Task contains this CustomEntity02
retAttr = entityNaming.getReverseAttributeName( "CustomEntity02", "sg_sink_tasks" )
for taskID in [ testTaskID, testTaskID_2 ]:
taskTmpObj = factories.getObject( "Task", remote_id = taskID )
retOutputs = taskTmpObj.__getattribute__( retAttr )
self.assertTrue( newOutput in retOutputs )
changedData["sg_sink_tasks"] = []
self.sg.update( newOutputDict["type"], newOutputDict["id"], changedData )
newevents = self._getNewEvents()
# unfortunately there are two events missing:
# see: https://support.shotgunsoftware.com/requests/7380
self.assertEqual( len( newevents ), 3 )
self.assertEqual( newevents[0]["event_type"], "Shotgun_CustomEntity02_Change" )
self.assertEqual( newevents[1]["event_type"], "Shotgun_Task_Change" )
self.assertEqual( newevents[2]["event_type"], "Shotgun_Task_Change" )
self._processEvents( newevents )
retAttr = entityNaming.getReverseAttributeName( "CustomEntity02", "sg_sink_tasks" )
for taskID in [ testTaskID, testTaskID_2 ]:
taskTmpObj = factories.getObject( "Task", remote_id = taskID )
retOutputs = taskTmpObj.__getattribute__( retAttr )
if retOutputs:
self.assertFalse( newOutput in retOutputs )
# check if Connection-Entities are deleted
connObjs = factories.getObjects( "CustomEntity02_sg_sink_tasks_Connection", filters, filterValues )
self.assertEqual( len( connObjs ), 0, "conn-objs still available: %s" % pprint.pformat( connObjs, indent = 2 ) )
def _processEvents( self, newevents ):
self.shotgun2local.connectAndRun()
self.lastID = newevents[len( newevents ) - 1]["id"]
def testAddTask( self ):
lastevent = self.sg.find(
"EventLogEntry",
filters = [],
fields = ['id', 'event_type', 'attribute_name', 'meta', 'entity'],
order = [{'column':'id', 'direction':'desc'}],
filter_operator = 'all',
limit = 1
)[0]
self.lastID = lastevent["id"]
data = {
"project": {"type": "Project",
"id": testProjectID
},
"content": "TEST TASK (delete me)"
}
newTaskDict = self.sg.create( "Task", data, [] )
self.deleteEntities.append( newTaskDict )
newevents = self._getNewEvents()
# self.assertEqual(len(newevents), 4, "not the same amount of events uppon creation of Task")
self.assertEqual( newevents[0]["event_type"], "Shotgun_Task_New", "event not as expected" )
self._processEvents( newevents )
newTaskObj = factories.getObject( "Task", remote_id = newTaskDict["id"] )
self.sg.update( "Task", newTaskDict["id"], {"entity": {"type": "Shot",
"id": testShotID}} )
newevents = self._getNewEvents()
self.assertTrue( newevents[0]["event_type"] in ["Shotgun_Task_Change",
"Shotgun_Shot_Change"] )
self.assertTrue( newevents[1]["event_type"] in ["Shotgun_Task_Change",
"Shotgun_Shot_Change"] )
self._processEvents( newevents )
# check if tasks-property of Shot contains this task
shotObj = factories.getObject( "Shot", remote_id = testShotID )
self.assertTrue( newTaskObj in shotObj.tasks )
self.sg.delete( "Task", newTaskDict["id"] )
newevents = self._getNewEvents()
self.assertTrue( newevents[0]["event_type"] == "Shotgun_Task_Retirement" )
self.assertTrue( newevents[1]["event_type"] == "Shotgun_Task_Change" )
self._processEvents( newevents )
# check if tasks-property of Shot does not contain this task anymore
shotObj = factories.getObject( "Shot", remote_id = testShotID )
self.assertFalse( newTaskObj in shotObj.tasks )
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| bsd-3-clause | -2,459,416,019,199,603,000 | 41.295359 | 120 | 0.545192 | false |
linktlh/Toontown-journey | toontown/distributed/HoodMgr.py | 1 | 13166 | from direct.directnotify import DirectNotifyGlobal
from direct.showbase import DirectObject
from pandac.PandaModules import *
import random
from toontown.hood import ZoneUtil
from toontown.toonbase import ToontownGlobals
class HoodMgr(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('HoodMgr')
ToontownCentralInitialDropPoints = (
[-90.7, -60, 0.025, 102.575, 0, 0],
[-91.4, -40.5, -3.948, 125.763, 0, 0],
[-107.8, -17.8, -1.937, 149.456, 0, 0],
[-108.7, 12.8, -1.767, 158.756, 0, 0],
[-42.1, -22.8, -1.328, -248.1, 0, 0],
[-35.2, -60.2, 0.025, -265.639, 0, 0]
)
ToontownCentralHQDropPoints = (
[-43.5, 42.6, -0.55, -100.454, 0, 0],
[-53.0, 12.5, -2.948, 281.502, 0, 0],
[-40.3, -18.5, -0.913, -56.674, 0, 0],
[-1.9, -37.0, 0.025, -23.43, 0, 0],
[1.9, -5.9, 4, -37.941, 0, 0]
)
ToontownCentralTunnelDropPoints = (
[-28.3, 40.1, 0.25, 17.25, 0, 0],
[-63.75, 58.96, -0.5, -23.75, 0, 0],
[-106.93, 17.66, -2.2, 99, 0, 0],
[-116.0, -21.5, -0.038, 50, 0, 0],
[74.88, -115, 2.53, -224.41, 0, 0],
[30.488, -101.5, 2.53, -179.23, 0, 0]
)
dropPoints = {
ToontownGlobals.DonaldsDock: (
[-28, -2.5, 5.8, 120, 0, 0],
[-22, 13, 5.8, 155.6, 0, 0],
[67, 47, 5.7, 134.7, 0, 0],
[62, 19, 5.7, 97, 0, 0],
[66, -27, 5.7, 80.5, 0, 0],
[-114, -7, 5.7, -97, 0, 0],
[-108, 36, 5.7, -153.8, 0, 0],
[-116, -46, 5.7, -70.1, 0, 0],
[-63, -79, 5.7, -41.2, 0, 0],
[-2, -79, 5.7, 57.4, 0, 0],
[-38, -78, 5.7, 9.1, 0, 0]
),
ToontownGlobals.ToontownCentral: (
[-60, -8, 1.3, -90, 0, 0],
[-66, -9, 1.3, -274, 0, 0],
[17, -28, 4.1, -44, 0, 0],
[87.7, -22, 4, 66, 0, 0],
[-9.6, 61.1, 0, 132, 0, 0],
[-109.0, -2.5, -1.656, -90, 0, 0],
[-35.4, -81.3, 0.5, -4, 0, 0],
[-103, 72, 0, -141, 0, 0],
[93.5, -148.4, 2.5, 43, 0, 0],
[25, 123.4, 2.55, 272, 0, 0],
[48, 39, 4, 201, 0, 0],
[-80, -61, 0.1, -265, 0, 0],
[-46.875, 43.68, -1.05, 124, 0, 0],
[34, -105, 2.55, 45, 0, 0],
[16, -75, 2.55, 56, 0, 0],
[-27, -56, 0.1, 45, 0, 0],
[100, 27, 4.1, 150, 0, 0],
[-70, 4.6, -1.9, 90, 0, 0],
[-130.7, 50, 0.55, -111, 0, 0]
),
ToontownGlobals.TheBrrrgh: (
[35, -32, 6.2, 138, 0, 0],
[26, -105, 6.2, -339, 0, 0],
[-29, -139, 6.2, -385, 0, 0],
[-79, -123, 6.2, -369, 0, 0],
[-114, -86, 3, -54, 0, 0],
[-136, 9, 6.2, -125, 0, 0],
[-75, 92, 6.2, -187, 0, 0],
[-7, 75, 6.2, -187, 0, 0],
[-106, -42, 8.6, -111, 0, 0],
[-116, -44, 8.3, -20, 0, 0]
),
ToontownGlobals.MinniesMelodyland: (
[86, 44, -13.5, 121.1, 0, 0],
[88, -8, -13.5, 91, 0, 0],
[92, -76, -13.5, 62.5, 0, 0],
[53, -112, 6.5, 65.8, 0, 0],
[-69, -71, 6.5, -67.2, 0, 0],
[-75, 21, 6.5, -100.9, 0, 0],
[-21, 72, 6.5, -129.5, 0, 0],
[56, 72, 6.5, 138.2, 0, 0],
[-41, 47, 6.5, -98.9, 0, 0]
),
ToontownGlobals.DaisyGardens: (
[0, 0, 0, -10.5, 0, 0],
[76, 35, 0, -30.2, 0, 0],
[97, 106, 0, 51.4, 0, 0],
[51, 180, 10, 22.6, 0, 0],
[-14, 203, 10, 85.6, 0, 0],
[-58, 158, 10, -146.9, 0, 0],
[-86, 128, 0, -178.9, 0, 0],
[-64, 65, 0, 17.7, 0, 0],
[-13, 39, 0, -15.7, 0, 0],
[-12, 193, 0, -112.4, 0, 0],
[87, 128, 0, 45.4, 0, 0]
),
ToontownGlobals.DonaldsDreamland: (
[77, 91, 0, 124.4, 0, 0],
[29, 92, 0, -154.5, 0, 0],
[-28, 49, -16.4, -142, 0, 0],
[21, 40, -16, -65.1, 0, 0],
[48, 27, -15.4, -161, 0, 0],
[-2, -22, -15.2, -132.1, 0, 0],
[-92, -88, 0, -116.3, 0, 0],
[-56, -93, 0, -21.5, 0, 0],
[20, -88, 0, -123.4, 0, 0],
[76, -90, 0, 11, 0, 0]
),
ToontownGlobals.GoofySpeedway: (
[-0.7, 62, 0.08, 182, 0, 0],
[-1, -30, 0.06, 183, 0, 0],
[-13, -120, 0, 307, 0, 0],
[16.4, -120, 0, 65, 0, 0],
[-0.5, -90, 0, 182, 0, 0],
[-30, -25, -0.373, 326, 0, 0],
[29, -17, -0.373, 32, 0, 0]
),
ToontownGlobals.GolfZone: (
[-49.6, 102, 0, 162, 0, 0],
[-22.8, 36.6, 0, 157.5, 0, 0],
[40, 51, 0, 185, 0, 0],
[48.3, 122.2, 0, 192, 0, 0],
[106.3, 69.2, 0, 133, 0, 0],
[-81.5, 47.2, 0, 183, 0, 0],
[-80.5, -84.2, 0, 284, 0, 0],
[73, -111, 0, 354, 0, 0]
),
ToontownGlobals.OutdoorZone: (
[-165.8, 108, 0.025, 252, 0, 0],
[21, 130, 0.16, 170, 0, 0],
[93, 78.5, 0.23, 112, 0, 0],
[79, -1.6, 0.75, 163, 0, 0],
[10, 33, 5.32, 130.379, 0, 0],
[-200, -42, 0.025, 317.543, 0, 0],
[-21, -65, 0.335, -18, 0, 0],
[23, 68.5, 4.51, -22.808, 0, 0]
),
ToontownGlobals.Tutorial: (
[130.9, -8.6, -1.3, 105.5, 0, 0],
),
ToontownGlobals.SellbotHQ: (
[-15.1324, -197.522, -19.5944, 4.92024, 0, 0],
[35.9713, -193.266, -19.5944, 4.38194, 0, 0],
[136.858, -155.959, -0.139187, 88.4705, 0, 0],
[0.2818, -281.656, 0.883273, 355.735, 0, 0],
[53.7832, -160.498, -4.33266, 397.602, 0, 0],
[-55.1619, -184.358, -3.06033, 342.677, 0, 0]
),
ToontownGlobals.CashbotHQ: (
[102, -437, -23.439, 0, 0, 0],
[124, -437, -23.439, 0, 0, 0],
[110, -446, -23.439, 0, 0, 0],
[132, -446, -23.439, 0, 0, 0]
),
ToontownGlobals.LawbotHQ: (
[77.5, 129.13, -68.4, -166.6, 0, 0],
[-57.7, 80.75, -68.4, -139.2, 0, 0],
[203.3, 46.36, -68.4, -213.37, 0, 0],
[88.2, -336.52, -68.4, -720.4, 0, 0],
[232.77, -305.33, -68.4, -651, 0, 0],
[-20.16, -345.76, -68.4, -777.98, 0, 0]
),
ToontownGlobals.BossbotHQ: (
[65, 45, 0.025, 0, 0, 0],
[-0.045, 125.9, 0.025, 558, 0, 0],
[138,110, 0.025, 497, 0, 0],
[172, 3, 0.025, 791, 0, 0]
)
}
DefaultDropPoint = [0, 0, 0, 0, 0, 0]
hoodName2Id = {
'dd': ToontownGlobals.DonaldsDock,
'tt': ToontownGlobals.ToontownCentral,
'br': ToontownGlobals.TheBrrrgh,
'mm': ToontownGlobals.MinniesMelodyland,
'dg': ToontownGlobals.DaisyGardens,
'oz': ToontownGlobals.OutdoorZone,
'ff': ToontownGlobals.FunnyFarm,
'gs': ToontownGlobals.GoofySpeedway,
'dl': ToontownGlobals.DonaldsDreamland,
'bosshq': ToontownGlobals.BossbotHQ,
'sellhq': ToontownGlobals.SellbotHQ,
'cashhq': ToontownGlobals.CashbotHQ,
'lawhq': ToontownGlobals.LawbotHQ,
'gz': ToontownGlobals.GolfZone
}
hoodId2Name = {
ToontownGlobals.DonaldsDock: 'dd',
ToontownGlobals.ToontownCentral: 'tt',
ToontownGlobals.Tutorial: 'tt',
ToontownGlobals.TheBrrrgh: 'br',
ToontownGlobals.MinniesMelodyland: 'mm',
ToontownGlobals.DaisyGardens: 'dg',
ToontownGlobals.OutdoorZone: 'oz',
ToontownGlobals.FunnyFarm: 'ff',
ToontownGlobals.GoofySpeedway: 'gs',
ToontownGlobals.DonaldsDreamland: 'dl',
ToontownGlobals.BossbotHQ: 'bosshq',
ToontownGlobals.SellbotHQ: 'sellhq',
ToontownGlobals.CashbotHQ: 'cashhq',
ToontownGlobals.LawbotHQ: 'lawhq',
ToontownGlobals.GolfZone: 'gz'
}
dbgDropMode = 0
currentDropPoint = 0
def __init__(self, cr):
self.cr = cr
def getDropPoint(self, dropPointList):
if self.dbgDropMode == 0:
return random.choice(dropPointList)
else:
droppnt = self.currentDropPoint % len(dropPointList)
self.currentDropPoint = (self.currentDropPoint + 1) % len(dropPointList)
return dropPointList[droppnt]
def getAvailableZones(self):
if base.launcher == None:
return self.getZonesInPhase(4) + self.getZonesInPhase(6) + self.getZonesInPhase(8) + self.getZonesInPhase(9) + self.getZonesInPhase(10) + self.getZonesInPhase(11) + self.getZonesInPhase(12) + self.getZonesInPhase(13)
else:
zones = []
for phase in set(ToontownGlobals.phaseMap.values()):
if base.launcher.getPhaseComplete(phase):
zones = zones + self.getZonesInPhase(phase)
return zones
def getZonesInPhase(self, phase):
p = []
for i in ToontownGlobals.phaseMap.items():
if i[1] == phase:
p.append(i[0])
return p
def getPhaseFromHood(self, hoodId):
hoodId = ZoneUtil.getCanonicalHoodId(hoodId)
return ToontownGlobals.phaseMap[hoodId]
def getPlaygroundCenterFromId(self, hoodId):
dropPointList = self.dropPoints.get(hoodId, None)
if dropPointList:
return self.getDropPoint(dropPointList)
else:
self.notify.warning('getPlaygroundCenterFromId: No such hood name as: ' + str(hoodId))
return self.DefaultDropPoint
def getIdFromName(self, hoodName):
id = self.hoodName2Id.get(hoodName)
if id:
return id
else:
self.notify.error('No such hood name as: %s' % hoodName)
def getNameFromId(self, hoodId):
name = self.hoodId2Name.get(hoodId)
if name:
return name
else:
self.notify.error('No such hood id as: %s' % hoodId)
def getFullnameFromId(self, hoodId):
hoodId = ZoneUtil.getCanonicalZoneId(hoodId)
return ToontownGlobals.hoodNameMap[hoodId][-1]
def addLinkTunnelHooks(self, hoodPart, nodeList, currentZoneId):
tunnelOriginList = []
for i in nodeList:
linkTunnelNPC = i.findAllMatches('**/linktunnel*')
for p in xrange(linkTunnelNPC.getNumPaths()):
linkTunnel = linkTunnelNPC.getPath(p)
name = linkTunnel.getName()
nameParts = name.split('_')
hoodStr = nameParts[1]
zoneStr = nameParts[2]
hoodId = self.getIdFromName(hoodStr)
zoneId = int(zoneStr)
hoodId = ZoneUtil.getTrueZoneId(hoodId, currentZoneId)
zoneId = ZoneUtil.getTrueZoneId(zoneId, currentZoneId)
linkSphere = linkTunnel.find('**/tunnel_trigger')
if linkSphere.isEmpty():
linkSphere = linkTunnel.find('**/tunnel_sphere')
if not linkSphere.isEmpty():
cnode = linkSphere.node()
cnode.setName('tunnel_trigger_' + hoodStr + '_' + zoneStr)
cnode.setCollideMask(ToontownGlobals.WallBitmask | ToontownGlobals.GhostBitmask)
else:
linkSphere = linkTunnel.find('**/tunnel_trigger_' + hoodStr + '_' + zoneStr)
if linkSphere.isEmpty():
self.notify.error('tunnel_trigger not found')
tunnelOrigin = linkTunnel.find('**/tunnel_origin')
if tunnelOrigin.isEmpty():
self.notify.error('tunnel_origin not found')
tunnelOriginPlaceHolder = render.attachNewNode('toph_' + hoodStr + '_' + zoneStr)
tunnelOriginList.append(tunnelOriginPlaceHolder)
tunnelOriginPlaceHolder.setPos(tunnelOrigin.getPos(render))
tunnelOriginPlaceHolder.setHpr(tunnelOrigin.getHpr(render))
hood = base.localAvatar.cr.playGame.hood
if ZoneUtil.tutorialDict:
how = 'teleportIn'
tutorialFlag = 1
else:
how = 'tunnelIn'
tutorialFlag = 0
hoodPart.accept('enter' + linkSphere.getName(), hoodPart.handleEnterTunnel, [{'loader': ZoneUtil.getLoaderName(zoneId),
'where': ZoneUtil.getToonWhereName(zoneId),
'how': how,
'hoodId': hoodId,
'zoneId': zoneId,
'shardId': None,
'tunnelOrigin': tunnelOriginPlaceHolder,
'tutorial': tutorialFlag}])
return tunnelOriginList
def extractGroupName(self, groupFullName):
return groupFullName.split(':', 1)[0]
def makeLinkTunnelName(self, hoodId, currentZone):
return '**/toph_' + self.getNameFromId(hoodId) + '_' + str(currentZone)
| apache-2.0 | -2,892,375,891,941,455,400 | 39.018237 | 228 | 0.478353 | false |
shiquanwang/numba | numba/control_flow/cfstats.py | 1 | 4105 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import nodes
from numba.reporting import getpos
class StatementDescr(object):
is_assignment = False
class LoopDescr(object):
def __init__(self, next_block, loop_block):
self.next_block = next_block
self.loop_block = loop_block
self.exceptions = []
class ExceptionDescr(object):
"""Exception handling helper.
entry_point ControlBlock Exception handling entry point
finally_enter ControlBlock Normal finally clause entry point
finally_exit ControlBlock Normal finally clause exit point
"""
def __init__(self, entry_point, finally_enter=None, finally_exit=None):
self.entry_point = entry_point
self.finally_enter = finally_enter
self.finally_exit = finally_exit
class NameAssignment(object):
is_assignment = True
def __init__(self, lhs, rhs, entry, assignment_node, warn_unused=True):
if not hasattr(lhs, 'cf_state'):
lhs.cf_state = set()
if not hasattr(lhs, 'cf_is_null'):
lhs.cf_is_null = False
self.lhs = lhs
self.rhs = rhs
self.assignment_node = assignment_node
self.entry = entry
self.pos = getpos(lhs)
self.refs = set()
self.is_arg = False
self.is_deletion = False
# NOTE: this is imperfect, since it means warnings are disabled for
# *all* definitions in the function...
self.entry.warn_unused = warn_unused
def __repr__(self):
return '%s(entry=%r)' % (self.__class__.__name__, self.entry)
def infer_type(self, scope):
return self.rhs.infer_type(scope)
def type_dependencies(self, scope):
return self.rhs.type_dependencies(scope)
class AttributeAssignment(object):
"""
Assignment to some attribute. We need to detect assignments in the
constructor of extension types.
"""
def __init__(self, assmnt):
self.assignment_node = assmnt
self.lhs = assmnt.targets[0]
self.rhs = assmnt.value
class Argument(NameAssignment):
def __init__(self, lhs, rhs, entry):
NameAssignment.__init__(self, lhs, rhs, entry)
self.is_arg = True
class PhiNode(nodes.Node):
def __init__(self, block, variable):
self.block = block
# Unrenamed variable. This will be replaced by the renamed version
self.variable = variable
self.type = None
        # Blocks feeding into this phi (appended by add_incoming_block below)
        self.incoming_blocks = []
# Set of incoming variables
self.incoming = set()
self.phis = set()
self.assignment_node = self
@property
def entry(self):
return self.variable
def add_incoming_block(self, block):
self.incoming_blocks.append(block)
def add(self, block, assmnt):
if assmnt is not self:
self.phis.add((block, assmnt))
def __repr__(self):
lhs = self.variable.name
if self.variable.renamed_name:
lhs = self.variable.unmangled_name
incoming = ", ".join("var(%s, %s)" % (var_in.unmangled_name, var_in.type)
for var_in in self.incoming)
if self.variable.type:
type = str(self.variable.type)
else:
type = ""
return "%s %s = phi(%s)" % (type, lhs, incoming)
def find_incoming(self):
for parent_block in self.block.parents:
name = self.variable.name
incoming_var = parent_block.symtab.lookup_most_recent(name)
yield parent_block, incoming_var
class NameDeletion(NameAssignment):
def __init__(self, lhs, entry):
NameAssignment.__init__(self, lhs, lhs, entry)
self.is_deletion = True
class Uninitialized(object):
pass
class NameReference(object):
def __init__(self, node, entry):
if not hasattr(node, 'cf_state'):
node.cf_state = set()
self.node = node
self.entry = entry
self.pos = getpos(node)
def __repr__(self):
return '%s(entry=%r)' % (self.__class__.__name__, self.entry)
| bsd-2-clause | -3,426,023,315,412,407,300 | 27.908451 | 81 | 0.607065 | false |
endrebak/epic | tests/run/test_merge_chip_and_input.py | 1 | 4225 | import pytest
import pandas as pd
import numpy as np
import logging
from io import StringIO
from joblib import delayed, Parallel
@pytest.fixture
def input_data():
pass
@pytest.fixture
def expected_result():
pass
def merge_chip_and_input(windows, nb_cpu):
"""Merge lists of chromosome bin df chromosome-wise.
Returns a list of dataframes, one per chromosome, with the collective count
per bin for all files.
Keyword Arguments:
windows -- OrderedDict where the keys are files, the values are lists of
dfs, one per chromosome.
nb_cpu -- cores to use
"""
windows = iter(windows)
merged = next(windows)
for chromosome_dfs in windows:
merged = merge_two_bin_dfs(merged, chromosome_dfs, nb_cpu)
return merged
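# Illustrative sketch (added; not one of the original tests): how the helper
# above is meant to be driven. The argument names are placeholders -- each is a
# list of per-chromosome bin dataframes, e.g. the sample1_dfs/sample2_dfs
# fixtures defined below. The leading underscore keeps pytest from collecting it.
def _example_merge_usage(chip_windows, input_windows):
    # Counts are summed bin-wise, chromosome by chromosome, on a single core.
    return merge_chip_and_input([chip_windows, input_windows], nb_cpu=1)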
# @pytest.mark.unit
# def test_merge_two_bin_files(sample1_dfs, sample2_dfs):
# """TODO: Need to test that the lists might not have the same/all chromosomes.
# It might be possible that there are no sig islands on one chromosome in one
# file, while there are in the others. Solve by taking in dict with chromos
# instead of list with files?
# You will probably be asked about a bug due to this some time.
# """
# print("Read run epic code. Begin there!\n" * 5)
# result = merge_chip_and_input([sample2_dfs, sample2_dfs], 1)
# print(result)
# assert 1
def merge_two_bin_dfs(sample1_dfs, sample2_dfs, nb_cpu):
merged_chromosome_dfs = Parallel(n_jobs=nb_cpu)(
delayed(_merge_two_bin_dfs)(df1, df2)
for df1, df2 in zip(sample1_dfs, sample2_dfs))
return merged_chromosome_dfs
def _merge_two_bin_dfs(df1, df2):
    # Outer merge keeps bins present in only one of the two samples; the
    # default ("_x", "_y") suffixes disambiguate the two Count columns.
    merged_df = df1.merge(df2, how="outer", on=["Chromosome", "Bin"])
    merged_df = merged_df.fillna(0)
    merged_df["Count"] = merged_df["Count_x"] + merged_df["Count_y"]
    merged_df = merged_df.drop(["Count_x", "Count_y"], axis=1)
    return merged_df
@pytest.fixture
def sample1_dfs():
return [pd.read_table(
StringIO(u"""
Count Chromosome Bin
1 chrM 400
1 chrM 2600
1 chrM 3600
1 chrM 3800
1 chrM 12800
1 chrM 14200"""),
sep="\s+",
header=0), pd.read_table(
StringIO(u"""Count Chromosome Bin
1 chrX 2820000
1 chrX 2854800
1 chrX 3001400
1 chrX 3354400
1 chrX 3489400
1 chrX 3560200
1 chrX 4011200
1 chrX 4644600
1 chrX 4653600
1 chrX 4793400
1 chrX 5136800
1 chrX 5572800
1 chrX 5589400
1 chrX 5792000
1 chrX 5961800
1 chrX 6951000
1 chrX 7125800
1 chrX 7199000
1 chrX 7443200
1 chrX 7606000
1 chrX 7627800
1 chrX 8035600
1 chrX 8073600
1 chrX 8367800
1 chrX 9021000
1 chrX 9472400
1 chrX 9620800
1 chrX 9652000
1 chrX 9801000
1 chrX 9953800"""),
sep="\s+",
header=0)]
@pytest.fixture
def sample2_dfs():
return [pd.read_table(
StringIO(u"""
Count Chromosome Bin
1 chrM 400
1 chrM 2600
1 chrM 3600
1 chrM 3800
1 chrM 12800
1 chrM 14200"""),
header=0,
sep="\s+", ), pd.read_table(
StringIO(u"""Count Chromosome Bin
1 chrX 2820000
1 chrX 2854800
1 chrX 3001400
1 chrX 3354400
1 chrX 3489400
1 chrX 3560200
1 chrX 4011200
1 chrX 4644600
1 chrX 4653600
1 chrX 4793400
1 chrX 5136800
1 chrX 5572800
1 chrX 5589400
1 chrX 5792000
1 chrX 5961800
1 chrX 6951000
1 chrX 7125800
1 chrX 7199000
1 chrX 7443200
1 chrX 7606000
1 chrX 7627800
1 chrX 8035600
1 chrX 8073600
1 chrX 8367800
1 chrX 9021000
1 chrX 9472400
1 chrX 9620800
1 chrX 9652000
1 chrX 9801000
1 chrX 9953800"""),
sep="\s+",
header=0)]
| mit | -4,803,652,179,347,965,000 | 23.005682 | 83 | 0.569704 | false |
pope/SublimeYetAnotherCodeSearch | tests/test_csearch.py | 1 | 2481 | import sublime
import os.path
import shutil
import textwrap
import time
import uuid
from YetAnotherCodeSearch.tests import CommandTestCase
_NEEDLE_IN_HAYSTACK = 'cc5b252b-e7fb-5145-bf8a-ed272e3aa7bf'
class CsearchCommandTest(CommandTestCase):
def setUp(self):
super(CsearchCommandTest, self).setUp()
if os.path.isfile(self.index):
return
self.window.run_command('cindex', {'index_project': True})
self._wait_for_status(self.view)
assert os.path.isfile(self.index)
def test_csearch_exists(self):
self.assertIsNotNone(shutil.which('csearch'))
def test_csearch(self):
results_view = self._search(_NEEDLE_IN_HAYSTACK)
expected = textwrap.dedent("""\
Searching for "{0}"
{1}/test_csearch.py:
12: _NEEDLE_IN_HAYSTACK = '{0}'
1 matches across 1 files
""").format(_NEEDLE_IN_HAYSTACK, self.project_path)
actual = results_view.substr(sublime.Region(0, results_view.size()))
self.assertEquals(expected, actual)
def test_csearch_no_matches(self):
query = str(uuid.uuid4())
results_view = self._search(query)
expected = textwrap.dedent("""\
Searching for "{0}"
No matches found
""").format(query, self.project_path)
actual = results_view.substr(sublime.Region(0, results_view.size()))
self.assertEquals(expected, actual)
def test_csearch_go_to_file(self):
results_view = self._search(_NEEDLE_IN_HAYSTACK)
pt = results_view.text_point(3, 10) # Line 4, 10 characters in
results_view.sel().clear()
results_view.sel().add(sublime.Region(pt))
self.window.run_command('code_search_results_go_to_file')
self.assertEquals('{0}/test_csearch.py'.format(self.project_path),
self.window.active_view().file_name())
def _wait_for_status(self, view):
max_iters = 10
while max_iters > 0 and view.get_status('YetAnotherCodeSearch') != '':
time.sleep(0.1)
max_iters -= 1
assert '' == view.get_status('YetAnotherCodeSearch')
def _search(self, query):
self.window.run_command('csearch', {'query': query})
results_view = next((view for view in self.window.views()
if view.name() == 'Code Search Results'))
self._wait_for_status(results_view)
return results_view
| mit | -3,462,537,003,884,839,400 | 32.527027 | 78 | 0.612253 | false |
awong1900/platformio | platformio/builder/scripts/frameworks/mbed.py | 1 | 7172 | # Copyright (C) Ivan Kravets <[email protected]>
# See LICENSE for details.
"""
mbed
The mbed framework The mbed SDK has been designed to provide enough
hardware abstraction to be intuitive and concise, yet powerful enough to
build complex projects. It is built on the low-level ARM CMSIS APIs,
allowing you to code down to the metal if needed. In addition to RTOS,
USB and Networking libraries, a cookbook of hundreds of reusable
peripheral and module libraries have been built on top of the SDK by
the mbed Developer Community.
http://mbed.org/
"""
import re
import xml.etree.ElementTree as ElementTree
from binascii import crc32
from os import walk
from os.path import basename, isfile, join, normpath
from SCons.Script import DefaultEnvironment, Exit
env = DefaultEnvironment()
BOARD_OPTS = env.get("BOARD_OPTIONS", {}).get("build", {})
env.Replace(
PLATFORMFW_DIR=join("$PIOPACKAGES_DIR", "framework-mbed")
)
MBED_VARIANTS = {
"stm32f3discovery": "DISCO_F303VC",
"stm32f4discovery": "DISCO_F407VG",
"stm32f429discovery": "DISCO_F429ZI",
"blueboard_lpc11u24": "LPC11U24",
"dipcortexm0": "LPC11U24",
"seeeduinoArchPro": "ARCH_PRO",
"ubloxc027": "UBLOX_C027",
"lpc1114fn28": "LPC1114",
"lpc11u35": "LPC11U35_401",
"mbuino": "LPC11U24",
"nrf51_mkit": "NRF51822",
"seeedTinyBLE": "SEEED_TINY_BLE",
"redBearLab": "RBLAB_NRF51822",
"nrf51-dt": "NRF51_DK",
"redBearLabBLENano": "RBLAB_NRF51822",
"wallBotBLE": "NRF51822",
"frdm_kl25z": "KL25Z",
"frdm_kl46z": "KL46Z",
"frdm_k64f": "K64F",
"frdm_kl05z": "KL05Z",
"frdm_k20d50m": "K20D50M",
"frdm_k22f": "K22F"
}
MBED_LIBS_MAP = {
"dsp": {"ar": ["dsp", "cmsis_dsp"]},
"eth": {"ar": ["eth"], "deps": ["rtos"]},
"fat": {"ar": ["fat"]},
"rtos": {"ar": ["rtos", "rtx"]},
"usb": {"ar": ["USBDevice"]},
"usb_host": {"ar": ["USBHost"]}
}
def get_mbedlib_includes():
result = []
for lib in MBED_LIBS_MAP.keys():
includes = []
lib_dir = join(env.subst("$PLATFORMFW_DIR"), "libs", lib)
for _, _, files in walk(lib_dir):
for libfile in files:
if libfile.endswith(".h"):
includes.append(libfile)
result.append((lib, set(includes)))
return result
def get_used_mbedlibs():
re_includes = re.compile(r"^(#include\s+(?:\<|\")([^\r\n\"]+))",
re.M | re.I)
srcincs = []
for root, _, files in walk(env.get("PROJECTSRC_DIR")):
for pfile in files:
if not any([pfile.endswith(ext) for ext in (".h", ".c", ".cpp")]):
continue
with open(join(root, pfile)) as fp:
srcincs.extend([i[1] for i in re_includes.findall(fp.read())])
srcincs = set(srcincs)
result = {}
for libname, libincs in get_mbedlib_includes():
if libincs & srcincs and libname not in result:
result[libname] = MBED_LIBS_MAP[libname]
return result
def add_mbedlib(libname, libar):
if libar in env.get("LIBS"):
return
lib_dir = join(env.subst("$PLATFORMFW_DIR"), "libs", libname)
if not isfile(join(lib_dir, "TARGET_%s" % variant,
"TOOLCHAIN_GCC_ARM", "lib%s.a" % libar)):
Exit("Error: %s board doesn't support %s library!" %
(env.get("BOARD"), libname))
env.Append(
LIBPATH=[
join(env.subst("$PLATFORMFW_DIR"), "libs", libname,
"TARGET_%s" % variant, "TOOLCHAIN_GCC_ARM")
],
LIBS=[libar]
)
sysincdirs = (
"eth",
"include",
"ipv4",
"lwip-eth",
"lwip-sys"
)
for root, _, files in walk(lib_dir):
if (not any(f.endswith(".h") for f in files) and
basename(root) not in sysincdirs):
continue
var_dir = join("$BUILD_DIR", "FrameworkMbed%sInc%d" %
(libname.upper(), crc32(root)))
if var_dir in env.get("CPPPATH"):
continue
env.VariantDir(var_dir, root)
env.Append(CPPPATH=[var_dir])
def parse_eix_file(filename):
result = {}
paths = (
("CFLAGS", "./Target/Source/CC/Switch"),
("CXXFLAGS", "./Target/Source/CPPC/Switch"),
("CPPDEFINES", "./Target/Source/Symbols/Symbol"),
("FILES", "./Target/Files/File"),
("LINKFLAGS", "./Target/Source/LD/Switch"),
("OBJFILES", "./Target/Source/Addobjects/Addobject"),
("LIBPATH", "./Target/Linker/Librarypaths/Librarypath"),
("STDLIBS", "./Target/Source/Syslibs/Library"),
("LDSCRIPT_PATH", "./Target/Source/Scriptfile"),
("CPPPATH", "./Target/Compiler/Includepaths/Includepath")
)
tree = ElementTree.parse(filename)
for (key, path) in paths:
if key not in result:
result[key] = []
for node in tree.findall(path):
_nkeys = node.keys()
result[key].append(
node.get(_nkeys[0]) if len(_nkeys) == 1 else node.attrib)
return result
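# Illustrative sketch of the kind of .eix fragment parse_eix_file() walks (the
# attribute name below is an assumption for illustration, not taken from a real
# variant file):
#
#   <Target><Source><CC><Switch name="-mcpu=cortex-m3"/></CC></Source></Target>
#
# A node with a single attribute is stored as just that attribute's value, so
# result["CFLAGS"] would contain "-mcpu=cortex-m3"; nodes carrying several
# attributes are kept as their full attribute dicts instead.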
def get_build_flags(data):
flags = {}
cflags = set(data.get("CFLAGS", []))
cxxflags = set(data.get("CXXFLAGS", []))
cppflags = set(cflags & cxxflags)
flags['CPPFLAGS'] = list(cppflags)
flags['CXXFLAGS'] = list(cxxflags - cppflags)
flags['CFLAGS'] = list(cflags - cppflags)
return flags
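# Worked example of the split performed above, with hypothetical flag values:
# if the .eix data yields CFLAGS = {"-Os", "-std=gnu99"} and
# CXXFLAGS = {"-Os", "-fno-rtti"}, the shared "-Os" is moved to CPPFLAGS,
# leaving CFLAGS = ["-std=gnu99"] and CXXFLAGS = ["-fno-rtti"].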
board_type = env.subst("$BOARD")
variant = MBED_VARIANTS[
board_type] if board_type in MBED_VARIANTS else board_type.upper()
eixdata = parse_eix_file(
join(env.subst("$PLATFORMFW_DIR"), "variant", variant, "%s.eix" % variant))
build_flags = get_build_flags(eixdata)
variant_dir = join("$PLATFORMFW_DIR", "variant", variant)
env.Replace(
CPPFLAGS=build_flags.get("CPPFLAGS", []),
CFLAGS=build_flags.get("CFLAGS", []),
CXXFLAGS=build_flags.get("CXXFLAGS", []),
LINKFLAGS=eixdata.get("LINKFLAGS", []),
CPPDEFINES=[define for define in eixdata.get("CPPDEFINES", [])],
LDSCRIPT_PATH=normpath(
join(variant_dir, eixdata.get("LDSCRIPT_PATH")[0]))
)
# Hook for K64F and K22F
if board_type in ("frdm_k22f", "frdm_k64f"):
env.Append(
LINKFLAGS=["-Wl,--start-group"]
)
for lib_path in eixdata.get("CPPPATH"):
_vdir = join("$BUILD_DIR", "FrameworkMbedInc%d" % crc32(lib_path))
env.VariantDir(_vdir, join(variant_dir, lib_path))
env.Append(CPPPATH=[_vdir])
env.Append(
LIBPATH=[join(variant_dir, lib_path)
for lib_path in eixdata.get("LIBPATH", [])
if lib_path.startswith("mbed")]
)
#
# Target: Build mbed Library
#
libs = [l for l in eixdata.get("STDLIBS", []) if l not in env.get("LIBS")]
libs.extend(["mbed", "c", "gcc"])
libs.append(env.Library(
join("$BUILD_DIR", "FrameworkMbed"),
[join(variant_dir, f)
for f in eixdata.get("OBJFILES", [])]
))
env.Append(LIBS=libs)
for _libname, _libdata in get_used_mbedlibs().iteritems():
for _libar in _libdata['ar']:
add_mbedlib(_libname, _libar)
if "deps" not in _libdata:
continue
for libdep in _libdata['deps']:
for _libar in MBED_LIBS_MAP[libdep]['ar']:
add_mbedlib(libdep, _libar)
| mit | -7,872,972,686,760,434,000 | 29.134454 | 79 | 0.591049 | false |
cosmicAsymmetry/zulip | zerver/tests/test_signup.py | 1 | 51499 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase
from mock import patch
from zerver.lib.test_helpers import MockLDAP
from confirmation.models import Confirmation
from zilencer.models import Deployment
from zerver.forms import HomepageForm
from zerver.views import do_change_password
from zerver.views.invite import get_invitee_emails_set
from zerver.models import (
get_realm_by_string_id, get_prereg_user_by_email, get_user_profile_by_email,
PreregistrationUser, Realm, RealmAlias, Recipient,
Referral, ScheduledJob, UserProfile, UserMessage,
Stream, Subscription, ScheduledJob
)
from zerver.management.commands.deliver_email import send_email_job
from zerver.lib.actions import (
set_default_streams,
do_change_is_admin
)
from zerver.lib.initial_password import initial_password
from zerver.lib.actions import do_deactivate_realm, do_set_realm_default_language, \
add_new_user_history
from zerver.lib.digest import send_digest_email
from zerver.lib.notifications import (
enqueue_welcome_emails, one_click_unsubscribe_link, send_local_email_template_with_delay)
from zerver.lib.test_helpers import find_key_by_email, queries_captured, \
HostRequestMock
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.test_runner import slow
from zerver.lib.session_user import get_session_dict_user
from zerver.context_processors import common_context
import re
import ujson
from six.moves import urllib
from six.moves import range
import six
from typing import Any, Dict, List, Text
import os
class PublicURLTest(ZulipTestCase):
"""
Account creation URLs are accessible even when not logged in. Authenticated
URLs redirect to a page.
"""
def fetch(self, method, urls, expected_status):
# type: (str, List[str], int) -> None
for url in urls:
            # e.g. response = self.client_post(url) when method is "client_post"
response = getattr(self, method)(url)
self.assertEqual(response.status_code, expected_status,
msg="Expected %d, received %d for %s to %s" % (
expected_status, response.status_code, method, url))
def test_public_urls(self):
# type: () -> None
"""
Test which views are accessible when not logged in.
"""
# FIXME: We should also test the Tornado URLs -- this codepath
# can't do so because this Django test mechanism doesn't go
# through Tornado.
        get_urls = {200: ["/accounts/home/", "/accounts/login/",
"/en/accounts/home/", "/ru/accounts/home/",
"/en/accounts/login/", "/ru/accounts/login/",
"/help/"],
302: ["/", "/en/", "/ru/"],
401: ["/json/streams/Denmark/members",
"/api/v1/users/me/subscriptions",
"/api/v1/messages",
"/json/messages",
"/api/v1/streams",
],
404: ["/help/nonexistent"],
}
        # Add all files in the 'templates/zerver/help' directory (except for
        # 'main.html', 'index.md', and the 'include' subdirectory) to the
        # `get_urls[200]` list.
for doc in os.listdir('./templates/zerver/help'):
if doc not in {'main.html', 'index.md', 'include'}:
get_urls[200].append('/help/' + os.path.splitext(doc)[0]) # Strip the extension.
post_urls = {200: ["/accounts/login/"],
302: ["/accounts/logout/"],
401: ["/json/messages",
"/json/invite_users",
"/json/settings/change",
"/json/subscriptions/exists",
"/json/subscriptions/property",
"/json/fetch_api_key",
"/json/users/me/pointer",
"/json/users/me/subscriptions",
"/api/v1/users/me/subscriptions",
],
400: ["/api/v1/external/github",
"/api/v1/fetch_api_key",
],
}
put_urls = {401: ["/json/users/me/pointer"],
}
for status_code, url_set in six.iteritems(get_urls):
self.fetch("client_get", url_set, status_code)
for status_code, url_set in six.iteritems(post_urls):
self.fetch("client_post", url_set, status_code)
for status_code, url_set in six.iteritems(put_urls):
self.fetch("client_put", url_set, status_code)
def test_get_gcid_when_not_configured(self):
# type: () -> None
with self.settings(GOOGLE_CLIENT_ID=None):
resp = self.client_get("/api/v1/fetch_google_client_id")
self.assertEqual(400, resp.status_code,
msg="Expected 400, received %d for GET /api/v1/fetch_google_client_id" % (
resp.status_code,))
data = ujson.loads(resp.content)
self.assertEqual('error', data['result'])
def test_get_gcid_when_configured(self):
# type: () -> None
with self.settings(GOOGLE_CLIENT_ID="ABCD"):
resp = self.client_get("/api/v1/fetch_google_client_id")
self.assertEqual(200, resp.status_code,
msg="Expected 200, received %d for GET /api/v1/fetch_google_client_id" % (
resp.status_code,))
data = ujson.loads(resp.content)
self.assertEqual('success', data['result'])
self.assertEqual('ABCD', data['google_client_id'])
class AddNewUserHistoryTest(ZulipTestCase):
def test_add_new_user_history_race(self):
# type: () -> None
"""Sends a message during user creation"""
# Create a user who hasn't had historical messages added
stream_dict = {
"Denmark": {"description": "A Scandinavian country", "invite_only": False},
"Verona": {"description": "A city in Italy", "invite_only": False}
} # type: Dict[Text, Dict[Text, Any]]
set_default_streams(get_realm_by_string_id("zulip"), stream_dict)
with patch("zerver.lib.actions.add_new_user_history"):
self.register("test", "test")
user_profile = get_user_profile_by_email("[email protected]")
subs = Subscription.objects.select_related("recipient").filter(
user_profile=user_profile, recipient__type=Recipient.STREAM)
streams = Stream.objects.filter(id__in=[sub.recipient.type_id for sub in subs])
self.send_message("[email protected]", streams[0].name, Recipient.STREAM, "test")
add_new_user_history(user_profile, streams)
class PasswordResetTest(ZulipTestCase):
"""
Log in, reset password, log out, log in with new password.
"""
def test_password_reset(self):
# type: () -> None
email = '[email protected]'
old_password = initial_password(email)
self.login(email)
# test password reset template
result = self.client_get('/accounts/password/reset/')
self.assert_in_response('Reset your password.', result)
# start the password reset process by supplying an email address
result = self.client_post('/accounts/password/reset/', {'email': email})
# check the redirect link telling you to check mail for password reset link
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email to finish the process.", result)
# Visit the password reset link.
password_reset_url = self.get_confirmation_url_from_outbox(email, "(\S+)")
result = self.client_get(password_reset_url)
self.assertEqual(result.status_code, 200)
# Reset your password
result = self.client_post(password_reset_url,
{'new_password1': 'new_password',
'new_password2': 'new_password'})
# password reset succeeded
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith("/password/done/"))
# log back in with new password
self.login(email, password='new_password')
user_profile = get_user_profile_by_email('[email protected]')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
# make sure old password no longer works
self.login(email, password=old_password, fails=True)
def test_redirect_endpoints(self):
# type: () -> None
'''
These tests are mostly designed to give us 100% URL coverage
in our URL coverage reports. Our mechanism for finding URL
coverage doesn't handle redirects, so we just have a few quick
tests here.
'''
result = self.client_get('/accounts/password/reset/done/')
self.assert_in_success_response(["Check your email"], result)
result = self.client_get('/accounts/password/done/')
self.assert_in_success_response(["We've reset your password!"], result)
result = self.client_get('/accounts/send_confirm/[email protected]')
self.assert_in_success_response(["Still no email?"], result)
class LoginTest(ZulipTestCase):
"""
Logging in, registration, and logging out.
"""
def test_login(self):
# type: () -> None
self.login("[email protected]")
user_profile = get_user_profile_by_email('[email protected]')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_bad_password(self):
# type: () -> None
self.login("[email protected]", password="wrongpassword", fails=True)
self.assertIsNone(get_session_dict_user(self.client.session))
def test_login_nonexist_user(self):
# type: () -> None
result = self.login_with_return("[email protected]", "xxx")
self.assert_in_response("Please enter a correct email and password", result)
def test_register(self):
# type: () -> None
realm = get_realm_by_string_id("zulip")
stream_dict = {"stream_"+str(i): {"description": "stream_%s_description" % i, "invite_only": False}
for i in range(40)} # type: Dict[Text, Dict[Text, Any]]
for stream_name in stream_dict.keys():
self.make_stream(stream_name, realm=realm)
set_default_streams(realm, stream_dict)
with queries_captured() as queries:
self.register("test", "test")
# Ensure the number of queries we make is not O(streams)
self.assert_max_length(queries, 69)
user_profile = get_user_profile_by_email('[email protected]')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.assertFalse(user_profile.enable_stream_desktop_notifications)
def test_register_deactivated(self):
# type: () -> None
"""
If you try to register for a deactivated realm, you get a clear error
page.
"""
realm = get_realm_by_string_id("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.register("test", "test")
self.assert_in_response("has been deactivated", result)
with self.assertRaises(UserProfile.DoesNotExist):
get_user_profile_by_email('[email protected]')
def test_login_deactivated(self):
# type: () -> None
"""
If you try to log in to a deactivated realm, you get a clear error page.
"""
realm = get_realm_by_string_id("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.login_with_return("[email protected]")
self.assert_in_response("has been deactivated", result)
def test_logout(self):
# type: () -> None
self.login("[email protected]")
self.client_post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
def test_non_ascii_login(self):
# type: () -> None
"""
You can log in even if your password contain non-ASCII characters.
"""
email = "[email protected]"
password = u"hümbüǵ"
# Registering succeeds.
self.register("test", password)
user_profile = get_user_profile_by_email(email)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.client_post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
# Logging in succeeds.
self.client_post('/accounts/logout/')
self.login(email, password)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class InviteUserTest(ZulipTestCase):
def invite(self, users, streams):
# type: (str, List[Text]) -> HttpResponse
"""
Invites the specified users to Zulip with the specified streams.
users should be a string containing the users to invite, comma or
newline separated.
streams should be a list of strings.
"""
return self.client_post("/json/invite_users",
{"invitee_emails": users,
"stream": streams})
def check_sent_emails(self, correct_recipients):
# type: (List[str]) -> None
from django.core.mail import outbox
self.assertEqual(len(outbox), len(correct_recipients))
email_recipients = [email.recipients()[0] for email in outbox]
self.assertEqual(sorted(email_recipients), sorted(correct_recipients))
def test_bulk_invite_users(self):
# type: () -> None
"""The bulk_invite_users code path is for the first user in a realm."""
self.login('[email protected]')
invitees = ['[email protected]', '[email protected]']
params = {
'invitee_emails': ujson.dumps(invitees)
}
result = self.client_post('/json/bulk_invite_users', params)
self.assert_json_success(result)
self.check_sent_emails(invitees)
def test_successful_invite_user(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("[email protected]")
invitee = "[email protected]"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee])
def test_successful_invite_user_with_name(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("[email protected]")
email = "[email protected]"
invitee = "Alice Test <{}>".format(email)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.check_sent_emails([email])
def test_successful_invite_user_with_name_and_normal_one(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("[email protected]")
email = "[email protected]"
email2 = "[email protected]"
invitee = "Alice Test <{}>, {}".format(email, email2)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.assertTrue(find_key_by_email(email2))
self.check_sent_emails([email, email2])
def test_invite_user_signup_initial_history(self):
# type: () -> None
"""
Test that a new user invited to a stream receives some initial
history but only from public streams.
"""
self.login("[email protected]")
user_profile = get_user_profile_by_email("[email protected]")
private_stream_name = "Secret"
self.make_stream(private_stream_name, invite_only=True)
self.subscribe_to_stream(user_profile.email, private_stream_name)
public_msg_id = self.send_message("[email protected]", "Denmark", Recipient.STREAM,
"Public topic", "Public message")
secret_msg_id = self.send_message("[email protected]", private_stream_name, Recipient.STREAM,
"Secret topic", "Secret message")
invitee = "[email protected]"
self.assert_json_success(self.invite(invitee, [private_stream_name, "Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.submit_reg_form_for_user("alice-test", "password")
invitee_profile = get_user_profile_by_email(invitee)
invitee_msg_ids = [um.message_id for um in
UserMessage.objects.filter(user_profile=invitee_profile)]
self.assertTrue(public_msg_id in invitee_msg_ids)
self.assertFalse(secret_msg_id in invitee_msg_ids)
def test_multi_user_invite(self):
# type: () -> None
"""
Invites multiple users with a variety of delimiters.
"""
self.login("[email protected]")
# Intentionally use a weird string.
self.assert_json_success(self.invite(
"""[email protected], [email protected],
[email protected]
[email protected]""", ["Denmark"]))
for user in ("bob", "carol", "dave", "earl"):
self.assertTrue(find_key_by_email("%[email protected]" % (user,)))
self.check_sent_emails(["[email protected]", "[email protected]",
"[email protected]", "[email protected]"])
def test_missing_or_invalid_params(self):
# type: () -> None
"""
Tests inviting with various missing or invalid parameters.
"""
self.login("[email protected]")
self.assert_json_error(
self.client_post("/json/invite_users", {"invitee_emails": "[email protected]"}),
"You must specify at least one stream for invitees to join.")
for address in ("noatsign.com", "[email protected]"):
self.assert_json_error(
self.invite(address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
self.check_sent_emails([])
def test_invalid_stream(self):
# type: () -> None
"""
Tests inviting to a non-existent stream.
"""
self.login("[email protected]")
self.assert_json_error(self.invite("[email protected]", ["NotARealStream"]),
"Stream does not exist: NotARealStream. No invites were sent.")
self.check_sent_emails([])
def test_invite_existing_user(self):
# type: () -> None
"""
If you invite an address already using Zulip, no invitation is sent.
"""
self.login("[email protected]")
self.assert_json_error(
self.client_post("/json/invite_users",
{"invitee_emails": "[email protected]",
"stream": ["Denmark"]}),
"We weren't able to invite anyone.")
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email="[email protected]"))
self.check_sent_emails([])
def test_invite_some_existing_some_new(self):
# type: () -> None
"""
If you invite a mix of already existing and new users, invitations are
only sent to the new users.
"""
self.login("[email protected]")
existing = ["[email protected]", "[email protected]"]
new = ["[email protected]", "[email protected]"]
result = self.client_post("/json/invite_users",
{"invitee_emails": "\n".join(existing + new),
"stream": ["Denmark"]})
self.assert_json_error(result,
"Some of those addresses are already using Zulip, \
so we didn't send them an invitation. We did send invitations to everyone else!")
# We only created accounts for the new users.
for email in existing:
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email=email))
for email in new:
self.assertTrue(PreregistrationUser.objects.get(email=email))
# We only sent emails to the new users.
self.check_sent_emails(new)
prereg_user = get_prereg_user_by_email('[email protected]')
self.assertEqual(prereg_user.email, '[email protected]')
def test_invite_outside_domain_in_closed_realm(self):
# type: () -> None
"""
In a realm with `restricted_to_domain = True`, you can't invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm_by_string_id("zulip")
zulip_realm.restricted_to_domain = True
zulip_realm.save()
self.login("[email protected]")
external_address = "[email protected]"
self.assert_json_error(
self.invite(external_address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
def test_invite_outside_domain_in_open_realm(self):
# type: () -> None
"""
In a realm with `restricted_to_domain = False`, you can invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm_by_string_id("zulip")
zulip_realm.restricted_to_domain = False
zulip_realm.save()
self.login("[email protected]")
external_address = "[email protected]"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
def test_invite_with_non_ascii_streams(self):
# type: () -> None
"""
Inviting someone to streams with non-ASCII characters succeeds.
"""
self.login("[email protected]")
invitee = "[email protected]"
stream_name = u"hümbüǵ"
# Make sure we're subscribed before inviting someone.
self.subscribe_to_stream("[email protected]", stream_name)
self.assert_json_success(self.invite(invitee, [stream_name]))
def test_refer_friend(self):
# type: () -> None
self.login("[email protected]")
user = get_user_profile_by_email('[email protected]')
user.invites_granted = 1
user.invites_used = 0
user.save()
invitee = "[email protected]"
result = self.client_post('/json/refer_friend', dict(email=invitee))
self.assert_json_success(result)
# verify this works
Referral.objects.get(user_profile=user, email=invitee)
user = get_user_profile_by_email('[email protected]')
self.assertEqual(user.invites_used, 1)
def test_invitation_reminder_email(self):
# type: () -> None
from django.core.mail import outbox
current_user_email = "[email protected]"
self.login(current_user_email)
invitee = "[email protected]"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee])
data = {"email": invitee, "referrer_email": current_user_email}
invitee = get_prereg_user_by_email(data["email"])
referrer = get_user_profile_by_email(data["referrer_email"])
link = Confirmation.objects.get_link_for_object(invitee, host=referrer.realm.host)
context = common_context(referrer)
context.update({
'activate_url': link,
'referrer': referrer,
'verbose_support_offers': settings.VERBOSE_SUPPORT_OFFERS,
'support_email': settings.ZULIP_ADMINISTRATOR
})
with self.settings(EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'):
send_local_email_template_with_delay(
[{'email': data["email"], 'name': ""}],
"zerver/emails/invitation/invitation_reminder_email",
context,
datetime.timedelta(days=0),
tags=["invitation-reminders"],
sender={'email': settings.ZULIP_ADMINISTRATOR, 'name': 'Zulip'})
email_jobs_to_deliver = ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL,
scheduled_timestamp__lte=datetime.datetime.utcnow())
self.assertEqual(len(email_jobs_to_deliver), 1)
email_count = len(outbox)
for job in email_jobs_to_deliver:
self.assertTrue(send_email_job(job))
self.assertEqual(len(outbox), email_count + 1)
class InviteeEmailsParserTests(TestCase):
def setUp(self):
# type: () -> None
self.email1 = "[email protected]"
self.email2 = "[email protected]"
self.email3 = "[email protected]"
def test_if_emails_separated_by_commas_are_parsed_and_striped_correctly(self):
# type: () -> None
emails_raw = "{} ,{}, {}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_separated_by_newlines_are_parsed_and_striped_correctly(self):
# type: () -> None
emails_raw = "{}\n {}\n {} ".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_from_email_client_separated_by_newlines_are_parsed_correctly(self):
# type: () -> None
emails_raw = "Email One <{}>\nEmailTwo<{}>\nEmail Three<{}>".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_in_mixed_style_are_parsed_correctly(self):
# type: () -> None
emails_raw = "Email One <{}>,EmailTwo<{}>\n{}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
class EmailUnsubscribeTests(ZulipTestCase):
def test_error_unsubscribe(self):
# type: () -> None
result = self.client_get('/accounts/unsubscribe/missed_messages/test123')
self.assert_in_response('Unknown email unsubscribe request', result)
def test_missedmessage_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in missed message
e-mails that you can click even when logged out to update your
email notification settings.
"""
user_profile = get_user_profile_by_email("[email protected]")
user_profile.enable_offline_email_notifications = True
user_profile.save()
unsubscribe_link = one_click_unsubscribe_link(user_profile,
"missed_messages")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile = UserProfile.objects.get(email="[email protected]")
self.assertFalse(user_profile.enable_offline_email_notifications)
def test_welcome_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in welcome e-mails that you can
click even when logged out to stop receiving them.
"""
email = "[email protected]"
user_profile = get_user_profile_by_email("[email protected]")
# Simulate a new user signing up, which enqueues 2 welcome e-mails.
enqueue_welcome_emails(email, "King Hamlet")
self.assertEqual(2, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
# Simulate unsubscribing from the welcome e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The welcome email jobs are no longer scheduled.
self.assertEqual(result.status_code, 200)
self.assertEqual(0, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
def test_digest_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in digest e-mails that you can
click even when logged out to stop receiving them.
Unsubscribing from these emails also dequeues any digest email jobs that
have been queued.
"""
email = "[email protected]"
user_profile = get_user_profile_by_email("[email protected]")
self.assertTrue(user_profile.enable_digest_emails)
# Enqueue a fake digest email.
send_digest_email(user_profile, "", "")
self.assertEqual(1, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
# Simulate unsubscribing from digest e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "digest")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The setting is toggled off, and scheduled jobs have been removed.
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile = UserProfile.objects.get(email="[email protected]")
self.assertFalse(user_profile.enable_digest_emails)
self.assertEqual(0, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
class RealmCreationTest(ZulipTestCase):
def test_create_realm(self):
# type: () -> None
username = "user1"
password = "test"
string_id = "zuliptest"
domain = 'test.com'
email = "[email protected]"
realm = get_realm_by_string_id('test')
# Make sure the realm does not exist
self.assertIsNone(realm)
with self.settings(OPEN_REALM_CREATION=True):
# Create new realm with the email
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username, password, domain=domain,
realm_subdomain = string_id)
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm_by_string_id(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user_profile_by_email(email).realm, realm)
# Check defaults
self.assertEqual(realm.org_type, Realm.COMMUNITY)
self.assertEqual(realm.restricted_to_domain, False)
self.assertEqual(realm.invite_required, True)
self.assertTrue(result["Location"].endswith("/"))
def test_create_realm_with_subdomain(self):
# type: () -> None
username = "user1"
password = "test"
string_id = "zuliptest"
domain = "test.com"
email = "[email protected]"
realm_name = "Test"
# Make sure the realm does not exist
self.assertIsNone(get_realm_by_string_id('test'))
with self.settings(REALMS_HAVE_SUBDOMAINS=True), self.settings(OPEN_REALM_CREATION=True):
# Create new realm with the email
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username, password, domain=domain,
realm_subdomain = string_id,
realm_name=realm_name,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=string_id + ".testserver")
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm_by_string_id(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user_profile_by_email(email).realm, realm)
self.assertEqual(realm.name, realm_name)
self.assertEqual(realm.subdomain, string_id)
def test_mailinator_signup(self):
# type: () -> None
with self.settings(OPEN_REALM_CREATION=True):
result = self.client_post('/create_realm/', {'email': "[email protected]"})
self.assert_in_response('Please use your real email address.', result)
def test_subdomain_restrictions(self):
# type: () -> None
username = "user1"
password = "test"
domain = "test.com"
email = "[email protected]"
realm_name = "Test"
with self.settings(REALMS_HAVE_SUBDOMAINS=False), self.settings(OPEN_REALM_CREATION=True):
result = self.client_post('/create_realm/', {'email': email})
self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
self.client_get(confirmation_url)
errors = {'id': "at least 3 characters",
'-id': "cannot start or end with a",
'string-ID': "lowercase letters",
'string_id': "lowercase letters",
'stream': "unavailable",
'streams': "unavailable",
'about': "unavailable",
'abouts': "unavailable",
'mit': "unavailable"}
for string_id, error_msg in errors.items():
result = self.submit_reg_form_for_user(username, password, domain = domain,
realm_subdomain = string_id,
realm_name = realm_name)
self.assert_in_response(error_msg, result)
# test valid subdomain
result = self.submit_reg_form_for_user(username, password, domain = domain,
realm_subdomain = 'a-0',
realm_name = realm_name)
self.assertEqual(result.status_code, 302)
class UserSignUpTest(ZulipTestCase):
def test_user_default_language(self):
# type: () -> None
"""
Check if the default language of new user is the default language
of the realm.
"""
username = "newguy"
email = "[email protected]"
password = "newpassword"
realm = get_realm_by_string_id('zulip')
domain = realm.domain
do_set_realm_default_language(realm, "de")
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s@%s" % (username, domain)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
# Pick a password and agree to the ToS.
result = self.submit_reg_form_for_user(username, password, domain)
self.assertEqual(result.status_code, 302)
user_profile = get_user_profile_by_email(email)
self.assertEqual(user_profile.default_language, realm.default_language)
from django.core.mail import outbox
outbox.pop()
def test_unique_completely_open_domain(self):
# type: () -> None
username = "user1"
password = "test"
email = "[email protected]"
subdomain = "zulip"
realm_name = "Zulip"
realm = get_realm_by_string_id('zulip')
realm.restricted_to_domain = False
realm.invite_required = False
realm.save()
realm = get_realm_by_string_id('mit')
do_deactivate_realm(realm)
realm.save()
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain='acme.com',
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there."], result)
def test_completely_open_domain_success(self):
# type: () -> None
username = "user1"
password = "test"
email = "[email protected]"
subdomain = "zulip"
realm_name = "Zulip"
realm = get_realm_by_string_id('zulip')
realm.restricted_to_domain = False
realm.invite_required = False
realm.save()
result = self.client_post('/register/zulip/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain='acme.com',
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there."], result)
def test_failed_signup_due_to_restricted_domain(self):
# type: () -> None
realm = get_realm_by_string_id('zulip')
with self.settings(REALMS_HAVE_SUBDOMAINS = True):
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
form = HomepageForm({'email': '[email protected]'}, realm=realm)
self.assertIn("trying to join, zulip, only allows users with e-mail", form.errors['email'][0])
def test_failed_signup_due_to_invite_required(self):
# type: () -> None
realm = get_realm_by_string_id('zulip')
realm.invite_required = True
realm.save()
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
form = HomepageForm({'email': '[email protected]'}, realm=realm)
self.assertIn("Please request an invite from", form.errors['email'][0])
def test_failed_signup_due_to_nonexistent_realm(self):
# type: () -> None
with self.settings(REALMS_HAVE_SUBDOMAINS = True):
request = HostRequestMock(host = 'acme.' + settings.EXTERNAL_HOST)
request.session = {} # type: ignore
form = HomepageForm({'email': '[email protected]'}, realm=None)
self.assertIn("organization you are trying to join does not exist", form.errors['email'][0])
def test_registration_through_ldap(self):
# type: () -> None
username = "newuser"
password = "testing"
domain = "zulip.com"
email = "[email protected]"
subdomain = "zulip"
realm_name = "Zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': ['New User Name']
}
}
with patch('zerver.views.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain=domain,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"New User Name",
"[email protected]"],
result)
# Test the TypeError exception handler
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': None # This will raise TypeError
}
}
result = self.submit_reg_form_for_user(username,
password,
domain=domain,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"[email protected]"],
result)
mock_ldap.reset()
mock_initialize.stop()
@patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
def test_registration_of_mirror_dummy_user(self, ignored):
# type: (Any) -> None
username = "sipbtest"
password = "test"
domain = "mit.edu"
email = "[email protected]"
subdomain = "sipb"
realm_name = "MIT"
user_profile = get_user_profile_by_email(email)
user_profile.is_mirror_dummy = True
user_profile.is_active = False
user_profile.save()
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain=domain,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain=domain,
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class DeactivateUserTest(ZulipTestCase):
def test_deactivate_user(self):
# type: () -> None
email = '[email protected]'
self.login(email)
user = get_user_profile_by_email('[email protected]')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
user = get_user_profile_by_email('[email protected]')
self.assertFalse(user.is_active)
self.login(email, fails=True)
def test_do_not_deactivate_final_admin(self):
# type: () -> None
email = '[email protected]'
self.login(email)
user = get_user_profile_by_email('[email protected]')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/me')
self.assert_json_error(result, "Cannot deactivate the only organization administrator")
user = get_user_profile_by_email('[email protected]')
self.assertTrue(user.is_active)
self.assertTrue(user.is_realm_admin)
email = '[email protected]'
user_2 = get_user_profile_by_email('[email protected]')
do_change_is_admin(user_2, True)
self.assertTrue(user_2.is_realm_admin)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
do_change_is_admin(user, True)
| apache-2.0 | -5,722,122,378,230,343,000 | 42.559222 | 114 | 0.574786 | false |
rlpy/rlpy | rlpy/Representations/LocalBases.py | 1 | 7491 | """
Representations which use local bases function (e.g. kernels) distributed
in the statespace according to some scheme (e.g. grid, random, on previous
samples)
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import super
from future import standard_library
standard_library.install_aliases()
from builtins import range
from past.utils import old_div
from .Representation import Representation
import numpy as np
from rlpy.Tools.GeneralTools import addNewElementForAllActions
import matplotlib.pyplot as plt
try:
from .kernels import batch
except ImportError:
from .slow_kernels import batch
print("C-Extensions for kernels not available, expect slow runtime")
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
class LocalBases(Representation):
"""
abstract base class for representations that use local basis functions
"""
#: centers of bases
centers = None
#: widths of bases
widths = None
def __init__(self, domain, kernel, normalization=False, seed=1, **kwargs):
"""
:param domain: domain to learn on.
:param kernel: function handle to use for kernel function evaluations.
:param normalization: (Boolean) If true, normalize feature vector so
that sum( phi(s) ) = 1.
            Associates a kernel function with each center/width pair to form one feature.
"""
self.kernel = batch[kernel.__name__]
self.normalization = normalization
self.centers = np.zeros((0, domain.statespace_limits.shape[0]))
self.widths = np.zeros((0, domain.statespace_limits.shape[0]))
super(LocalBases, self).__init__(domain, seed=seed)
def phi_nonTerminal(self, s):
v = self.kernel(s, self.centers, self.widths)
if self.normalization and not v.sum() == 0.:
# normalize such that each vector has a l1 norm of 1
v /= v.sum()
return v
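    # Minimal sketch of phi_nonTerminal() with made-up numbers: if three kernels
    # evaluate to v = [0.2, 0.1, 0.2] at s, normalization divides by
    # v.sum() = 0.5 and returns [0.4, 0.2, 0.4]; without normalization the raw
    # kernel values are returned unchanged.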
def plot_2d_feature_centers(self, d1=None, d2=None):
"""
:param d1: 1 (of 2 possible) indices of dimensions to plot; ignore all
others, purely visual.
:param d2: 1 (of 2 possible) indices of dimensions to plot; ignore all
others, purely visual.
        Plots the centers of all features in dimensions d1 and d2.
If no dimensions are specified, the first two continuous dimensions
are shown.
"""
if d1 is None and d2 is None:
# just take the first two dimensions
d1, d2 = self.domain.continuous_dims[:2]
plt.figure("Feature Dimensions {} and {}".format(d1, d2))
for i in range(self.centers.shape[0]):
plt.plot([self.centers[i, d1]],
[self.centers[i, d2]], "r", marker="x")
plt.draw()
class NonparametricLocalBases(LocalBases):
def __init__(self, domain, kernel,
max_similarity=0.9, resolution=5, **kwargs):
"""
:param domain: domain to learn on.
:param kernel: function handle to use for kernel function evaluations.
:param max_similarity: threshold to allow feature to be added to
representation. Larger max_similarity makes it \"easier\" to add
more features by permitting larger values of phi(s) before
            discarding. (An existing feature function in phi() with a large
            value at phi(s) implies that it is very representative of the true
            function at *s*; i.e., the value of a feature in phi(s) is
            inversely related to the \"similarity\" of a potential new feature.)
:param resolution: to be used by the ``kernel()`` function, see parent.
            Determines the *width* of basis functions, e.g. sigma in a Gaussian basis.
"""
self.max_similarity = max_similarity
self.common_width = old_div((domain.statespace_limits[:, 1]
- domain.statespace_limits[:, 0]), resolution)
self.features_num = 0
super(
NonparametricLocalBases,
self).__init__(
domain,
kernel,
**kwargs)
def pre_discover(self, s, terminal, a, sn, terminaln):
norm = self.normalization
expanded = 0
self.normalization = False
if not terminal:
phi_s = self.phi_nonTerminal(s)
if np.all(phi_s < self.max_similarity):
self._add_feature(s)
expanded += 1
if not terminaln:
phi_s = self.phi_nonTerminal(sn)
if np.all(phi_s < self.max_similarity):
self._add_feature(sn)
expanded += 1
self.normalization = norm
return expanded
def _add_feature(self, center):
self.features_num += 1
self.centers = np.vstack((self.centers, center))
self.widths = np.vstack((self.widths, self.common_width))
# TODO if normalized, use Q estimate for center to fill weight_vec
new = np.zeros((self.domain.actions_num, 1))
self.weight_vec = addNewElementForAllActions(
self.weight_vec,
self.domain.actions_num,
new)
class RandomLocalBases(LocalBases):
def __init__(self, domain, kernel, num=100, resolution_min=5,
resolution_max=None, seed=1, **kwargs):
"""
:param domain: domain to learn on.
:param kernel: function handle to use for kernel function evaluations.
        :param num: Total number of feature (kernel) functions to scatter
            across the domain (so features_num = num).
:param resolution_min: resolution selected uniform random, lower bound.
:param resolution_max: resolution selected uniform random, upper bound.
:param seed: the random seed to use when scattering basis functions.
Randomly scatter ``num`` feature functions throughout the domain, with
sigma / noise parameter selected uniform random between
``resolution_min`` and ``resolution_max``. NOTE these are
sensitive to the choice of coordinate (scale with coordinate units).
"""
self.features_num = num
self.dim_widths = (domain.statespace_limits[:, 1]
- domain.statespace_limits[:, 0])
self.resolution_max = resolution_max
self.resolution_min = resolution_min
super(
RandomLocalBases,
self).__init__(
domain,
kernel,
seed=seed,
**kwargs)
self.centers = np.zeros((num, len(self.dim_widths)))
self.widths = np.zeros((num, len(self.dim_widths)))
self.init_randomization()
def init_randomization(self):
for i in range(self.features_num):
for d in range(len(self.dim_widths)):
self.centers[i, d] = self.random_state.uniform(
self.domain.statespace_limits[d, 0],
self.domain.statespace_limits[d, 1])
self.widths[i, d] = self.random_state.uniform(
old_div(self.dim_widths[d], self.resolution_max),
old_div(self.dim_widths[d], self.resolution_min))
| bsd-3-clause | 6,855,771,218,240,773,000 | 39.058824 | 80 | 0.606595 | false |
kavigupta/61a-analysis | src/analytics.py | 1 | 4920 | """
A module containing a variety of functions for analyzing the data. This is supposed to be more data
specific than statistics.
"""
import numpy as np
from tools import cached_property
def compensate_for_grader_means(evals, z_thresh=1):
"""
    Compensates for grader means by subtracting each grader's average grade per
    problem. Removes individuals graded by any grader whose unusualness (average
    per-rubric-item z score) exceeds z_thresh.
"""
if not evals.evaluation_for(list(evals.emails)[0]).means_need_compensation:
return evals
problematic = set(_identify_problematic_ranges(evals, z_thresh))
filt = evals.remove(problematic)
zeroed = filt.zero_meaned()
return zeroed
class ExamPair:
"""
Structure representing a correlation between exam scores, as well as metadata on location.
"""
def __init__(self, first, second, are_time_adjacent, are_space_adjacent, are_same_room):
self.are_time_adjacent = are_time_adjacent
self.first = first
self.second = second
self.are_space_adjacent = are_space_adjacent
self.are_same_room = are_same_room
@cached_property
def correlation(self):
"""
The correlation between the two exam's rubric items
"""
return self.first.correlation(self.second)
@cached_property
def abs_score_diff(self):
"""
The absolute difference between the exam scores
"""
return abs(self.first.score - self.second.score)
def __repr__(self):
return "ExamPair(%s, %s, %r, %r, %r)" % tuple(self)
def __hash__(self):
return hash((hash(self.first) + hash(self.second), tuple(self)[2:]))
def __eq__(self, other):
align = self.first == other.first and self.second == other.second
mis_align = self.first == other.second and self.second == other.first
if not align and not mis_align:
return False
return tuple(self)[2:] == tuple(other)[2:]
def __iter__(self):
return iter((self.first,
self.second,
self.are_time_adjacent,
self.are_space_adjacent,
self.are_same_room))
def all_pairs(graded_exam, seating_chart, time_delta, progress, require_same_room,
require_not_time_adj, adjacency_type):
"""
Yields an iterable of all pairs between individuals.
"""
if require_same_room:
for _, in_room in seating_chart.emails_by_room:
yield from _pairs_per_individual(graded_exam, seating_chart, time_delta, progress,
in_room, True, require_not_time_adj, adjacency_type)
else:
emails = list(graded_exam.emails)
yield from _pairs_per_individual(graded_exam, seating_chart, time_delta, progress,
emails, False, require_not_time_adj, adjacency_type)
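# Hypothetical usage sketch of all_pairs(); the exam, seating-chart and progress
# objects as well as the argument values are placeholders, not defined here:
#
#     pairs = all_pairs(graded_exam, seating_chart, time_delta=5,
#                       progress=progress_bar, require_same_room=True,
#                       require_not_time_adj=False, adjacency_type="adjacent")
#     flagged = [p for p in pairs
#                if p.are_space_adjacent and p.correlation > 0.9]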
def _pairs_per_individual(graded_exam, seating_chart, time_delta, progress, emails, known_same_room,
require_not_time_adj, adjacency_type):
p_bar = progress(len(emails))
for index_x, email_x in enumerate(emails):
p_bar.update(index_x)
if email_x not in graded_exam.emails:
continue
eval_x = graded_exam.evaluation_for(email_x)
if not known_same_room:
room_x = seating_chart.room_for(email_x)
for email_y in emails[index_x+1:]:
if email_y not in graded_exam.emails:
continue
if not known_same_room:
same_room = room_x == seating_chart.room_for(email_y)
else:
same_room = True
time_adjacent = abs(graded_exam.time_diff(email_x, email_y)) <= time_delta
if require_not_time_adj and time_adjacent:
continue
yield ExamPair(eval_x,
graded_exam.evaluation_for(email_y),
time_adjacent,
seating_chart.are_adjacent(email_x, email_y, adjacency_type),
same_room)
def _unusualness(grader, question):
"""
Get the unusualness of a grader with respect to a graded question; i.e., the average of the
z scores from the overall mean for each rubric item.
"""
overall_mean = question.mean_score
overall_std = question.std_score
by_grader = question.for_grader(grader)
return np.mean((np.abs(by_grader.mean_score - overall_mean) / overall_std).rubric_items)
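# Worked sketch of _unusualness() with made-up numbers: if a question's two
# rubric items have overall means [3.0, 1.0] and stds [2.0, 1.0], and one
# grader's means are [4.0, 0.5], the per-item z scores are [0.5, 0.5], so the
# grader's unusualness is their mean, 0.5 -- below the default z_thresh of 1.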
def _identify_problematic_ranges(evals, z_thresh):
"""
    Outputs an iterable of emails for which at least one grader had an unusualness greater than the
z threshold.
"""
for _, graded_question in evals:
for grader in graded_question.graders:
if _unusualness(grader, graded_question) > z_thresh:
yield from graded_question.for_grader(grader).emails
| gpl-3.0 | -3,319,539,819,622,582,300 | 40.344538 | 100 | 0.607927 | false |
Vishakha1990/Lambdas | testing/digitalocean/test.py | 1 | 2597 | #!/usr/bin/python
import os, requests, time, json, argparse
API = "https://api.digitalocean.com/v2/droplets"
DROPLET_NAME = "ol-tester"
HEADERS = {
"Authorization": "Bearer "+os.environ['TOKEN'],
"Content-Type": "application/json"
}
def post(args):
r = requests.post(API, data=args, headers=HEADERS)
return r.json()
def get(args):
r = requests.get(API, data=args, headers=HEADERS)
return r.json()
def start():
r = requests.get("https://api.digitalocean.com/v2/account/keys", headers=HEADERS)
keys = map(lambda row: row['id'], r.json()['ssh_keys'])
args = {
"name":DROPLET_NAME,
"region":"nyc2",
"size":"512mb",
"image":"ubuntu-14-04-x64",
"ssh_keys":keys
}
r = requests.post(API, data=json.dumps(args), headers=HEADERS)
return r.json()
def kill():
args = {}
droplets = get(args)['droplets']
for d in droplets:
if d['name'] == DROPLET_NAME:
print 'Deleting %s (%d)' % (d['name'], d['id'])
print requests.delete(API+'/'+str(d['id']), headers=HEADERS)
def lookup(droplet_id):
r = requests.get(API+'/'+str(droplet_id), headers=HEADERS)
return r.json()['droplet']
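# Shape of the droplet payload that lookup() returns and the polling loop in
# main() reads, trimmed to the fields actually used (values are illustrative):
#
#   {"status": "active",
#    "networks": {"v4": [{"type": "public", "ip_address": "203.0.113.10"}]}}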
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--quickstart', default=False, action='store_true')
args = parser.parse_args()
global TEST_SCRIPT
if args.quickstart:
TEST_SCRIPT = "qs_test.sh"
else:
TEST_SCRIPT = "test.sh"
# cleanup just in case
kill()
# create new droplet and wait for it
droplet = start()['droplet']
print droplet
while True:
droplet = lookup(droplet['id'])
# status
s = droplet['status']
assert(s in ['active', 'new'])
# addr
ip = None
for addr in droplet["networks"]["v4"]:
if addr["type"] == "public":
ip = addr["ip_address"]
print 'STATUS: %s, IP: %s' % (str(s), str(ip))
if s == 'active' and ip != None:
break
time.sleep(3)
time.sleep(30) # give SSH some time
scp = 'scp -o "StrictHostKeyChecking no" %s root@%s:/tmp' % (TEST_SCRIPT, ip)
print 'RUN ' + scp
rv = os.system(scp)
assert(rv == 0)
cmds = 'bash /tmp/%s' % TEST_SCRIPT
ssh = 'echo "<CMDS>" | ssh -o "StrictHostKeyChecking no" root@<IP>'
ssh = ssh.replace('<CMDS>', cmds).replace('<IP>', ip)
print 'RUN ' + ssh
rv = os.system(ssh)
assert(rv == 0)
# make sure we cleanup everything!
kill()
if __name__ == '__main__':
main()
| apache-2.0 | 2,783,647,885,308,764,000 | 24.97 | 85 | 0.560647 | false |
klmitch/nova | nova/tests/functional/compute/test_resource_tracker.py | 1 | 30750 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import fixtures
import mock
import os_resource_classes as orc
import os_traits
from oslo_utils.fixture import uuidsentinel as uuids
import yaml
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conf
from nova import context
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_notifier
from nova.virt import driver as virt_driver
CONF = conf.CONF
VCPU = orc.VCPU
MEMORY_MB = orc.MEMORY_MB
DISK_GB = orc.DISK_GB
COMPUTE_HOST = 'compute-host'
class IronicResourceTrackerTest(test.TestCase):
"""Tests the behaviour of the resource tracker with regards to the
transitional period between adding support for custom resource classes in
the placement API and integrating inventory and allocation records for
Ironic baremetal nodes with those custom resource classes.
"""
FLAVOR_FIXTURES = {
'CUSTOM_SMALL_IRON': objects.Flavor(
name='CUSTOM_SMALL_IRON',
flavorid=42,
vcpus=4,
memory_mb=4096,
root_gb=1024,
swap=0,
ephemeral_gb=0,
extra_specs={},
),
'CUSTOM_BIG_IRON': objects.Flavor(
name='CUSTOM_BIG_IRON',
flavorid=43,
vcpus=16,
memory_mb=65536,
root_gb=1024,
swap=0,
ephemeral_gb=0,
extra_specs={},
),
}
COMPUTE_NODE_FIXTURES = {
uuids.cn1: objects.ComputeNode(
uuid=uuids.cn1,
hypervisor_hostname='cn1',
hypervisor_type='ironic',
hypervisor_version=0,
cpu_info="",
host=COMPUTE_HOST,
vcpus=4,
vcpus_used=0,
cpu_allocation_ratio=1.0,
memory_mb=4096,
memory_mb_used=0,
ram_allocation_ratio=1.0,
local_gb=1024,
local_gb_used=0,
disk_allocation_ratio=1.0,
),
uuids.cn2: objects.ComputeNode(
uuid=uuids.cn2,
hypervisor_hostname='cn2',
hypervisor_type='ironic',
hypervisor_version=0,
cpu_info="",
host=COMPUTE_HOST,
vcpus=4,
vcpus_used=0,
cpu_allocation_ratio=1.0,
memory_mb=4096,
memory_mb_used=0,
ram_allocation_ratio=1.0,
local_gb=1024,
local_gb_used=0,
disk_allocation_ratio=1.0,
),
uuids.cn3: objects.ComputeNode(
uuid=uuids.cn3,
hypervisor_hostname='cn3',
hypervisor_type='ironic',
hypervisor_version=0,
cpu_info="",
host=COMPUTE_HOST,
vcpus=16,
vcpus_used=0,
cpu_allocation_ratio=1.0,
memory_mb=65536,
memory_mb_used=0,
ram_allocation_ratio=1.0,
local_gb=2048,
local_gb_used=0,
disk_allocation_ratio=1.0,
),
}
INSTANCE_FIXTURES = {
uuids.instance1: objects.Instance(
uuid=uuids.instance1,
flavor=FLAVOR_FIXTURES['CUSTOM_SMALL_IRON'],
vm_state=vm_states.BUILDING,
task_state=task_states.SPAWNING,
power_state=power_state.RUNNING,
project_id='project',
user_id=uuids.user,
),
}
def _set_client(self, client):
"""Set up embedded report clients to use the direct one from the
interceptor.
"""
self.report_client = client
self.rt.reportclient = client
def setUp(self):
super(IronicResourceTrackerTest, self).setUp()
self.flags(
reserved_host_memory_mb=0,
cpu_allocation_ratio=1.0,
ram_allocation_ratio=1.0,
disk_allocation_ratio=1.0,
)
self.ctx = context.RequestContext('user', 'project')
driver = mock.MagicMock(autospec=virt_driver.ComputeDriver)
driver.node_is_available.return_value = True
def fake_upt(provider_tree, nodename, allocations=None):
inventory = {
'CUSTOM_SMALL_IRON': {
'total': 1,
'reserved': 0,
'min_unit': 1,
'max_unit': 1,
'step_size': 1,
'allocation_ratio': 1.0,
},
}
provider_tree.update_inventory(nodename, inventory)
driver.update_provider_tree.side_effect = fake_upt
self.driver_mock = driver
self.rt = resource_tracker.ResourceTracker(COMPUTE_HOST, driver)
self.instances = self.create_fixtures()
def create_fixtures(self):
for flavor in self.FLAVOR_FIXTURES.values():
# Clone the object so the class variable isn't
# modified by reference.
flavor = flavor.obj_clone()
flavor._context = self.ctx
flavor.obj_set_defaults()
flavor.create()
# We create some compute node records in the Nova cell DB to simulate
# data before adding integration for Ironic baremetal nodes with the
# placement API...
for cn in self.COMPUTE_NODE_FIXTURES.values():
# Clone the object so the class variable isn't
# modified by reference.
cn = cn.obj_clone()
cn._context = self.ctx
cn.obj_set_defaults()
cn.create()
instances = {}
for instance in self.INSTANCE_FIXTURES.values():
# Clone the object so the class variable isn't
# modified by reference.
instance = instance.obj_clone()
instance._context = self.ctx
instance.obj_set_defaults()
instance.create()
instances[instance.uuid] = instance
return instances
@mock.patch('nova.compute.utils.is_volume_backed_instance',
new=mock.Mock(return_value=False))
@mock.patch('nova.objects.compute_node.ComputeNode.save', new=mock.Mock())
def test_node_stats_isolation(self):
"""Regression test for bug 1784705 introduced in Ocata.
The ResourceTracker.stats field is meant to track per-node stats
so this test registers three compute nodes with a single RT where
each node has unique stats, and then makes sure that after updating
usage for an instance, the nodes still have their unique stats and
nothing is leaked from node to node.
"""
self.useFixture(func_fixtures.PlacementFixture())
# Before the resource tracker is "initialized", we shouldn't have
# any compute nodes or stats in the RT's cache...
self.assertEqual(0, len(self.rt.compute_nodes))
self.assertEqual(0, len(self.rt.stats))
# Now "initialize" the resource tracker. This is what
# nova.compute.manager.ComputeManager does when "initializing" the
# nova-compute service. Do this in a predictable order so cn1 is
# first and cn3 is last.
for cn in sorted(self.COMPUTE_NODE_FIXTURES.values(),
key=lambda _cn: _cn.hypervisor_hostname):
nodename = cn.hypervisor_hostname
# Fake that each compute node has unique extra specs stats and
# the RT makes sure those are unique per node.
stats = {'node:%s' % nodename: nodename}
self.driver_mock.get_available_resource.return_value = {
'hypervisor_hostname': nodename,
'hypervisor_type': 'ironic',
'hypervisor_version': 0,
'vcpus': cn.vcpus,
'vcpus_used': cn.vcpus_used,
'memory_mb': cn.memory_mb,
'memory_mb_used': cn.memory_mb_used,
'local_gb': cn.local_gb,
'local_gb_used': cn.local_gb_used,
'numa_topology': None,
'resource_class': None, # Act like admin hasn't set yet...
'stats': stats,
}
self.rt.update_available_resource(self.ctx, nodename)
self.assertEqual(3, len(self.rt.compute_nodes))
self.assertEqual(3, len(self.rt.stats))
def _assert_stats():
# Make sure each compute node has a unique set of stats and
# they don't accumulate across nodes.
for _cn in self.rt.compute_nodes.values():
node_stats_key = 'node:%s' % _cn.hypervisor_hostname
self.assertIn(node_stats_key, _cn.stats)
node_stat_count = 0
for stat in _cn.stats:
if stat.startswith('node:'):
node_stat_count += 1
self.assertEqual(1, node_stat_count, _cn.stats)
_assert_stats()
# Now "spawn" an instance to the first compute node by calling the
# RT's instance_claim().
cn1_obj = self.COMPUTE_NODE_FIXTURES[uuids.cn1]
cn1_nodename = cn1_obj.hypervisor_hostname
inst = self.instances[uuids.instance1]
with self.rt.instance_claim(self.ctx, inst, cn1_nodename, {}):
_assert_stats()
class TestUpdateComputeNodeReservedAndAllocationRatio(
integrated_helpers.ProviderUsageBaseTestCase):
"""Tests reflecting reserved and allocation ratio inventory from
nova-compute to placement
"""
compute_driver = 'fake.FakeDriver'
@staticmethod
def _get_reserved_host_values_from_config():
return {
'VCPU': CONF.reserved_host_cpus,
'MEMORY_MB': CONF.reserved_host_memory_mb,
'DISK_GB': compute_utils.convert_mb_to_ceil_gb(
CONF.reserved_host_disk_mb)
}
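    # convert_mb_to_ceil_gb rounds the MB value up to whole GiB, e.g. the
    # reserved_host_disk_mb=8192 set later in this test maps to 8 DISK_GB.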
def _assert_reserved_inventory(self, inventories):
reserved = self._get_reserved_host_values_from_config()
for rc, res in reserved.items():
self.assertIn('reserved', inventories[rc])
self.assertEqual(res, inventories[rc]['reserved'],
'Unexpected resource provider inventory '
'reserved value for %s' % rc)
def test_update_inventory_reserved_and_allocation_ratio_from_conf(self):
# Start a compute service which should create a corresponding resource
# provider in the placement service.
compute_service = self._start_compute('fake-host')
# Assert the compute node resource provider exists in placement with
# the default reserved and allocation ratio values from config.
rp_uuid = self._get_provider_uuid_by_host('fake-host')
inventories = self._get_provider_inventory(rp_uuid)
# The default allocation ratio config values are all 0.0 and get
# defaulted to real values in the ComputeNode object, so we need to
# check our defaults against what is in the ComputeNode object.
ctxt = context.get_admin_context()
# Note that the CellDatabases fixture usage means we don't need to
# target the context to cell1 even though the compute_nodes table is
# in the cell1 database.
cn = objects.ComputeNode.get_by_uuid(ctxt, rp_uuid)
ratios = {
'VCPU': cn.cpu_allocation_ratio,
'MEMORY_MB': cn.ram_allocation_ratio,
'DISK_GB': cn.disk_allocation_ratio
}
for rc, ratio in ratios.items():
self.assertIn(rc, inventories)
self.assertIn('allocation_ratio', inventories[rc])
self.assertEqual(ratio, inventories[rc]['allocation_ratio'],
'Unexpected allocation ratio for %s' % rc)
self._assert_reserved_inventory(inventories)
# Now change the configuration values, restart the compute service,
# and ensure the changes are reflected in the resource provider
# inventory records. We use 2.0 since disk_allocation_ratio defaults
# to 1.0.
self.flags(cpu_allocation_ratio=2.0)
self.flags(ram_allocation_ratio=2.0)
self.flags(disk_allocation_ratio=2.0)
self.flags(reserved_host_cpus=2)
self.flags(reserved_host_memory_mb=1024)
self.flags(reserved_host_disk_mb=8192)
self.restart_compute_service(compute_service)
# The ratios should now come from config overrides rather than the
# defaults in the ComputeNode object.
ratios = {
'VCPU': CONF.cpu_allocation_ratio,
'MEMORY_MB': CONF.ram_allocation_ratio,
'DISK_GB': CONF.disk_allocation_ratio
}
attr_map = {
'VCPU': 'cpu',
'MEMORY_MB': 'ram',
'DISK_GB': 'disk',
}
cn = objects.ComputeNode.get_by_uuid(ctxt, rp_uuid)
inventories = self._get_provider_inventory(rp_uuid)
for rc, ratio in ratios.items():
# Make sure the config is what we expect.
self.assertEqual(2.0, ratio,
'Unexpected config allocation ratio for %s' % rc)
# Make sure the values in the DB are updated.
self.assertEqual(
ratio, getattr(cn, '%s_allocation_ratio' % attr_map[rc]),
'Unexpected ComputeNode allocation ratio for %s' % rc)
# Make sure the values in placement are updated.
self.assertEqual(ratio, inventories[rc]['allocation_ratio'],
'Unexpected resource provider inventory '
'allocation ratio for %s' % rc)
# The reserved host values should also come from config.
self._assert_reserved_inventory(inventories)
def test_allocation_ratio_create_with_initial_allocation_ratio(self):
        # The xxx_allocation_ratio options default to None, and we use
        # 16.1/1.6/1.1 since the initial_xxx_allocation_ratio options default
        # to 16.0/1.5/1.0.
self.flags(initial_cpu_allocation_ratio=16.1)
self.flags(initial_ram_allocation_ratio=1.6)
self.flags(initial_disk_allocation_ratio=1.1)
# Start a compute service which should create a corresponding resource
# provider in the placement service.
self._start_compute('fake-host')
# Assert the compute node resource provider exists in placement with
# the default reserved and allocation ratio values from config.
rp_uuid = self._get_provider_uuid_by_host('fake-host')
inventories = self._get_provider_inventory(rp_uuid)
ctxt = context.get_admin_context()
# Note that the CellDatabases fixture usage means we don't need to
# target the context to cell1 even though the compute_nodes table is
# in the cell1 database.
cn = objects.ComputeNode.get_by_uuid(ctxt, rp_uuid)
ratios = {
'VCPU': cn.cpu_allocation_ratio,
'MEMORY_MB': cn.ram_allocation_ratio,
'DISK_GB': cn.disk_allocation_ratio
}
initial_ratio_conf = {
'VCPU': CONF.initial_cpu_allocation_ratio,
'MEMORY_MB': CONF.initial_ram_allocation_ratio,
'DISK_GB': CONF.initial_disk_allocation_ratio
}
for rc, ratio in ratios.items():
self.assertIn(rc, inventories)
self.assertIn('allocation_ratio', inventories[rc])
# Check the allocation_ratio values come from the new
# CONF.initial_xxx_allocation_ratio
self.assertEqual(initial_ratio_conf[rc], ratio,
'Unexpected allocation ratio for %s' % rc)
# Check the initial allocation ratio is updated to inventories
self.assertEqual(ratio, inventories[rc]['allocation_ratio'],
'Unexpected allocation ratio for %s' % rc)
def test_allocation_ratio_overwritten_from_config(self):
# NOTE(yikun): This test case includes below step:
# 1. Overwrite the allocation_ratio via the placement API directly -
# run the RT.update_available_resource periodic and assert the
# allocation ratios are not overwritten from config.
#
# 2. Set the CONF.*_allocation_ratio, run the periodic, and assert
# that the config overwrites what was set via the placement API.
compute_service = self._start_compute('fake-host')
rp_uuid = self._get_provider_uuid_by_host('fake-host')
ctxt = context.get_admin_context()
rt = compute_service.manager.rt
inv = self.placement.get(
'/resource_providers/%s/inventories' % rp_uuid).body
ratios = {'VCPU': 16.1, 'MEMORY_MB': 1.6, 'DISK_GB': 1.1}
for rc, ratio in ratios.items():
inv['inventories'][rc]['allocation_ratio'] = ratio
# Overwrite the allocation_ratio via the placement API directly
self._update_inventory(rp_uuid, inv)
inv = self._get_provider_inventory(rp_uuid)
# Check inventories is updated to ratios
for rc, ratio in ratios.items():
self.assertIn(rc, inv)
self.assertIn('allocation_ratio', inv[rc])
self.assertEqual(ratio, inv[rc]['allocation_ratio'],
'Unexpected allocation ratio for %s' % rc)
# Make sure xxx_allocation_ratio is None by default
self.assertIsNone(CONF.cpu_allocation_ratio)
self.assertIsNone(CONF.ram_allocation_ratio)
self.assertIsNone(CONF.disk_allocation_ratio)
# run the RT.update_available_resource periodic
rt.update_available_resource(ctxt, 'fake-host')
# assert the allocation ratios are not overwritten from config
inv = self._get_provider_inventory(rp_uuid)
for rc, ratio in ratios.items():
self.assertIn(rc, inv)
self.assertIn('allocation_ratio', inv[rc])
self.assertEqual(ratio, inv[rc]['allocation_ratio'],
'Unexpected allocation ratio for %s' % rc)
# set the CONF.*_allocation_ratio
self.flags(cpu_allocation_ratio=15.9)
self.flags(ram_allocation_ratio=1.4)
self.flags(disk_allocation_ratio=0.9)
# run the RT.update_available_resource periodic
rt.update_available_resource(ctxt, 'fake-host')
inv = self._get_provider_inventory(rp_uuid)
ratios = {
'VCPU': CONF.cpu_allocation_ratio,
'MEMORY_MB': CONF.ram_allocation_ratio,
'DISK_GB': CONF.disk_allocation_ratio
}
# assert that the config overwrites what was set via the placement API.
for rc, ratio in ratios.items():
self.assertIn(rc, inv)
self.assertIn('allocation_ratio', inv[rc])
self.assertEqual(ratio, inv[rc]['allocation_ratio'],
'Unexpected allocation ratio for %s' % rc)
class TestProviderConfig(integrated_helpers.ProviderUsageBaseTestCase):
"""Tests for adding inventories and traits to resource providers using
provider config files described in spec provider-config-file.
"""
compute_driver = 'fake.FakeDriver'
BASE_CONFIG = {
"meta": {
"schema_version": "1.0"
},
"providers": []
}
EMPTY_PROVIDER = {
"identification": {
},
"inventories": {
"additional": []
},
"traits": {
"additional": []
}
}
def setUp(self):
super().setUp()
# make a new temp dir and configure nova-compute to look for provider
# config files there
self.pconf_loc = self.useFixture(fixtures.TempDir()).path
self.flags(provider_config_location=self.pconf_loc, group='compute')
def _create_config_entry(self, id_value, id_method="uuid", cfg_file=None):
"""Adds an entry in the config file for the provider using the
requested identification method [uuid, name] with additional traits
and inventories.
"""
# if an existing config file was not passed, create a new one
if not cfg_file:
cfg_file = copy.deepcopy(self.BASE_CONFIG)
provider = copy.deepcopy(self.EMPTY_PROVIDER)
# create identification method
provider['identification'] = {id_method: id_value}
# create entries for additional traits and inventories using values
# unique to this provider entry
provider['inventories']['additional'].append({
orc.normalize_name(id_value): {
"total": 100,
"reserved": 0,
"min_unit": 1,
"max_unit": 10,
"step_size": 1,
"allocation_ratio": 1
}
})
provider['traits']['additional'].append(
os_traits.normalize_name(id_value))
# edit cfg_file in place, but return it in case this is the first call
cfg_file['providers'].append(provider)
return cfg_file
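    # For reference, a single entry built here for id_value="fake-host",
    # id_method="name" serializes to YAML roughly as follows (values follow
    # the constants above; both names normalize to CUSTOM_FAKE_HOST):
    #   providers:
    #   - identification: {name: fake-host}
    #     inventories:
    #       additional:
    #       - CUSTOM_FAKE_HOST: {total: 100, reserved: 0, min_unit: 1,
    #                            max_unit: 10, step_size: 1, allocation_ratio: 1}
    #     traits:
    #       additional: [CUSTOM_FAKE_HOST]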
def _assert_inventory_and_traits(self, provider, config):
"""Asserts that the inventory and traits on the provider include those
defined in the provided config file. If the provider was identified
explicitly, also asserts that the $COMPUTE_NODE values are not included
on the provider.
Testing for specific inventory values is done in depth in unit tests
so here we are just checking for keys.
"""
# retrieve actual inventory and traits for the provider
actual_inventory = list(
self._get_provider_inventory(provider['uuid']).keys())
actual_traits = self._get_provider_traits(provider['uuid'])
# search config file data for expected inventory and traits
# since we also want to check for unexpected inventory,
# we also need to track compute node entries
expected_inventory, expected_traits = [], []
cn_expected_inventory, cn_expected_traits = [], []
for p_config in config['providers']:
_pid = p_config['identification']
# check for explicit uuid/name match
if _pid.get("uuid") == provider['uuid'] \
or _pid.get("name") == provider['name']:
expected_inventory = list(p_config.get(
"inventories", {}).get("additional", [])[0].keys())
expected_traits = p_config.get(
"traits", {}).get("additional", [])
# check for uuid==$COMPUTE_NODE match
elif _pid.get("uuid") == "$COMPUTE_NODE":
cn_expected_inventory = list(p_config.get(
"inventories", {}).get("additional", [])[0].keys())
cn_expected_traits = p_config.get(
"traits", {}).get("additional", [])
# if expected inventory or traits are found,
# test that they all exist in the actual inventory/traits
missing_inventory, missing_traits = None, None
unexpected_inventory, unexpected_traits = None, None
if expected_inventory or expected_traits:
missing_inventory = [key for key in expected_inventory
if key not in actual_inventory]
missing_traits = [key for key in expected_traits
if key not in actual_traits]
# if $COMPUTE_NODE values are also found,
# test that they do not exist
if cn_expected_inventory or cn_expected_traits:
unexpected_inventory = [
key for key in actual_inventory
if key in cn_expected_inventory and key
not in expected_inventory]
                unexpected_traits = [
trait for trait in cn_expected_traits
if trait in actual_traits and trait
not in expected_traits]
# if no explicit values were found, test for $COMPUTE_NODE values
elif cn_expected_inventory or cn_expected_traits:
missing_inventory = [key for key in cn_expected_inventory
if key not in actual_inventory]
missing_traits = [trait for trait in cn_expected_traits
if trait not in actual_traits]
# if no values were found, the test is broken
else:
self.fail("No expected values were found, the test is broken.")
self.assertFalse(missing_inventory,
msg="Missing inventory: %s" % missing_inventory)
self.assertFalse(unexpected_inventory,
msg="Unexpected inventory: %s" % unexpected_inventory)
self.assertFalse(missing_traits,
msg="Missing traits: %s" % missing_traits)
self.assertFalse(unexpected_traits,
msg="Unexpected traits: %s" % unexpected_traits)
def _place_config_file(self, file_name, file_data):
"""Creates a file in the provider config directory using file_name and
dumps file_data to it in yaml format.
NOTE: The file name should end in ".yaml" for Nova to recognize and
load it.
"""
with open(os.path.join(self.pconf_loc, file_name), "w") as open_file:
yaml.dump(file_data, open_file)
def test_single_config_file(self):
"""Tests that additional inventories and traits defined for a provider
are applied to the correct provider.
"""
# create a config file with both explicit name and uuid=$COMPUTE_NODE
config = self._create_config_entry("fake-host", id_method="name")
self._place_config_file("provider_config1.yaml", config)
# start nova-compute
self._start_compute("fake-host")
# test that only inventory from the explicit entry exists
provider = self._get_resource_provider_by_uuid(
self._get_provider_uuid_by_host("fake-host"))
self._assert_inventory_and_traits(provider, config)
def test_multiple_config_files(self):
"""This performs the same test as test_single_config_file but splits
the configurations into separate files.
"""
# create a config file with uuid=$COMPUTE_NODE
config1 = self._create_config_entry("$COMPUTE_NODE", id_method="uuid")
self._place_config_file("provider_config1.yaml", config1)
# create a second config file with explicit name
config2 = self._create_config_entry("fake-host", id_method="name")
self._place_config_file("provider_config2.yaml", config2)
# start nova-compute
self._start_compute("fake-host")
# test that only inventory from the explicit entry exists
provider1 = self._get_resource_provider_by_uuid(
self._get_provider_uuid_by_host("fake-host"))
self._assert_inventory_and_traits(provider1, config2)
def test_multiple_compute_nodes(self):
"""This test mimics an ironic-like environment with multiple compute
nodes. Some nodes will be updated with the uuid=$COMPUTE_NODE provider
config entries and others will use explicit name matching.
"""
# get some uuids to use as compute host names
provider_names = [uuids.cn2, uuids.cn3, uuids.cn4,
uuids.cn5, uuids.cn6, uuids.cn7]
# create config file with $COMPUTE_NODE entry
config = self._create_config_entry("$COMPUTE_NODE", id_method="uuid")
# add three explicit name entries
for provider_name in provider_names[-3:]:
self._create_config_entry(provider_name, id_method="name",
cfg_file=config)
self._place_config_file("provider.yaml", config)
# start the compute services
for provider_name in provider_names:
self._start_compute(provider_name)
# test for expected inventory and traits on each provider
for provider_name in provider_names:
self._assert_inventory_and_traits(
self._get_resource_provider_by_uuid(
self._get_provider_uuid_by_host(provider_name)),
config)
def test_end_to_end(self):
"""This test emulates a full end to end test showing that without this
feature a vm cannot be spawning using a custom trait and then start a
compute service that provides that trait.
"""
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.GlanceFixture(self))
# Start nova services.
self.api = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1')).admin_api
self.api.microversion = 'latest'
fake_notifier.stub_notifier(self)
self.addCleanup(fake_notifier.reset)
self.start_service('conductor')
# start nova-compute that will not have the additional trait.
self._start_compute("fake-host-1")
node_name = "fake-host-2"
# create a config file with explicit name
provider_config = self._create_config_entry(
node_name, id_method="name")
self._place_config_file("provider_config.yaml", provider_config)
self._create_flavor(
name='CUSTOM_Flavor', id=42, vcpu=4, memory_mb=4096,
disk=1024, swap=0, extra_spec={
f"trait:{os_traits.normalize_name(node_name)}": "required"
})
self._create_server(
flavor_id=42, expected_state='ERROR',
networks=[{'port': self.neutron.port_1['id']}])
# start compute node that will report the custom trait.
self._start_compute("fake-host-2")
self._create_server(
flavor_id=42, expected_state='ACTIVE',
networks=[{'port': self.neutron.port_1['id']}])
| apache-2.0 | 8,476,263,967,089,812,000 | 40.950887 | 79 | 0.597951 | false |
yehudagale/fuzzyJoiner | old/TripletLossFacenetLSTM-angular.py | 1 | 21847 | import numpy as np
import tensorflow as tf
import random
# import cntk as C
# """
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
import os
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
random.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# import theano as T
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# """
# from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser
import sys
import statistics
import argparse
#must fix
MAX_NB_WORDS = 140000
EMBEDDING_DIM = 100
MAX_SEQUENCE_LENGTH = 10
MARGIN=1
ALPHA=30
USE_GRU=True
DEBUG = False
DEBUG_DATA_LENGTH = 100
DEBUG_ANN = False
USE_ANGULAR_LOSS=True
LOSS_FUNCTION=None
TRAIN_NEIGHBOR_LEN=20
TEST_NEIGHBOR_LEN=20
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = True
def f1score(positive, negative):
    # Count a triplet as correct when the anchor-positive distance does not
    # exceed the anchor-negative distance.
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            false_negative += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    fscore = (2 * true_positive) / ((2 * true_positive) + false_negative + false_positive)
return fscore
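# Worked example (illustrative): f1score([0.2, 0.4], [0.5, 0.3]) counts one
# true positive (0.2 <= 0.5) and one miss (0.4 > 0.3), giving
# (2*1) / (2*1 + 1 + 1) = 0.5.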
def get_embedding_layer(tokenizer):
word_index = tokenizer.word_index
num_words = len(word_index) + 1
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
print('about to get kz')
kz = KazumaCharEmbedding()
print('got kz')
for word, i in word_index.items():
if i >= MAX_NB_WORDS:
continue
embedding_vector = kz.emb(word)
if embedding_vector is not None:
if sum(embedding_vector) == 0:
print("failed to find embedding for:" + word)
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
print("Number of words:" + str(num_words))
embedding_layer = Embedding(num_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
return embedding_layer
def get_sequences(texts, tokenizer):
sequences = {}
sequences['anchor'] = tokenizer.texts_to_sequences(texts['anchor'])
sequences['anchor'] = pad_sequences(sequences['anchor'], maxlen=MAX_SEQUENCE_LENGTH)
sequences['negative'] = tokenizer.texts_to_sequences(texts['negative'])
sequences['negative'] = pad_sequences(sequences['negative'], maxlen=MAX_SEQUENCE_LENGTH)
sequences['positive'] = tokenizer.texts_to_sequences(texts['positive'])
sequences['positive'] = pad_sequences(sequences['positive'], maxlen=MAX_SEQUENCE_LENGTH)
return sequences
def read_entities(filepath):
entities = []
with open(filepath) as fl:
for line in fl:
entities.append(line)
return entities
def read_file(file_path):
texts = {'anchor':[], 'negative':[], 'positive':[]}
fl = open(file_path, 'r')
i = 0
for line in fl:
line_array = line.split("|")
texts['anchor'].append(line_array[0])
texts['positive'].append(line_array[1])
texts['negative'].append(line_array[2])
i += 1
if i > DEBUG_DATA_LENGTH and DEBUG:
break
return texts
def split(entities, test_split = 0.2):
if DEBUG:
ents = entities[0:DEBUG_DATA_LENGTH]
else:
random.shuffle(entities)
ents = entities
num_validation_samples = int(test_split * len(ents))
return ents[:-num_validation_samples], ents[-num_validation_samples:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
alpha = K.constant(ALPHA)
a_p = y_pred[:,0,0]
n_c = y_pred[:,1,0]
    return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
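# Note on tensor layout: with USE_ANGULAR_LOSS enabled, build_model() stacks
# [a_p, n_c] along axis 1, so y_pred[:, 0, 0] above is the anchor-positive
# distance and y_pred[:, 1, 0] is the negative-to-midpoint distance.
# tf.tan is used for the tangent because the TensorFlow backend is configured
# above while the theano import is commented out (editorial assumption).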
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
margin = K.constant(MARGIN)
return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
def triplet_loss(y_true, y_pred):
# margin = K.constant(MARGIN)
# return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]))
margin = K.constant(MARGIN)
return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]))
# return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]) + K.square(margin - y_pred[:,2,0]))
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
margin = K.constant(MARGIN)
lambda_p = 0.02
threshold = 0.1
a_p_distance = y_pred[:,0,0]
a_n_distance = y_pred[:,1,0]
p_n_distance = y_pred[:,2,0]
phi = a_p_distance - ((a_n_distance + p_n_distance) / 2) + margin
psi = a_p_distance - threshold
return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
return K.l2_normalize(x, axis=-1)
def tanhNorm(x):
square_sum = K.sum(K.square(x), axis=-1, keepdims=True)
dist = K.sqrt(K.maximum(square_sum, K.epsilon()))
tanh = K.tanh(dist)
scale = tanh / dist
return x * scale
def euclidean_distance(vects):
x, y = vects
return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
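# euclidean_distance works row-wise on two (batch, dim) tensors and returns a
# (batch, 1) tensor of L2 distances; K.epsilon() keeps the sqrt away from zero.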
def n_c_angular_distance(vects):
x_a, x_p, x_n = vects
return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
x_a, x_p, x_n = vects
return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
unique_text = []
entity2index = {}
for key in entity2same:
entity2index[key] = len(unique_text)
unique_text.append(key)
vals = entity2same[key]
for v in vals:
entity2index[v] = len(unique_text)
unique_text.append(v)
return unique_text, entity2index
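# Illustrative example: build_unique_entities({'IBM': ['I.B.M.', 'Intl Business Machines']})
# returns (['IBM', 'I.B.M.', 'Intl Business Machines'],
#          {'IBM': 0, 'I.B.M.': 1, 'Intl Business Machines': 2}).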
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
predictions = model.predict(sequences)
t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
t.set_seed(123)
for i in range(len(predictions)):
# print(predictions[i])
v = predictions[i]
t.add_item(i, v)
t.build(100) # 100 trees
match = 0
no_match = 0
accuracy = 0
total = 0
triplets = {}
pos_distances = []
neg_distances = []
triplets['anchor'] = []
triplets['positive'] = []
triplets['negative'] = []
if test:
NNlen = TEST_NEIGHBOR_LEN
else:
NNlen = TRAIN_NEIGHBOR_LEN
for key in entity2same:
index = entity2unique[key]
nearest = t.get_nns_by_vector(predictions[index], NNlen)
nearest_text = set([unique_text[i] for i in nearest])
expected_text = set(entity2same[key])
# annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
if key in nearest_text:
nearest_text.remove(key)
# print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
overlap = expected_text.intersection(nearest_text)
# collect up some statistics on how well we did on the match
m = len(overlap)
match += m
# since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
# make sure we adjust our estimate of no match appropriately
no_match += min(len(expected_text), NNlen - 1) - m
# sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling of only 'semi-hard' negatives is not done here
# positives = expected_text - nearest_text
positives = expected_text
negatives = nearest_text - expected_text
# print(key + str(expected_text) + str(nearest_text))
for i in negatives:
for j in positives:
dist_pos = t.get_distance(index, entity2unique[j])
pos_distances.append(dist_pos)
dist_neg = t.get_distance(index, entity2unique[i])
neg_distances.append(dist_neg)
if dist_pos < dist_neg:
accuracy += 1
total += 1
# print(key + "|" + j + "|" + i)
# print(dist_pos)
# print(dist_neg)
triplets['anchor'].append(key)
triplets['positive'].append(j)
triplets['negative'].append(i)
print("mean positive distance:" + str(statistics.mean(pos_distances)))
print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
print("max positive distance:" + str(max(pos_distances)))
print("mean neg distance:" + str(statistics.mean(neg_distances)))
print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
print("max neg distance:" + str(max(neg_distances)))
print("Accuracy in the ANN for triplets that obey the distance func:" + str(accuracy / total))
if test:
return match/(match + no_match)
else:
return triplets, match/(match + no_match)
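# The triplets dict built above holds three parallel lists ('anchor',
# 'positive', 'negative'), one entry per sampled training triple; the second
# return value is the fraction of expected matches recovered inside the
# approximate-nearest-neighbour window (a recall-style estimate).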
def generate_names(entities, limit_pairs=False):
num_names = 4
names_generator = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
entity2same = {}
for entity in entities:
ret = names_generator.cleanse_data(entity)
if ret and len(ret) >= num_names:
entity2same[ret[0]] = ret[1:]
return entity2same
def embedded_representation_model(embedding_layer):
seq = Sequential()
seq.add(embedding_layer)
seq.add(Flatten())
return seq
def get_hidden_layer(name, net, is_last):
if USE_GRU:
if is_last:
return GRU(128, activation='relu', name=name)(net)
else:
return GRU(128, return_sequences=True, activation='relu', name=name)(net)
else:
return Dense(128, activation='relu', name=name)(net)
def build_model(embedder):
main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
net = embedder(main_input)
net = GRU(128, return_sequences=True, activation='relu', name='embed')(net)
net = GRU(128, return_sequences=True, activation='relu', name='embed2')(net)
net = GRU(128, return_sequences=True, activation='relu', name='embed2a')(net)
net = GRU(128, activation='relu', name='embed3')(net)
"""
for i in range(0, NUM_LAYERS):
net = get_hidden_layer('embed' + str(i), net, False)
net = get_hidden_layer('embed_last', net, True)
"""
# if USE_L2_NORM:
net = Lambda(l2Norm, output_shape=[128])(net)
base_model = Model(embedder.input, net, name='triplet_model')
base_model.summary()
input_shape=(MAX_SEQUENCE_LENGTH,)
input_anchor = Input(shape=input_shape, name='input_anchor')
input_positive = Input(shape=input_shape, name='input_pos')
input_negative = Input(shape=input_shape, name='input_neg')
net_anchor = base_model(input_anchor)
net_positive = base_model(input_positive)
net_negative = base_model(input_negative)
positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
if USE_ANGULAR_LOSS:
n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
stacked_dists = Lambda(
lambda vects: K.stack(vects, axis=1),
name='stacked_dists', output_shape=(3, 1)
)([a_p, n_c])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
else:
exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
stacked_dists = Lambda(
# lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
lambda vects: K.stack(vects, axis=1),
name='stacked_dists', output_shape=(3, 1)
)([positive_dist, negative_dist, exemplar_negative_dist])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
model.compile(optimizer="rmsprop", loss=modified_loss, metrics=[accuracy])
test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
inter_model = Model(input_anchor, net_anchor)
print("output_shapes")
model.summary()
# print(positive_dist.output_shape)
# print(negative_dist.output_shape)
# print(exemplar_negative_dist)
# print(neg_dist.output_shape)
return model, test_positive_model, test_negative_model, inter_model
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
help='sample size for debug run')
parser.add_argument('--margin', type=int,
help='margin')
parser.add_argument('--loss_function', type=str,
help='loss function type: triplet-loss, improved-triplet-loss, modified-loss, or angular-loss')
parser.add_argument('--use_l2_norm', type=bool,
help='whether to add a l2 norm')
parser.add_argument('--test_neighbor_len', type=int,
help='size of the neighborhood for testing')
parser.add_argument('--train_neighbor_len', type=int,
help='size of the neighborhood for training')
parser.add_argument('--embedding_type', type=str, help='encoding type to use for input: Kazuma (for Kazuma character embedding) or one-hot')
parser.add_argument('--use_GRU', type=bool,
help='use GRU or default to MLP')
parser.add_argument('--num_layers', type=int,
help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
args = parser.parse_args()
"""
LOSS_FUNCTION = None
if args.loss_function == 'triplet-loss':
LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved_triplet_loss':
LOSS_FUNCTION=improved_triplet_loss
elif args.loss_function == 'modified_loss':
LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'angular-loss':
USE_ANGULAR_LOSS = true
LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
DEBUG=True
DEBUG_DATA_LENGTH=args.debug_sample_size
print('Debug data length:' + str(DEBUG_DATA_LENGTH))
MARGIN = args.margin
print('Margin:' + str(MARGIN))
TRAIN_NEIGHBOR_LEN = args.train_neighbor_len
TEST_NEIGHBOR_LEN = args.test_neighbor_len
print('Train neighbor length: ' + str(TRAIN_NEIGHBOR_LEN))
print('Test neighbor length: ' + str(TEST_NEIGHBOR_LEN))
USE_L2_NORM = args.use_l2_norm
print('Use L2Norm: ' + str(USE_L2_NORM))
EMBEDDING_TYPE = args.embedding_type
print('Embedding type: ' + EMBEDDING_TYPE)
USE_GRU = args.use_GRU
print('Use GRU: ' + str(args.use_GRU))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
"""
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train)
entity2same_test = generate_names(test, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
sys.exit()
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
match_stats = 0
# num_iter = 100
num_iter = 1
counter = 0
current_model = embedder_model
prev_match_stats = 0
while test_match_stats < .9 and counter < num_iter:
counter += 1
train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
print("Match stats:" + str(match_stats))
number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
filepath="weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]
train_seq = get_sequences(train_data, tokenizer)
    # cap training at 100 epochs; early stopping on val_accuracy (patience=1)
    # normally ends it much sooner, since this loop can call fit() many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
current_model = inter_model
# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| epl-1.0 | 8,191,197,515,958,977,000 | 37.395431 | 167 | 0.663249 | false |
samlaudev/LeetCode | Python/Insert Delete GetRandom O(1) - Duplicates allowed/Solution.py | 1 | 2970 | # Problem: Insert Delete GetRandom O(1) - Duplicates allowed
#
# Design a data structure that supports all following operations in average O(1) time.
#
# Note: Duplicate elements are allowed.
# 1. insert(val): Inserts an item val to the collection.
# 2. remove(val): Removes an item val from the collection if present.
# 3. getRandom: Returns a random element from current collection of elements. The probability of each element being returned is linearly related to the number of same value the collection contains.
#
# Example:
#
# // Init an empty collection.
# RandomizedCollection collection = new RandomizedCollection();
#
# // Inserts 1 to the collection. Returns true as the collection did not contain 1.
# collection.insert(1);
#
# // Inserts another 1 to the collection. Returns false as the collection contained 1. Collection now contains [1,1].
# collection.insert(1);
#
# // Inserts 2 to the collection, returns true. Collection now contains [1,1,2].
# collection.insert(2);
#
# // getRandom should return 1 with the probability 2/3, and returns 2 with the probability 1/3.
# collection.getRandom();
#
# // Removes 1 from the collection, returns true. Collection now contains [1,2].
# collection.remove(1);
#
# // getRandom should return 1 and 2 both equally likely.
# collection.getRandom();
#
################################################################################
from random import randint
from collections import defaultdict
class RandomizedCollection(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.__list = []
self.__used = defaultdict(list)
def insert(self, val):
"""
Inserts a value to the collection. Returns true if the collection did not already contain the specified element.
:type val: int
:rtype: bool
"""
has = val in self.__used
self.__list += val,
self.__used[val] += len(self.__list) - 1,
return not has
def remove(self, val):
"""
Removes a value from the collection. Returns true if the collection contained the specified element.
:type val: int
:rtype: bool
"""
if val not in self.__used:
return False
last = self.__list.pop()
self.__used[last].remove(len(self.__list))
if val != last:
index = self.__used[val].pop()
self.__used[last].append(index)
self.__list[index] = last
if not self.__used[val]:
del self.__used[val]
return True
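    # Design note: remove() swaps the removed occurrence with the last element
    # of the backing list and pops from the end, while __used keeps the list of
    # current indices for each value; both updates are O(1) on average, which
    # is what keeps insert/remove/getRandom constant time despite duplicates.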
def getRandom(self):
"""
Get a random element from the collection.
:rtype: int
"""
return self.__list[randint(0, len(self.__list) - 1)]
# Your RandomizedCollection object will be instantiated and called as such:
# obj = RandomizedCollection()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
| mit | -2,704,729,365,606,001,700 | 30.595745 | 199 | 0.624916 | false |
egabancho/invenio | invenio/legacy/search_engine/__init__.py | 1 | 332135 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301,W0703
"""Invenio Search Engine in mod_python."""
__lastupdated__ = """$Date$"""
__revision__ = "$Id$"
## import general modules:
import cgi
import cStringIO
import copy
import os
import re
import time
import string
import urllib
import urlparse
import zlib
import sys
try:
## import optional module:
import numpy
CFG_NUMPY_IMPORTABLE = True
except ImportError:
CFG_NUMPY_IMPORTABLE = False
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from six import iteritems, string_types
## import Invenio stuff:
from invenio.base.globals import cfg
from invenio.config import \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE, \
CFG_SCOAP3_SITE, \
CFG_OAI_ID_FIELD, \
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBSEARCH_CALL_BIBFORMAT, \
CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX, \
CFG_WEBSEARCH_FIELDS_CONVERT, \
CFG_WEBSEARCH_NB_RECORDS_TO_SORT, \
CFG_WEBSEARCH_SEARCH_CACHE_SIZE, \
CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT, \
CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS, \
CFG_WEBSEARCH_USE_ALEPH_SYSNOS, \
CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, \
CFG_WEBSEARCH_FULLTEXT_SNIPPETS, \
CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS, \
CFG_WEBSEARCH_WILDCARD_LIMIT, \
CFG_WEBSEARCH_IDXPAIRS_FIELDS,\
CFG_WEBSEARCH_IDXPAIRS_EXACT_SEARCH, \
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS, \
CFG_WEBSEARCH_SYNONYM_KBRS, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_LOGDIR, \
CFG_SITE_URL, \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_SOLR_URL, \
CFG_WEBSEARCH_DETAILED_META_FORMAT, \
CFG_SITE_RECORD, \
CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT, \
CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY, \
CFG_BIBSORT_BUCKETS, \
CFG_BIBSORT_ENABLED, \
CFG_XAPIAN_ENABLED, \
CFG_BIBINDEX_CHARS_PUNCTUATION, \
CFG_BASE_URL, \
CFG_BIBFORMAT_HIDDEN_TAGS
try:
from invenio.config import CFG_BIBSORT_DEFAULT_FIELD, \
CFG_BIBSORT_DEFAULT_FIELD_ORDER
except ImportError:
CFG_BIBSORT_DEFAULT_FIELD = 'latest first'
CFG_BIBSORT_DEFAULT_FIELD_ORDER = 'd'
from invenio.modules.search.errors import \
InvenioWebSearchUnknownCollectionError, \
InvenioWebSearchWildcardLimitError
from invenio.legacy.bibrecord import (get_fieldvalues,
get_fieldvalues_alephseq_like)
from .utils import record_exists
from invenio.legacy.bibrecord import create_record, record_xml_output
from invenio.legacy.bibrank.record_sorter import (
get_bibrank_methods,
is_method_valid,
rank_records as rank_records_bibrank,
rank_by_citations)
from invenio.legacy.bibrank.downloads_similarity import register_page_view_event, calculate_reading_similarity_list
from invenio.legacy.bibindex.engine_stemmer import stem
from invenio.modules.indexer.tokenizers.BibIndexDefaultTokenizer import BibIndexDefaultTokenizer
from invenio.modules.indexer.tokenizers.BibIndexCJKTokenizer import BibIndexCJKTokenizer, is_there_any_CJK_character_in_text
from invenio.legacy.bibindex.engine_utils import author_name_requires_phrase_search, \
get_field_tags
from invenio.legacy.bibindex.engine_washer import wash_index_term, lower_index_term, wash_author_name
from invenio.legacy.bibindex.engine_config import CFG_BIBINDEX_SYNONYM_MATCH_TYPE
from invenio.legacy.bibindex.adminlib import get_idx_indexer
from invenio.modules.formatter import format_record, format_records, get_output_format_content_type, create_excel
from invenio.legacy.bibrank.downloads_grapher import create_download_history_graph_and_box
from invenio.modules.knowledge.api import get_kbr_values
from invenio.legacy.miscutil.data_cacher import DataCacher
from invenio.legacy.websearch_external_collections import print_external_results_overview, perform_external_collection_search
from invenio.modules.access.control import acc_get_action_id
from invenio.modules.access.local_config import VIEWRESTRCOLL, \
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS, \
CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS
from invenio.legacy.websearch.adminlib import get_detailed_page_tabs, get_detailed_page_tabs_counts
from intbitset import intbitset
from invenio.legacy.dbquery import DatabaseError, deserialize_via_marshal, InvenioDbQueryWildcardLimitError
from invenio.modules.access.engine import acc_authorize_action
from invenio.ext.logging import register_exception
from invenio.ext.cache import cache
from invenio.utils.text import encode_for_xml, wash_for_utf8, strip_accents
from invenio.utils.html import get_mathjax_header
from invenio.utils.html import nmtoken_from_string
from invenio.legacy import bibrecord
import invenio.legacy.template
webstyle_templates = invenio.legacy.template.load('webstyle')
webcomment_templates = invenio.legacy.template.load('webcomment')
websearch_templates = invenio.legacy.template.load('websearch')
from invenio.legacy.bibrank.citation_searcher import calculate_cited_by_list, \
calculate_co_cited_with_list, get_records_with_num_cites, \
get_refersto_hitset, get_citedby_hitset, get_cited_by_list, \
get_refers_to_list, get_citers_log
from invenio.legacy.bibrank.citation_grapher import create_citation_history_graph_and_box
from invenio.legacy.bibrank.selfcites_searcher import get_self_cited_by_list, \
get_self_cited_by, \
get_self_refers_to_list
from invenio.legacy.dbquery import run_sql, run_sql_with_limit, \
wash_table_column_name, get_table_update_time
from invenio.legacy.webuser import getUid, collect_user_info, session_param_set
from invenio.legacy.webpage import pageheaderonly, pagefooteronly, create_error_box, write_warning
from invenio.base.i18n import gettext_set_language
from invenio.legacy.search_engine.query_parser import SearchQueryParenthesisedParser, \
SpiresToInvenioSyntaxConverter
from invenio.utils import apache
from invenio.legacy.miscutil.solrutils_bibindex_searcher import solr_get_bitset
from invenio.legacy.miscutil.xapianutils_bibindex_searcher import xapian_get_bitset
from invenio.modules.search import services
from invenio.legacy.websearch_external_collections import calculate_hosted_collections_results, do_calculate_hosted_collections_results
from invenio.legacy.websearch_external_collections.config import CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH
from invenio.legacy.websearch_external_collections.config import CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH
from invenio.legacy.websearch_external_collections.config import CFG_EXTERNAL_COLLECTION_MAXRESULTS
from invenio.legacy.bibauthorid.config import LIMIT_TO_COLLECTIONS as BIBAUTHORID_LIMIT_TO_COLLECTIONS
VIEWRESTRCOLL_ID = acc_get_action_id(VIEWRESTRCOLL)
## global vars:
cfg_nb_browse_seen_records = 100 # limit of the number of records to check when browsing certain collection
cfg_nicely_ordered_collection_list = 0 # do we propose collection list nicely ordered or alphabetical?
## precompile some often-used regexp for speed reasons:
re_word = re.compile(r'[\s]')
re_quotes = re.compile('[\'\"]')
re_doublequote = re.compile('\"')
re_logical_and = re.compile(r'\sand\s', re.I)
re_logical_or = re.compile(r'\sor\s', re.I)
re_logical_not = re.compile(r'\snot\s', re.I)
re_operators = re.compile(r'\s([\+\-\|])\s')
re_pattern_wildcards_after_spaces = re.compile(r'(\s)[\*\%]+')
re_pattern_single_quotes = re.compile("'(.*?)'")
re_pattern_double_quotes = re.compile("\"(.*?)\"")
re_pattern_parens_quotes = re.compile(r'[\'\"]{1}[^\'\"]*(\([^\'\"]*\))[^\'\"]*[\'\"]{1}')
re_pattern_regexp_quotes = re.compile(r"\/(.*?)\/")
re_pattern_spaces_after_colon = re.compile(r'(:\s+)')
re_pattern_short_words = re.compile(r'([\s\"]\w{1,3})[\*\%]+')
re_pattern_space = re.compile("__SPACE__")
re_pattern_today = re.compile(r"\$TODAY\$")
re_pattern_parens = re.compile(r'\([^\)]+\s+[^\)]+\)')
re_punctuation_followed_by_space = re.compile(CFG_BIBINDEX_CHARS_PUNCTUATION + r'\s')
## em possible values
EM_REPOSITORY={"body" : "B",
"header" : "H",
"footer" : "F",
"search_box" : "S",
"see_also_box" : "L",
"basket" : "K",
"alert" : "A",
"search_info" : "I",
"overview" : "O",
"all_portalboxes" : "P",
"te_portalbox" : "Pte",
"tp_portalbox" : "Ptp",
"np_portalbox" : "Pnp",
"ne_portalbox" : "Pne",
"lt_portalbox" : "Plt",
"rt_portalbox" : "Prt",
"search_services": "SER"};
class RestrictedCollectionDataCacher(DataCacher):
def __init__(self):
def cache_filler():
ret = []
res = run_sql("""SELECT DISTINCT ar.value
FROM accROLE_accACTION_accARGUMENT raa JOIN accARGUMENT ar ON raa.id_accARGUMENT = ar.id
WHERE ar.keyword = 'collection' AND raa.id_accACTION = %s""", (VIEWRESTRCOLL_ID,), run_on_slave=True)
for coll in res:
ret.append(coll[0])
return ret
def timestamp_verifier():
return max(get_table_update_time('accROLE_accACTION_accARGUMENT'), get_table_update_time('accARGUMENT'))
DataCacher.__init__(self, cache_filler, timestamp_verifier)
def collection_restricted_p(collection, recreate_cache_if_needed=True):
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
return collection in restricted_collection_cache.cache
try:
restricted_collection_cache.is_ok_p
except NameError:
restricted_collection_cache = RestrictedCollectionDataCacher()
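# The try/except above lazily instantiates the restricted-collection cache on
# first import while preserving an already-built instance (and its cached
# contents) across module reloads.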
def ziplist(*lists):
"""Just like zip(), but returns lists of lists instead of lists of tuples
Example:
zip([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
[(f1, p1, op1), (f2, p2, op2), (f3, p3, '')]
ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
[[f1, p1, op1], [f2, p2, op2], [f3, p3, '']]
FIXME: This is handy to have, and should live somewhere else, like
miscutil.really_useful_functions or something.
XXX: Starting in python 2.6, the same can be achieved (faster) by
using itertools.izip_longest(); when the minimum recommended Python
is bumped, we should use that instead.
"""
def l(*items):
return list(items)
return map(l, *lists)
def get_permitted_restricted_collections(user_info, recreate_cache_if_needed=True):
"""Return a list of collection that are restricted but for which the user
is authorized."""
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
ret = []
auths = acc_authorize_action(
user_info,
'viewrestrcoll',
batch_args=True,
collection=restricted_collection_cache.cache
)
for collection, auth in zip(restricted_collection_cache.cache, auths):
if auth[0] == 0:
ret.append(collection)
return ret
def get_all_restricted_recids():
"""
Return the set of all the restricted recids, i.e. the ids of those records
which belong to at least one restricted collection.
"""
ret = intbitset()
for collection in restricted_collection_cache.cache:
ret |= get_collection_reclist(collection)
return ret
def get_restricted_collections_for_recid(recid, recreate_cache_if_needed=True):
"""
Return the list of restricted collection names to which recid belongs.
"""
if recreate_cache_if_needed:
restricted_collection_cache.recreate_cache_if_needed()
collection_reclist_cache.recreate_cache_if_needed()
return [collection for collection in restricted_collection_cache.cache if recid in get_collection_reclist(collection, recreate_cache_if_needed=False)]
def is_user_owner_of_record(user_info, recid):
"""
Check if the user is owner of the record, i.e. he is the submitter
and/or belongs to a owner-like group authorized to 'see' the record.
@param user_info: the user_info dictionary that describe the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
@return: True if the user is 'owner' of the record; False otherwise
@rtype: bool
"""
authorized_emails_or_group = []
for tag in CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS:
authorized_emails_or_group.extend(get_fieldvalues(recid, tag))
for email_or_group in authorized_emails_or_group:
if email_or_group in user_info['group']:
return True
email = email_or_group.strip().lower()
if user_info['email'].strip().lower() == email:
return True
if CFG_CERN_SITE:
#the egroup might be in the form [email protected]
if email_or_group.replace('@cern.ch', ' [CERN]') in user_info['group']:
return True
return False
###FIXME: This method needs to be refactored
def is_user_viewer_of_record(user_info, recid):
"""
Check if the user is allowed to view the record based on the MARC tags
inside CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS,
i.e. their email is inside the 506__m tag or they belong to an e-group listed
in the 506__m tag.
@param user_info: the user_info dictionary that describe the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
@return: True if the user is 'allowed to view' the record; False otherwise
@rtype: bool
"""
authorized_emails_or_group = []
for tag in CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS:
authorized_emails_or_group.extend(get_fieldvalues(recid, tag))
for email_or_group in authorized_emails_or_group:
if email_or_group in user_info['group']:
return True
email = email_or_group.strip().lower()
if user_info['email'].strip().lower() == email:
return True
return False
def check_user_can_view_record(user_info, recid):
"""
Check if the user is authorized to view the given recid. The function
grants access in two cases: either the user has author rights on this
record, or they have view rights to the primary collection this record
belongs to.
@param user_info: the user_info dictionary that describe the user.
@type user_info: user_info dictionary
@param recid: the record identifier.
@type recid: positive integer
@return: (0, ''), when authorization is granted, (>0, 'message') when
authorization is not granted
@rtype: (int, string)
"""
policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
if isinstance(recid, str):
recid = int(recid)
## At this point, either webcoll has not yet run or there are some
## restricted collections. Let's see first if the user owns the record.
if is_user_owner_of_record(user_info, recid):
## Perfect! It's authorized then!
return (0, '')
if is_user_viewer_of_record(user_info, recid):
## Perfect! It's authorized then!
return (0, '')
restricted_collections = get_restricted_collections_for_recid(recid, recreate_cache_if_needed=False)
if not restricted_collections and record_public_p(recid):
## The record is public and not part of any restricted collection
return (0, '')
if restricted_collections:
## If there are restricted collections the user must be authorized to all/any of them (depending on the policy)
auth_code, auth_msg = 0, ''
for collection in restricted_collections:
(auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=collection)
if auth_code and policy != 'ANY':
## Ouch! the user is not authorized to this collection
return (auth_code, auth_msg)
elif auth_code == 0 and policy == 'ANY':
## Good! At least one collection is authorized
return (0, '')
## Depending on the policy, the user will be either authorized or not
return auth_code, auth_msg
if is_record_in_any_collection(recid, recreate_cache_if_needed=False):
## the record is not in any restricted collection
return (0, '')
elif record_exists(recid) > 0:
## We are in the case where webcoll has not run.
## Let's authorize SUPERADMIN
(auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=None)
if auth_code == 0:
return (0, '')
else:
## Too bad. Let's print a nice message:
return (1, """The record you are trying to access has just been
submitted to the system and needs to be assigned to the
proper collections. It is currently restricted for security reasons
until the assignment is fully completed. Please come back later to
access this record.""")
else:
## The record either does not exist or has been deleted.
## Let's handle these situations outside of this code.
return (0, '')
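# Illustrative usage (a sketch; user_info would typically come from
# collect_user_info(req), and 1234 is a hypothetical recID):
#   auth_code, auth_msg = check_user_can_view_record(user_info, 1234)
#   if auth_code != 0:
#       pass  # deny access and display auth_msg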
class IndexStemmingDataCacher(DataCacher):
"""
Provides cache for stemming information for word/phrase indexes.
This class is not to be used directly; use function
get_index_stemming_language() instead.
"""
def __init__(self):
def cache_filler():
try:
res = run_sql("""SELECT id, stemming_language FROM idxINDEX""")
except DatabaseError:
# database problems, return empty cache
return {}
return dict(res)
def timestamp_verifier():
return get_table_update_time('idxINDEX')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
index_stemming_cache.is_ok_p
except Exception:
index_stemming_cache = IndexStemmingDataCacher()
def get_index_stemming_language(index_id, recreate_cache_if_needed=True):
"""Return stemming langugage for given index."""
if recreate_cache_if_needed:
index_stemming_cache.recreate_cache_if_needed()
return index_stemming_cache.cache[index_id]
class FieldTokenizerDataCacher(DataCacher):
"""
Provides cache for tokenizer information for fields corresponding to indexes.
This class is not to be used directly; use function
get_field_tokenizer_type() instead.
"""
def __init__(self):
def cache_filler():
try:
res = run_sql("""SELECT fld.code, ind.tokenizer FROM idxINDEX AS ind, field AS fld, idxINDEX_field AS indfld WHERE ind.id = indfld.id_idxINDEX AND indfld.id_field = fld.id""")
except DatabaseError:
# database problems, return empty cache
return {}
return dict(res)
def timestamp_verifier():
return get_table_update_time('idxINDEX')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
field_tokenizer_cache.is_ok_p
except Exception:
field_tokenizer_cache = FieldTokenizerDataCacher()
def get_field_tokenizer_type(field_name, recreate_cache_if_needed=True):
"""Return tokenizer type for given field corresponding to an index if applicable."""
if recreate_cache_if_needed:
field_tokenizer_cache.recreate_cache_if_needed()
tokenizer = None
try:
tokenizer = field_tokenizer_cache.cache[field_name]
except KeyError:
return None
return tokenizer
class CollectionRecListDataCacher(DataCacher):
"""
Provides cache for collection reclist hitsets. This class is not
to be used directly; use function get_collection_reclist() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
res = run_sql("SELECT name FROM collection")
for name in res:
ret[name[0]] = None # this will be filled later during runtime by calling get_collection_reclist(coll)
return ret
def timestamp_verifier():
return get_table_update_time('collection')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_reclist_cache.is_ok_p:
raise Exception
except Exception:
collection_reclist_cache = CollectionRecListDataCacher()
def get_collection_reclist(coll, recreate_cache_if_needed=True):
"""Return hitset of recIDs that belong to the collection 'coll'."""
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
if coll not in collection_reclist_cache.cache:
return intbitset() # collection does not exist; return empty set
if not collection_reclist_cache.cache[coll]:
# collection's reclist not in the cache yet, so calculate it
# and fill the cache:
reclist = intbitset()
query = "SELECT nbrecs,reclist FROM collection WHERE name=%s"
res = run_sql(query, (coll, ), 1)
if res and res[0][1]:
reclist = intbitset(res[0][1])
collection_reclist_cache.cache[coll] = reclist
# finally, return reclist:
return collection_reclist_cache.cache[coll]
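# Illustrative usage (a sketch; 'Articles' is a hypothetical collection name):
#   reclist = get_collection_reclist('Articles')  # -> intbitset of recIDs
#   nb_records = len(reclist)                     # empty set if the collection is unknown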
def get_available_output_formats(visible_only=False):
"""
Return the list of available output formats. When visible_only is
True, returns only those output formats that have visibility flag
set to 1.
"""
formats = []
query = "SELECT code,name FROM format"
if visible_only:
query += " WHERE visibility='1'"
query += " ORDER BY name ASC"
res = run_sql(query)
if res:
# propose found formats:
for code, name in res:
formats.append({'value': code,
'text': name
})
else:
formats.append({'value': 'hb',
'text': "HTML brief"
})
return formats
# Flask cache for search results.
from invenio.modules.search.cache import search_results_cache, get_search_results_cache_key
class CollectionI18nNameDataCacher(DataCacher):
"""
Provides cache for I18N collection names. This class is not to be
used directly; use function get_coll_i18nname() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
try:
res = run_sql("SELECT c.name,cn.ln,cn.value FROM collectionname AS cn, collection AS c WHERE cn.id_collection=c.id AND cn.type='ln'") # ln=long name
except Exception:
# database problems
return {}
for c, ln, i18nname in res:
if i18nname:
if c not in ret:
ret[c] = {}
ret[c][ln] = i18nname
return ret
def timestamp_verifier():
return get_table_update_time('collectionname')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_i18nname_cache.is_ok_p:
raise Exception
except Exception:
collection_i18nname_cache = CollectionI18nNameDataCacher()
def get_coll_i18nname(c, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
"""
Return nicely formatted collection name (of the name type `ln'
(=long name)) for collection C in language LN.
This function uses collection_i18nname_cache, but it verifies
whether the cache is up-to-date first by default. This
verification step is performed by checking the DB table update
time. So, if you call this function 1000 times, it can get very
slow because it will do 1000 table update time verifications, even
though collection names do not change that often.
Hence the parameter VERIFY_CACHE_TIMESTAMP which, when set to
False, will assume the cache is already up-to-date. This is
useful namely in the generation of collection lists for the search
results page.
"""
if verify_cache_timestamp:
collection_i18nname_cache.recreate_cache_if_needed()
out = c
try:
out = collection_i18nname_cache.cache[c][ln]
except KeyError:
pass # translation in LN does not exist
return out
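# Illustrative usage inside a loop (a sketch): refresh the cache once up front and
# skip the per-call timestamp verification, as suggested in the docstring above:
#   collection_i18nname_cache.recreate_cache_if_needed()
#   for coll in colls:
#       name = get_coll_i18nname(coll, ln, verify_cache_timestamp=False)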
class FieldI18nNameDataCacher(DataCacher):
"""
Provides cache for I18N field names. This class is not to be used
directly; use function get_field_i18nname() instead.
"""
def __init__(self):
def cache_filler():
ret = {}
try:
res = run_sql("SELECT f.name,fn.ln,fn.value FROM fieldname AS fn, field AS f WHERE fn.id_field=f.id AND fn.type='ln'") # ln=long name
except Exception:
# database problems, return empty cache
return {}
for f, ln, i18nname in res:
if i18nname:
if f not in ret:
ret[f] = {}
ret[f][ln] = i18nname
return ret
def timestamp_verifier():
return get_table_update_time('fieldname')
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not field_i18nname_cache.is_ok_p:
raise Exception
except Exception:
field_i18nname_cache = FieldI18nNameDataCacher()
def get_field_i18nname(f, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
"""
Return nicely formatted field name (of type 'ln', 'long name') for
field F in language LN.
If VERIFY_CACHE_TIMESTAMP is set to True, then verify DB timestamp
and field I18N name cache timestamp and refresh cache from the DB
if needed. Otherwise don't bother checking DB timestamp and
return the cached value. (This is useful when get_field_i18nname
is called inside a loop.)
"""
if verify_cache_timestamp:
field_i18nname_cache.recreate_cache_if_needed()
out = f
try:
out = field_i18nname_cache.cache[f][ln]
except KeyError:
pass # translation in LN does not exist
return out
def get_alphabetically_ordered_collection_list(level=0, ln=CFG_SITE_LANG):
"""Returns nicely ordered (score respected) list of collections, more exactly list of tuples
(collection name, printable collection name).
Suitable for create_search_box()."""
out = []
res = run_sql("SELECT name FROM collection ORDER BY name ASC")
for c_name in res:
c_name = c_name[0]
# make a nice printable name (e.g. truncate c_printable for
# long collection names in given language):
c_printable_fullname = get_coll_i18nname(c_name, ln, False)
c_printable = wash_index_term(c_printable_fullname, 30, False)
if c_printable != c_printable_fullname:
c_printable = c_printable + "..."
if level:
c_printable = " " + level * '-' + " " + c_printable
out.append([c_name, c_printable])
return out
def get_nicely_ordered_collection_list(collid=1, level=0, ln=CFG_SITE_LANG):
"""Returns nicely ordered (score respected) list of collections, more exactly list of tuples
(collection name, printable collection name).
Suitable for create_search_box()."""
colls_nicely_ordered = []
res = run_sql("""SELECT c.name,cc.id_son FROM collection_collection AS cc, collection AS c
WHERE c.id=cc.id_son AND cc.id_dad=%s ORDER BY score ASC""", (collid, ))
for c, cid in res:
# make a nice printable name (e.g. truncate c_printable for
# long collection names in given language):
c_printable_fullname = get_coll_i18nname(c, ln, False)
c_printable = wash_index_term(c_printable_fullname, 30, False)
if c_printable != c_printable_fullname:
c_printable = c_printable + "..."
if level:
c_printable = " " + level * '-' + " " + c_printable
colls_nicely_ordered.append([c, c_printable])
colls_nicely_ordered = colls_nicely_ordered + get_nicely_ordered_collection_list(cid, level+1, ln=ln)
return colls_nicely_ordered
def get_index_id_from_field(field):
"""
Return index id with name corresponding to FIELD, or the first
index id where the logical field code named FIELD is indexed.
Return zero in case there is no index defined for this field.
Example: field='author', output=4.
"""
out = 0
if not field:
field = 'global' # empty string field means 'global' index (field 'anyfield')
# first look in the index table:
res = run_sql("""SELECT id FROM idxINDEX WHERE name=%s""", (field,))
if res:
out = res[0][0]
return out
# not found in the index table, now look in the field table:
res = run_sql("""SELECT w.id FROM idxINDEX AS w, idxINDEX_field AS wf, field AS f
WHERE f.code=%s AND wf.id_field=f.id AND w.id=wf.id_idxINDEX
LIMIT 1""", (field,))
if res:
out = res[0][0]
return out
def get_words_from_pattern(pattern):
"""
Returns list of whitespace-separated words from pattern, removing any
trailing punctuation-like signs from words in pattern.
"""
words = {}
# clean trailing punctuation signs inside pattern
pattern = re_punctuation_followed_by_space.sub(' ', pattern)
for word in pattern.split():
if word not in words:
words[word] = 1
return words.keys()
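# Illustrative behaviour (a sketch; the exact result depends on
# re_punctuation_followed_by_space, and the ordering is not guaranteed since a
# dictionary is used for deduplication):
#   get_words_from_pattern("muon, kaon muon")  -> e.g. ['muon', 'kaon']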
def create_basic_search_units(req, p, f, m=None, of='hb'):
"""Splits search pattern and search field into a list of independently searchable units.
- A search unit consists of '(operator, pattern, field, type, hitset)' tuples where
'operator' is set union (|), set intersection (+) or set exclusion (-);
'pattern' is either a word (e.g. muon*) or a phrase (e.g. 'nuclear physics');
'field' is either a code like 'title' or MARC tag like '100__a';
'type' is the search type ('w' for word file search, 'a' for access file search).
- Optionally, the function accepts the match type argument 'm'.
If it is set (e.g. from advanced search interface), then it
performs this kind of matching. If it is not set, then a guess is made.
'm' can have values: 'a'='all of the words', 'o'='any of the words',
'p'='phrase/substring', 'r'='regular expression',
'e'='exact value'.
- Warnings are printed on req (when not None) in case of HTML output formats."""
opfts = [] # will hold (o,p,f,t,h) units
# FIXME: quick hack for the journal index
if f == 'journal':
opfts.append(['+', p, f, 'w'])
return opfts
## check arguments: is desired matching type set?
if m:
## A - matching type is known; good!
if m == 'e':
# A1 - exact value:
opfts.append(['+', p, f, 'a']) # '+' since we have only one unit
elif m == 'p':
# A2 - phrase/substring:
opfts.append(['+', "%" + p + "%", f, 'a']) # '+' since we have only one unit
elif m == 'r':
# A3 - regular expression:
opfts.append(['+', p, f, 'r']) # '+' since we have only one unit
elif m == 'a' or m == 'w':
# A4 - all of the words:
p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
for word in get_words_from_pattern(p):
opfts.append(['+', word, f, 'w']) # '+' in all units
elif m == 'o':
# A5 - any of the words:
p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
for word in get_words_from_pattern(p):
if len(opfts)==0:
opfts.append(['+', word, f, 'w']) # '+' in the first unit
else:
opfts.append(['|', word, f, 'w']) # '|' in further units
else:
if of.startswith("h"):
write_warning("Matching type '%s' is not implemented yet." % cgi.escape(m), "Warning", req=req)
opfts.append(['+', "%" + p + "%", f, 'w'])
else:
## B - matching type is not known: let us try to determine it by some heuristics
if f and p[0] == '"' and p[-1] == '"':
## B0 - does 'p' start and end by double quote, and is 'f' defined? => doing ACC search
opfts.append(['+', p[1:-1], f, 'a'])
elif f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor', 'authorityauthor') and author_name_requires_phrase_search(p):
## B1 - do we search in author, and does 'p' contain space/comma/dot/etc?
## => doing washed ACC search
opfts.append(['+', p, f, 'a'])
elif f and p[0] == "'" and p[-1] == "'":
## B0bis - does 'p' start and end by single quote, and is 'f' defined? => doing ACC search
opfts.append(['+', '%' + p[1:-1] + '%', f, 'a'])
elif f and p[0] == "/" and p[-1] == "/":
## B0ter - does 'p' start and end by a slash, and is 'f' defined? => doing regexp search
opfts.append(['+', p[1:-1], f, 'r'])
elif f and p.find(',') >= 0:
## B1 - does 'p' contain comma, and is 'f' defined? => doing ACC search
opfts.append(['+', p, f, 'a'])
elif f and str(f[0:2]).isdigit():
## B2 - does 'f' exist and starts by two digits? => doing ACC search
opfts.append(['+', p, f, 'a'])
else:
## B3 - doing WRD search, but maybe ACC too
# search units are separated by spaces unless the space is within single or double quotes
# so, let us replace temporarily any space within quotes by '__SPACE__'
p = re_pattern_single_quotes.sub(lambda x: "'"+x.group(1).replace(' ', '__SPACE__')+"'", p)
p = re_pattern_double_quotes.sub(lambda x: "\""+x.group(1).replace(' ', '__SPACE__')+"\"", p)
p = re_pattern_regexp_quotes.sub(lambda x: "/"+x.group(1).replace(' ', '__SPACE__')+"/", p)
# and spaces after colon as well:
p = re_pattern_spaces_after_colon.sub(lambda x: x.group(1).replace(' ', '__SPACE__'), p)
# wash argument:
p = re_logical_and.sub(" ", p)
p = re_logical_or.sub(" |", p)
p = re_logical_not.sub(" -", p)
p = re_operators.sub(r' \1', p)
for pi in p.split(): # iterate through separated units (or items, as "pi" stands for "p item")
pi = re_pattern_space.sub(" ", pi) # replace back '__SPACE__' by ' '
# firstly, determine set operator
if pi[0] == '+' or pi[0] == '-' or pi[0] == '|':
oi = pi[0]
pi = pi[1:]
else:
# okay, there is no operator, so let us decide what to do by default
oi = '+' # by default we are doing set intersection...
# secondly, determine search pattern and field:
if pi.find(":") > 0:
fi, pi = pi.split(":", 1)
fi = wash_field(fi)
# test whether fi is a real index code or a MARC-tag defined code:
if fi in get_fieldcodes() or '00' <= fi[:2] <= '99':
pass
else:
# it is not, so join it back:
fi, pi = f, fi + ":" + pi
else:
fi, pi = f, pi
# wash 'fi' argument:
fi = wash_field(fi)
# wash 'pi' argument:
pi = pi.strip() # strip any surrounding spaces
if re_quotes.match(pi):
# B3a - quotes are found => do ACC search (phrase search)
if pi[0] == '"' and pi[-1] == '"':
pi = pi.replace('"', '') # remove quote signs
opfts.append([oi, pi, fi, 'a'])
elif pi[0] == "'" and pi[-1] == "'":
pi = pi.replace("'", "") # remove quote signs
opfts.append([oi, "%" + pi + "%", fi, 'a'])
else: # unbalanced quotes, so fall back to WRD query:
opfts.append([oi, pi, fi, 'w'])
elif pi.startswith('/') and pi.endswith('/'):
# B3b - pi has slashes around => do regexp search
opfts.append([oi, pi[1:-1], fi, 'r'])
elif fi and len(fi) > 1 and str(fi[0]).isdigit() and str(fi[1]).isdigit():
# B3c - fi exists and starts by two digits => do ACC search
opfts.append([oi, pi, fi, 'a'])
elif fi and not get_index_id_from_field(fi) and get_field_name(fi):
# B3d - logical field fi exists but there is no WRD index for fi => try ACC search
opfts.append([oi, pi, fi, 'a'])
else:
# B3e - general case => do WRD search
pi = strip_accents(pi) # strip accents for 'w' mode, FIXME: delete when not needed
for pii in get_words_from_pattern(pi):
opfts.append([oi, pii, fi, 'w'])
## sanity check:
for i in range(0, len(opfts)):
try:
pi = opfts[i][1]
if pi == '*':
if of.startswith("h"):
write_warning("Ignoring standalone wildcard word.", "Warning", req=req)
del opfts[i]
if pi == '' or pi == ' ':
fi = opfts[i][2]
if fi:
if of.startswith("h"):
write_warning("Ignoring empty <em>%s</em> search term." % fi, "Warning", req=req)
del opfts[i]
except:
pass
## replace old logical field names if applicable:
if CFG_WEBSEARCH_FIELDS_CONVERT:
opfts = [[o, p, wash_field(f), t] for o, p, f, t in opfts]
## return search units:
return opfts
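# Illustrative behaviour (a sketch; the exact splitting depends on the washing
# regexps and on the configured indexes):
#   create_basic_search_units(None, 'ellis title:muon*', '', None)
#     -> e.g. [['+', 'ellis', '', 'w'], ['+', 'muon*', 'title', 'w']]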
def page_start(req, of, cc, aas, ln, uid, title_message=None,
description='', keywords='', recID=-1, tab='', p='', em=''):
"""
Start page according to given output format.
@param title_message: title of the page, not escaped for HTML
@param description: description of the page, not escaped for HTML
@param keywords: keywords of the page, not escaped for HTML
"""
_ = gettext_set_language(ln)
if not req or isinstance(req, cStringIO.OutputType):
return # we were called from CLI
if not title_message:
title_message = _("Search Results")
content_type = get_output_format_content_type(of)
if of.startswith('x'):
if of == 'xr':
# we are doing RSS output
req.content_type = "application/rss+xml"
req.send_http_header()
req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
else:
# we are doing XML output:
req.content_type = get_output_format_content_type(of, 'text/xml')
req.send_http_header()
req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
elif of.startswith('t') or str(of[0:3]).isdigit():
# we are doing plain text output:
req.content_type = "text/plain"
req.send_http_header()
elif of == "intbitset":
req.content_type = "application/octet-stream"
req.send_http_header()
elif of == "recjson":
req.content_type = "application/json"
req.send_http_header()
elif of == "id":
pass # nothing to do, we shall only return list of recIDs
elif content_type == 'text/html':
# we are doing HTML output:
req.content_type = "text/html"
req.send_http_header()
if not description:
description = "%s %s." % (cc, _("Search Results"))
if not keywords:
keywords = "%s, WebSearch, %s" % (get_coll_i18nname(CFG_SITE_NAME, ln, False), get_coll_i18nname(cc, ln, False))
## generate RSS URL:
argd = {}
if req.args:
argd = cgi.parse_qs(req.args)
rssurl = websearch_templates.build_rss_url(argd)
## add MathJax if displaying single records (FIXME: find
## a better place for this code eventually)
if of.lower() in CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS:
metaheaderadd = get_mathjax_header(req.is_https())
else:
metaheaderadd = ''
# Add metadata in meta tags for Google scholar-esque harvesting...
# only if we have a detailed meta format and we are looking at a
# single record
if recID != -1 and CFG_WEBSEARCH_DETAILED_META_FORMAT and \
record_exists(recID) == 1:
metaheaderadd += format_record(recID,
CFG_WEBSEARCH_DETAILED_META_FORMAT,
ln=ln)
## generate navtrail:
navtrail = create_navtrail_links(cc, aas, ln)
if navtrail != '':
navtrail += ' > '
if (tab != '' or ((of != '' or of.lower() != 'hd') and of != 'hb')) and \
recID != -1:
# If we are not in information tab in HD format, customize
# the nav. trail to have a link back to main record. (Due
# to the way perform_request_search() works, hb
# (lowercase) is equal to hd)
navtrail += ' <a class="navtrail" href="%s/%s/%s">%s</a>' % \
(CFG_BASE_URL, CFG_SITE_RECORD, recID, cgi.escape(title_message))
if (of != '' or of.lower() != 'hd') and of != 'hb':
# Export
format_name = of
query = "SELECT name FROM format WHERE code=%s"
res = run_sql(query, (of,))
if res:
format_name = res[0][0]
navtrail += ' > ' + format_name
else:
# Discussion, citations, etc. tabs
tab_label = get_detailed_page_tabs(cc, ln=ln)[tab]['label']
navtrail += ' > ' + _(tab_label)
else:
navtrail += cgi.escape(title_message)
if p:
# we are serving search/browse results pages, so insert pattern:
navtrail += ": " + cgi.escape(p)
title_message = p + " - " + title_message
body_css_classes = []
if cc:
# we know the collection, so let's allow page styles based on cc
#collection names may not satisfy rules for css classes which
#are something like: -?[_a-zA-Z]+[_a-zA-Z0-9-]*
#however it isn't clear what we should do about cases with
#numbers, so we leave them to fail. Everything else becomes "_"
css = nmtoken_from_string(cc).replace('.', '_').replace('-', '_').replace(':', '_')
body_css_classes.append(css)
## finally, print page header:
if em == '' or EM_REPOSITORY["header"] in em:
req.write(pageheaderonly(req=req, title=title_message,
navtrail=navtrail,
description=description,
keywords=keywords,
metaheaderadd=metaheaderadd,
uid=uid,
language=ln,
navmenuid='search',
navtrail_append_title_p=0,
rssurl=rssurl,
body_css_classes=body_css_classes))
req.write(websearch_templates.tmpl_search_pagestart(ln=ln))
else:
req.content_type = content_type
req.send_http_header()
def page_end(req, of="hb", ln=CFG_SITE_LANG, em=""):
"End page according to given output format: e.g. close XML tags, add HTML footer, etc."
if of == "id":
return [] # empty recID list
if of == "intbitset":
return intbitset()
if not req:
return # we were called from CLI
if of.startswith('h'):
req.write(websearch_templates.tmpl_search_pageend(ln = ln)) # pagebody end
if em == "" or EM_REPOSITORY["footer"] in em:
req.write(pagefooteronly(lastupdated=__lastupdated__, language=ln, req=req))
return
def create_add_to_search_pattern(p, p1, f1, m1, op1):
"""Create the search pattern """
if not p1:
return p
init_search_pattern = p
# operation: AND, OR, AND NOT
if op1 == 'a' and p: # we don't want '+' at the beginning of the query
op = ' +'
elif op1 == 'o':
op = ' |'
elif op1 == 'n':
op = ' -'
else:
op = ''
# field
field = ''
if f1:
field = f1 + ':'
# type of search
pattern = p1
start = '('
end = ')'
if m1 == 'e':
start = end = '"'
elif m1 == 'p':
start = end = "'"
elif m1 == 'r':
start = end = '/'
else: # m1 == 'o' or m1 =='a'
words = p1.strip().split(' ')
if len(words) == 1:
start = end = ''
pattern = field + words[0]
elif m1 == 'o':
pattern = ' |'.join([field + word for word in words])
else:
pattern = ' '.join([field + word for word in words])
#avoid having field:(word1 word2) since this does not currently work correctly
return init_search_pattern + op + start + pattern + end
if not pattern:
return ''
#avoid having field:(word1 word2) since this does not currently work correctly
return init_search_pattern + op + field + start + pattern + end
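# Illustrative behaviour (a sketch):
#   create_add_to_search_pattern('ellis', 'muon kaon', 'title', 'o', 'a')
#     -> 'ellis +(title:muon |title:kaon)'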
def create_page_title_search_pattern_info(p, p1, p2, p3):
"""Create the search pattern bit for the page <title> web page
HTML header. Basically combine p and (p1,p2,p3) together so that
the page header may be filled whether we are in the Simple Search
or Advanced Search interface contexts."""
out = ""
if p:
out = p
else:
out = p1
if p2:
out += ' ' + p2
if p3:
out += ' ' + p3
return out
def create_inputdate_box(name="d1", selected_year=0, selected_month=0, selected_day=0, ln=CFG_SITE_LANG):
"Produces 'From Date', 'Until Date' kind of selection box. Suitable for search options."
_ = gettext_set_language(ln)
box = ""
# day
box += """<select name="%sd">""" % name
box += """<option value="">%s""" % _("any day")
for day in range(1, 32):
box += """<option value="%02d"%s>%02d""" % (day, is_selected(day, selected_day), day)
box += """</select>"""
# month
box += """<select name="%sm">""" % name
box += """<option value="">%s""" % _("any month")
# trailing space in May distinguishes short/long form of the month name
for mm, month in [(1, _("January")), (2, _("February")), (3, _("March")), (4, _("April")),
(5, _("May ")), (6, _("June")), (7, _("July")), (8, _("August")),
(9, _("September")), (10, _("October")), (11, _("November")), (12, _("December"))]:
box += """<option value="%02d"%s>%s""" % (mm, is_selected(mm, selected_month), month.strip())
box += """</select>"""
# year
box += """<select name="%sy">""" % name
box += """<option value="">%s""" % _("any year")
this_year = int(time.strftime("%Y", time.localtime()))
for year in range(this_year-20, this_year+1):
box += """<option value="%d"%s>%d""" % (year, is_selected(year, selected_year), year)
box += """</select>"""
return box
def create_search_box(cc, colls, p, f, rg, sf, so, sp, rm, of, ot, aas,
ln, p1, f1, m1, op1, p2, f2, m2, op2, p3, f3,
m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec,
action="", em=""):
"""Create search box for 'search again in the results page' functionality."""
if em != "" and EM_REPOSITORY["search_box"] not in em:
if EM_REPOSITORY["body"] in em and cc != CFG_SITE_NAME:
return '''
<h1 class="headline">%(ccname)s</h1>''' % {'ccname' : cgi.escape(cc), }
else:
return ""
# load the right message language
_ = gettext_set_language(ln)
# some computations
cc_intl = get_coll_i18nname(cc, ln, False)
cc_colID = get_colID(cc)
colls_nicely_ordered = []
if cfg_nicely_ordered_collection_list:
colls_nicely_ordered = get_nicely_ordered_collection_list(ln=ln)
else:
colls_nicely_ordered = get_alphabetically_ordered_collection_list(ln=ln)
colls_nice = []
for (cx, cx_printable) in colls_nicely_ordered:
if not cx.startswith("Unnamed collection"):
colls_nice.append({'value': cx,
'text': cx_printable
})
coll_selects = []
if colls and colls[0] != CFG_SITE_NAME:
# some collections are defined, so print these first, and only then print 'add another collection' heading:
for c in colls:
if c:
temp = []
temp.append({'value': CFG_SITE_NAME,
'text': '*** %s ***' % (CFG_SCOAP3_SITE and _("any publisher or journal") or _("any public collection"))
})
# this field is used to remove the current collection from the ones to be searched.
temp.append({'value': '',
'text': '*** %s ***' % (CFG_SCOAP3_SITE and _("remove this publisher or journal") or _("remove this collection"))
})
for val in colls_nice:
# print collection:
if not cx.startswith("Unnamed collection"):
temp.append({'value': val['value'],
'text': val['text'],
'selected' : (c == re.sub(r"^[\s\-]*", "", val['value']))
})
coll_selects.append(temp)
coll_selects.append([{'value': '',
'text' : '*** %s ***' % (CFG_SCOAP3_SITE and _("add another publisher or journal") or _("add another collection"))
}] + colls_nice)
else: # we searched in CFG_SITE_NAME, so print 'any public collection' heading
coll_selects.append([{'value': CFG_SITE_NAME,
'text' : '*** %s ***' % (CFG_SCOAP3_SITE and _("any publisher or journal") or _("any public collection"))
}] + colls_nice)
## ranking methods
ranks = [{
'value' : '',
'text' : "- %s %s -" % (_("OR").lower(), _("rank by")),
}]
for (code, name) in get_bibrank_methods(cc_colID, ln):
# propose found rank methods:
ranks.append({
'value': code,
'text': name,
})
formats = get_available_output_formats(visible_only=True)
# show collections in the search box? (not if there is only one
# collection defined, and not if we are in light search)
show_colls = True
show_title = True
if len(collection_reclist_cache.cache.keys()) == 1 or \
aas == -1:
show_colls = False
show_title = False
if cc == CFG_SITE_NAME:
show_title = False
if CFG_INSPIRE_SITE:
show_title = False
return websearch_templates.tmpl_search_box(
ln = ln,
aas = aas,
cc_intl = cc_intl,
cc = cc,
ot = ot,
sp = sp,
action = action,
fieldslist = get_searchwithin_fields(ln=ln, colID=cc_colID),
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
p1 = p1,
p2 = p2,
p3 = p3,
op1 = op1,
op2 = op2,
rm = rm,
p = p,
f = f,
coll_selects = coll_selects,
d1y = d1y, d2y = d2y, d1m = d1m, d2m = d2m, d1d = d1d, d2d = d2d,
dt = dt,
sort_fields = get_sortby_fields(ln=ln, colID=cc_colID),
sf = sf,
so = so,
ranks = ranks,
sc = sc,
rg = rg,
formats = formats,
of = of,
pl = pl,
jrec = jrec,
ec = ec,
show_colls = show_colls,
show_title = show_title and (em=="" or EM_REPOSITORY["body"] in em)
)
def create_exact_author_browse_help_link(p=None, p1=None, p2=None, p3=None, f=None, f1=None, f2=None, f3=None,
rm=None, cc=None, ln=None, jrec=None, rg=None, aas=0, action=""):
"""Creates a link to help switch from author to exact author while browsing"""
if action == 'browse':
search_fields = (f, f1, f2, f3)
if 'author' in search_fields or 'firstauthor' in search_fields:
def add_exact(field):
if field == 'author' or field == 'firstauthor':
return 'exact' + field
return field
fe, f1e, f2e, f3e = [add_exact(field) for field in search_fields]
link_name = f or f1
link_name = (link_name == 'firstauthor' and 'exact first author') or 'exact author'
return websearch_templates.tmpl_exact_author_browse_help_link(p=p, p1=p1, p2=p2, p3=p3, f=fe, f1=f1e, f2=f2e, f3=f3e,
rm=rm, cc=cc, ln=ln, jrec=jrec, rg=rg, aas=aas, action=action,
link_name=link_name)
return ""
def create_navtrail_links(cc=CFG_SITE_NAME, aas=0, ln=CFG_SITE_LANG, self_p=1, tab=''):
"""Creates navigation trail links, i.e. links to collection
ancestors (except Home collection). If aas==1, then links to
Advanced Search interfaces; otherwise Simple Search.
"""
dads = []
for dad in get_coll_ancestors(cc):
if dad != CFG_SITE_NAME: # exclude Home collection
dads.append((dad, get_coll_i18nname(dad, ln, False)))
if self_p and cc != CFG_SITE_NAME:
dads.append((cc, get_coll_i18nname(cc, ln, False)))
return websearch_templates.tmpl_navtrail_links(
aas=aas, ln=ln, dads=dads)
def get_searchwithin_fields(ln='en', colID=None):
"""Retrieves the fields name used in the 'search within' selection box for the collection ID colID."""
res = None
if colID:
res = run_sql("""SELECT f.code,f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='sew' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (colID,))
if not res:
res = run_sql("SELECT code,name FROM field ORDER BY name ASC")
fields = [{
'value' : '',
'text' : get_field_i18nname("any field", ln, False)
}]
for field_code, field_name in res:
if field_code and field_code != "anyfield":
fields.append({'value': field_code,
'text': get_field_i18nname(field_name, ln, False)
})
return fields
def get_sortby_fields(ln='en', colID=None):
"""Retrieves the fields name used in the 'sort by' selection box for the collection ID colID."""
_ = gettext_set_language(ln)
res = None
if colID:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (colID,))
if not res:
# no sort fields defined for this colID, try to take Home collection:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""", (1,))
if not res:
# no sort fields defined for the Home collection, take all sort fields defined wherever they are:
res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
WHERE cff.type='soo' AND cff.id_field=f.id
ORDER BY cff.score DESC, f.name ASC""",)
fields = [{
'value': '',
'text': _(CFG_BIBSORT_DEFAULT_FIELD)
}]
for field_code, field_name in res:
if field_code and field_code != "anyfield":
fields.append({'value': field_code,
'text': get_field_i18nname(field_name, ln, False)
})
return fields
def create_andornot_box(name='op', value='', ln='en'):
"Returns HTML code for the AND/OR/NOT selection box."
_ = gettext_set_language(ln)
out = """
<select name="%s">
<option value="a"%s>%s
<option value="o"%s>%s
<option value="n"%s>%s
</select>
""" % (name,
is_selected('a', value), _("AND"),
is_selected('o', value), _("OR"),
is_selected('n', value), _("AND NOT"))
return out
def create_matchtype_box(name='m', value='', ln='en'):
"Returns HTML code for the 'match type' selection box."
_ = gettext_set_language(ln)
out = """
<select name="%s">
<option value="a"%s>%s
<option value="o"%s>%s
<option value="e"%s>%s
<option value="p"%s>%s
<option value="r"%s>%s
</select>
""" % (name,
is_selected('a', value), _("All of the words:"),
is_selected('o', value), _("Any of the words:"),
is_selected('e', value), _("Exact phrase:"),
is_selected('p', value), _("Partial phrase:"),
is_selected('r', value), _("Regular expression:"))
return out
def is_selected(var, fld):
"Checks if the two are equal, and if yes, returns ' selected'. Useful for select boxes."
if type(var) is int and type(fld) is int:
if var == fld:
return " selected"
elif str(var) == str(fld):
return " selected"
elif fld and len(fld)==3 and fld[0] == "w" and var == fld[1:]:
return " selected"
return ""
def wash_colls(cc, c, split_colls=0, verbose=0):
"""Wash collection list by checking whether user has deselected
anything under 'Narrow search'. Checks also if cc is a list or not.
Return list of cc, colls_to_display, colls_to_search since the list
of collections to display is different from that to search in.
This is because users might have chosen 'split by collection'
functionality.
The behaviour of "collections to display" depends solely whether
user has deselected a particular collection: e.g. if it started
from 'Articles and Preprints' page, and deselected 'Preprints',
then collection to display is 'Articles'. If he did not deselect
anything, then collection to display is 'Articles & Preprints'.
The behaviour of "collections to search in" depends on the
'split_colls' parameter:
* if it is equal to 1, then we can wash the colls list down
and search solely in the collection the user started from;
* if it is equal to 0, then we are splitting to the first level
of collections, i.e. collections as they appear on the page
we started to search from;
The function raises exception
InvenioWebSearchUnknownCollectionError
if cc or one of c collections is not known.
"""
colls_out = []
colls_out_for_display = []
# list to hold the hosted collections to be searched and displayed
hosted_colls_out = []
debug = ""
if verbose:
debug += "<br />"
debug += "<br />1) --- initial parameters ---"
debug += "<br />cc : %s" % cc
debug += "<br />c : %s" % c
debug += "<br />"
# check what type is 'cc':
if type(cc) is list:
for ci in cc:
if ci in collection_reclist_cache.cache:
# yes this collection is real, so use it:
cc = ci
break
else:
# check once if cc is real:
if cc not in collection_reclist_cache.cache:
if cc:
raise InvenioWebSearchUnknownCollectionError(cc)
else:
cc = CFG_SITE_NAME # cc is not set, so replace it with Home collection
# check type of 'c' argument:
if type(c) is list:
colls = c
else:
colls = [c]
if verbose:
debug += "<br />2) --- after check for the integrity of cc and the being or not c a list ---"
debug += "<br />cc : %s" % cc
debug += "<br />c : %s" % c
debug += "<br />"
# remove all 'unreal' collections:
colls_real = []
for coll in colls:
if coll in collection_reclist_cache.cache:
colls_real.append(coll)
else:
if coll:
raise InvenioWebSearchUnknownCollectionError(coll)
colls = colls_real
if verbose:
debug += "<br />3) --- keeping only the real colls of c ---"
debug += "<br />colls : %s" % colls
debug += "<br />"
# check if some real collections remain:
if len(colls)==0:
colls = [cc]
if verbose:
debug += "<br />4) --- in case no colls were left we use cc directly ---"
debug += "<br />colls : %s" % colls
debug += "<br />"
# then let us check the list of non-restricted "real" sons of 'cc' and compare it to 'coll':
res = run_sql("""SELECT c.name FROM collection AS c,
collection_collection AS cc,
collection AS ccc
WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
AND ccc.name=%s AND cc.type='r'""", (cc,))
# list that holds all the non-restricted sons of cc that are also not hosted collections
l_cc_nonrestricted_sons_and_nonhosted_colls = []
res_hosted = run_sql("""SELECT c.name FROM collection AS c,
collection_collection AS cc,
collection AS ccc
WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
AND ccc.name=%s AND cc.type='r'
AND (c.dbquery NOT LIKE 'hostedcollection:%%' OR c.dbquery IS NULL)""", (cc,))
for row_hosted in res_hosted:
l_cc_nonrestricted_sons_and_nonhosted_colls.append(row_hosted[0])
l_cc_nonrestricted_sons_and_nonhosted_colls.sort()
l_cc_nonrestricted_sons = []
l_c = colls[:]
for row in res:
if not collection_restricted_p(row[0]):
l_cc_nonrestricted_sons.append(row[0])
l_c.sort()
l_cc_nonrestricted_sons.sort()
if l_cc_nonrestricted_sons == l_c:
colls_out_for_display = [cc] # yep, washing permitted, it is sufficient to display 'cc'
# the following elif is a hack that preserves the above functionality when we start searching from
# the frontpage with some hosted collections deselected (either by default or manually)
elif set(l_cc_nonrestricted_sons_and_nonhosted_colls).issubset(set(l_c)):
colls_out_for_display = colls
split_colls = 0
else:
colls_out_for_display = colls # nope, we need to display all 'colls' successively
# remove duplicates:
#colls_out_for_display_nondups=filter(lambda x, colls_out_for_display=colls_out_for_display: colls_out_for_display[x-1] not in colls_out_for_display[x:], range(1, len(colls_out_for_display)+1))
#colls_out_for_display = map(lambda x, colls_out_for_display=colls_out_for_display:colls_out_for_display[x-1], colls_out_for_display_nondups)
#colls_out_for_display = list(set(colls_out_for_display))
#remove duplicates while preserving the order
set_out = set()
colls_out_for_display = [coll for coll in colls_out_for_display if coll not in set_out and not set_out.add(coll)]
if verbose:
debug += "<br />5) --- decide whether colls_out_for_diplay should be colls or is it sufficient for it to be cc; remove duplicates ---"
debug += "<br />colls_out_for_display : %s" % colls_out_for_display
debug += "<br />"
# FIXME: The below quoted part of the code has been commented out
# because it prevents searching in individual restricted daughter
# collections when both parent and all its public daughter
# collections were asked for, in addition to some restricted
# daughter collections. The removal was introduced for hosted
# collections, so we may want to double check in this context.
# the following piece of code takes care of removing collections whose ancestors are going to be searched anyway
# list to hold the collections to be removed
#colls_to_be_removed = []
# first calculate the collections that can safely be removed
#for coll in colls_out_for_display:
# for ancestor in get_coll_ancestors(coll):
# #if ancestor in colls_out_for_display: colls_to_be_removed.append(coll)
# if ancestor in colls_out_for_display and not is_hosted_collection(coll): colls_to_be_removed.append(coll)
# secondly remove the collections
#for coll in colls_to_be_removed:
# colls_out_for_display.remove(coll)
if verbose:
debug += "<br />6) --- remove collections that have ancestors about to be search, unless they are hosted ---"
debug += "<br />colls_out_for_display : %s" % colls_out_for_display
debug += "<br />"
# calculate the hosted collections to be searched.
if colls_out_for_display == [cc]:
if is_hosted_collection(cc):
hosted_colls_out.append(cc)
else:
for coll in get_coll_sons(cc):
if is_hosted_collection(coll):
hosted_colls_out.append(coll)
else:
for coll in colls_out_for_display:
if is_hosted_collection(coll):
hosted_colls_out.append(coll)
if verbose:
debug += "<br />7) --- calculate the hosted_colls_out ---"
debug += "<br />hosted_colls_out : %s" % hosted_colls_out
debug += "<br />"
# second, let us decide on collection splitting:
if split_colls == 0:
# type A - no sons are wanted
colls_out = colls_out_for_display
else:
# type B - sons (first-level descendants) are wanted
for coll in colls_out_for_display:
coll_sons = get_coll_sons(coll)
if coll_sons == []:
colls_out.append(coll)
else:
for coll_son in coll_sons:
if not is_hosted_collection(coll_son):
colls_out.append(coll_son)
#else:
# colls_out = colls_out + coll_sons
# remove duplicates:
#colls_out_nondups=filter(lambda x, colls_out=colls_out: colls_out[x-1] not in colls_out[x:], range(1, len(colls_out)+1))
#colls_out = map(lambda x, colls_out=colls_out:colls_out[x-1], colls_out_nondups)
#colls_out = list(set(colls_out))
#remove duplicates while preserving the order
set_out = set()
colls_out = [coll for coll in colls_out if coll not in set_out and not set_out.add(coll)]
if verbose:
debug += "<br />8) --- calculate the colls_out; remove duplicates ---"
debug += "<br />colls_out : %s" % colls_out
debug += "<br />"
# remove the hosted collections from the collections to be searched
if hosted_colls_out:
for coll in hosted_colls_out:
try:
colls_out.remove(coll)
except ValueError:
# in case coll was not found in colls_out
pass
if verbose:
debug += "<br />9) --- remove the hosted_colls from the colls_out ---"
debug += "<br />colls_out : %s" % colls_out
return (cc, colls_out_for_display, colls_out, hosted_colls_out, debug)
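# Illustrative usage (a sketch; the collection names are hypothetical):
#   (cc, colls_to_display, colls_to_search,
#    hosted_colls, debug) = wash_colls('Articles & Preprints', ['Articles'], split_colls=0)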
def get_synonym_terms(term, kbr_name, match_type, use_memoise=False):
"""
Return list of synonyms for TERM by looking in KBR_NAME in
MATCH_TYPE style.
@param term: search-time term or index-time term
@type term: str
@param kbr_name: knowledge base name
@type kbr_name: str
@param match_type: specifies how the term matches against the KBR
before doing the lookup. Could be `exact' (default),
'leading_to_comma', `leading_to_number'.
@type match_type: str
@param use_memoise: can we memoise while doing lookups?
@type use_memoise: bool
@return: list of term synonyms
@rtype: list of strings
"""
dterms = {}
## exact match is default:
term_for_lookup = term
term_remainder = ''
## but maybe match different term:
if match_type == CFG_BIBINDEX_SYNONYM_MATCH_TYPE['leading_to_comma']:
mmm = re.match(r'^(.*?)(\s*,.*)$', term)
if mmm:
term_for_lookup = mmm.group(1)
term_remainder = mmm.group(2)
elif match_type == CFG_BIBINDEX_SYNONYM_MATCH_TYPE['leading_to_number']:
mmm = re.match(r'^(.*?)(\s*\d.*)$', term)
if mmm:
term_for_lookup = mmm.group(1)
term_remainder = mmm.group(2)
## FIXME: workaround: escaping SQL wild-card signs, since KBR's
## exact search is doing LIKE query, so would match everything:
term_for_lookup = term_for_lookup.replace('%', '\\%')
## OK, now find synonyms:
for kbr_values in get_kbr_values(kbr_name,
searchkey=term_for_lookup,
searchtype='e',
use_memoise=use_memoise):
for kbr_value in kbr_values:
dterms[kbr_value + term_remainder] = 1
## return list of term synonyms:
return dterms.keys()
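# Illustrative behaviour of the 'leading_to_comma' match type (a sketch; the actual
# synonyms depend on the knowledge base contents):
#   for a term like 'Ellis, J', only 'Ellis' is looked up in the KB and the
#   remainder ', J' is appended back to every synonym that is returned.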
def wash_output_format(ouput_format):
"""Wash output format FORMAT. Currently only prevents input like
'of=9' for backwards-compatible format that prints certain fields
only. (for this task, 'of=tm' is preferred)"""
if str(ouput_format[0:3]).isdigit() and len(ouput_format) != 6:
# asked to print MARC tags, but not enough digits,
# so let's switch back to HTML brief default
return 'hb'
else:
return ouput_format
def wash_pattern(p):
"""Wash pattern passed by URL. Check for sanity of the wildcard by
removing wildcards if they are appended to extremely short words
(1-3 letters). TODO: instead of this approximate treatment, it
would be much better to introduce a time limit, e.g. to kill a
query if it does not finish in 10 seconds."""
# strip accents:
# p = strip_accents(p) # FIXME: when available, strip accents all the time
# add leading/trailing whitespace for the two following wildcard-sanity checking regexps:
p = " " + p + " "
# replace spaces within quotes by __SPACE__ temporarily:
p = re_pattern_single_quotes.sub(lambda x: "'"+x.group(1).replace(' ', '__SPACE__')+"'", p)
p = re_pattern_double_quotes.sub(lambda x: "\""+x.group(1).replace(' ', '__SPACE__')+"\"", p)
p = re_pattern_regexp_quotes.sub(lambda x: "/"+x.group(1).replace(' ', '__SPACE__')+"/", p)
# get rid of unquoted wildcards after spaces:
p = re_pattern_wildcards_after_spaces.sub("\\1", p)
# get rid of extremely short words (1-3 letters with wildcards):
#p = re_pattern_short_words.sub("\\1", p)
# replace back __SPACE__ by spaces:
p = re_pattern_space.sub(" ", p)
# replace special terms:
p = re_pattern_today.sub(time.strftime("%Y-%m-%d", time.localtime()), p)
# remove unnecessary whitespace:
p = p.strip()
# remove potentially wrong UTF-8 characters:
p = wash_for_utf8(p)
return p
def wash_field(f):
"""Wash field passed by URL."""
if f:
# get rid of unnecessary whitespace and make it lowercase
# (e.g. Author -> author) to better suit iPhone etc input
# mode:
f = f.strip().lower()
# wash legacy 'f' field names, e.g. replace 'wau' or `au' by
# 'author', if applicable:
if CFG_WEBSEARCH_FIELDS_CONVERT:
f = CFG_WEBSEARCH_FIELDS_CONVERT.get(f, f)
return f
def wash_dates(d1="", d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0):
"""
Take user-submitted date arguments D1 (full datetime string) or
(D1Y, D1M, D1D) year, month, day tuple and D2 or (D2Y, D2M, D2D)
and return a (datetext1, datetext2) pair of datetime
strings in the YYYY-MM-DD HH:MM:SS format suitable for time
restricted searching.
Note that when both D1 and (D1Y, D1M, D1D) parameters are present,
the precedence goes to D1. Ditto for D2*.
Note that when (D1Y, D1M, D1D) are taken into account, some values
may be missing and are completed e.g. to 01 or 12 according to
whether it is the starting or the ending date.
"""
datetext1, datetext2 = "", ""
# sanity checking:
if d1 == "" and d1y == 0 and d1m == 0 and d1d == 0 and d2 == "" and d2y == 0 and d2m == 0 and d2d == 0:
return ("", "") # nothing selected, so return empty values
# wash first (starting) date:
if d1:
# full datetime string takes precedence:
datetext1 = d1
else:
# okay, first date passed as (year,month,day):
if d1y:
datetext1 += "%04d" % d1y
else:
datetext1 += "0000"
if d1m:
datetext1 += "-%02d" % d1m
else:
datetext1 += "-01"
if d1d:
datetext1 += "-%02d" % d1d
else:
datetext1 += "-01"
datetext1 += " 00:00:00"
# wash second (ending) date:
if d2:
# full datetime string takes precedence:
datetext2 = d2
else:
# okay, second date passed as (year,month,day):
if d2y:
datetext2 += "%04d" % d2y
else:
datetext2 += "9999"
if d2m:
datetext2 += "-%02d" % d2m
else:
datetext2 += "-12"
if d2d:
datetext2 += "-%02d" % d2d
else:
datetext2 += "-31" # NOTE: perhaps we should add max(datenumber) in
# given month, but for our querying it's not
# needed, 31 will always do
datetext2 += " 00:00:00"
# okay, return constructed YYYY-MM-DD HH:MM:SS datetexts:
return (datetext1, datetext2)
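# Illustrative behaviour (a sketch): missing parts of the starting date default to
# 01 and missing parts of the ending date default to 12/31:
#   wash_dates(d1y=2004, d1m=3)  -> ('2004-03-01 00:00:00', '9999-12-31 00:00:00')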
def is_hosted_collection(coll):
"""Check if the given collection is a hosted one; i.e. its dbquery starts with hostedcollection:
Returns True if it is, False if it's not or if the result is empty or if the query failed"""
res = run_sql("SELECT dbquery FROM collection WHERE name=%s", (coll, ))
if not res or not res[0][0]:
return False
try:
return res[0][0].startswith("hostedcollection:")
except IndexError:
return False
def get_colID(c):
"Return collection ID for collection name C. Return None if no match found."
colID = None
res = run_sql("SELECT id FROM collection WHERE name=%s", (c,), 1)
if res:
colID = res[0][0]
return colID
def get_coll_normalised_name(c):
"""Returns normalised collection name (case sensitive) for collection name
C (case insensitive).
Returns None if no match found."""
res = run_sql("SELECT name FROM collection WHERE name=%s", (c,))
if res:
return res[0][0]
else:
return None
def get_coll_ancestors(coll):
"Returns a list of ancestors for collection 'coll'."
coll_ancestors = []
coll_ancestor = coll
while 1:
res = run_sql("""SELECT c.name FROM collection AS c
LEFT JOIN collection_collection AS cc ON c.id=cc.id_dad
LEFT JOIN collection AS ccc ON ccc.id=cc.id_son
WHERE ccc.name=%s ORDER BY cc.id_dad ASC LIMIT 1""",
(coll_ancestor,))
if res:
coll_name = res[0][0]
coll_ancestors.append(coll_name)
coll_ancestor = coll_name
else:
break
# ancestors found, return reversed list:
coll_ancestors.reverse()
return coll_ancestors
def get_coll_sons(coll, coll_type='r', public_only=1):
"""Return a list of sons (first-level descendants) of type 'coll_type' for collection 'coll'.
If coll_type = '*', both regular and virtual collections will be returned.
If public_only, then return only non-restricted son collections.
"""
coll_sons = []
if coll_type == '*':
coll_type_query = " IN ('r', 'v')"
query_params = (coll, )
else:
coll_type_query = "=%s"
query_params = (coll_type, coll)
query = "SELECT c.name FROM collection AS c "\
"LEFT JOIN collection_collection AS cc ON c.id=cc.id_son "\
"LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad "\
"WHERE cc.type%s AND ccc.name=%%s" % coll_type_query
query += " ORDER BY cc.score ASC"
res = run_sql(query, query_params)
for name in res:
if not public_only or not collection_restricted_p(name[0]):
coll_sons.append(name[0])
return coll_sons
class CollectionAllChildrenDataCacher(DataCacher):
"""Cache for all children of a collection (regular & virtual, public & private)"""
def __init__(self):
def cache_filler():
def get_all_children(coll, coll_type='r', public_only=1, d_internal_coll_sons=None):
"""Return a list of all children of type 'coll_type' for collection 'coll'.
If public_only, then return only non-restricted child collections.
If coll_type='*', then return both regular and virtual collections.
d_internal_coll_sons is an internal dictionary used in recursion for
minimizing the number of database calls and should not be used outside
this scope.
"""
if not d_internal_coll_sons:
d_internal_coll_sons = {}
children = []
if coll not in d_internal_coll_sons:
d_internal_coll_sons[coll] = get_coll_sons(coll, coll_type, public_only)
for child in d_internal_coll_sons[coll]:
children.append(child)
children.extend(get_all_children(child, coll_type, public_only, d_internal_coll_sons)[0])
return children, d_internal_coll_sons
ret = {}
d_internal_coll_sons = None
collections = collection_reclist_cache.cache.keys()
for collection in collections:
ret[collection], d_internal_coll_sons = get_all_children(collection, '*', public_only=0, d_internal_coll_sons=d_internal_coll_sons)
return ret
def timestamp_verifier():
return max(get_table_update_time('collection'), get_table_update_time('collection_collection'))
DataCacher.__init__(self, cache_filler, timestamp_verifier)
try:
if not collection_allchildren_cache.is_ok_p:
raise Exception
except Exception:
collection_allchildren_cache = CollectionAllChildrenDataCacher()
def get_collection_allchildren(coll, recreate_cache_if_needed=True):
"""Returns the list of all children of a collection."""
if recreate_cache_if_needed:
collection_allchildren_cache.recreate_cache_if_needed()
if coll not in collection_allchildren_cache.cache:
return [] # collection does not exist; return empty list
return collection_allchildren_cache.cache[coll]
def get_coll_real_descendants(coll, coll_type='_', get_hosted_colls=True):
"""Return a list of all descendants of collection 'coll' that are defined by a 'dbquery'.
IOW, we need to decompose compound collections like "A & B" into "A" and "B" provided
that "A & B" has no associated database query defined.
"""
coll_sons = []
res = run_sql("""SELECT c.name,c.dbquery FROM collection AS c
LEFT JOIN collection_collection AS cc ON c.id=cc.id_son
LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad
WHERE ccc.name=%s AND cc.type LIKE %s ORDER BY cc.score ASC""",
(coll, coll_type,))
for name, dbquery in res:
if dbquery: # this is 'real' collection, so return it:
if get_hosted_colls:
coll_sons.append(name)
else:
if not dbquery.startswith("hostedcollection:"):
coll_sons.append(name)
else: # this is 'composed' collection, so recurse:
coll_sons.extend(get_coll_real_descendants(name))
return coll_sons
def browse_pattern_phrases(req, colls, p, f, rg, ln=CFG_SITE_LANG):
"""Returns either biliographic phrases or words indexes."""
## is p enclosed in quotes? (coming from exact search)
if p.startswith('"') and p.endswith('"'):
p = p[1:-1]
## okay, "real browse" follows:
## FIXME: the maths in the get_nearest_terms_in_bibxxx is just a test
if not f and p.find(":") > 0: # does 'p' contain ':'?
f, p = p.split(":", 1)
## do we search in words indexes?
# FIXME uncomment this
#if not f:
# return browse_in_bibwords(req, p, f)
coll_hitset = intbitset()
for coll_name in colls:
coll_hitset |= get_collection_reclist(coll_name)
index_id = get_index_id_from_field(f)
if index_id != 0:
browsed_phrases_in_colls = get_nearest_terms_in_idxphrase_with_collection(p, index_id, rg/2, rg/2, coll_hitset)
else:
browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
while not browsed_phrases:
# try again and again with shorter and shorter pattern:
try:
p = p[:-1]
browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
except:
register_exception(req=req, alert_admin=True)
# probably there are no hits at all:
#req.write(_("No values found."))
return []
## try to check hits in these particular collection selection:
browsed_phrases_in_colls = []
if 0:
for phrase in browsed_phrases:
phrase_hitset = intbitset()
phrase_hitsets = search_pattern("", phrase, f, 'e')
for coll in colls:
phrase_hitset.union_update(phrase_hitsets[coll])
if len(phrase_hitset) > 0:
# okay, this phrase has some hits in colls, so add it:
browsed_phrases_in_colls.append([phrase, len(phrase_hitset)])
## were there hits in collections?
if browsed_phrases_in_colls == []:
if browsed_phrases != []:
#write_warning(req, """<p>No match close to <em>%s</em> found in given collections.
#Please try different term.<p>Displaying matches in any collection...""" % p_orig)
## try to get nbhits for these phrases in any collection:
for phrase in browsed_phrases:
nbhits = get_nbhits_in_bibxxx(phrase, f, coll_hitset)
if nbhits > 0:
browsed_phrases_in_colls.append([phrase, nbhits])
return browsed_phrases_in_colls
def browse_pattern(req, colls, p, f, rg, ln=CFG_SITE_LANG):
"""Displays either biliographic phrases or words indexes."""
# load the right message language
_ = gettext_set_language(ln)
browsed_phrases_in_colls = browse_pattern_phrases(req, colls, p, f, rg, ln)
if len(browsed_phrases_in_colls) == 0:
req.write(_("No values found."))
return
## display results now:
out = websearch_templates.tmpl_browse_pattern(
f=f,
fn=get_field_i18nname(get_field_name(f) or f, ln, False),
ln=ln,
browsed_phrases_in_colls=browsed_phrases_in_colls,
colls=colls,
rg=rg,
)
req.write(out)
return
def browse_in_bibwords(req, p, f, ln=CFG_SITE_LANG):
"""Browse inside words indexes."""
if not p:
return
_ = gettext_set_language(ln)
urlargd = {}
urlargd.update(req.argd)
urlargd['action'] = 'search'
nearest_box = create_nearest_terms_box(urlargd, p, f, 'w', ln=ln, intro_text_p=0)
req.write(websearch_templates.tmpl_search_in_bibwords(
p = p,
f = f,
ln = ln,
nearest_box = nearest_box
))
return
def search_pattern(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
"""Search for complex pattern 'p' within field 'f' according to
matching type 'm'. Return hitset of recIDs.
The function uses multi-stage searching algorithm in case of no
exact match found. See the Search Internals document for
detailed description.
The 'ap' argument governs whether alternative patterns are to
be used in case there is no direct hit for (p,f,m). For
example, whether to replace non-alphanumeric characters by
spaces if it would give some hits. See the Search Internals
document for detailed description. (ap=0 forbids the
alternative pattern usage, ap=1 permits it.)
'ap' is also internally used for allowing hidden tag search
(for requests coming from webcoll, for example). In this
case ap=-9
The 'of' argument governs whether to print or not some
information to the user in case of no match found. (Usually it
prints the information in case of HTML formats, otherwise it's
silent).
The 'verbose' argument controls the level of debugging information
to be printed (0=least, 9=most).
All the parameters are assumed to have been previously washed.
This function is suitable as a mid-level API.
"""
_ = gettext_set_language(ln)
hitset_empty = intbitset()
# sanity check:
if not p:
hitset_full = intbitset(trailing_bits=1)
hitset_full.discard(0)
# no pattern, so return all universe
return hitset_full
# search stage 1: break up arguments into basic search units:
if verbose and of.startswith("h"):
t1 = os.times()[4]
basic_search_units = create_basic_search_units(req, p, f, m, of)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 1: basic search units are: %s" % cgi.escape(repr(basic_search_units)), req=req)
write_warning("Search stage 1: execution took %.2f seconds." % (t2 - t1), req=req)
# search stage 2: do search for each search unit and verify hit presence:
if verbose and of.startswith("h"):
t1 = os.times()[4]
basic_search_units_hitsets = []
# prepare hidden-tag handling (which MARC tags must stay invisible to this user):
myhiddens = cfg['CFG_BIBFORMAT_HIDDEN_TAGS']
can_see_hidden = False
if req:
user_info = collect_user_info(req)
can_see_hidden = user_info.get('precached_canseehiddenmarctags', False)
if not req and ap == -9: # special request, coming from webcoll
can_see_hidden = True
if can_see_hidden:
myhiddens = []
if CFG_INSPIRE_SITE and of.startswith('h'):
# fulltext/caption search warnings for INSPIRE:
fields_to_be_searched = [f for dummy_o, p, f, m in basic_search_units]
if 'fulltext' in fields_to_be_searched:
write_warning(_("Full-text search is currently available for all arXiv papers, many theses, a few report series and some journal articles"), req=req)
elif 'caption' in fields_to_be_searched:
write_warning(_("Warning: figure caption search is only available for a subset of papers mostly from %(x_range_from_year)s-%(x_range_to_year)s.") %
{'x_range_from_year': '2008',
'x_range_to_year': '2012'}, req=req)
for idx_unit in xrange(len(basic_search_units)):
bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
if bsu_f and len(bsu_f) < 2:
if of.startswith("h"):
write_warning(_("There is no index %(x_name)s. Searching for %(x_text)s in all fields.", x_name=bsu_f, x_text=bsu_p), req=req)
bsu_f = ''
bsu_m = 'w'
if of.startswith("h") and verbose:
write_warning(_('Instead searching %(x_name)s.', x_name=str([bsu_o, bsu_p, bsu_f, bsu_m])), req=req)
try:
basic_search_unit_hitset = search_unit(bsu_p, bsu_f, bsu_m, wl)
except InvenioWebSearchWildcardLimitError as excp:
basic_search_unit_hitset = excp.res
if of.startswith("h"):
write_warning(_("Search term too generic, displaying only partial results..."), req=req)
# FIXME: print warning if we use native full-text indexing
if bsu_f == 'fulltext' and bsu_m != 'w' and of.startswith('h') and not CFG_SOLR_URL:
write_warning(_("No phrase index available for fulltext yet, looking for word combination..."), req=req)
#check that the user is allowed to search with this tag
#if he/she tries it
if bsu_f and len(bsu_f) > 1 and bsu_f[0].isdigit() and bsu_f[1].isdigit():
for htag in myhiddens:
ltag = len(htag)
samelenfield = bsu_f[0:ltag]
if samelenfield == htag: #user searches by a hidden tag
#we won't show you anything..
basic_search_unit_hitset = intbitset()
if verbose >= 9 and of.startswith("h"):
write_warning("Pattern %s hitlist omitted since \
it queries in a hidden tag %s" %
(cgi.escape(repr(bsu_p)), repr(myhiddens)), req=req)
display_nearest_terms_box = False #..and stop spying, too.
if verbose >= 9 and of.startswith("h"):
write_warning("Search stage 1: pattern %s gave hitlist %s" % (cgi.escape(bsu_p), basic_search_unit_hitset), req=req)
if len(basic_search_unit_hitset) > 0 or \
ap<1 or \
bsu_o in ("|", "-") or \
((idx_unit+1)<len(basic_search_units) and basic_search_units[idx_unit+1][0]=="|"):
# stage 2-1: this basic search unit is retained, since
# either the hitset is non-empty, or the approximate
# pattern treatment is switched off, or the search unit
# was joined by an OR operator to preceding/following
# units so we do not require that it exists
basic_search_units_hitsets.append(basic_search_unit_hitset)
else:
# stage 2-2: no hits found for this search unit, try to replace non-alphanumeric chars inside pattern:
if re.search(r'[^a-zA-Z0-9\s\:]', bsu_p) and bsu_f != 'refersto' and bsu_f != 'citedby':
if bsu_p.startswith('"') and bsu_p.endswith('"'): # is it ACC query?
bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', "*", bsu_p)
else: # it is WRD query
bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', " ", bsu_p)
if verbose and of.startswith('h') and req:
write_warning("Trying (%s,%s,%s)" % (cgi.escape(bsu_pn), cgi.escape(bsu_f), cgi.escape(bsu_m)), req=req)
basic_search_unit_hitset = search_pattern(req=None, p=bsu_pn, f=bsu_f, m=bsu_m, of="id", ln=ln, wl=wl)
if len(basic_search_unit_hitset) > 0:
# we retain the new unit instead
if of.startswith('h'):
write_warning(_("No exact match found for %(x_query1)s, using %(x_query2)s instead...") %
{'x_query1': "<em>" + cgi.escape(bsu_p) + "</em>",
'x_query2': "<em>" + cgi.escape(bsu_pn) + "</em>"}, req=req)
basic_search_units[idx_unit][1] = bsu_pn
basic_search_units_hitsets.append(basic_search_unit_hitset)
else:
# stage 2-3: no hits found either, propose nearest indexed terms:
if of.startswith('h') and display_nearest_terms_box:
if req:
if bsu_f == "recid":
write_warning(_("Requested record does not seem to exist."), req=req)
else:
write_warning(create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln), req=req)
return hitset_empty
else:
# stage 2-3: no hits found either, propose nearest indexed terms:
if of.startswith('h') and display_nearest_terms_box:
if req:
if bsu_f == "recid":
write_warning(_("Requested record does not seem to exist."), req=req)
else:
write_warning(create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln), req=req)
return hitset_empty
if verbose and of.startswith("h"):
t2 = os.times()[4]
for idx_unit in range(0, len(basic_search_units)):
write_warning("Search stage 2: basic search unit %s gave %d hits." %
(basic_search_units[idx_unit][1:], len(basic_search_units_hitsets[idx_unit])), req=req)
write_warning("Search stage 2: execution took %.2f seconds." % (t2 - t1), req=req)
# search stage 3: apply boolean query for each search unit:
if verbose and of.startswith("h"):
t1 = os.times()[4]
# let the initial set be the complete universe:
hitset_in_any_collection = intbitset(trailing_bits=1)
hitset_in_any_collection.discard(0)
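# (record IDs start at 1, so drop the non-existent recID 0 from the universe)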
for idx_unit in xrange(len(basic_search_units)):
this_unit_operation = basic_search_units[idx_unit][0]
this_unit_hitset = basic_search_units_hitsets[idx_unit]
if this_unit_operation == '+':
hitset_in_any_collection.intersection_update(this_unit_hitset)
elif this_unit_operation == '-':
hitset_in_any_collection.difference_update(this_unit_hitset)
elif this_unit_operation == '|':
hitset_in_any_collection.union_update(this_unit_hitset)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(this_unit_operation), "Error", req=req)
if len(hitset_in_any_collection) == 0:
# no hits found, propose alternative boolean query:
if of.startswith('h') and display_nearest_terms_box:
nearestterms = []
for idx_unit in range(0, len(basic_search_units)):
bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
if bsu_p.startswith("%") and bsu_p.endswith("%"):
bsu_p = "'" + bsu_p[1:-1] + "'"
bsu_nbhits = len(basic_search_units_hitsets[idx_unit])
# create a similar query, but with the basic search unit only
argd = {}
argd.update(req.argd)
argd['p'] = bsu_p
argd['f'] = bsu_f
nearestterms.append((bsu_p, bsu_nbhits, argd))
text = websearch_templates.tmpl_search_no_boolean_hits(
ln=ln, nearestterms=nearestterms)
write_warning(text, req=req)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 3: boolean query gave %d hits." % len(hitset_in_any_collection), req=req)
write_warning("Search stage 3: execution took %.2f seconds." % (t2 - t1), req=req)
return hitset_in_any_collection
def search_pattern_parenthesised(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
"""Search for complex pattern 'p' containing parenthesis within field 'f' according to
matching type 'm'. Return hitset of recIDs.
For more details on the parameters see 'search_pattern'
"""
_ = gettext_set_language(ln)
spires_syntax_converter = SpiresToInvenioSyntaxConverter()
spires_syntax_query = False
# if the pattern uses SPIRES search syntax, convert it to Invenio syntax
if spires_syntax_converter.is_applicable(p):
spires_syntax_query = True
p = spires_syntax_converter.convert_query(p)
# sanity check: do not call parenthesised parser for search terms
# like U(1) but still call it for searches like ('U(1)' | 'U(2)'):
if not re_pattern_parens.search(re_pattern_parens_quotes.sub('_', p)):
return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
# Try searching with parentheses
try:
parser = SearchQueryParenthesisedParser()
# get a hitset with all recids
result_hitset = intbitset(trailing_bits=1)
# parse the query. The result is list of [op1, expr1, op2, expr2, ..., opN, exprN]
parsing_result = parser.parse_query(p)
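# e.g. for p = "muon -(decay | lifetime)" the parser is expected to yield
# something like ['+', 'muon', '-', 'decay | lifetime'] (illustrative only;
# the exact output depends on SearchQueryParenthesisedParser)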
if verbose and of.startswith("h"):
write_warning("Search stage 1: search_pattern_parenthesised() searched %s." % repr(p), req=req)
write_warning("Search stage 1: search_pattern_parenthesised() returned %s." % repr(parsing_result), req=req)
# go through every pattern
# calculate hitset for it
# combine pattern's hitset with the result using the corresponding operator
for index in xrange(0, len(parsing_result)-1, 2):
current_operator = parsing_result[index]
current_pattern = parsing_result[index+1]
if CFG_INSPIRE_SITE and spires_syntax_query:
# setting ap=0 to turn off approximate matching for 0 results.
# Doesn't work well in combinations.
# FIXME: The right fix involves collecting statuses for each
# hitset, then showing a nearest terms box exactly once,
# outside this loop.
ap = 0
display_nearest_terms_box = False
# obtain a hitset for the current pattern
current_hitset = search_pattern(req, current_pattern, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
# combine the current hitset with resulting hitset using the current operator
if current_operator == '+':
result_hitset = result_hitset & current_hitset
elif current_operator == '-':
result_hitset = result_hitset - current_hitset
elif current_operator == '|':
result_hitset = result_hitset | current_hitset
else:
assert False, "Unknown operator in search_pattern_parenthesised()"
return result_hitset
# If searching with parentheses fails, perform the search ignoring parentheses
except SyntaxError:
write_warning(_("Search syntax misunderstood. Ignoring all parentheses in the query. If this doesn't help, please check your search and try again."), req=req)
# remove the parentheses in the query. The current implementation removes all the parentheses,
# but it could be improved to remove only those that are not inside quotes
p = p.replace('(', ' ')
p = p.replace(')', ' ')
return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
def search_unit(p, f=None, m=None, wl=0, ignore_synonyms=None):
"""Search for basic search unit defined by pattern 'p' and field
'f' and matching type 'm'. Return hitset of recIDs.
All the parameters are assumed to have been previously washed.
'p' is assumed to be already a ``basic search unit'' so that it
is searched as such and is not broken up in any way. Only
wildcard and span queries are being detected inside 'p'.
If CFG_WEBSEARCH_SYNONYM_KBRS is set and we are searching in
one of the indexes that has defined runtime synonym knowledge
base, then look up there and automatically enrich search
results with results for synonyms.
In case the wildcard limit (wl) is greater than 0 and this limit
is reached an InvenioWebSearchWildcardLimitError will be raised.
In case you want to call this function with no limit for the
wildcard queries, wl should be 0.
Parameter 'ignore_synonyms' is a list of terms for which we
should not try to further find a synonym.
This function is suitable as a low-level API.
"""
## create empty output results set:
hitset = intbitset()
if not p: # sanity checking
return hitset
tokenizer = get_field_tokenizer_type(f)
hitset_cjk = intbitset()
if tokenizer == "BibIndexCJKTokenizer":
if is_there_any_CJK_character_in_text(p):
cjk_tok = BibIndexCJKTokenizer()
chars = cjk_tok.tokenize_for_words(p)
for char in chars:
hitset_cjk |= search_unit_in_bibwords(char, f, wl)
## eventually look up runtime synonyms:
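# (e.g. with a synonym knowledge base configured for the title index, a search
#  for 'behaviour' could transparently also collect hits for 'behavior';
#  illustrative only, the actual expansion depends on CFG_WEBSEARCH_SYNONYM_KBRS)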
hitset_synonyms = intbitset()
if (f or 'anyfield') in CFG_WEBSEARCH_SYNONYM_KBRS:
if ignore_synonyms is None:
ignore_synonyms = []
ignore_synonyms.append(p)
for p_synonym in get_synonym_terms(p,
CFG_WEBSEARCH_SYNONYM_KBRS[f or 'anyfield'][0],
CFG_WEBSEARCH_SYNONYM_KBRS[f or 'anyfield'][1]):
if p_synonym != p and \
not p_synonym in ignore_synonyms:
hitset_synonyms |= search_unit(p_synonym, f, m, wl,
ignore_synonyms)
## look up hits:
if f == 'fulltext' and get_idx_indexer('fulltext') == 'SOLR' and CFG_SOLR_URL:
# redirect to Solr
try:
return search_unit_in_solr(p, f, m)
except:
# There were troubles with getting full-text search
# results from Solr. Let us alert the admin of these
# problems and let us simply return empty results to the
# end user.
register_exception()
return hitset
elif f == 'fulltext' and get_idx_indexer('fulltext') == 'XAPIAN' and CFG_XAPIAN_ENABLED:
# redirect to Xapian
try:
return search_unit_in_xapian(p, f, m)
except:
# There were troubles with getting full-text search
# results from Xapian. Let us alert the admin of these
# problems and let us simply return empty results to the
# end user.
register_exception()
return hitset
if f == 'datecreated':
hitset = search_unit_in_bibrec(p, p, 'c')
elif f == 'datemodified':
hitset = search_unit_in_bibrec(p, p, 'm')
elif f == 'refersto':
# we are searching for records that refer to (cite) the records matching the query p
hitset = search_unit_refersto(p)
elif f == 'referstoexcludingselfcites':
# same as 'refersto', but excluding self-citations
hitset = search_unit_refersto_excluding_selfcites(p)
elif f == 'cataloguer':
# we are doing search by the cataloguer nickname
hitset = search_unit_in_record_history(p)
elif f == 'rawref':
from invenio.legacy.refextract.api import search_from_reference
field, pattern = search_from_reference(p)
return search_unit(pattern, field)
elif f == 'citedby':
# we are searching for records cited by the records matching the query p
hitset = search_unit_citedby(p)
elif f == 'collection':
# we are doing search by the collection name or MARC field
hitset = search_unit_collection(p, m, wl=wl)
elif f == 'tag':
module_found = False
try:
from invenio.modules.tags.search_units import search_unit_in_tags
module_found = True
except:
# WebTag module is disabled, so ignore 'tag' selector
pass
if module_found:
return search_unit_in_tags(p)
elif f == 'citedbyexcludingselfcites':
# same as 'citedby', but excluding self-citations
hitset = search_unit_citedby_excluding_selfcites(p)
elif m == 'a' or m == 'r' or f == 'subject':
# we are doing either phrase search or regexp search
if f == 'fulltext':
# FIXME: workaround for not having phrase index yet
return search_pattern(None, p, f, 'w')
index_id = get_index_id_from_field(f)
if index_id != 0:
if m == 'a' and index_id in get_idxpair_field_ids():
#for exact match on the admin configured fields we are searching in the pair tables
hitset = search_unit_in_idxpairs(p, f, m, wl)
else:
hitset = search_unit_in_idxphrases(p, f, m, wl)
else:
hitset = search_unit_in_bibxxx(p, f, m, wl)
# if not hitset and m == 'a' and (p[0] != '%' and p[-1] != '%'):
# #if we have no results by doing exact matching, do partial matching
# #for removing the distinction between simple and double quotes
# hitset = search_unit_in_bibxxx('%' + p + '%', f, m, wl)
elif p.startswith("cited:"):
# we are doing search by the citation count
hitset = search_unit_by_times_cited(p[6:])
elif p.startswith("citedexcludingselfcites:"):
# we are doing search by the citation count
hitset = search_unit_by_times_cited(p[len("citedexcludingselfcites:"):], exclude_selfcites=True)
else:
# we are doing bibwords search by default
hitset = search_unit_in_bibwords(p, f, wl=wl)
## merge synonym results and return total:
hitset |= hitset_synonyms
hitset |= hitset_cjk
return hitset
def get_idxpair_field_ids():
"""Returns the list of ids for the fields that idxPAIRS should be used on"""
index_dict = dict(run_sql("SELECT name, id FROM idxINDEX"))
return [index_dict[field] for field in index_dict if field in cfg['CFG_WEBSEARCH_IDXPAIRS_FIELDS']]
def search_unit_in_bibwords(word, f, decompress=zlib.decompress, wl=0):
"""Searches for 'word' inside bibwordsX table for field 'f' and returns hitset of recIDs."""
hitset = intbitset() # will hold output result set
set_used = 0 # not-yet-used flag, to be able to circumvent set operations
limit_reached = 0 # flag for knowing if the query limit has been reached
# if no field is specified, search in the global index.
f = f or 'anyfield'
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
stemming_language = get_index_stemming_language(index_id)
else:
return intbitset() # word index f does not exist
# wash 'word' argument and run query:
if f.endswith('count') and word.endswith('+'):
# field count query of the form N+ so transform N+ to N->99999:
word = word[:-1] + '->99999'
word = word.replace('*', '%') # we now use '*' as the truncation character
words = word.split("->", 1) # check for span query
if len(words) == 2:
word0 = re_word.sub('', words[0])
word1 = re_word.sub('', words[1])
if stemming_language:
word0 = lower_index_term(word0)
word1 = lower_index_term(word1)
# We remove trailing truncation character before stemming
if word0.endswith('%'):
word0 = stem(word0[:-1], stemming_language) + '%'
else:
word0 = stem(word0, stemming_language)
if word1.endswith('%'):
word1 = stem(word1[:-1], stemming_language) + '%'
else:
word1 = stem(word1, stemming_language)
word0_washed = wash_index_term(word0)
word1_washed = wash_index_term(word1)
if f.endswith('count'):
# field count query; convert to integers in order
# to have numerical behaviour for 'BETWEEN n1 AND n2' query
try:
word0_washed = int(word0_washed)
word1_washed = int(word1_washed)
except ValueError:
pass
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term BETWEEN %%s AND %%s" % bibwordsX,
(word0_washed, word1_washed), wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
if f == 'journal':
pass # FIXME: quick hack for the journal index
else:
word = re_word.sub('', word)
if stemming_language:
word = lower_index_term(word)
# We remove trailing truncation character before stemming
if word.endswith('%'):
word = stem(word[:-1], stemming_language) + '%'
else:
word = stem(word, stemming_language)
if word.find('%') >= 0: # do we have wildcard in the word?
if f == 'journal':
# FIXME: quick hack for the journal index
# FIXME: we can run a sanity check here for all indexes
res = ()
else:
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term LIKE %%s" % bibwordsX,
(wash_index_term(word),), wildcard_limit = wl)
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term,hitlist FROM %s WHERE term=%%s" % bibwordsX,
(wash_index_term(word),))
# fill the result set:
for word, hitlist in res:
hitset_bibwrd = intbitset(hitlist)
# add the results:
if set_used:
hitset.union_update(hitset_bibwrd)
else:
hitset = hitset_bibwrd
set_used = 1
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(hitset)
# okay, return result set:
return hitset
def search_unit_in_idxpairs(p, f, search_type, wl=0):
"""Searches for pair 'p' inside idxPAIR table for field 'f' and
returns hitset of recIDs found."""
limit_reached = 0 # flag for knowing if the query limit has been reached
do_exact_search = True # flag to know when it makes sense to try to do exact matching
result_set = intbitset()
#determine the idxPAIR table to read from
index_id = get_index_id_from_field(f)
if not index_id:
return intbitset()
stemming_language = get_index_stemming_language(index_id)
pairs_tokenizer = BibIndexDefaultTokenizer(stemming_language)
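# note: the pairs tokenizer splits a phrase into consecutive word pairs,
# e.g. 'quantum field theory' -> ['quantum field', 'field theory']
# (illustrative; the exact output depends on BibIndexDefaultTokenizer)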
idxpair_table_washed = wash_table_column_name("idxPAIR%02dF" % index_id)
if p.startswith("%") and p.endswith("%"):
p = p[1:-1]
original_pattern = p
p = string.replace(p, '*', '%') # we now use '*' as the truncation character
queries_releated_vars = [] # contains tuples of (query_addons, query_params, use_query_limit)
#is it a span query?
ps = p.split("->", 1)
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
#so we are dealing with a span query
pairs_left = pairs_tokenizer.tokenize_for_pairs(ps[0])
pairs_right = pairs_tokenizer.tokenize_for_pairs(ps[1])
if not pairs_left or not pairs_right:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, wl=wl)
elif len(pairs_left) != len(pairs_right):
# it is hard to know what the user actually wanted with a query
# like 'foo bar baz -> qux xyz', so let's switch to phrase search
return search_unit_in_idxphrases(original_pattern, f, search_type, wl)
elif len(pairs_left) > 1 and \
len(pairs_right) > 1 and \
pairs_left[:-1] != pairs_right[:-1]:
# again we have something like: foo bar baz -> abc xyz qux
# so we'd better switch to phrase
return search_unit_in_idxphrases(original_pattern, f, search_type, wl)
else:
# finally, we can treat the search using idxPairs
# at this step we have either: foo bar -> abc xyz
# or foo bar abc -> foo bar xyz
queries_releated_vars = [("BETWEEN %s AND %s", (pairs_left[-1], pairs_right[-1]), True)]
for pair in pairs_left[:-1]:  # which should be equal to pairs_right[:-1]
queries_releated_vars.append(("= %s", (pair, ), False))
do_exact_search = False # no exact search for span queries
elif p.find('%') > -1:
#tokenizing p will remove the '%', so we have to make sure it stays
replacement = 'xxxxxxxxxx' # hopefully this will not clash with anything in the future
p = string.replace(p, '%', replacement)
pairs = pairs_tokenizer.tokenize_for_pairs(p)
if not pairs:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, wl=wl)
queries_releated_vars = []
for pair in pairs:
if string.find(pair, replacement) > -1:
pair = string.replace(pair, replacement, '%') #we replace back the % sign
queries_releated_vars.append(("LIKE %s", (pair, ), True))
else:
queries_releated_vars.append(("= %s", (pair, ), False))
do_exact_search = False
else:
#normal query
pairs = pairs_tokenizer.tokenize_for_pairs(p)
if not pairs:
# we are not actually dealing with pairs but with words
return search_unit_in_bibwords(original_pattern, f, wl=wl)
queries_releated_vars = []
for pair in pairs:
queries_releated_vars.append(("= %s", (pair, ), False))
first_results = 1 # flag to know if it's the first set of results or not
for query_var in queries_releated_vars:
query_addons = query_var[0]
query_params = query_var[1]
use_query_limit = query_var[2]
if use_query_limit:
try:
res = run_sql_with_limit("SELECT term, hitlist FROM %s WHERE term %s"
% (idxpair_table_washed, query_addons), query_params, wildcard_limit=wl) #kwalitee:disable=sql
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term, hitlist FROM %s WHERE term %s"
% (idxpair_table_washed, query_addons), query_params) #kwalitee:disable=sql
if not res:
return intbitset()
for pair, hitlist in res:
hitset_idxpairs = intbitset(hitlist)
if first_results:
result_set = hitset_idxpairs
first_results = 0
else:
result_set.intersection_update(hitset_idxpairs)
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(result_set)
# check if we need to eliminate the false positives
if cfg['CFG_WEBSEARCH_IDXPAIRS_EXACT_SEARCH'] and do_exact_search:
# we need to eliminate the false positives
idxphrase_table_washed = wash_table_column_name("idxPHRASE%02dR" % index_id)
not_exact_search = intbitset()
for recid in result_set:
res = run_sql("SELECT termlist FROM %s WHERE id_bibrec %s" %(idxphrase_table_washed, '=%s'), (recid, )) #kwalitee:disable=sql
if res:
termlist = deserialize_via_marshal(res[0][0])
if not [term for term in termlist if term.lower().find(p.lower()) > -1]:
not_exact_search.add(recid)
else:
not_exact_search.add(recid)
# remove the recs that are false positives from the final result
result_set.difference_update(not_exact_search)
return result_set
def search_unit_in_idxphrases(p, f, search_type, wl=0):
"""Searches for phrase 'p' inside idxPHRASE*F table for field 'f' and returns hitset of recIDs found.
The search type is defined by 'search_type' (e.g. 'r' for a regexp search)."""
# call word search method in some cases:
if f.endswith('count'):
return search_unit_in_bibwords(p, f, wl=wl)
hitset = intbitset() # will hold output result set
set_used = 0 # not-yet-used flag, to be able to circumvent set operations
limit_reached = 0 # flag for knowing if the query limit has been reached
use_query_limit = False # flag for knowing if to limit the query results or not
# deduce in which idxPHRASE table we will search:
idxphraseX = "idxPHRASE%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
idxphraseX = "idxPHRASE%02dF" % index_id
else:
return intbitset() # phrase index f does not exist
# detect query type (exact phrase, partial phrase, regexp):
if search_type == 'r':
query_addons = "REGEXP %s"
query_params = (p,)
use_query_limit = True
else:
p = p.replace('*', '%') # we now use '*' as the truncation character
ps = p.split("->", 1) # check for span query:
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
query_addons = "BETWEEN %s AND %s"
query_params = (ps[0], ps[1])
use_query_limit = True
else:
if p.find('%') > -1:
query_addons = "LIKE %s"
query_params = (p,)
use_query_limit = True
else:
query_addons = "= %s"
query_params = (p,)
# special washing for fuzzy author index:
if f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor', 'authorityauthor'):
query_params_washed = ()
for query_param in query_params:
query_params_washed += (wash_author_name(query_param),)
query_params = query_params_washed
# perform search:
if use_query_limit:
try:
res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term %s" % (idxphraseX, query_addons),
query_params, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT term,hitlist FROM %s WHERE term %s" % (idxphraseX, query_addons), query_params)
# fill the result set:
for dummy_word, hitlist in res:
hitset_bibphrase = intbitset(hitlist)
# add the results:
if set_used:
hitset.union_update(hitset_bibphrase)
else:
hitset = hitset_bibphrase
set_used = 1
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(hitset)
# okay, return result set:
return hitset
def search_unit_in_bibxxx(p, f, type, wl=0):
"""Searches for pattern 'p' inside bibxxx tables for field 'f' and returns hitset of recIDs found.
The search type is defined by 'type' (e.g. equals to 'r' for a regexp search)."""
# call word search method in some cases:
if f == 'journal' or f.endswith('count'):
return search_unit_in_bibwords(p, f, wl=wl)
limit_reached = 0 # flag for knowing if the query limit has been reached
use_query_limit = False # flag for knowing if to limit the query results or not
query_addons = "" # will hold additional SQL code for the query
query_params = () # will hold parameters for the query (their number may vary depending on TYPE argument)
# wash arguments:
f = string.replace(f, '*', '%') # replace truncation char '*' in field definition
if type == 'r':
query_addons = "REGEXP %s"
query_params = (p,)
use_query_limit = True
else:
p = string.replace(p, '*', '%') # we now use '*' as the truncation character
ps = string.split(p, "->", 1) # check for span query:
if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
query_addons = "BETWEEN %s AND %s"
query_params = (ps[0], ps[1])
use_query_limit = True
else:
if string.find(p, '%') > -1:
query_addons = "LIKE %s"
query_params = (p,)
use_query_limit = True
else:
query_addons = "= %s"
query_params = (p,)
# construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if len(f) >= 2 and str(f[0]).isdigit() and str(f[1]).isdigit():
tl.append(f) # 'f' seems to be okay as it starts by two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
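# e.g. f='author' typically maps to MARC tags such as '100__a' and '700__a'
# (the actual mapping comes from the field/tag configuration tables)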
if not tl:
# f index does not exist, nevermind
pass
# okay, start search:
l = [] # will hold list of recID that matched
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
# construct and run query:
if t == "001":
if query_addons.find('BETWEEN') > -1 or query_addons.find('=') > -1:
# verify that the params are integers (to avoid returning record 123 when searching for 123foo)
try:
query_params = tuple(int(param) for param in query_params)
except ValueError:
return intbitset()
if use_query_limit:
try:
res = run_sql_with_limit("SELECT id FROM bibrec WHERE id %s" % query_addons,
query_params, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql("SELECT id FROM bibrec WHERE id %s" % query_addons,
query_params)
else:
query = "SELECT bibx.id_bibrec FROM %s AS bx LEFT JOIN %s AS bibx ON bx.id=bibx.id_bibxxx WHERE bx.value %s" % \
(bx, bibx, query_addons)
if len(t) != 6 or t[-1:]=='%':
# wildcard query, or only the beginning of field 't'
# is defined, so add wildcard character:
query += " AND bx.tag LIKE %s"
query_params_and_tag = query_params + (t + '%',)
else:
# exact query for 't':
query += " AND bx.tag=%s"
query_params_and_tag = query_params + (t,)
if use_query_limit:
try:
res = run_sql_with_limit(query, query_params_and_tag, wildcard_limit=wl)
except InvenioDbQueryWildcardLimitError as excp:
res = excp.res
limit_reached = 1 # set the limit reached flag to true
else:
res = run_sql(query, query_params_and_tag)
# fill the result set:
for id_bibrec in res:
if id_bibrec[0]:
l.append(id_bibrec[0])
# check no of hits found:
nb_hits = len(l)
# okay, return result set:
hitset = intbitset(l)
#check to see if the query limit was reached
if limit_reached:
#raise an exception, so we can print a nice message to the user
raise InvenioWebSearchWildcardLimitError(hitset)
return hitset
def search_unit_in_solr(p, f=None, m=None):
"""
Query a Solr index and return an intbitset corresponding
to the result. Parameters (p,f,m) are usual search unit ones.
"""
if m and (m == 'a' or m == 'r'): # phrase/regexp query
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
p = '"' + p + '"'
return solr_get_bitset(f, p)
def search_unit_in_xapian(p, f=None, m=None):
"""
Query a Xapian index and return an intbitset corresponding
to the result. Parameters (p,f,m) are usual search unit ones.
"""
if m and (m == 'a' or m == 'r'): # phrase/regexp query
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
p = '"' + p + '"'
return xapian_get_bitset(f, p)
def search_unit_in_bibrec(datetext1, datetext2, search_type='c'):
"""
Return hitset of recIDs found that were either created or modified
(according to the 'search_type' arg being 'c' or 'm') from datetext1 until datetext2, inclusive.
Does not pay attention to pattern, collection, anything. Useful
to intersect later on with the 'real' query.
"""
hitset = intbitset()
if search_type and search_type.startswith("m"):
search_type = "modification_date"
else:
search_type = "creation_date" # by default we are searching for creation dates
parts = datetext1.split('->')
if len(parts) > 1 and datetext1 == datetext2:
datetext1 = parts[0]
datetext2 = parts[1]
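# e.g. a span like '2010-01-01->2010-12-31' passed in datetext1 has now been
# split into the two range bounds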
if datetext1 == datetext2:
res = run_sql("SELECT id FROM bibrec WHERE %s LIKE %%s" % (search_type,),
(datetext1 + '%',))
else:
res = run_sql("SELECT id FROM bibrec WHERE %s>=%%s AND %s<=%%s" % (search_type, search_type),
(datetext1, datetext2))
for row in res:
hitset += row[0]
return hitset
def search_unit_by_times_cited(p, exclude_selfcites=False):
"""
Return hitset of recIDs found that are cited P times.
Usually P looks like '10->23'.
"""
numstr = '"'+p+'"'
# since we may need the records that have _no_ cites at all, we have to
# know the ids of all records, too; this is needed only when p is 0,
# starts with '0->', or ends with '->0'
allrecs = []
if p == 0 or p == "0" or \
p.startswith("0->") or p.endswith("->0"):
allrecs = intbitset(run_sql("SELECT id FROM bibrec"))
return get_records_with_num_cites(numstr, allrecs,
exclude_selfcites=exclude_selfcites)
def search_unit_refersto(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return the list of records that refer to (cite) them.
"""
if query:
ahitset = search_pattern(p=query)
return get_refersto_hitset(ahitset)
else:
return intbitset([])
def search_unit_refersto_excluding_selfcites(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return the list of records that refer to (cite) them, excluding self-citations.
"""
if query:
ahitset = search_pattern(p=query)
citers = intbitset()
citations = get_cited_by_list(ahitset)
selfcitations = get_self_cited_by_list(ahitset)
for cites, selfcites in zip(citations, selfcitations):
# cites is in the form [(citee, citers), ...]
citers += cites[1] - selfcites[1]
return citers
else:
return intbitset([])
def search_unit_in_record_history(query):
"""
Return hitset of recIDs that were modified by the given cataloguer
"""
if query:
try:
cataloguer_name, modification_date = query.split(":")
except ValueError:
cataloguer_name = query
modification_date = ""
if modification_date:
spires_syntax_converter = SpiresToInvenioSyntaxConverter()
modification_date = spires_syntax_converter.convert_date(modification_date)
parts = modification_date.split('->', 1)
if len(parts) > 1:
start_date, end_date = parts
res = run_sql("SELECT id_bibrec FROM hstRECORD WHERE job_person=%s AND job_date>=%s AND job_date<=%s",
(cataloguer_name, start_date, end_date))
else:
res = run_sql("SELECT id_bibrec FROM hstRECORD WHERE job_person=%s AND job_date LIKE %s",
(cataloguer_name, modification_date + '%',))
return intbitset(res)
else:
sql = "SELECT id_bibrec FROM hstRECORD WHERE job_person=%s"
res = intbitset(run_sql(sql, (cataloguer_name,)))
return res
else:
return intbitset([])
def search_unit_citedby(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return list of records cited by these records.
"""
if query:
ahitset = search_pattern(p=query)
if ahitset:
return get_citedby_hitset(ahitset)
else:
return intbitset([])
else:
return intbitset([])
def search_unit_collection(query, m, wl=None):
"""
Search for records satisfying the query (e.g. collection:"BOOK" or
collection:"Books") and return list of records in the collection.
"""
if len(query):
ahitset = get_collection_reclist(query)
if not ahitset:
return search_unit_in_bibwords(query, 'collection', wl=wl)
return ahitset
else:
return intbitset([])
def search_unit_citedby_excluding_selfcites(query):
"""
Search for records satisfying the query (e.g. author:ellis) and
return the list of records cited by them, excluding self-citations.
"""
if query:
ahitset = search_pattern(p=query)
citees = intbitset()
references = get_refers_to_list(ahitset)
selfreferences = get_self_refers_to_list(ahitset)
for refs, selfrefs in zip(references, selfreferences):
# refs is in the form [(citer, citees), ...]
citees += refs[1] - selfrefs[1]
return citees
else:
return intbitset([])
def get_records_that_can_be_displayed(user_info,
hitset_in_any_collection,
current_coll=CFG_SITE_NAME,
colls=None,
permitted_restricted_collections=None):
"""
Return records that can be displayed.
"""
records_that_can_be_displayed = intbitset()
if colls is None:
colls = [current_coll]
# let's get the restricted collections the user has rights to view
if permitted_restricted_collections is None:
permitted_restricted_collections = user_info.get('precached_permitted_restricted_collections', [])
policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
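# viewrestrcoll policy 'ANY': a record is visible if the user may access at
# least one restricted collection containing it; otherwise the user must be
# authorized for every restricted collection that contains the record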
current_coll_children = get_collection_allchildren(current_coll) # real & virtual
# add all restricted collections, that the user has access to, and are under the current collection
# do not use set here, in order to maintain a specific order:
# children of 'cc' (real, virtual, restricted), rest of 'c' that are not cc's children
colls_to_be_displayed = [coll for coll in current_coll_children if coll in colls or coll in permitted_restricted_collections]
colls_to_be_displayed.extend([coll for coll in colls if coll not in colls_to_be_displayed])
if policy == 'ANY':# the user needs to have access to at least one collection that restricts the records
#we need this to be able to remove records that are both in a public and restricted collection
permitted_recids = intbitset()
notpermitted_recids = intbitset()
for collection in restricted_collection_cache.cache:
if collection in permitted_restricted_collections:
permitted_recids |= get_collection_reclist(collection)
else:
notpermitted_recids |= get_collection_reclist(collection)
records_that_can_be_displayed = hitset_in_any_collection - (notpermitted_recids - permitted_recids)
else: # the user needs to have access to all collections that restrict a record
notpermitted_recids = intbitset()
for collection in restricted_collection_cache.cache:
if collection not in permitted_restricted_collections:
notpermitted_recids |= get_collection_reclist(collection)
records_that_can_be_displayed = hitset_in_any_collection - notpermitted_recids
if records_that_can_be_displayed.is_infinite():
# We should not return an infinite result set to the user.
records_that_can_be_displayed = intbitset()
for coll in colls_to_be_displayed:
records_that_can_be_displayed |= get_collection_reclist(coll)
return records_that_can_be_displayed
def intersect_results_with_collrecs(req, hitset_in_any_collection, colls, of="hb", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True):
"""Return dict of hitsets given by intersection of hitset with the collection universes."""
_ = gettext_set_language(ln)
# search stage 4: intersect with the collection universe
if verbose and of.startswith("h"):
t1 = os.times()[4]
results = {} # all final results
results_nbhits = 0
# calculate the list of recids (restricted or not) that the user has rights to access and we should display (only those)
if not req or isinstance(req, cStringIO.OutputType): # called from CLI
user_info = {}
for coll in colls:
results[coll] = hitset_in_any_collection & get_collection_reclist(coll)
results_nbhits += len(results[coll])
records_that_can_be_displayed = hitset_in_any_collection
permitted_restricted_collections = []
else:
user_info = collect_user_info(req)
# let's get the restricted collections the user has rights to view
if user_info['guest'] == '1':
## For guest users that are actually authorized to some restricted
## collection (by virtue of the IP address in a FireRole rule)
## we explicitly build the list of permitted_restricted_collections
permitted_restricted_collections = get_permitted_restricted_collections(user_info)
else:
permitted_restricted_collections = user_info.get('precached_permitted_restricted_collections', [])
# let's build the list of the both public and restricted
# child collections of the collection from which the user
# started his/her search. This list of children colls will be
# used in the warning proposing a search in those collections
try:
current_coll = req.argd['cc'] # current_coll: coll from which user started his/her search
except:
from flask import request
current_coll = request.args.get('cc', CFG_SITE_NAME) # current_coll: coll from which user started his/her search
current_coll_children = get_collection_allchildren(current_coll) # real & virtual
# add all restricted collections, that the user has access to, and are under the current collection
# do not use set here, in order to maintain a specific order:
# children of 'cc' (real, virtual, restricted), rest of 'c' that are not cc's children
colls_to_be_displayed = [coll for coll in current_coll_children if coll in colls or coll in permitted_restricted_collections]
colls_to_be_displayed.extend([coll for coll in colls if coll not in colls_to_be_displayed])
records_that_can_be_displayed = get_records_that_can_be_displayed(
user_info,
hitset_in_any_collection,
current_coll,
colls,
permitted_restricted_collections)
for coll in colls_to_be_displayed:
results[coll] = results.get(coll, intbitset()) | (records_that_can_be_displayed & get_collection_reclist(coll))
results_nbhits += len(results[coll])
if results_nbhits == 0:
# no hits found, try to search in Home and restricted and/or hidden collections:
results = {}
results_in_Home = records_that_can_be_displayed & get_collection_reclist(CFG_SITE_NAME)
results_in_restricted_collections = intbitset()
results_in_hidden_collections = intbitset()
for coll in permitted_restricted_collections:
if not get_coll_ancestors(coll): # hidden collection
results_in_hidden_collections.union_update(records_that_can_be_displayed & get_collection_reclist(coll))
else:
results_in_restricted_collections.union_update(records_that_can_be_displayed & get_collection_reclist(coll))
# in this way, we do not count twice records that are both in the Home collection and in a restricted collection
total_results = len(results_in_Home.union(results_in_restricted_collections))
if total_results > 0:
# some hits found in Home and/or restricted collections, so propose this search:
if of.startswith("h") and display_nearest_terms_box:
url = websearch_templates.build_search_url(req.argd, cc=CFG_SITE_NAME, c=[])
len_colls_to_display = len(colls_to_be_displayed)
# trim the list of collections to first two, since it might get very large
write_warning(_("No match found in collection %(x_collection)s. Other collections gave %(x_url_open)s%(x_nb_hits)d hits%(x_url_close)s.") %
{'x_collection': '<em>' +
string.join([get_coll_i18nname(coll, ln, False) for coll in colls_to_be_displayed[:2]], ', ') +
(len_colls_to_display > 2 and ' et al' or '') + '</em>',
'x_url_open': '<a class="nearestterms" href="%s">' % (url),
'x_nb_hits': total_results,
'x_url_close': '</a>'}, req=req)
# display the whole list of collections in an HTML comment
if len_colls_to_display > 2:
write_warning("<!--No match found in collection <em>%(x_collection)s</em>.-->" %
{'x_collection': string.join([get_coll_i18nname(coll, ln, False) for coll in colls_to_be_displayed], ', ')},
req=req)
else:
# no hits found: either the user is looking for a document he/she has no rights to see,
# or for a hidden document:
if of.startswith("h") and display_nearest_terms_box:
if len(results_in_hidden_collections) > 0:
write_warning(_("No public collection matched your query. "
"If you were looking for a hidden document, please type "
"the correct URL for this record."), req=req)
else:
write_warning(_("No public collection matched your query. "
"If you were looking for a non-public document, please choose "
"the desired restricted collection first."), req=req)
if verbose and of.startswith("h"):
t2 = os.times()[4]
write_warning("Search stage 4: intersecting with collection universe gave %d hits." % results_nbhits, req=req)
write_warning("Search stage 4: execution took %.2f seconds." % (t2 - t1), req=req)
return results
def intersect_results_with_hitset(req, results, hitset, ap=0, aptext="", of="hb"):
"""Return intersection of search 'results' (a dict of hitsets
with collection as key) with the 'hitset', i.e. apply
'hitset' intersection to each collection within search
'results'.
If the final set is empty and 'ap'
(approximate pattern) is true, then print `aptext'
and return the original 'results' set unchanged. If 'ap' is
false, then return an empty results set.
"""
if ap:
results_ap = copy.deepcopy(results)
else:
results_ap = {} # will return empty dict in case of no hits found
nb_total = 0
final_results = {}
for coll in results.keys():
final_results[coll] = results[coll].intersection(hitset)
nb_total += len(final_results[coll])
if nb_total == 0:
if of.startswith("h"):
write_warning(aptext, req=req)
final_results = results_ap
return final_results
def create_similarly_named_authors_link_box(author_name, ln=CFG_SITE_LANG):
"""Return a box similar to ``Not satisfied...'' one by proposing
author searches for similar names. Namely, take AUTHOR_NAME
and the first initial of the firstame (after comma) and look
into author index whether authors with e.g. middle names exist.
Useful mainly for CERN Library that sometimes contains name
forms like Ellis-N, Ellis-Nick, Ellis-Nicolas all denoting the
same person. The box isn't proposed if no similarly named
authors are found to exist.
"""
# return nothing if not configured:
if CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX == 0:
return ""
# return empty box if there is no initial:
if re.match(r'[^ ,]+, [^ ]', author_name) is None:
return ""
# firstly find name comma initial:
author_name_to_search = re.sub(r'^([^ ,]+, +[^ ,]).*$', '\\1', author_name)
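# e.g. 'Ellis, John Richard' becomes 'Ellis, J'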
# secondly search for similar name forms:
similar_author_names = {}
for name in author_name_to_search, strip_accents(author_name_to_search):
for tag in get_field_tags("author"):
# deduce into which bibxxx table we will search:
digit1, digit2 = int(tag[0]), int(tag[1])
bx = "bib%d%dx" % (digit1, digit2)
if len(tag) != 6 or tag[-1:] == '%':
# only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value LIKE %%s AND bx.tag LIKE %%s""" % bx,
(name + "%", tag + "%"))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value LIKE %%s AND bx.tag=%%s""" % bx,
(name + "%", tag))
for row in res:
similar_author_names[row[0]] = 1
# remove the original name and sort the list:
try:
del similar_author_names[author_name]
except KeyError:
pass
# thirdly print the box:
out = ""
if similar_author_names:
out_authors = similar_author_names.keys()
out_authors.sort()
tmp_authors = []
for out_author in out_authors:
nbhits = get_nbhits_in_bibxxx(out_author, "author")
if nbhits:
tmp_authors.append((out_author, nbhits))
out += websearch_templates.tmpl_similar_author_names(
authors=tmp_authors, ln=ln)
return out
def create_nearest_terms_box(urlargd, p, f, t='w', n=5, ln=CFG_SITE_LANG, intro_text_p=True):
"""Return text box containing list of 'n' nearest terms above/below 'p'
for the field 'f' for matching type 't' (words/phrases) in
language 'ln'.
Propose new searches according to `urlargd' with the new words.
If `intro_text_p' is true, then display the introductory message,
otherwise print only the nearest terms in the box content.
"""
# load the right message language
_ = gettext_set_language(ln)
if not CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS:
return _("Your search did not match any records. Please try again.")
nearest_terms = []
if not p: # sanity check
p = "."
if p.startswith('%') and p.endswith('%'):
p = p[1:-1] # fix for partial phrase
index_id = get_index_id_from_field(f)
if f == 'fulltext':
if CFG_SOLR_URL:
return _("No match found, please enter different search terms.")
else:
# FIXME: workaround for not having native phrase index yet
t = 'w'
# special indexes:
if f == 'refersto' or f == 'referstoexcludingselfcites':
return _("There are no records referring to %(x_rec)s.", x_rec=cgi.escape(p))
if f == 'cataloguer':
return _("There are no records modified by %(x_rec)s.", x_rec=cgi.escape(p))
if f == 'citedby' or f == 'citedbyexcludingselfcites':
return _("There are no records cited by %(x_rec)s.", x_rec=cgi.escape(p))
# look for nearest terms:
if t == 'w':
nearest_terms = get_nearest_terms_in_bibwords(p, f, n, n)
if not nearest_terms:
return _("No word index is available for %(x_name)s.",
x_name=('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>'))
else:
nearest_terms = []
if index_id:
nearest_terms = get_nearest_terms_in_idxphrase(p, index_id, n, n)
if f == 'datecreated' or f == 'datemodified':
nearest_terms = get_nearest_terms_in_bibrec(p, f, n, n)
if not nearest_terms:
nearest_terms = get_nearest_terms_in_bibxxx(p, f, n, n)
if not nearest_terms:
return _("No phrase index is available for %(x_name)s.",
x_name=('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>'))
terminfo = []
for term in nearest_terms:
if t == 'w':
hits = get_nbhits_in_bibwords(term, f)
else:
if index_id:
hits = get_nbhits_in_idxphrases(term, f)
elif f == 'datecreated' or f == 'datemodified':
hits = get_nbhits_in_bibrec(term, f)
else:
hits = get_nbhits_in_bibxxx(term, f)
argd = {}
argd.update(urlargd)
# check which fields contained the requested parameter, and replace it.
for px, dummy_fx in ('p', 'f'), ('p1', 'f1'), ('p2', 'f2'), ('p3', 'f3'):
if px in argd:
argd_px = argd[px]
if t == 'w':
# p was stripped of accents, so do the same here:
argd_px = strip_accents(argd_px)
#argd[px] = string.replace(argd_px, p, term, 1)
#we need something similar, but case insensitive
pattern_index = string.find(argd_px.lower(), p.lower())
if pattern_index > -1:
argd[px] = argd_px[:pattern_index] + term + argd_px[pattern_index+len(p):]
break
#this is doing exactly the same as:
#argd[px] = re.sub('(?i)' + re.escape(p), term, argd_px, 1)
#but is ~4x faster (2us vs. 8.25us)
terminfo.append((term, hits, argd))
intro = ""
if intro_text_p: # add full leading introductory text
if f:
intro = _("Search term %(x_term)s inside index %(x_index)s did not match any record. Nearest terms in any collection are:") % \
{'x_term': "<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>",
'x_index': "<em>" + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + "</em>"}
else:
intro = _("Search term %(x_name)s did not match any record. Nearest terms in any collection are:",
x_name=("<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>"))
return websearch_templates.tmpl_nearest_term_box(p=p, ln=ln, f=f, terminfo=terminfo,
intro=intro)
def get_nearest_terms_in_bibwords(p, f, n_below, n_above):
"""Return list of +n -n nearest terms to word `p' in index for field `f'."""
nearest_words = [] # will hold the (sorted) list of nearest words to return
# deduce into which bibwordsX table we will search:
bibwordsX = "idxWORD%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
else:
return nearest_words
# firstly try to get `n' closest words above `p':
res = run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % bibwordsX,
(p, n_above))
for row in res:
nearest_words.append(row[0])
nearest_words.reverse()
# secondly insert given word `p':
nearest_words.append(p)
# finally try to get `n' closest words below `p':
res = run_sql("SELECT term FROM %s WHERE term>%%s ORDER BY term ASC LIMIT %%s" % bibwordsX,
(p, n_below))
for row in res:
nearest_words.append(row[0])
return nearest_words
def get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field idxPHRASE table,
regardless of collection.
Return list of [phrase1, phrase2, ... , phrase_n]."""
if CFG_INSPIRE_SITE and index_id in (3, 15): # FIXME: workaround due to new fuzzy index
return [p]
idxphraseX = "idxPHRASE%02dF" % index_id
res_above = run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above))
res_above = [x[0] for x in res_above]
res_above.reverse()
res_below = run_sql("SELECT term FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below))
res_below = [x[0] for x in res_below]
return res_above + res_below
def get_nearest_terms_in_idxphrase_with_collection(p, index_id, n_below, n_above, collection):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field idxPHRASE table,
considering the collection (intbitset).
Return list of [(phrase1, hitset), (phrase2, hitset), ... , (phrase_n, hitset)]."""
idxphraseX = "idxPHRASE%02dF" % index_id
res_above = run_sql("SELECT term,hitlist FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above * 3))
res_above = [(term, intbitset(hitlist) & collection) for term, hitlist in res_above]
res_above = [(term, len(hitlist)) for term, hitlist in res_above if hitlist]
res_below = run_sql("SELECT term,hitlist FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below * 3))
res_below = [(term, intbitset(hitlist) & collection) for term, hitlist in res_below]
res_below = [(term, len(hitlist)) for term, hitlist in res_below if hitlist]
res_above.reverse()
return res_above[-n_above:] + res_below[:n_below]
def get_nearest_terms_in_bibxxx(p, f, n_below, n_above):
"""Browse (-n_above, +n_below) closest bibliographic phrases
for the given pattern p in the given field f, regardless
of collection.
Return list of [phrase1, phrase2, ... , phrase_n]."""
## determine browse field:
if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
f, p = string.split(p, ":", 1)
# FIXME: quick hack for the journal index
if f == 'journal':
return get_nearest_terms_in_bibwords(p, f, n_below, n_above)
## We are going to take max(n_below, n_above) as the number of
## values to fetch from bibXXx. This is needed to work around
## MySQL UTF-8 sorting troubles in 4.0.x. Proper solution is to
## use MySQL 4.1.x or our own idxPHRASE in the future.
index_id = get_index_id_from_field(f)
if index_id:
return get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above)
n_fetch = 2*max(n_below, n_above)
## construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if str(f[0]).isdigit() and str(f[1]).isdigit():
tl.append(f) # 'f' seems to be okay as it starts by two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
## start browsing to fetch list of hits:
browsed_phrases = {} # will hold {phrase1: 1, phrase2: 1, ..., phraseN: 1} dict of browsed phrases (to make them unique)
# always add self to the results set:
browsed_phrases[p.startswith("%") and p.endswith("%") and p[1:-1] or p] = 1
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
# firstly try to get `n' closest phrases above `p':
if len(t) != 6 or t[-1:] == '%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value<%%s AND bx.tag LIKE %%s
ORDER BY bx.value DESC LIMIT %%s""" % bx,
(p, t + "%", n_fetch))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value<%%s AND bx.tag=%%s
ORDER BY bx.value DESC LIMIT %%s""" % bx,
(p, t, n_fetch))
for row in res:
browsed_phrases[row[0]] = 1
# secondly try to get `n' closest phrases equal to or below `p':
if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value>=%%s AND bx.tag LIKE %%s
ORDER BY bx.value ASC LIMIT %%s""" % bx,
(p, t + "%", n_fetch))
else:
res = run_sql("""SELECT bx.value FROM %s AS bx
WHERE bx.value>=%%s AND bx.tag=%%s
ORDER BY bx.value ASC LIMIT %%s""" % bx,
(p, t, n_fetch))
for row in res:
browsed_phrases[row[0]] = 1
    # select the first n values only (this is needed because we searched
    # several different tables and thus cannot be sure that the first n
    # values collected are the right ones; this will no longer be needed
    # once there is a single ACC table per field):
phrases_out = browsed_phrases.keys()
phrases_out.sort(lambda x, y: cmp(string.lower(strip_accents(x)),
string.lower(strip_accents(y))))
# find position of self:
try:
idx_p = phrases_out.index(p)
except ValueError:
idx_p = len(phrases_out)/2
# return n_above and n_below:
return phrases_out[max(0, idx_p-n_above):idx_p+n_below]
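# Usage sketch (hypothetical values): browsing report numbers through the
# bibxxx tables, e.g. via the "reportnumber" field code:
#   get_nearest_terms_in_bibxxx("CERN-PS-99-001", "reportnumber", 5, 5)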
def get_nearest_terms_in_bibrec(p, f, n_below, n_above):
"""Return list of nearest terms and counts from bibrec table.
p is usually a date, and f either datecreated or datemodified.
    Note: the below/above counts are approximate and not strictly respected.
"""
col = 'creation_date'
if f == 'datemodified':
col = 'modification_date'
res_above = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
FROM bibrec WHERE %s < %%s
ORDER BY %s DESC LIMIT %%s""" % (col, col, col),
(p, n_above))
res_below = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
FROM bibrec WHERE %s > %%s
ORDER BY %s ASC LIMIT %%s""" % (col, col, col),
(p, n_below))
out = set([])
for row in res_above:
out.add(row[0])
for row in res_below:
out.add(row[0])
out_list = list(out)
out_list.sort()
    return out_list
def get_nbhits_in_bibrec(term, f):
"""Return number of hits in bibrec table. term is usually a date,
and f is either 'datecreated' or 'datemodified'."""
col = 'creation_date'
if f == 'datemodified':
col = 'modification_date'
res = run_sql("SELECT COUNT(*) FROM bibrec WHERE %s LIKE %%s" % (col,),
(term + '%',))
return res[0][0]
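# Usage sketch (hypothetical date): counting records created on a given day;
# the date prefix is matched with LIKE, so shorter prefixes widen the match:
#   get_nbhits_in_bibrec("2012-05-01", "datecreated")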
def get_nbhits_in_bibwords(word, f):
"""Return number of hits for word 'word' inside words index for field 'f'."""
out = 0
# deduce into which bibwordsX table we will search:
bibwordsX = "idxWORD%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
bibwordsX = "idxWORD%02dF" % index_id
else:
return 0
if word:
res = run_sql("SELECT hitlist FROM %s WHERE term=%%s" % bibwordsX,
(word,))
for hitlist in res:
out += len(intbitset(hitlist[0]))
return out
def get_nbhits_in_idxphrases(word, f):
"""Return number of hits for word 'word' inside phrase index for field 'f'."""
out = 0
    # deduce into which idxPHRASEX table we will search:
idxphraseX = "idxPHRASE%02dF" % get_index_id_from_field("anyfield")
if f:
index_id = get_index_id_from_field(f)
if index_id:
idxphraseX = "idxPHRASE%02dF" % index_id
else:
return 0
if word:
res = run_sql("SELECT hitlist FROM %s WHERE term=%%s" % idxphraseX,
(word,))
for hitlist in res:
out += len(intbitset(hitlist[0]))
return out
def get_nbhits_in_bibxxx(p, f, in_hitset=None):
"""Return number of hits for word 'word' inside words index for field 'f'."""
## determine browse field:
if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
f, p = string.split(p, ":", 1)
# FIXME: quick hack for the journal index
if f == 'journal':
return get_nbhits_in_bibwords(p, f)
## construct 'tl' which defines the tag list (MARC tags) to search in:
tl = []
if str(f[0]).isdigit() and str(f[1]).isdigit():
tl.append(f) # 'f' seems to be okay as it starts by two digits
else:
# deduce desired MARC tags on the basis of chosen 'f'
tl = get_field_tags(f)
# start searching:
recIDs = {} # will hold dict of {recID1: 1, recID2: 1, ..., } (unique recIDs, therefore)
for t in tl:
# deduce into which bibxxx table we will search:
digit1, digit2 = int(t[0]), int(t[1])
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
WHERE bx.value=%%s AND bx.tag LIKE %%s
AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
(p, t + "%"))
else:
res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
WHERE bx.value=%%s AND bx.tag=%%s
AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
(p, t))
for row in res:
recIDs[row[0]] = 1
if in_hitset is None:
nbhits = len(recIDs)
else:
nbhits = len(intbitset(recIDs.keys()).intersection(in_hitset))
return nbhits
def get_mysql_recid_from_aleph_sysno(sysno):
"""Returns DB's recID for ALEPH sysno passed in the argument (e.g. "002379334CER").
Returns None in case of failure."""
out = None
res = run_sql("""SELECT bb.id_bibrec FROM bibrec_bib97x AS bb, bib97x AS b
WHERE b.value=%s AND b.tag='970__a' AND bb.id_bibxxx=b.id""",
(sysno,))
if res:
out = res[0][0]
return out
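# Usage sketch: resolving an ALEPH system number to the internal record id;
# None is returned when no matching 970__a value exists:
#   get_mysql_recid_from_aleph_sysno("002379334CER")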
def guess_primary_collection_of_a_record(recID):
"""Return primary collection name a record recid belongs to, by
testing 980 identifier.
May lead to bad guesses when a collection is defined dynamically
via dbquery.
In that case, return 'CFG_SITE_NAME'."""
out = CFG_SITE_NAME
dbcollids = get_fieldvalues(recID, "980__a")
for dbcollid in dbcollids:
variants = ("collection:" + dbcollid,
'collection:"' + dbcollid + '"',
"980__a:" + dbcollid,
'980__a:"' + dbcollid + '"',
'980:' + dbcollid ,
'980:"' + dbcollid + '"')
res = run_sql("SELECT name FROM collection WHERE dbquery IN (%s,%s,%s,%s,%s,%s)", variants)
if res:
out = res[0][0]
break
if CFG_CERN_SITE:
recID = int(recID)
# dirty hack for ATLAS collections at CERN:
if out in ('ATLAS Communications', 'ATLAS Internal Notes'):
for alternative_collection in ('ATLAS Communications Physics',
'ATLAS Communications General',
'ATLAS Internal Notes Physics',
'ATLAS Internal Notes General',):
if recID in get_collection_reclist(alternative_collection):
return alternative_collection
# dirty hack for FP
FP_collections = {'DO': ['Current Price Enquiries', 'Archived Price Enquiries'],
'IT': ['Current Invitation for Tenders', 'Archived Invitation for Tenders'],
'MS': ['Current Market Surveys', 'Archived Market Surveys']}
fp_coll_ids = [coll for coll in dbcollids if coll in FP_collections]
for coll in fp_coll_ids:
for coll_name in FP_collections[coll]:
if recID in get_collection_reclist(coll_name):
return coll_name
return out
_re_collection_url = re.compile('/collection/(.+)')
def guess_collection_of_a_record(recID, referer=None, recreate_cache_if_needed=True):
"""Return collection name a record recid belongs to, by first testing
the referer URL if provided and otherwise returning the
primary collection."""
if referer:
dummy, hostname, path, dummy, query, dummy = urlparse.urlparse(referer)
#requests can come from different invenio installations, with different collections
if CFG_SITE_URL.find(hostname) < 0:
return guess_primary_collection_of_a_record(recID)
g = _re_collection_url.match(path)
if g:
name = urllib.unquote_plus(g.group(1))
#check if this collection actually exist (also normalize the name if case-insensitive)
name = get_coll_normalised_name(name)
if name and recID in get_collection_reclist(name):
return name
elif path.startswith('/search'):
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
query = cgi.parse_qs(query)
for name in query.get('cc', []) + query.get('c', []):
name = get_coll_normalised_name(name)
if name and recID in get_collection_reclist(name, recreate_cache_if_needed=False):
return name
return guess_primary_collection_of_a_record(recID)
def is_record_in_any_collection(recID, recreate_cache_if_needed=True):
"""Return True if the record belongs to at least one collection. This is a
good, although not perfect, indicator to guess if webcoll has already run
after this record has been entered into the system.
"""
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
for name in collection_reclist_cache.cache.keys():
if recID in get_collection_reclist(name, recreate_cache_if_needed=False):
return True
return False
def get_all_collections_of_a_record(recID, recreate_cache_if_needed=True):
"""Return all the collection names a record belongs to.
Note this function is O(n_collections)."""
ret = []
if recreate_cache_if_needed:
collection_reclist_cache.recreate_cache_if_needed()
for name in collection_reclist_cache.cache.keys():
if recID in get_collection_reclist(name, recreate_cache_if_needed=False):
ret.append(name)
return ret
def get_tag_name(tag_value, prolog="", epilog=""):
"""Return tag name from the known tag value, by looking up the 'tag' table.
Return empty string in case of failure.
    Example: input='100__%', output='first author'."""
out = ""
res = run_sql("SELECT name FROM tag WHERE value=%s", (tag_value,))
if res:
out = prolog + res[0][0] + epilog
return out
def get_fieldcodes():
"""Returns a list of field codes that may have been passed as 'search options' in URL.
Example: output=['subject','division']."""
out = []
res = run_sql("SELECT DISTINCT(code) FROM field")
for row in res:
out.append(row[0])
return out
def get_field_name(code):
"""Return the corresponding field_name given the field code.
e.g. reportnumber -> report number."""
res = run_sql("SELECT name FROM field WHERE code=%s", (code, ))
if res:
return res[0][0]
else:
return ""
def get_fieldvalues_alephseq_like(recID, tags_in, can_see_hidden=False):
"""Return buffer of ALEPH sequential-like textual format with fields found
in the list TAGS_IN for record RECID.
If can_see_hidden is True, just print everything. Otherwise hide fields
from CFG_BIBFORMAT_HIDDEN_TAGS.
"""
out = ""
if type(tags_in) is not list:
tags_in = [tags_in]
if len(tags_in) == 1 and len(tags_in[0]) == 6:
## case A: one concrete subfield asked, so print its value if found
## (use with care: can mislead if field has multiple occurrences)
out += string.join(get_fieldvalues(recID, tags_in[0]), "\n")
else:
## case B: print our "text MARC" format; works safely all the time
# find out which tags to output:
dict_of_tags_out = {}
if not tags_in:
for i in range(0, 10):
for j in range(0, 10):
dict_of_tags_out["%d%d%%" % (i, j)] = 1
else:
for tag in tags_in:
if len(tag) == 0:
for i in range(0, 10):
for j in range(0, 10):
dict_of_tags_out["%d%d%%" % (i, j)] = 1
elif len(tag) == 1:
for j in range(0, 10):
dict_of_tags_out["%s%d%%" % (tag, j)] = 1
elif len(tag) < 5:
dict_of_tags_out["%s%%" % tag] = 1
                elif len(tag) >= 5:
dict_of_tags_out[tag[0:5]] = 1
tags_out = dict_of_tags_out.keys()
tags_out.sort()
# search all bibXXx tables as needed:
for tag in tags_out:
digits = tag[0:2]
try:
intdigits = int(digits)
if intdigits < 0 or intdigits > 99:
raise ValueError
except ValueError:
# invalid tag value asked for
continue
if tag.startswith("001") or tag.startswith("00%"):
if out:
out += "\n"
out += "%09d %s %d" % (recID, "001__", recID)
bx = "bib%sx" % digits
bibx = "bibrec_bib%sx" % digits
query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
"WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s"\
"ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
res = run_sql(query, (recID, str(tag)+'%'))
# go through fields:
field_number_old = -999
field_old = ""
for row in res:
field, value, field_number = row[0], row[1], row[2]
ind1, ind2 = field[3], field[4]
printme = True
#check the stuff in hiddenfields
if not can_see_hidden:
for htag in CFG_BIBFORMAT_HIDDEN_TAGS:
ltag = len(htag)
samelenfield = field[0:ltag]
if samelenfield == htag:
printme = False
if ind1 == "_":
ind1 = ""
if ind2 == "_":
ind2 = ""
# print field tag
if printme:
if field_number != field_number_old or field[:-1] != field_old[:-1]:
if out:
out += "\n"
out += "%09d %s " % (recID, field[:5])
field_number_old = field_number
field_old = field
# print subfield value
if field[0:2] == "00" and field[-1:] == "_":
out += value
else:
out += "$$%s%s" % (field[-1:], value)
return out
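# Usage sketch (hypothetical record id): dumping author and title fields of
# record 123 in the ALEPH sequential-like text format, hiding hidden tags:
#   get_fieldvalues_alephseq_like(123, ["100", "245"], can_see_hidden=False)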
def get_merged_recid(recID):
""" Return the record ID of the record with
which the given record has been merged.
@param recID: deleted record recID
@type recID: int
@return: merged record recID
@rtype: int or None
"""
merged_recid = None
for val in get_fieldvalues(recID, "970__d"):
try:
merged_recid = int(val)
break
except ValueError:
pass
return merged_recid
def record_empty(recID):
"""
Is this record empty, e.g. has only 001, waiting for integration?
@param recID: the record identifier.
@type recID: int
@return: 1 if the record is empty, 0 otherwise.
@rtype: int
"""
return bibrecord.record_empty(get_record(recID))
def record_public_p(recID, recreate_cache_if_needed=True):
"""Return 1 if the record is public, i.e. if it can be found in the Home collection.
Return 0 otherwise.
"""
return recID in get_collection_reclist(CFG_SITE_NAME, recreate_cache_if_needed=recreate_cache_if_needed)
def get_creation_date(recID, fmt="%Y-%m-%d"):
"Returns the creation date of the record 'recID'."
out = ""
res = run_sql("SELECT DATE_FORMAT(creation_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1)
if res:
out = res[0][0]
return out
def get_modification_date(recID, fmt="%Y-%m-%d"):
"Returns the date of last modification for the record 'recID'."
out = ""
res = run_sql("SELECT DATE_FORMAT(modification_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1)
if res:
out = res[0][0]
return out
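# Usage sketch (hypothetical record id): both date helpers accept a MySQL
# DATE_FORMAT pattern, e.g. to keep only the year of record 123:
#   get_creation_date(123, fmt="%Y")
#   get_modification_date(123, fmt="%Y")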
def print_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
sc=1, pl_in_url="",
d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
cpu_time=-1, middle_only=0, em=""):
"""Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
Also, prints navigation links (beg/next/prev/end) inside the results set.
    If middle_only is set to 1, it will only print the middle box information (beg/next/prev/end/etc) links.
This is suitable for displaying navigation links at the bottom of the search results page."""
if em != '' and EM_REPOSITORY["search_info"] not in em:
return ""
# sanity check:
if jrec < 1:
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
return websearch_templates.tmpl_print_search_info(
ln = ln,
collection = collection,
aas = aas,
collection_name = get_coll_i18nname(collection, ln, False),
collection_id = get_colID(collection),
middle_only = middle_only,
rg = rg,
nb_found = nb_found,
sf = sf,
so = so,
rm = rm,
of = of,
ot = ot,
p = p,
f = f,
p1 = p1,
p2 = p2,
p3 = p3,
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
op1 = op1,
op2 = op2,
pl_in_url = pl_in_url,
d1y = d1y,
d1m = d1m,
d1d = d1d,
d2y = d2y,
d2m = d2m,
d2d = d2d,
dt = dt,
jrec = jrec,
sc = sc,
sp = sp,
all_fieldcodes = get_fieldcodes(),
cpu_time = cpu_time,
)
def print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
sc=1, pl_in_url="",
d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
cpu_time=-1, middle_only=0, em=""):
"""Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
Also, prints navigation links (beg/next/prev/end) inside the results set.
    If middle_only is set to 1, it will only print the middle box information (beg/next/prev/end/etc) links.
This is suitable for displaying navigation links at the bottom of the search results page."""
if em != '' and EM_REPOSITORY["search_info"] not in em:
return ""
# sanity check:
if jrec < 1:
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
return websearch_templates.tmpl_print_hosted_search_info(
ln = ln,
collection = collection,
aas = aas,
collection_name = get_coll_i18nname(collection, ln, False),
collection_id = get_colID(collection),
middle_only = middle_only,
rg = rg,
nb_found = nb_found,
sf = sf,
so = so,
rm = rm,
of = of,
ot = ot,
p = p,
f = f,
p1 = p1,
p2 = p2,
p3 = p3,
f1 = f1,
f2 = f2,
f3 = f3,
m1 = m1,
m2 = m2,
m3 = m3,
op1 = op1,
op2 = op2,
pl_in_url = pl_in_url,
d1y = d1y,
d1m = d1m,
d1d = d1d,
d2y = d2y,
d2m = d2m,
d2d = d2d,
dt = dt,
jrec = jrec,
sc = sc,
sp = sp,
all_fieldcodes = get_fieldcodes(),
cpu_time = cpu_time,
)
def print_results_overview(colls, results_final_nb_total, results_final_nb, cpu_time, ln=CFG_SITE_LANG, ec=[], hosted_colls_potential_results_p=False, em=""):
"""Prints results overview box with links to particular collections below."""
if em != "" and EM_REPOSITORY["overview"] not in em:
return ""
new_colls = []
for coll in colls:
new_colls.append({
'id': get_colID(coll),
'code': coll,
'name': get_coll_i18nname(coll, ln, False),
})
return websearch_templates.tmpl_print_results_overview(
ln = ln,
results_final_nb_total = results_final_nb_total,
results_final_nb = results_final_nb,
cpu_time = cpu_time,
colls = new_colls,
ec = ec,
hosted_colls_potential_results_p = hosted_colls_potential_results_p,
)
def print_hosted_results(url_and_engine, ln=CFG_SITE_LANG, of=None, req=None, no_records_found=False, search_timed_out=False, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS, em = ""):
"""Prints the full results of a hosted collection"""
if of.startswith("h"):
if no_records_found:
return "<br />No results found."
if search_timed_out:
return "<br />The search engine did not respond in time."
return websearch_templates.tmpl_print_hosted_results(
url_and_engine=url_and_engine,
ln=ln,
of=of,
req=req,
limit=limit,
display_body = em == "" or EM_REPOSITORY["body"] in em,
display_add_to_basket = em == "" or EM_REPOSITORY["basket"] in em)
class BibSortDataCacher(DataCacher):
"""
    Cache holding all structures created by bibsort for one sorting method
    (data_dict_ordered and bucket_data).
"""
def __init__(self, method_name):
self.method_name = method_name
self.method_id = 0
res = run_sql("""SELECT id from bsrMETHOD where name = %s""", (self.method_name,))
if res and res[0]:
self.method_id = res[0][0]
else:
self.method_id = 0
def cache_filler():
method_id = self.method_id
alldicts = {}
if self.method_id == 0:
return {}
try:
res_data = run_sql("""SELECT data_dict_ordered from bsrMETHODDATA \
where id_bsrMETHOD = %s""", (method_id,))
res_buckets = run_sql("""SELECT bucket_no, bucket_data from bsrMETHODDATABUCKET\
where id_bsrMETHOD = %s""", (method_id,))
except Exception:
# database problems, return empty cache
return {}
try:
data_dict_ordered = deserialize_via_marshal(res_data[0][0])
except IndexError:
data_dict_ordered = {}
alldicts['data_dict_ordered'] = data_dict_ordered # recid: weight
if not res_buckets:
alldicts['bucket_data'] = {}
return alldicts
for row in res_buckets:
bucket_no = row[0]
try:
bucket_data = intbitset(row[1])
except IndexError:
bucket_data = intbitset([])
alldicts.setdefault('bucket_data', {})[bucket_no] = bucket_data
return alldicts
def timestamp_verifier():
method_id = self.method_id
res = run_sql("""SELECT last_updated from bsrMETHODDATA where id_bsrMETHOD = %s""", (method_id,))
try:
update_time_methoddata = str(res[0][0])
except IndexError:
update_time_methoddata = '1970-01-01 00:00:00'
res = run_sql("""SELECT max(last_updated) from bsrMETHODDATABUCKET where id_bsrMETHOD = %s""", (method_id,))
try:
update_time_buckets = str(res[0][0])
except IndexError:
update_time_buckets = '1970-01-01 00:00:00'
return max(update_time_methoddata, update_time_buckets)
DataCacher.__init__(self, cache_filler, timestamp_verifier)
def get_sorting_methods():
res = run_sql("""SELECT m.name, m.definition
FROM bsrMETHOD m, bsrMETHODDATA md
WHERE m.id = md.id_bsrMETHOD""")
return dict(res)
SORTING_METHODS = get_sorting_methods()
CACHE_SORTED_DATA = {}
for sorting_method in SORTING_METHODS:
try:
CACHE_SORTED_DATA[sorting_method].is_ok_p
except KeyError:
CACHE_SORTED_DATA[sorting_method] = BibSortDataCacher(sorting_method)
def get_tags_from_sort_fields(sort_fields):
"""Given a list of sort_fields, return the tags associated with it and
also the name of the field that has no tags associated, to be able to
display a message to the user."""
tags = []
if not sort_fields:
return [], ''
for sort_field in sort_fields:
if sort_field and (len(sort_field) > 1 and str(sort_field[0:2]).isdigit()):
# sort_field starts by two digits, so this is probably a MARC tag already
tags.append(sort_field)
else:
# let us check the 'field' table
field_tags = get_field_tags(sort_field)
if field_tags:
tags.extend(field_tags)
else:
return [], sort_field
return tags, ''
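# Usage sketch (hypothetical field codes): mixing a field code and a raw MARC
# tag; depending on the local field configuration this might return something
# like (['100__a', '700__a', '773__y'], ''), with a non-empty second element
# only when a sort field could not be resolved:
#   get_tags_from_sort_fields(["author", "773__y"])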
def rank_records(req, rank_method_code, rank_limit_relevance, hitset_global, pattern=None, verbose=0, sort_order='d', of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None, field='', sorting_methods=SORTING_METHODS):
"""Initial entry point for ranking records, acts like a dispatcher.
(i) rank_method_code is in bsrMETHOD, bibsort buckets can be used;
    (ii) rank_method_code is not in bsrMETHOD, use bibrank;
"""
# Special case: sorting by citations is fast because we store the
# ranking dictionary in memory, so we do not use bibsort buckets.
if CFG_BIBSORT_ENABLED and sorting_methods and rank_method_code != 'citation':
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if definition.startswith('RNK') and \
definition.replace('RNK:', '').strip().lower() == rank_method_code.lower():
solution_recs, solution_scores = \
sort_records_bibsort(req, hitset_global, sort_method,
'', sort_order, verbose, of, ln,
rg, jrec, 'r')
comment = ''
if verbose > 0:
comment = 'find_citations retlist %s' % [[solution_recs[i], solution_scores[i]] for i in range(len(solution_recs))]
return solution_recs, solution_scores, '(', ')', comment
if rank_method_code.lower() == 'citation':
related_to = []
else:
related_to = pattern
solution_recs, solution_scores, prefix, suffix, comment = \
rank_records_bibrank(rank_method_code=rank_method_code,
rank_limit_relevance=rank_limit_relevance,
hitset=hitset_global,
verbose=verbose,
field=field,
related_to=related_to,
rg=rg,
jrec=jrec)
# Solution recs can be None, in case of error or other cases
# which should be all be changed to return an empty list.
if solution_recs and sort_order == 'd':
solution_recs.reverse()
solution_scores.reverse()
return solution_recs, solution_scores, prefix, suffix, comment
def sort_records_latest(recIDs, jrec, rg, sort_order):
if sort_order == 'd':
recIDs.reverse()
return slice_records(recIDs, jrec, rg)
def sort_or_rank_records(req, recIDs, rm, sf, so, sp, p, verbose=0, of='hb',
ln=CFG_SITE_LANG, rg=None, jrec=None, field='',
sorting_methods=SORTING_METHODS):
"""Sort or rank records.
Entry point for deciding to either sort or rank records."""
if rm:
ranking_result = rank_records(req, rm, 0, recIDs, p, verbose, so,
of, ln, rg, jrec, field,
sorting_methods=sorting_methods)
if ranking_result[0]:
return ranking_result[0] # ranked recids
elif sf or (CFG_BIBSORT_ENABLED and SORTING_METHODS):
return sort_records(req, recIDs, sf, so, sp, verbose, of, ln, rg, jrec)
return recIDs.tolist()
def sort_records(req, recIDs, sort_field='', sort_order='a', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None, sorting_methods=SORTING_METHODS):
"""Initial entry point for sorting records, acts like a dispatcher.
(i) sort_field is in the bsrMETHOD, and thus, the BibSort has sorted the data for this field, so we can use the cache;
    (ii) sort_field is not in bsrMETHOD, and thus the cache does not contain any information regarding this sorting method"""
_ = gettext_set_language(ln)
# bibsort does not handle sort_pattern for now, use bibxxx
if sort_pattern:
return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order,
sort_pattern, verbose, of, ln, rg, jrec)
    # decide whether the BibSort buckets can be used or whether to fall back to old-fashioned sorting
use_sorting_buckets = CFG_BIBSORT_ENABLED and sorting_methods
# Default sorting
if not sort_field:
if use_sorting_buckets:
return sort_records_bibsort(req, recIDs, CFG_BIBSORT_DEFAULT_FIELD, sort_field, CFG_BIBSORT_DEFAULT_FIELD_ORDER, verbose, of, ln, rg, jrec)
else:
return sort_records_latest(recIDs, jrec, rg, sort_order)
sort_fields = sort_field.split(",")
if len(sort_fields) == 1:
# we have only one sorting_field, check if it is treated by BibSort
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if use_sorting_buckets and \
((definition.startswith('FIELD') and
definition.replace('FIELD:', '').strip().lower() == sort_fields[0].lower()) or
sort_method == sort_fields[0]):
#use BibSort
return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
#deduce sorting MARC tag out of the 'sort_field' argument:
tags, error_field = get_tags_from_sort_fields(sort_fields)
if error_field:
if use_sorting_buckets:
return sort_records_bibsort(req, recIDs, CFG_BIBSORT_DEFAULT_FIELD, sort_field, sort_order, verbose, of, ln, rg, jrec)
else:
if of.startswith('h'):
write_warning(_("Sorry, %(x_option)s does not seem to be a valid sort option. The records will not be sorted.", x_option=cgi.escape(error_field)), "Error", req=req)
return slice_records(recIDs, jrec, rg)
elif tags:
for sort_method in sorting_methods:
definition = sorting_methods[sort_method]
if definition.startswith('MARC') \
and definition.replace('MARC:', '').strip().split(',') == tags \
and use_sorting_buckets:
                # this list of tags has a designated method in BibSort, so use it
return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
        # this sort_field is not in the BibSort tables -> do the old-fashioned sorting
return sort_records_bibxxx(req, recIDs, tags, sort_field, sort_order, sort_pattern, verbose, of, ln, rg, jrec)
else:
return slice_records(recIDs, jrec, rg)
def sort_records_bibsort(req, recIDs, sort_method, sort_field='', sort_order='d', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=1, sort_or_rank='s', sorting_methods=SORTING_METHODS):
"""This function orders the recIDs list, based on a sorting method(sort_field) using the BibSortDataCacher for speed"""
_ = gettext_set_language(ln)
if not jrec:
jrec = 1
#sanity check
if sort_method not in sorting_methods:
if sort_or_rank == 'r':
return rank_records_bibrank(rank_method_code=sort_method,
rank_limit_relevance=0,
hitset=recIDs,
verbose=verbose)
else:
return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order, '', verbose, of, ln, rg, jrec)
if verbose >= 3 and of.startswith('h'):
write_warning("Sorting (using BibSort cache) by method %s (definition %s)."
% (cgi.escape(repr(sort_method)), cgi.escape(repr(sorting_methods[sort_method]))), req=req)
#we should return sorted records up to irec_max(exclusive)
dummy, irec_max = get_interval_for_records_to_sort(len(recIDs), jrec, rg)
solution = intbitset()
input_recids = intbitset(recIDs)
CACHE_SORTED_DATA[sort_method].recreate_cache_if_needed()
sort_cache = CACHE_SORTED_DATA[sort_method].cache
bucket_numbers = sort_cache['bucket_data'].keys()
#check if all buckets have been constructed
if len(bucket_numbers) != CFG_BIBSORT_BUCKETS:
if verbose > 3 and of.startswith('h'):
write_warning("Not all buckets have been constructed.. switching to old fashion sorting.", req=req)
if sort_or_rank == 'r':
return rank_records_bibrank(rank_method_code=sort_method,
rank_limit_relevance=0,
hitset=recIDs,
verbose=verbose)
else:
return sort_records_bibxxx(req, recIDs, None, sort_field,
sort_order, '', verbose, of, ln, rg,
jrec)
if sort_order == 'd':
bucket_numbers.reverse()
for bucket_no in bucket_numbers:
solution.union_update(
input_recids & sort_cache['bucket_data'][bucket_no]
)
if len(solution) >= irec_max:
break
dict_solution = {}
missing_records = intbitset()
for recid in solution:
try:
dict_solution[recid] = sort_cache['data_dict_ordered'][recid]
except KeyError:
# recid is in buckets, but not in the bsrMETHODDATA,
# maybe because the value has been deleted, but the change has not
# yet been propagated to the buckets
missing_records.add(recid)
# check if there are recids that are not in any bucket -> to be added at
# the end/top, ordered by insertion date
if len(solution) < irec_max:
#some records have not been yet inserted in the bibsort structures
#or, some records have no value for the sort_method
missing_records += input_recids - solution
reverse = sort_order == 'd'
if sort_method.strip().lower() == CFG_BIBSORT_DEFAULT_FIELD and reverse:
# If we want to sort the records on their insertion date, add the
# missing records at the top.
solution = sorted(missing_records, reverse=True) + \
sorted(dict_solution, key=dict_solution.__getitem__, reverse=True)
else:
solution = sorted(dict_solution, key=dict_solution.__getitem__,
reverse=reverse) + sorted(missing_records)
# Only keep records, we are going to display
solution = slice_records(solution, jrec, rg)
if sort_or_rank == 'r':
# We need the recids, with their ranking score
return solution, [dict_solution.get(record, 0) for record in solution]
else:
return solution
def slice_records(recIDs, jrec, rg):
if not jrec:
jrec = 1
if rg:
recIDs = recIDs[jrec-1:jrec-1+rg]
else:
recIDs = recIDs[jrec-1:]
return recIDs
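# Usage sketch: slice_records pages through an already ordered list, e.g.
#   slice_records(range(1, 101), jrec=11, rg=10)   # -> record ids 11..20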
def sort_records_bibxxx(req, recIDs, tags, sort_field='', sort_order='d', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None):
"""OLD FASHION SORTING WITH NO CACHE, for sort fields that are not run in BibSort
Sort records in 'recIDs' list according sort field 'sort_field' in order 'sort_order'.
If more than one instance of 'sort_field' is found for a given record, try to choose that that is given by
'sort pattern', for example "sort by report number that starts by CERN-PS".
Note that 'sort_field' can be field code like 'author' or MARC tag like '100__a' directly."""
_ = gettext_set_language(ln)
## check arguments:
if not sort_field:
return slice_records(recIDs, jrec, rg)
if len(recIDs) > CFG_WEBSEARCH_NB_RECORDS_TO_SORT:
if of.startswith('h'):
write_warning(_("Sorry, sorting is allowed on sets of up to %(x_name)d records only. Using default sort order.", x_name=CFG_WEBSEARCH_NB_RECORDS_TO_SORT), "Warning", req=req)
return slice_records(recIDs, jrec, rg)
recIDs_dict = {}
recIDs_out = []
if not tags:
        # tags have not been computed yet
sort_fields = sort_field.split(',')
tags, error_field = get_tags_from_sort_fields(sort_fields)
if error_field:
if of.startswith('h'):
write_warning(_("Sorry, %(x_name)s does not seem to be a valid sort option. The records will not be sorted.", x_name=cgi.escape(error_field)), "Error", req=req)
return slice_records(recIDs, jrec, rg)
if verbose >= 3 and of.startswith('h'):
write_warning("Sorting by tags %s." % cgi.escape(repr(tags)), req=req)
if sort_pattern:
write_warning("Sorting preferentially by %s." % cgi.escape(sort_pattern), req=req)
## check if we have sorting tag defined:
if tags:
# fetch the necessary field values:
for recID in recIDs:
val = "" # will hold value for recID according to which sort
vals = [] # will hold all values found in sorting tag for recID
for tag in tags:
if CFG_CERN_SITE and tag == '773__c':
# CERN hack: journal sorting
# 773__c contains page numbers, e.g. 3-13, and we want to sort by 3, and numerically:
vals.extend(["%050s" % x.split("-", 1)[0] for x in get_fieldvalues(recID, tag)])
else:
vals.extend(get_fieldvalues(recID, tag))
if sort_pattern:
# try to pick that tag value that corresponds to sort pattern
bingo = 0
for v in vals:
if v.lower().startswith(sort_pattern.lower()): # bingo!
bingo = 1
val = v
break
if not bingo: # sort_pattern not present, so add other vals after spaces
val = sort_pattern + " " + ''.join(vals)
else:
# no sort pattern defined, so join them all together
val = ''.join(vals)
val = strip_accents(val.lower()) # sort values regardless of accents and case
if val in recIDs_dict:
recIDs_dict[val].append(recID)
else:
recIDs_dict[val] = [recID]
# create output array:
for k in sorted(recIDs_dict.keys()):
recIDs_out.extend(recIDs_dict[k])
# ascending or descending?
if sort_order == 'd':
recIDs_out.reverse()
recIDs = recIDs_out
# return only up to the maximum that we need
return slice_records(recIDs, jrec, rg)
def get_interval_for_records_to_sort(nb_found, jrec=None, rg=None):
"""calculates in which interval should the sorted records be
a value of 'rg=-9999' means to print all records: to be used with care."""
if not jrec:
jrec = 1
if not rg:
#return all
return jrec-1, nb_found
if rg == -9999: # print all records
rg = nb_found
else:
rg = abs(rg)
if jrec < 1: # sanity checks
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
# will sort records from irec_min to irec_max excluded
irec_min = jrec - 1
irec_max = irec_min + rg
if irec_min < 0:
irec_min = 0
if irec_max > nb_found:
irec_max = nb_found
return irec_min, irec_max
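# Usage sketch: the returned interval is 0-based and exclusive at the upper
# end, e.g.
#   get_interval_for_records_to_sort(100, jrec=11, rg=10)   # -> (10, 20)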
def print_records(req, recIDs, jrec=1, rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, format='hb', ot='', ln=CFG_SITE_LANG,
relevances=[], relevances_prologue="(", relevances_epilogue="%%)",
decompress=zlib.decompress, search_pattern='', print_records_prologue_p=True,
print_records_epilogue_p=True, verbose=0, tab='', sf='', so='d', sp='',
rm='', em='', nb_found=-1):
"""
Prints list of records 'recIDs' formatted according to 'format' in
groups of 'rg' starting from 'jrec'.
Assumes that the input list 'recIDs' is sorted in reverse order,
so it counts records from tail to head.
A value of 'rg=-9999' means to print all records: to be used with care.
Print also list of RELEVANCES for each record (if defined), in
between RELEVANCE_PROLOGUE and RELEVANCE_EPILOGUE.
Print prologue and/or epilogue specific to 'format' if
'print_records_prologue_p' and/or print_records_epilogue_p' are
True.
'sf' is sort field and 'rm' is ranking method that are passed here
only for proper linking purposes: e.g. when a certain ranking
method or a certain sort field was selected, keep it selected in
any dynamic search links that may be printed.
"""
if em != "" and EM_REPOSITORY["body"] not in em:
return
# load the right message language
_ = gettext_set_language(ln)
# sanity checking:
if req is None:
return
# get user_info (for formatting based on user)
if isinstance(req, cStringIO.OutputType):
user_info = {}
else:
user_info = collect_user_info(req)
if nb_found == -1:
nb_found = len(recIDs)
if nb_found:
if not rg or rg == -9999: # print all records
rg = nb_found
else:
rg = abs(rg)
if jrec < 1: # sanity checks
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
# will print records from irec_max to irec_min excluded:
irec_max = nb_found - jrec
irec_min = nb_found - jrec - rg
if irec_min < 0:
irec_min = -1
if irec_max >= nb_found:
irec_max = nb_found - 1
#req.write("%s:%d-%d" % (recIDs, irec_min, irec_max))
if len(recIDs) > rg and rg != -9999:
recIDs = slice_records(recIDs, jrec, rg)
if format.startswith('x'):
# print header if needed
if print_records_prologue_p:
print_records_prologue(req, format)
if ot:
# asked to print some filtered fields only, so call print_record() on the fly:
for recid in recIDs:
x = print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm)
req.write(x)
if x:
req.write('\n')
else:
format_records(recIDs,
format,
ln=ln,
search_pattern=search_pattern,
record_separator="\n",
user_info=user_info,
req=req)
# print footer if needed
if print_records_epilogue_p:
print_records_epilogue(req, format)
elif format.startswith('t') or str(format[0:3]).isdigit():
# we are doing plain text output:
for recid in recIDs:
x = print_record(recid, format, ot, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
req.write(x)
if x:
req.write('\n')
elif format.startswith('recjson'):
# we are doing recjson output:
req.write('[')
for idx, recid in enumerate(recIDs):
if idx > 0:
req.write(',')
req.write(print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm))
req.write(']')
elif format == 'excel':
create_excel(recIDs=recIDs, req=req, ot=ot, user_info=user_info)
else:
# we are doing HTML output:
if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
# portfolio and on-the-fly formats:
for recid in recIDs:
req.write(print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm))
elif format.startswith("hb"):
# HTML brief format:
display_add_to_basket = True
if user_info:
if user_info['email'] == 'guest':
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
display_add_to_basket = False
else:
if not user_info['precached_usebaskets']:
display_add_to_basket = False
if em != "" and EM_REPOSITORY["basket"] not in em:
display_add_to_basket = False
req.write(websearch_templates.tmpl_record_format_htmlbrief_header(ln=ln))
for irec, recid in enumerate(recIDs):
row_number = jrec+irec
if relevances and relevances[irec]:
relevance = relevances[irec]
else:
relevance = ''
record = print_record(recid,
format,
ot=ot,
ln=ln,
search_pattern=search_pattern,
user_info=user_info,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm)
req.write(websearch_templates.tmpl_record_format_htmlbrief_body(
ln=ln,
recid=recid,
row_number=row_number,
relevance=relevance,
record=record,
relevances_prologue=relevances_prologue,
relevances_epilogue=relevances_epilogue,
display_add_to_basket=display_add_to_basket
))
req.write(websearch_templates.tmpl_record_format_htmlbrief_footer(
ln=ln,
display_add_to_basket=display_add_to_basket))
elif format.startswith("hd"):
# HTML detailed format:
referer = user_info.get('referer', '')
for recid in recIDs:
if record_exists(recid) == -1:
write_warning(_("The record has been deleted."), req=req)
merged_recid = get_merged_recid(recid)
if merged_recid:
write_warning(_("The record %(x_rec)d replaces it.", x_rec=merged_recid), req=req)
continue
unordered_tabs = get_detailed_page_tabs(get_colID(guess_collection_of_a_record(recid, referer, False)),
recid, ln=ln)
ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in iteritems(unordered_tabs)]
ordered_tabs_id.sort(lambda x, y: cmp(x[1], y[1]))
link_ln = ''
if ln != CFG_SITE_LANG:
link_ln = '?ln=%s' % ln
recid_to_display = recid # Record ID used to build the URL.
if CFG_WEBSEARCH_USE_ALEPH_SYSNOS:
try:
recid_to_display = get_fieldvalues(recid,
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG)[0]
except IndexError:
# No external sysno is available, keep using
# internal recid.
pass
tabs = [(unordered_tabs[tab_id]['label'],
'%s/%s/%s/%s%s' % (CFG_BASE_URL, CFG_SITE_RECORD, recid_to_display, tab_id, link_ln),
tab_id == tab,
unordered_tabs[tab_id]['enabled'])
for (tab_id, dummy_order) in ordered_tabs_id
if unordered_tabs[tab_id]['visible'] is True]
tabs_counts = get_detailed_page_tabs_counts(recid)
citedbynum = tabs_counts['Citations']
references = tabs_counts['References']
discussions = tabs_counts['Discussions']
# load content
if tab == 'usage':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
r = calculate_reading_similarity_list(recid, "downloads")
downloadsimilarity = None
downloadhistory = None
#if r:
# downloadsimilarity = r
if CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS:
downloadhistory = create_download_history_graph_and_box(recid, ln)
r = calculate_reading_similarity_list(recid, "pageviews")
viewsimilarity = None
if r:
viewsimilarity = r
content = websearch_templates.tmpl_detailed_record_statistics(recid,
ln,
downloadsimilarity=downloadsimilarity,
downloadhistory=downloadhistory,
viewsimilarity=viewsimilarity)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'citations':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(websearch_templates.tmpl_detailed_record_citations_prologue(recid, ln))
# Citing
citinglist = calculate_cited_by_list(recid)
req.write(websearch_templates.tmpl_detailed_record_citations_citing_list(recid,
ln,
citinglist,
sf=sf,
so=so,
sp=sp,
rm=rm))
# Self-cited
selfcited = rank_by_citations(get_self_cited_by(recid), verbose=verbose)
selfcited = reversed(selfcited[0])
selfcited = [recid for recid, dummy in selfcited]
req.write(websearch_templates.tmpl_detailed_record_citations_self_cited(recid,
ln, selfcited=selfcited, citinglist=citinglist))
# Co-cited
s = calculate_co_cited_with_list(recid)
cociting = None
if s:
cociting = s
req.write(websearch_templates.tmpl_detailed_record_citations_co_citing(recid,
ln,
cociting=cociting))
# Citation history, if needed
citationhistory = None
if citinglist:
citationhistory = create_citation_history_graph_and_box(recid, ln)
#debug
if verbose > 3:
write_warning("Citation graph debug: " +
str(len(citationhistory)), req=req)
req.write(websearch_templates.tmpl_detailed_record_citations_citation_history(ln, citationhistory))
# Citation log
entries = get_citers_log(recid)
req.write(websearch_templates.tmpl_detailed_record_citations_citation_log(ln, entries))
req.write(websearch_templates.tmpl_detailed_record_citations_epilogue(recid, ln))
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'references':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(format_record(recid, 'HDREF', ln=ln, user_info=user_info, verbose=verbose, force_2nd_pass=True))
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'keywords':
from invenio.legacy.bibclassify.webinterface import main_page
main_page(req, recid, tabs, ln,
webstyle_templates)
elif tab == 'plots':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln))
content = websearch_templates.tmpl_record_plots(recID=recid,
ln=ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'hepdata':
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
include_jquery=True,
include_mathjax=True))
from invenio.utils import hepdata as hepdatautils
from invenio.utils.hepdata import display as hepdatadisplayutils
data = hepdatautils.retrieve_data_for_record(recid)
if data:
content = websearch_templates.tmpl_record_hepdata(data, recid, True)
else:
content = websearch_templates.tmpl_record_no_hepdata()
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
else:
# Metadata tab
req.write(webstyle_templates.detailed_record_container_top(
recid,
tabs,
ln,
show_short_rec_p=False,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
creationdate = None
modificationdate = None
if record_exists(recid) == 1:
creationdate = get_creation_date(recid)
modificationdate = get_modification_date(recid)
content = print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm)
content = websearch_templates.tmpl_detailed_record_metadata(
recID=recid,
ln=ln,
format=format,
creationdate=creationdate,
modificationdate=modificationdate,
content=content)
# display of the next-hit/previous-hit/back-to-search links
# on the detailed record pages
content += websearch_templates.tmpl_display_back_to_search(req,
recid,
ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln,
creationdate=creationdate,
modificationdate=modificationdate,
show_short_rec_p=False))
if len(tabs) > 0:
# Add the mini box at bottom of the page
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
from invenio.modules.comments.api import get_mini_reviews
reviews = get_mini_reviews(recid=recid, ln=ln)
else:
reviews = ''
actions = format_record(recid, 'HDACT', ln=ln, user_info=user_info, verbose=verbose)
files = format_record(recid, 'HDFILE', ln=ln, user_info=user_info, verbose=verbose)
req.write(webstyle_templates.detailed_record_mini_panel(recid,
ln,
format,
files=files,
reviews=reviews,
actions=actions))
else:
# Other formats
for recid in recIDs:
req.write(print_record(recid, format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm))
else:
write_warning(_("Use different search terms."), req=req)
def print_records_prologue(req, format, cc=None):
"""
Print the appropriate prologue for list of records in the given
format.
"""
prologue = "" # no prologue needed for HTML or Text formats
if format.startswith('xm'):
prologue = websearch_templates.tmpl_xml_marc_prologue()
elif format.startswith('xn'):
prologue = websearch_templates.tmpl_xml_nlm_prologue()
elif format.startswith('xw'):
prologue = websearch_templates.tmpl_xml_refworks_prologue()
elif format.startswith('xr'):
prologue = websearch_templates.tmpl_xml_rss_prologue(cc=cc)
elif format.startswith('xe8x'):
prologue = websearch_templates.tmpl_xml_endnote_8x_prologue()
elif format.startswith('xe'):
prologue = websearch_templates.tmpl_xml_endnote_prologue()
elif format.startswith('xo'):
prologue = websearch_templates.tmpl_xml_mods_prologue()
elif format.startswith('xp'):
prologue = websearch_templates.tmpl_xml_podcast_prologue(cc=cc)
elif format.startswith('x'):
prologue = websearch_templates.tmpl_xml_default_prologue()
req.write(prologue)
def print_records_epilogue(req, format):
"""
Print the appropriate epilogue for list of records in the given
format.
"""
epilogue = "" # no epilogue needed for HTML or Text formats
if format.startswith('xm'):
epilogue = websearch_templates.tmpl_xml_marc_epilogue()
elif format.startswith('xn'):
epilogue = websearch_templates.tmpl_xml_nlm_epilogue()
elif format.startswith('xw'):
epilogue = websearch_templates.tmpl_xml_refworks_epilogue()
elif format.startswith('xr'):
epilogue = websearch_templates.tmpl_xml_rss_epilogue()
elif format.startswith('xe8x'):
epilogue = websearch_templates.tmpl_xml_endnote_8x_epilogue()
elif format.startswith('xe'):
epilogue = websearch_templates.tmpl_xml_endnote_epilogue()
elif format.startswith('xo'):
epilogue = websearch_templates.tmpl_xml_mods_epilogue()
elif format.startswith('xp'):
epilogue = websearch_templates.tmpl_xml_podcast_epilogue()
elif format.startswith('x'):
epilogue = websearch_templates.tmpl_xml_default_epilogue()
req.write(epilogue)
def get_record(recid):
"""Directly the record object corresponding to the recid."""
if CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE:
value = run_sql("SELECT value FROM bibfmt WHERE id_bibrec=%s AND FORMAT='recstruct'", (recid, ))
if value:
try:
val = value[0][0]
except IndexError:
### In case it does not exist, let's build it!
pass
else:
return deserialize_via_marshal(val)
return create_record(print_record(recid, 'xm'))[0]
def print_record(recID, format='hb', ot='', ln=CFG_SITE_LANG, decompress=zlib.decompress,
search_pattern=None, user_info=None, verbose=0, sf='', so='d',
sp='', rm='', brief_links=True):
"""
Prints record 'recID' formatted according to 'format'.
'sf' is sort field and 'rm' is ranking method that are passed here
only for proper linking purposes: e.g. when a certain ranking
method or a certain sort field was selected, keep it selected in
any dynamic search links that may be printed.
"""
if format == 'recstruct':
return get_record(recID)
#check from user information if the user has the right to see hidden fields/tags in the
#records as well
can_see_hidden = False
if user_info:
can_see_hidden = user_info.get('precached_canseehiddenmarctags', False)
if format == 'recjson':
import json
from invenio.modules.records.api import get_record as get_recjson
ot = ot if ot and len(ot) else None
return json.dumps(get_recjson(recID).dumps(
keywords=ot, filter_hidden=not can_see_hidden))
_ = gettext_set_language(ln)
# The 'attribute this paper' link is shown only if the session states it should and
# the record is included in the collections to which bibauthorid is limited.
if user_info:
display_claim_this_paper = (user_info.get("precached_viewclaimlink", False) and
recID in intbitset.union(*[get_collection_reclist(x)
for x in BIBAUTHORID_LIMIT_TO_COLLECTIONS]))
else:
display_claim_this_paper = False
can_edit_record = False
if check_user_can_edit_record(user_info, recID):
can_edit_record = True
out = ""
# sanity check:
record_exist_p = record_exists(recID)
if record_exist_p == 0: # doesn't exist
return out
# We must still check some special formats, but these
# should disappear when BibFormat improves.
if not (format.lower().startswith('t')
or format.lower().startswith('hm')
or str(format[0:3]).isdigit()
or ot):
# Unspecified format is hd
if format == '':
format = 'hd'
if record_exist_p == -1 and get_output_format_content_type(format) == 'text/html':
# HTML output displays a default value for deleted records.
            # Other formats have to deal with it themselves.
out += _("The record has been deleted.")
# was record deleted-but-merged ?
merged_recid = get_merged_recid(recID)
if merged_recid:
out += ' ' + _("The record %(x_rec)d replaces it.", x_rec=merged_recid)
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
# at the end of HTML brief mode, print the "Detailed record" functionality:
if brief_links and format.lower().startswith('hb') and \
format.lower() != 'hb_p':
out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
recID=recID,
sf=sf,
so=so,
sp=sp,
rm=rm,
display_claim_link=display_claim_this_paper,
display_edit_link=can_edit_record)
return out
if format == "marcxml" or format == "oai_dc":
out += " <record>\n"
out += " <header>\n"
for oai_id in get_fieldvalues(recID, CFG_OAI_ID_FIELD):
out += " <identifier>%s</identifier>\n" % oai_id
out += " <datestamp>%s</datestamp>\n" % get_modification_date(recID)
out += " </header>\n"
out += " <metadata>\n"
if format.startswith("xm") or format == "marcxml":
# look for detailed format existence:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format), 1)
if res and record_exist_p == 1 and not ot:
# record 'recID' is formatted in 'format', and we are not
# asking for field-filtered output; so print it:
out += "%s" % decompress(res[0][0])
elif ot:
# field-filtered output was asked for; print only some fields
record = get_record(recID)
if not can_see_hidden:
for tag in cfg['CFG_BIBFORMAT_HIDDEN_TAGS']:
del record[tag]
ot = list(set(ot) - set(cfg['CFG_BIBFORMAT_HIDDEN_TAGS']))
out += record_xml_output(record, ot)
else:
# record 'recID' is not formatted in 'format' or we ask
# for field-filtered output -- they are not in "bibfmt"
# table; so fetch all the data from "bibXXx" tables:
if format == "marcxml":
out += """ <record xmlns="http://www.loc.gov/MARC21/slim">\n"""
out += " <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
elif format.startswith("xm"):
out += """ <record>\n"""
out += " <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
if record_exist_p == -1:
# deleted record, so display only OAI ID and 980:
oai_ids = get_fieldvalues(recID, CFG_OAI_ID_FIELD)
if oai_ids:
out += "<datafield tag=\"%s\" ind1=\"%s\" ind2=\"%s\"><subfield code=\"%s\">%s</subfield></datafield>\n" % \
(CFG_OAI_ID_FIELD[0:3], CFG_OAI_ID_FIELD[3:4], CFG_OAI_ID_FIELD[4:5], CFG_OAI_ID_FIELD[5:6], oai_ids[0])
out += "<datafield tag=\"980\" ind1=\"\" ind2=\"\"><subfield code=\"c\">DELETED</subfield></datafield>\n"
else:
# controlfields
query = "SELECT b.tag,b.value,bb.field_number FROM bib00x AS b, bibrec_bib00x AS bb "\
"WHERE bb.id_bibrec=%s AND b.id=bb.id_bibxxx AND b.tag LIKE '00%%' "\
"ORDER BY bb.field_number, b.tag ASC"
res = run_sql(query, (recID, ))
for row in res:
field, value = row[0], row[1]
value = encode_for_xml(value)
out += """ <controlfield tag="%s">%s</controlfield>\n""" % \
(encode_for_xml(field[0:3]), value)
# datafields
i = 1 # Do not process bib00x and bibrec_bib00x, as
# they are controlfields. So start at bib01x and
                      # bibrec_bib01x (and set i = 0 at the end of
# first loop)
for digit1 in range(0, 10):
for digit2 in range(i, 10):
bx = "bib%d%dx" % (digit1, digit2)
bibx = "bibrec_bib%d%dx" % (digit1, digit2)
query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
"WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s"\
"ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
res = run_sql(query, (recID, str(digit1)+str(digit2)+'%'))
field_number_old = -999
field_old = ""
for row in res:
field, value, field_number = row[0], row[1], row[2]
ind1, ind2 = field[3], field[4]
if ind1 == "_" or ind1 == "":
ind1 = " "
if ind2 == "_" or ind2 == "":
ind2 = " "
# print field tag, unless hidden
printme = True
if not can_see_hidden:
for htag in cfg['CFG_BIBFORMAT_HIDDEN_TAGS']:
ltag = len(htag)
samelenfield = field[0:ltag]
if samelenfield == htag:
printme = False
if printme:
if field_number != field_number_old or field[:-1] != field_old[:-1]:
if field_number_old != -999:
out += """ </datafield>\n"""
out += """ <datafield tag="%s" ind1="%s" ind2="%s">\n""" % \
(encode_for_xml(field[0:3]), encode_for_xml(ind1), encode_for_xml(ind2))
field_number_old = field_number
field_old = field
# print subfield value
value = encode_for_xml(value)
out += """ <subfield code="%s">%s</subfield>\n""" % \
(encode_for_xml(field[-1:]), value)
# all fields/subfields printed in this run, so close the tag:
if field_number_old != -999:
out += """ </datafield>\n"""
                    i = 0 # subsequent digit1 passes should start at digit2=0 (e.g. bib10x and bibrec_bib10x)
# we are at the end of printing the record:
out += " </record>\n"
elif format == "xd" or format == "oai_dc":
# XML Dublin Core format, possibly OAI -- select only some bibXXx fields:
out += """ <dc xmlns="http://purl.org/dc/elements/1.1/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://purl.org/dc/elements/1.1/
http://www.openarchives.org/OAI/1.1/dc.xsd">\n"""
if record_exist_p == -1:
out += ""
else:
for f in get_fieldvalues(recID, "041__a"):
out += " <language>%s</language>\n" % f
for f in get_fieldvalues(recID, "100__a"):
out += " <creator>%s</creator>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "700__a"):
out += " <creator>%s</creator>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "245__a"):
out += " <title>%s</title>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "65017a"):
out += " <subject>%s</subject>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "8564_u"):
                if f.split('.')[-1] == 'png':
continue
out += " <identifier>%s</identifier>\n" % encode_for_xml(f)
for f in get_fieldvalues(recID, "520__a"):
out += " <description>%s</description>\n" % encode_for_xml(f)
out += " <date>%s</date>\n" % get_creation_date(recID)
out += " </dc>\n"
elif len(format) == 6 and str(format[0:3]).isdigit():
# user has asked to print some fields only
if format == "001":
out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, recID, format)
else:
vals = get_fieldvalues(recID, format)
for val in vals:
out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, val, format)
elif format.startswith('t'):
## user directly asked for some tags to be displayed only
if record_exist_p == -1:
out += get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)
else:
out += get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)
elif format == "hm":
if record_exist_p == -1:
out += "\n<pre style=\"margin: 1em 0px;\">" + cgi.escape(get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)) + "</pre>"
else:
out += "\n<pre style=\"margin: 1em 0px;\">" + cgi.escape(get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)) + "</pre>"
elif format.startswith("h") and ot:
## user directly asked for some tags to be displayed only
if record_exist_p == -1:
out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden) + "</pre>"
else:
out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ot, can_see_hidden) + "</pre>"
elif format == "hd":
# HTML detailed format
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
# look for detailed format existence:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format), 1)
if res:
# record 'recID' is formatted in 'format', so print it
out += "%s" % decompress(res[0][0])
else:
# record 'recID' is not formatted in 'format', so try to call BibFormat on the fly or use default format:
out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
if out_record_in_format:
out += out_record_in_format
else:
out += websearch_templates.tmpl_print_record_detailed(
ln = ln,
recID = recID,
)
elif format.startswith("hb_") or format.startswith("hd_"):
# underscore means that HTML brief/detailed formats should be called on-the-fly; suitable for testing formats
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
elif format.startswith("hx"):
# BibTeX format, called on the fly:
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
elif format.startswith("hs"):
# for citation/download similarity navigation links:
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
out += '<a href="%s">' % websearch_templates.build_search_url(recid=recID, ln=ln)
# firstly, title:
titles = get_fieldvalues(recID, "245__a")
if titles:
for title in titles:
out += "<strong>%s</strong>" % title
else:
# usual title not found, try conference title:
titles = get_fieldvalues(recID, "111__a")
if titles:
for title in titles:
out += "<strong>%s</strong>" % title
else:
# just print record ID:
out += "<strong>%s %d</strong>" % (get_field_i18nname("record ID", ln, False), recID)
out += "</a>"
# secondly, authors:
authors = get_fieldvalues(recID, "100__a") + get_fieldvalues(recID, "700__a")
if authors:
out += " - %s" % authors[0]
if len(authors) > 1:
out += " <em>et al</em>"
# thirdly publication info:
publinfos = get_fieldvalues(recID, "773__s")
if not publinfos:
publinfos = get_fieldvalues(recID, "909C4s")
if not publinfos:
publinfos = get_fieldvalues(recID, "037__a")
if not publinfos:
publinfos = get_fieldvalues(recID, "088__a")
if publinfos:
out += " - %s" % publinfos[0]
else:
# fourthly publication year (if not publication info):
years = get_fieldvalues(recID, "773__y")
if not years:
years = get_fieldvalues(recID, "909C4y")
if not years:
years = get_fieldvalues(recID, "260__c")
if years:
out += " (%s)" % years[0]
else:
# HTML brief format by default
if record_exist_p == -1:
out += _("The record has been deleted.")
else:
query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
res = run_sql(query, (recID, format))
if res:
# record 'recID' is formatted in 'format', so print it
out += "%s" % decompress(res[0][0])
else:
                # record 'recID' is not formatted in 'format', so try to call BibFormat on the fly or use default format:
if CFG_WEBSEARCH_CALL_BIBFORMAT:
out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose)
if out_record_in_format:
out += out_record_in_format
else:
out += websearch_templates.tmpl_print_record_brief(
ln = ln,
recID = recID,
)
else:
out += websearch_templates.tmpl_print_record_brief(
ln = ln,
recID = recID,
)
# at the end of HTML brief mode, print the "Detailed record" functionality:
if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
pass # do nothing for portfolio and on-the-fly formats
else:
out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
recID=recID,
sf=sf,
so=so,
sp=sp,
rm=rm,
display_claim_link=display_claim_this_paper,
display_edit_link=can_edit_record)
# print record closing tags, if needed:
if format == "marcxml" or format == "oai_dc":
out += " </metadata>\n"
out += " </record>\n"
return out
def call_bibformat(recID, format="HD", ln=CFG_SITE_LANG, search_pattern=None, user_info=None, verbose=0):
"""
Calls BibFormat and returns formatted record.
BibFormat will decide by itself if old or new BibFormat must be used.
"""
from invenio.modules.formatter.utils import get_pdf_snippets
keywords = []
if search_pattern is not None:
for unit in create_basic_search_units(None, str(search_pattern), None):
bsu_o, bsu_p, bsu_f, bsu_m = unit[0], unit[1], unit[2], unit[3]
if (bsu_o != '-' and bsu_f in [None, 'fulltext']):
if bsu_m == 'a' and bsu_p.startswith('%') and bsu_p.endswith('%'):
                    # remove leading and trailing `%' representing partial phrase search
keywords.append(bsu_p[1:-1])
else:
keywords.append(bsu_p)
out = format_record(recID,
of=format,
ln=ln,
search_pattern=keywords,
user_info=user_info,
verbose=verbose)
if CFG_WEBSEARCH_FULLTEXT_SNIPPETS and user_info and \
'fulltext' in user_info['uri'].lower():
# check snippets only if URL contains fulltext
# FIXME: make it work for CLI too, via new function arg
if keywords:
snippets = ''
try:
snippets = get_pdf_snippets(recID, keywords, user_info)
except:
register_exception()
if snippets:
out += snippets
return out
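# Illustrative usage of call_bibformat defined above (record ID and pattern
# are hypothetical):
#   html = call_bibformat(42, format="hd", ln="en", search_pattern="ellis muon")
# The search pattern is decomposed into keywords so that, when fulltext
# snippets are enabled and the requesting URL refers to fulltext, matching
# PDF excerpts can be appended to the formatted record.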
def log_query(hostname, query_args, uid=-1):
"""
Log query into the query and user_query tables.
Return id_query or None in case of problems.
"""
id_query = None
if uid >= 0:
# log the query only if uid is reasonable
res = run_sql("SELECT id FROM query WHERE urlargs=%s", (query_args,), 1)
try:
id_query = res[0][0]
except IndexError:
id_query = run_sql("INSERT INTO query (type, urlargs) VALUES ('r', %s)", (query_args,))
if id_query:
run_sql("INSERT INTO user_query (id_user, id_query, hostname, date) VALUES (%s, %s, %s, %s)",
(uid, id_query, hostname,
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
return id_query
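# Sketch of the logging scheme used by log_query above: the `query` table
# keeps one row per distinct URL argument string, while `user_query` records
# every execution of it.  Illustrative (hypothetical) rows:
#   query:      (id=7, type='r', urlargs='p=ellis&f=author')
#   user_query: (id_user=1, id_query=7, hostname='example.org',
#                date='2024-01-01 12:34:56')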
def log_query_info(action, p, f, colls, nb_records_found_total=-1):
"""Write some info to the log file for later analysis."""
try:
log = open(CFG_LOGDIR + "/search.log", "a")
log.write(time.strftime("%Y%m%d%H%M%S#", time.localtime()))
log.write(action+"#")
log.write(p+"#")
log.write(f+"#")
for coll in colls[:-1]:
log.write("%s," % coll)
log.write("%s#" % colls[-1])
log.write("%d" % nb_records_found_total)
log.write("\n")
log.close()
except:
pass
return
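# Example of a search.log line produced by log_query_info above
# (illustrative values):
#   20240101123456#ss#ellis#author#Theses,Books#42
# i.e. timestamp#action#pattern#field#collections#number_of_hits_found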
def clean_dictionary(dictionary, list_of_items):
"""Returns a copy of the dictionary with all the items
in the list_of_items as empty strings"""
out_dictionary = dictionary.copy()
out_dictionary.update((item, '') for item in list_of_items)
return out_dictionary
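# Illustrative example of clean_dictionary (values are hypothetical):
#   clean_dictionary({'p1': 'ellis', 'f1': 'author', 'p2': 'muon'}, ['p2'])
# would return {'p1': 'ellis', 'f1': 'author', 'p2': ''}, leaving the
# original dictionary untouched.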
### CALLABLES
def perform_request_search(req=None, cc=CFG_SITE_NAME, c=None, p="", f="", rg=None, sf="", so="a", sp="", rm="", of="id", ot="", aas=0,
p1="", f1="", m1="", op1="", p2="", f2="", m2="", op2="", p3="", f3="", m3="", sc=0, jrec=0,
recid=-1, recidb=-1, sysno="", id=-1, idb=-1, sysnb="", action="", d1="",
d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0, dt="", verbose=0, ap=0, ln=CFG_SITE_LANG, ec=None, tab="",
wl=0, em=""):
"""Perform search or browse request, without checking for
authentication. Return list of recIDs found, if of=id.
Otherwise create web page.
The arguments are as follows:
req - mod_python Request class instance.
cc - current collection (e.g. "ATLAS"). The collection the
user started to search/browse from.
c - collection list (e.g. ["Theses", "Books"]). The
collections user may have selected/deselected when
starting to search from 'cc'.
p - pattern to search for (e.g. "ellis and muon or kaon").
f - field to search within (e.g. "author").
rg - records in groups of (e.g. "10"). Defines how many hits
per collection in the search results page are
displayed. (Note that `rg' is ignored in case of `of=id'.)
sf - sort field (e.g. "title").
so - sort order ("a"=ascending, "d"=descending).
sp - sort pattern (e.g. "CERN-") -- in case there are more
values in a sort field, this argument tells which one
to prefer
rm - ranking method (e.g. "jif"). Defines whether results
should be ranked by some known ranking method.
of - output format (e.g. "hb"). Usually starting "h" means
HTML output (and "hb" for HTML brief, "hd" for HTML
detailed), "x" means XML output, "t" means plain text
output, "id" means no output at all but to return list
of recIDs found, "intbitset" means to return an intbitset
representation of the recIDs found (no sorting or ranking
will be performed). (Suitable for high-level API.)
ot - output only these MARC tags (e.g. "100,700,909C0b").
Useful if only some fields are to be shown in the
output, e.g. for library to control some fields.
em - output only part of the page.
aas - advanced search ("0" means no, "1" means yes). Whether
search was called from within the advanced search
interface.
p1 - first pattern to search for in the advanced search
interface. Much like 'p'.
f1 - first field to search within in the advanced search
interface. Much like 'f'.
m1 - first matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
op1 - first operator, to join the first and the second unit
in the advanced search interface. ("a" add, "o" or,
"n" not).
p2 - second pattern to search for in the advanced search
interface. Much like 'p'.
f2 - second field to search within in the advanced search
interface. Much like 'f'.
m2 - second matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
op2 - second operator, to join the second and the third unit
in the advanced search interface. ("a" add, "o" or,
"n" not).
p3 - third pattern to search for in the advanced search
interface. Much like 'p'.
f3 - third field to search within in the advanced search
interface. Much like 'f'.
m3 - third matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
sc - split by collection ("0" no, "1" yes). Governs whether
we want to present the results in a single huge list,
               or split by collection.
jrec - jump to record (e.g. "234"). Used for navigation
inside the search results. (Note that `jrec' is ignored
in case of `of=id'.)
recid - display record ID (e.g. "20000"). Do not
search/browse but go straight away to the Detailed
record page for the given recID.
recidb - display record ID bis (e.g. "20010"). If greater than
'recid', then display records from recid to recidb.
Useful for example for dumping records from the
database for reformatting.
sysno - display old system SYS number (e.g. ""). If you
migrate to Invenio from another system, and store your
old SYS call numbers, you can use them instead of recid
if you wish so.
id - the same as recid, in case recid is not set. For
backwards compatibility.
idb - the same as recid, in case recidb is not set. For
backwards compatibility.
sysnb - the same as sysno, in case sysno is not set. For
backwards compatibility.
action - action to do. "SEARCH" for searching, "Browse" for
browsing. Default is to search.
d1 - first datetime in full YYYY-mm-dd HH:MM:DD format
(e.g. "1998-08-23 12:34:56"). Useful for search limits
on creation/modification date (see 'dt' argument
below). Note that 'd1' takes precedence over d1y, d1m,
d1d if these are defined.
d1y - first date's year (e.g. "1998"). Useful for search
limits on creation/modification date.
d1m - first date's month (e.g. "08"). Useful for search
limits on creation/modification date.
d1d - first date's day (e.g. "23"). Useful for search
limits on creation/modification date.
d2 - second datetime in full YYYY-mm-dd HH:MM:DD format
(e.g. "1998-09-02 12:34:56"). Useful for search limits
on creation/modification date (see 'dt' argument
below). Note that 'd2' takes precedence over d2y, d2m,
d2d if these are defined.
d2y - second date's year (e.g. "1998"). Useful for search
limits on creation/modification date.
d2m - second date's month (e.g. "09"). Useful for search
limits on creation/modification date.
d2d - second date's day (e.g. "02"). Useful for search
limits on creation/modification date.
dt - first and second date's type (e.g. "c"). Specifies
whether to search in creation dates ("c") or in
modification dates ("m"). When dt is not set and d1*
and d2* are set, the default is "c".
verbose - verbose level (0=min, 9=max). Useful to print some
internal information on the searching process in case
something goes wrong.
ap - alternative patterns (0=no, 1=yes). In case no exact
match is found, the search engine can try alternative
patterns e.g. to replace non-alphanumeric characters by
a boolean query. ap defines if this is wanted.
ln - language of the search interface (e.g. "en"). Useful
for internationalization.
ec - list of external search engines to search as well
(e.g. "SPIRES HEP").
          wl - wildcard limit (e.g. 100): wildcard queries will be
               limited to at most this many results
"""
kwargs = prs_wash_arguments(req=req, cc=cc, c=c, p=p, f=f, rg=rg, sf=sf, so=so, sp=sp, rm=rm, of=of, ot=ot, aas=aas,
p1=p1, f1=f1, m1=m1, op1=op1, p2=p2, f2=f2, m2=m2, op2=op2, p3=p3, f3=f3, m3=m3, sc=sc, jrec=jrec,
recid=recid, recidb=recidb, sysno=sysno, id=id, idb=idb, sysnb=sysnb, action=action, d1=d1,
d1y=d1y, d1m=d1m, d1d=d1d, d2=d2, d2y=d2y, d2m=d2m, d2d=d2d, dt=dt, verbose=verbose, ap=ap, ln=ln, ec=ec,
tab=tab, wl=wl, em=em)
return prs_perform_search(kwargs=kwargs, **kwargs)
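# Illustrative usage of perform_request_search as a high-level API
# (record IDs are hypothetical):
#   perform_request_search(p="ellis", f="author", of="id")  -> e.g. [12, 15, 18]
#   perform_request_search(p="ellis", of="intbitset")       -> intbitset([12, 15, 18])
# With HTML output formats (of="hb", "hd", ...) the page is written to `req`
# instead of being returned.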
def prs_perform_search(kwargs=None, **dummy):
"""Internal call which does the search, it is calling standard Invenio;
Unless you know what you are doing, don't use this call as an API
"""
# separately because we can call it independently
out = prs_wash_arguments_colls(kwargs=kwargs, **kwargs)
if not out:
return out
return prs_search(kwargs=kwargs, **kwargs)
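# Rough outline of the prs_* pipeline (a simplified sketch, not an exhaustive
# call graph):
#   perform_request_search -> prs_wash_arguments -> prs_perform_search
#     -> prs_wash_arguments_colls -> prs_search
#   prs_search -> prs_detailed_record | prs_browse | prs_search_similar_records
#              | prs_search_cocitedwith | prs_search_common
#   prs_search_common -> prs_simple_search / prs_advanced_search
#     -> prs_store_results_in_cache
#     -> prs_intersect_with_colls_and_apply_search_limits -> prs_display_results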
def prs_wash_arguments_colls(kwargs=None, of=None, req=None, cc=None, c=None, sc=None, verbose=None,
aas=None, ln=None, em="", **dummy):
"""
Check and wash collection list argument before we start searching.
    If there is a problem, e.g. a collection is not defined, print a
    warning to the browser.
@return: True if collection list is OK, and various False values
(empty string, empty list) if there was an error.
"""
# raise an exception when trying to print out html from the cli
if of.startswith("h"):
assert req
# for every search engine request asking for an HTML output, we
# first regenerate cache of collection and field I18N names if
# needed; so that later we won't bother checking timestamps for
# I18N names at all:
if of.startswith("h"):
collection_i18nname_cache.recreate_cache_if_needed()
field_i18nname_cache.recreate_cache_if_needed()
try:
(cc, colls_to_display, colls_to_search, hosted_colls, wash_colls_debug) = wash_colls(cc, c, sc, verbose) # which colls to search and to display?
kwargs['colls_to_display'] = colls_to_display
kwargs['colls_to_search'] = colls_to_search
kwargs['hosted_colls'] = hosted_colls
kwargs['wash_colls_debug'] = wash_colls_debug
except InvenioWebSearchUnknownCollectionError as exc:
colname = exc.colname
if of.startswith("h"):
page_start(req, of, cc, aas, ln, getUid(req),
websearch_templates.tmpl_collection_not_found_page_title(colname, ln))
req.write(websearch_templates.tmpl_collection_not_found_page_body(colname, ln))
page_end(req, of, ln, em)
return ''
elif of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
page_end(req, of, ln, em)
return ''
else:
page_end(req, of, ln, em)
return ''
return True
def prs_wash_arguments(req=None, cc=CFG_SITE_NAME, c=None, p="", f="", rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS,
sf="", so="d", sp="", rm="", of="id", ot="", aas=0,
p1="", f1="", m1="", op1="", p2="", f2="", m2="", op2="", p3="", f3="", m3="",
sc=0, jrec=0, recid=-1, recidb=-1, sysno="", id=-1, idb=-1, sysnb="", action="", d1="",
d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0, dt="", verbose=0, ap=0, ln=CFG_SITE_LANG,
ec=None, tab="", uid=None, wl=0, em="", **dummy):
"""
Sets the (default) values and checks others for the PRS call
"""
# wash output format:
of = wash_output_format(of)
# wash all arguments requiring special care
p = wash_pattern(p)
f = wash_field(f)
p1 = wash_pattern(p1)
f1 = wash_field(f1)
p2 = wash_pattern(p2)
f2 = wash_field(f2)
p3 = wash_pattern(p3)
f3 = wash_field(f3)
(d1y, d1m, d1d, d2y, d2m, d2d) = map(int, (d1y, d1m, d1d, d2y, d2m, d2d))
datetext1, datetext2 = wash_dates(d1, d1y, d1m, d1d, d2, d2y, d2m, d2d)
# wash ranking method:
if not is_method_valid(None, rm):
rm = ""
# backwards compatibility: id, idb, sysnb -> recid, recidb, sysno (if applicable)
if sysnb != "" and sysno == "":
sysno = sysnb
if id > 0 and recid == -1:
recid = id
if idb > 0 and recidb == -1:
recidb = idb
    # TODO deduce passed search limiting criteria (if applicable)
pl, pl_in_url = "", "" # no limits by default
if action != "browse" and req and not isinstance(req, (cStringIO.OutputType, dict)) \
and getattr(req, 'args', None): # we do not want to add options while browsing or while calling via command-line
fieldargs = cgi.parse_qs(req.args)
for fieldcode in get_fieldcodes():
if fieldcode in fieldargs:
for val in fieldargs[fieldcode]:
pl += "+%s:\"%s\" " % (fieldcode, val)
pl_in_url += "&%s=%s" % (urllib.quote(fieldcode), urllib.quote(val))
# deduce recid from sysno argument (if applicable):
if sysno: # ALEPH SYS number was passed, so deduce DB recID for the record:
recid = get_mysql_recid_from_aleph_sysno(sysno)
if recid is None:
recid = 0 # use recid 0 to indicate that this sysno does not exist
# deduce collection we are in (if applicable):
if recid > 0:
referer = None
if req:
referer = req.headers_in.get('Referer')
cc = guess_collection_of_a_record(recid, referer)
# deduce user id (if applicable):
if uid is None:
try:
uid = getUid(req)
except:
uid = 0
_ = gettext_set_language(ln)
if aas == 2: #add-to-search interface
p = create_add_to_search_pattern(p, p1, f1, m1, op1)
default_addtosearch_args = websearch_templates.restore_search_args_to_default(['p1', 'f1', 'm1', 'op1'])
if req:
req.argd.update(default_addtosearch_args)
req.argd['p'] = p
kwargs = {'req': req, 'cc': cc, 'c': c, 'p': p, 'f': f, 'rg': rg, 'sf': sf,
'so': so, 'sp': sp, 'rm': rm, 'of': of, 'ot': ot, 'aas': aas,
'p1': p1, 'f1': f1, 'm1': m1, 'op1': op1, 'p2': p2, 'f2': f2,
'm2': m2, 'op2': op2, 'p3': p3, 'f3': f3, 'm3': m3, 'sc': sc,
'jrec': jrec, 'recid': recid, 'recidb': recidb, 'sysno': sysno,
'id': id, 'idb': idb, 'sysnb': sysnb, 'action': action, 'd1': d1,
'd1y': d1y, 'd1m': d1m, 'd1d': d1d, 'd2': d2, 'd2y': d2y,
'd2m': d2m, 'd2d': d2d, 'dt': dt, 'verbose': verbose, 'ap': ap,
'ln': ln, 'ec': ec, 'tab': tab, 'wl': wl, 'em': em,
'datetext1': datetext1, 'datetext2': datetext2, 'uid': uid,
'pl': pl, 'pl_in_url': pl_in_url, '_': _,
'selected_external_collections_infos': None,
}
kwargs.update(**dummy)
return kwargs
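# Illustrative example of the washing step above (argument values are
# hypothetical): calling
#   prs_wash_arguments(p=" ellis ", f="author", d1y=2004, d1m=1, d1d=1)
# returns a kwargs dictionary containing, among other keys, the washed
# pattern, the derived 'datetext1'/'datetext2' date strings and the deduced
# 'pl'/'pl_in_url' limit strings, ready to be passed on as
# prs_search(kwargs=kwargs, **kwargs).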
def prs_search(kwargs=None, recid=0, req=None, cc=None, p=None, p1=None, p2=None, p3=None,
f=None, ec=None, verbose=None, ln=None, selected_external_collections_infos=None,
action=None, rm=None, of=None, em=None,
**dummy):
"""
    This function writes various bits into the req object as the search
    proceeds (so that pieces of the page are rendered even before the
    search has ended).
"""
## 0 - start output
if recid >= 0: # recid can be 0 if deduced from sysno and if such sysno does not exist
output = prs_detailed_record(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif action == "browse":
## 2 - browse needed
of = 'hb'
output = prs_browse(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif rm and p.startswith("recid:"):
## 3-ter - similarity search (or old-style citation search) needed
output = prs_search_similar_records(kwargs=kwargs, **kwargs)
if output is not None:
return output
elif p.startswith("cocitedwith:"): #WAS EXPERIMENTAL
## 3-terter - cited by search needed
output = prs_search_cocitedwith(kwargs=kwargs, **kwargs)
if output is not None:
return output
else:
## 3 - common search needed
output = prs_search_common(kwargs=kwargs, **kwargs)
if output is not None:
return output
# External searches
if of.startswith("h"):
if not of in ['hcs', 'hcs2']:
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
def prs_detailed_record(kwargs=None, req=None, of=None, cc=None, aas=None, ln=None, uid=None, recid=None, recidb=None,
p=None, verbose=None, tab=None, sf=None, so=None, sp=None, rm=None, ot=None, _=None, em=None,
**dummy):
"""Formats and prints one record"""
## 1 - detailed record display
title, description, keywords = \
websearch_templates.tmpl_record_page_header_content(req, recid, ln)
if req is not None and req.method != 'HEAD':
page_start(req, of, cc, aas, ln, uid, title, description, keywords, recid, tab, em)
# Default format is hb but we are in detailed -> change 'of'
if of == "hb":
of = "hd"
if record_exists(recid):
if recidb <= recid: # sanity check
recidb = recid + 1
if of in ["id", "intbitset"]:
result = [recidx for recidx in range(recid, recidb) if record_exists(recidx)]
if of == "intbitset":
return intbitset(result)
else:
return result
else:
print_records(req, range(recid, recidb), -1, -9999, of, ot, ln,
search_pattern=p, verbose=verbose, tab=tab, sf=sf,
so=so, sp=sp, rm=rm, em=em, nb_found=len(range(recid, recidb)))
if req and of.startswith("h"): # register detailed record page view event
client_ip_address = str(req.remote_ip)
register_page_view_event(recid, uid, client_ip_address)
else: # record does not exist
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
elif of.startswith("h"):
if req.header_only:
raise apache.SERVER_RETURN(apache.HTTP_NOT_FOUND)
else:
write_warning(_("Requested record does not seem to exist."), req=req)
def prs_browse(kwargs=None, req=None, of=None, cc=None, aas=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None,
colls_to_search=None, verbose=None, em=None, **dummy):
page_start(req, of, cc, aas, ln, uid, _("Browse"), p=create_page_title_search_pattern_info(p, p1, p2, p3), em=em)
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
write_warning(create_exact_author_browse_help_link(p, p1, p2, p3, f, f1, f2, f3,
rm, cc, ln, jrec, rg, aas, action),
req=req)
try:
if aas == 1 or (p1 or p2 or p3):
browse_pattern(req, colls_to_search, p1, f1, rg, ln)
browse_pattern(req, colls_to_search, p2, f2, rg, ln)
browse_pattern(req, colls_to_search, p3, f3, rg, ln)
else:
browse_pattern(req, colls_to_search, p, f, rg, ln)
except KeyboardInterrupt:
        # This usually happens when running from the command line;
        # the error handling we want there is different
raise
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
def prs_search_similar_records(kwargs=None, req=None, of=None, cc=None, pl_in_url=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None, em=None,
verbose=None, **dummy):
if req and req.method != 'HEAD':
page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3),
em=em)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
recid = p[6:]
if record_exists(recid) != 1:
# record does not exist
if of.startswith("h"):
if req.header_only:
raise apache.SERVER_RETURN(apache.HTTP_NOT_FOUND)
else:
write_warning(_("Requested record does not seem to exist."), req=req)
if of == "id":
return []
if of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
        # the record does exist, so find records similar to it
t1 = os.times()[4]
(results_similar_recIDs,
results_similar_relevances,
results_similar_relevances_prologue,
results_similar_relevances_epilogue,
results_similar_comments) = \
rank_records_bibrank(rank_method_code=rm,
rank_limit_relevance=0,
hitset=get_collection_reclist(cc),
related_to=[p],
verbose=verbose,
field=f,
rg=rg,
jrec=jrec)
if results_similar_recIDs:
t2 = os.times()[4]
cpu_time = t2 - t1
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, cc, len(results_similar_recIDs),
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
write_warning(results_similar_comments, req=req)
print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
results_similar_relevances,
results_similar_relevances_prologue,
results_similar_relevances_epilogue,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_similar_recIDs))
elif of == "id":
return results_similar_recIDs
elif of == "intbitset":
return intbitset(results_similar_recIDs)
elif of.startswith("x"):
print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
results_similar_relevances,
results_similar_relevances_prologue,
results_similar_relevances_epilogue,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_similar_recIDs))
else:
# rank_records failed and returned some error message to display:
if of.startswith("h"):
write_warning(results_similar_relevances_prologue, req=req)
write_warning(results_similar_relevances_epilogue, req=req)
write_warning(results_similar_comments, req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
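# Illustrative trigger for prs_search_similar_records above (record ID is
# hypothetical): a query such as p='recid:123' together with a ranking method
# rm makes prs_search dispatch here; recid = p[6:] then yields '123' and
# rank_records_bibrank is asked for records related to it within the current
# collection.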
def prs_search_cocitedwith(kwargs=None, req=None, of=None, cc=None, pl_in_url=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None,
verbose=None, em=None, **dummy):
page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3),
em=em)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
recID = p[12:]
if record_exists(recID) != 1:
# record does not exist
if of.startswith("h"):
write_warning(_("Requested record does not seem to exist."), req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
        # the record does exist, so find records co-cited with it:
t1 = os.times()[4]
results_cocited_recIDs = [x[0] for x in calculate_co_cited_with_list(int(recID))]
if results_cocited_recIDs:
t2 = os.times()[4]
cpu_time = t2 - t1
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, CFG_SITE_NAME, len(results_cocited_recIDs),
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_cocited_recIDs))
elif of == "id":
return results_cocited_recIDs
elif of == "intbitset":
return intbitset(results_cocited_recIDs)
elif of.startswith("x"):
print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln,
search_pattern=p, verbose=verbose, sf=sf, so=so,
sp=sp, rm=rm, em=em,
nb_found=len(results_cocited_recIDs))
else:
        # no co-cited records were found, so display a 'nothing found' message:
if of.startswith("h"):
write_warning("nothing found", req=req)
if of == "id":
return []
elif of == "intbitset":
return intbitset()
elif of == "recjson":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
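# Analogously, a query such as p='cocitedwith:123' (hypothetical record ID)
# dispatches to prs_search_cocitedwith above; recID = p[12:] yields '123' and
# the hit list is taken from calculate_co_cited_with_list instead of the
# ranking engine.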
def prs_search_hosted_collections(kwargs=None, req=None, of=None, ln=None, _=None, p=None,
p1=None, p2=None, p3=None, hosted_colls=None, f=None,
colls_to_search=None, hosted_colls_actual_or_potential_results_p=None,
verbose=None, **dummy):
hosted_colls_results = hosted_colls_timeouts = hosted_colls_true_results = None
    # search the hosted collections only if the output format is html or xml
if hosted_colls and (of.startswith("h") or of.startswith("x")) and not p.startswith("recid:"):
# hosted_colls_results : the hosted collections' searches that did not timeout
# hosted_colls_timeouts : the hosted collections' searches that timed out and will be searched later on again
(hosted_colls_results, hosted_colls_timeouts) = calculate_hosted_collections_results(req, [p, p1, p2, p3], f, hosted_colls, verbose, ln, CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH)
# successful searches
if hosted_colls_results:
hosted_colls_true_results = []
for result in hosted_colls_results:
# if the number of results is None or 0 (or False) then just do nothing
if result[1] is None or result[1] is False:
                    # these are the searches that returned no or zero results
if verbose:
write_warning("Hosted collections (perform_search_request): %s returned no results" % result[0][1].name, req=req)
else:
# these are the searches that actually returned results on time
hosted_colls_true_results.append(result)
if verbose:
write_warning("Hosted collections (perform_search_request): %s returned %s results in %s seconds" % (result[0][1].name, result[1], result[2]), req=req)
else:
if verbose:
write_warning("Hosted collections (perform_search_request): there were no hosted collections results to be printed at this time", req=req)
if hosted_colls_timeouts:
if verbose:
for timeout in hosted_colls_timeouts:
write_warning("Hosted collections (perform_search_request): %s timed out and will be searched again later" % timeout[0][1].name, req=req)
    # we need to know, for later use, whether there were any hosted collections to be searched, even if in the end they were not
elif hosted_colls and ((not (of.startswith("h") or of.startswith("x"))) or p.startswith("recid:")):
(hosted_colls_results, hosted_colls_timeouts) = (None, None)
else:
if verbose:
write_warning("Hosted collections (perform_search_request): there were no hosted collections to be searched", req=req)
## let's define some useful boolean variables:
# True means there are actual or potential hosted collections results to be printed
kwargs['hosted_colls_actual_or_potential_results_p'] = not (not hosted_colls or not ((hosted_colls_results and hosted_colls_true_results) or hosted_colls_timeouts))
# True means there are hosted collections timeouts to take care of later
# (useful for more accurate printing of results later)
kwargs['hosted_colls_potential_results_p'] = not (not hosted_colls or not hosted_colls_timeouts)
# True means we only have hosted collections to deal with
kwargs['only_hosted_colls_actual_or_potential_results_p'] = not colls_to_search and hosted_colls_actual_or_potential_results_p
kwargs['hosted_colls_results'] = hosted_colls_results
kwargs['hosted_colls_timeouts'] = hosted_colls_timeouts
kwargs['hosted_colls_true_results'] = hosted_colls_true_results
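# The three boolean flags stored into kwargs above can be read as follows
# (an informal summary of the expressions, not additional logic):
#   hosted_colls_actual_or_potential_results_p -- some hosted collection
#       either already returned hits or timed out and may still return hits;
#   hosted_colls_potential_results_p -- at least one hosted collection
#       timed out;
#   only_hosted_colls_actual_or_potential_results_p -- no local collections
#       are being searched, only hosted ones with actual or potential results.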
def prs_advanced_search(results_in_any_collection, kwargs=None, req=None, of=None,
cc=None, ln=None, _=None, p=None, p1=None, p2=None, p3=None,
f=None, f1=None, m1=None, op1=None, f2=None, m2=None,
op2=None, f3=None, m3=None, ap=None, ec=None,
selected_external_collections_infos=None, verbose=None,
wl=None, em=None, **dummy):
len_results_p1 = 0
len_results_p2 = 0
len_results_p3 = 0
try:
results_in_any_collection.union_update(search_pattern_parenthesised(req, p1, f1, m1, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl))
len_results_p1 = len(results_in_any_collection)
if len_results_p1 == 0:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec,
verbose, ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
if p2:
results_tmp = search_pattern_parenthesised(req, p2, f2, m2, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
len_results_p2 = len(results_tmp)
if op1 == "a": # add
results_in_any_collection.intersection_update(results_tmp)
elif op1 == "o": # or
results_in_any_collection.union_update(results_tmp)
elif op1 == "n": # not
results_in_any_collection.difference_update(results_tmp)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(op1), "Error", req=req)
if len(results_in_any_collection) == 0:
if of.startswith("h"):
if len_results_p2:
#each individual query returned results, but the boolean operation did not
nearestterms = []
nearest_search_args = req.argd.copy()
if p1:
nearestterms.append((p1, len_results_p1, clean_dictionary(nearest_search_args, ['p2', 'f2', 'm2', 'p3', 'f3', 'm3'])))
nearestterms.append((p2, len_results_p2, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p3', 'f3', 'm3'])))
write_warning(websearch_templates.tmpl_search_no_boolean_hits(ln=ln, nearestterms=nearestterms), req=req)
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
if p3:
results_tmp = search_pattern_parenthesised(req, p3, f3, m3, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
len_results_p3 = len(results_tmp)
if op2 == "a": # add
results_in_any_collection.intersection_update(results_tmp)
elif op2 == "o": # or
results_in_any_collection.union_update(results_tmp)
elif op2 == "n": # not
results_in_any_collection.difference_update(results_tmp)
else:
if of.startswith("h"):
write_warning("Invalid set operation %s." % cgi.escape(op2), "Error", req=req)
if len(results_in_any_collection) == 0 and len_results_p3 and of.startswith("h"):
#each individual query returned results but the boolean operation did not
nearestterms = []
nearest_search_args = req.argd.copy()
if p1:
nearestterms.append((p1, len_results_p1, clean_dictionary(nearest_search_args, ['p2', 'f2', 'm2', 'p3', 'f3', 'm3'])))
if p2:
nearestterms.append((p2, len_results_p2, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p3', 'f3', 'm3'])))
nearestterms.append((p3, len_results_p3, clean_dictionary(nearest_search_args, ['p1', 'f1', 'm1', 'p2', 'f2', 'm2'])))
write_warning(websearch_templates.tmpl_search_no_boolean_hits(ln=ln, nearestterms=nearestterms), req=req)
except KeyboardInterrupt:
        # This usually happens when running from the command line;
        # the error handling we want there is different
raise
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
def prs_simple_search(results_in_any_collection, kwargs=None, req=None, of=None, cc=None, ln=None, p=None, f=None,
p1=None, p2=None, p3=None, ec=None, verbose=None, selected_external_collections_infos=None,
only_hosted_colls_actual_or_potential_results_p=None, query_representation_in_cache=None,
ap=None, hosted_colls_actual_or_potential_results_p=None, wl=None, em=None,
**dummy):
try:
results_in_cache = intbitset().fastload(
search_results_cache.get(query_representation_in_cache))
except:
results_in_cache = None
if results_in_cache is not None:
        # query results were found in the cache, so reuse them:
results_in_any_collection.union_update(results_in_cache)
if verbose and of.startswith("h"):
write_warning("Search stage 0: query found in cache, reusing cached results.", req=req)
else:
try:
# added the display_nearest_terms_box parameter to avoid printing out the "Nearest terms in any collection"
# recommendations when there are results only in the hosted collections. Also added the if clause to avoid
# searching in case we know we only have actual or potential hosted collections results
if not only_hosted_colls_actual_or_potential_results_p:
results_in_any_collection.union_update(search_pattern_parenthesised(req, p, f, ap=ap, of=of, verbose=verbose, ln=ln,
display_nearest_terms_box=not hosted_colls_actual_or_potential_results_p,
wl=wl))
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
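# Note on the caching used in prs_simple_search above (informal, no extra
# logic): the cache key is a canonical representation of the query and the
# cached value is an intbitset serialised with fastdump(), so
# intbitset().fastload(...) reconstructs the exact hit set without re-running
# the search.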
def prs_intersect_results_with_collrecs(results_final, results_in_any_collection,
kwargs=None, colls_to_search=None,
req=None, of=None, ln=None,
cc=None, p=None, p1=None, p2=None, p3=None, f=None,
ec=None, verbose=None, selected_external_collections_infos=None,
em=None, **dummy):
display_nearest_terms_box=not kwargs['hosted_colls_actual_or_potential_results_p']
try:
# added the display_nearest_terms_box parameter to avoid printing out the "Nearest terms in any collection"
        # recommendations when there are results only in the hosted collections. Also added the if clause to avoid
# searching in case we know since the last stage that we have no results in any collection
if len(results_in_any_collection) != 0:
results_final.update(intersect_results_with_collrecs(req, results_in_any_collection, colls_to_search, of,
verbose, ln, display_nearest_terms_box=display_nearest_terms_box))
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
def prs_store_results_in_cache(query_representation_in_cache, results_in_any_collection, req=None, verbose=None, of=None, **dummy):
if CFG_WEBSEARCH_SEARCH_CACHE_SIZE > 0:
search_results_cache.set(query_representation_in_cache,
results_in_any_collection.fastdump(),
timeout=CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT)
search_results_cache.set(query_representation_in_cache + '::cc',
dummy.get('cc', CFG_SITE_NAME),
timeout=CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT)
if req:
from flask import request
req = request
search_results_cache.set(query_representation_in_cache + '::p',
req.values.get('p', ''),
timeout=CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT)
if verbose and of.startswith("h"):
        write_warning("Search stage 3: storing query results in cache.", req=req)
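# Illustrative layout of the cache entries written by
# prs_store_results_in_cache above (key suffixes as used in the code, values
# hypothetical):
#   '<query-key>'     -> fastdump()'ed intbitset of the matching record IDs
#   '<query-key>::cc' -> the collection name, e.g. 'Articles'
#   '<query-key>::p'  -> the raw 'p' value of the request, e.g. 'ellis'
# All entries expire after CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT seconds.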
def prs_apply_search_limits(results_final, kwargs=None, req=None, of=None, cc=None, ln=None, _=None,
p=None, p1=None, p2=None, p3=None, f=None, pl=None, ap=None, dt=None,
ec=None, selected_external_collections_infos=None,
hosted_colls_actual_or_potential_results_p=None,
datetext1=None, datetext2=None, verbose=None, wl=None, em=None,
**dummy):
if datetext1 != "" and results_final != {}:
if verbose and of.startswith("h"):
write_warning("Search stage 5: applying time etc limits, from %s until %s..." % (datetext1, datetext2), req=req)
try:
results_temp = intersect_results_with_hitset(
req,
results_final,
search_unit_in_bibrec(datetext1, datetext2, dt),
ap,
aptext= _("No match within your time limits, "
"discarding this condition..."),
of=of)
if results_temp:
results_final.update(results_temp)
else:
results_final.clear()
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
#if of.startswith("x"):
# # Print empty, but valid XML
# print_records_prologue(req, of)
# print_records_epilogue(req, of)
return page_end(req, of, ln, em)
if pl and results_final != {}:
pl = wash_pattern(pl)
if verbose and of.startswith("h"):
write_warning("Search stage 5: applying search pattern limit %s..." % cgi.escape(pl), req=req)
try:
results_temp = intersect_results_with_hitset(
req,
results_final,
search_pattern_parenthesised(req, pl, ap=0, ln=ln, wl=wl),
ap,
aptext=_("No match within your search limits, "
"discarding this condition..."),
of=of)
if results_temp:
results_final.update(results_temp)
else:
results_final.clear()
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
return page_end(req, of, ln, em)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search_with_em(req, cc, [p, p1, p2, p3], f, ec, verbose,
ln, selected_external_collections_infos, em=em)
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln, em)
def prs_split_into_collections(kwargs=None, results_final=None, colls_to_search=None, hosted_colls_results=None,
cpu_time=0, results_final_nb_total=None, hosted_colls_actual_or_potential_results_p=None,
hosted_colls_true_results=None, hosted_colls_timeouts=None, **dummy):
results_final_nb_total = 0
results_final_nb = {} # will hold number of records found in each collection
# (in simple dict to display overview more easily)
for coll in results_final.keys():
results_final_nb[coll] = len(results_final[coll])
#results_final_nb_total += results_final_nb[coll]
# Now let us calculate results_final_nb_total more precisely,
# in order to get the total number of "distinct" hits across
# searched collections; this is useful because a record might
# have been attributed to more than one primary collection; so
# we have to avoid counting it multiple times. The price to
# pay for this accuracy of results_final_nb_total is somewhat
# increased CPU time.
    if len(results_final) == 1:
# only one collection; no need to union them
results_final_for_all_selected_colls = results_final.values()[0]
results_final_nb_total = results_final_nb.values()[0]
else:
# okay, some work ahead to union hits across collections:
results_final_for_all_selected_colls = intbitset()
for coll in results_final.keys():
results_final_for_all_selected_colls.union_update(results_final[coll])
results_final_nb_total = len(results_final_for_all_selected_colls)
#if hosted_colls and (of.startswith("h") or of.startswith("x")):
if hosted_colls_actual_or_potential_results_p:
if hosted_colls_results:
for result in hosted_colls_true_results:
colls_to_search.append(result[0][1].name)
results_final_nb[result[0][1].name] = result[1]
results_final_nb_total += result[1]
cpu_time += result[2]
if hosted_colls_timeouts:
for timeout in hosted_colls_timeouts:
colls_to_search.append(timeout[1].name)
# use -963 as a special number to identify the collections that timed out
results_final_nb[timeout[1].name] = -963
kwargs['results_final_nb'] = results_final_nb
kwargs['results_final_nb_total'] = results_final_nb_total
kwargs['results_final_for_all_selected_colls'] = results_final_for_all_selected_colls
kwargs['cpu_time'] = cpu_time #rca TODO: check where the cpu_time is used, this line was missing
return (results_final_nb, results_final_nb_total, results_final_for_all_selected_colls)
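# Illustrative return value of prs_split_into_collections (hypothetical data):
#   ({'Articles': 10, 'Books': 5}, 13, intbitset([...]))
# Note that the total (13) may be smaller than the per-collection sum (15),
# because a record attributed to several primary collections is counted once.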
def prs_summarize_records(kwargs=None, req=None, p=None, f=None, aas=None,
p1=None, p2=None, p3=None, f1=None, f2=None, f3=None, op1=None, op2=None,
ln=None, results_final_for_all_selected_colls=None, of='hcs', **dummy):
# feed the current search to be summarized:
from invenio.legacy.search_engine.summarizer import summarize_records
search_p = p
search_f = f
if not p and (aas == 1 or p1 or p2 or p3):
op_d = {'n': ' and not ', 'a': ' and ', 'o': ' or ', '': ''}
triples = ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, ''])
triples_len = len(triples)
for i in range(triples_len):
fi, pi, oi = triples[i] # e.g.:
if i < triples_len-1 and not triples[i+1][1]: # if p2 empty
triples[i+1][0] = '' # f2 must be too
oi = '' # and o1
if ' ' in pi:
pi = '"'+pi+'"'
if fi:
fi = fi + ':'
search_p += fi + pi + op_d[oi]
search_f = ''
summarize_records(results_final_for_all_selected_colls, of, ln, search_p, search_f, req)
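# Illustrative example of the pattern reconstruction in prs_summarize_records
# (hypothetical input): with p='', aas=1, p1='ellis', f1='author', op1='a',
# p2='higgs boson', f2='', op2='', p3='', the summarizer receives
#   search_p = 'author:ellis and "higgs boson"'
# and search_f is reset to ''.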
def prs_print_records(kwargs=None, results_final=None, req=None, of=None, cc=None, pl_in_url=None,
ln=None, _=None, p=None, p1=None, p2=None, p3=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, d1y=None, d1m=None,
d1d=None, d2y=None, d2m=None, d2d=None, dt=None, jrec=None, colls_to_search=None,
hosted_colls_actual_or_potential_results_p=None, hosted_colls_results=None,
hosted_colls_true_results=None, hosted_colls_timeouts=None, results_final_nb=None,
cpu_time=None, verbose=None, em=None, **dummy):
if len(colls_to_search) > 1:
cpu_time = -1 # we do not want to have search time printed on each collection
print_records_prologue(req, of, cc=cc)
results_final_colls = []
wlqh_results_overlimit = 0
for coll in colls_to_search:
if coll in results_final and len(results_final[coll]):
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
results_final_recIDs = list(results_final[coll])
results_final_nb_found = len(results_final_recIDs)
results_final_relevances = []
results_final_relevances_prologue = ""
results_final_relevances_epilogue = ""
if rm: # do we have to rank?
results_final_recIDs_ranked, results_final_relevances, results_final_relevances_prologue, results_final_relevances_epilogue, results_final_comments = \
rank_records(req, rm, 0, results_final[coll],
string.split(p) + string.split(p1) +
string.split(p2) + string.split(p3), verbose, so, of, ln, rg, jrec, kwargs['f'])
if of.startswith("h"):
write_warning(results_final_comments, req=req)
if results_final_recIDs_ranked:
results_final_recIDs = results_final_recIDs_ranked
else:
# rank_records failed and returned some error message to display:
write_warning(results_final_relevances_prologue, req=req)
write_warning(results_final_relevances_epilogue, req=req)
else:
results_final_recIDs = sort_records(req, results_final_recIDs, sf, so, sp, verbose, of, ln, rg, jrec)
if len(results_final_recIDs) < CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT:
results_final_colls.append(results_final_recIDs)
else:
wlqh_results_overlimit = 1
print_records(req, results_final_recIDs, jrec, rg, of, ot, ln,
results_final_relevances,
results_final_relevances_prologue,
results_final_relevances_epilogue,
search_pattern=p,
print_records_prologue_p=False,
print_records_epilogue_p=False,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm,
em=em,
nb_found=results_final_nb_found)
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1, em=em))
if req and not isinstance(req, cStringIO.OutputType):
# store the last search results page
session_param_set(req, 'websearch-last-query', req.unparsed_uri)
if wlqh_results_overlimit:
results_final_colls = None
# store list of results if user wants to display hits
# in a single list, or store list of collections of records
# if user displays hits split by collections:
session_param_set(req, 'websearch-last-query-hits', results_final_colls)
#if hosted_colls and (of.startswith("h") or of.startswith("x")):
if hosted_colls_actual_or_potential_results_p:
if hosted_colls_results:
# TODO: add a verbose message here
for result in hosted_colls_true_results:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, em=em))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg, em=em))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts:
# TODO: add a verbose message here
# TODO: check if verbose messages still work when dealing with (re)calculations of timeouts
(hosted_colls_timeouts_results, hosted_colls_timeouts_timeouts) = do_calculate_hosted_collections_results(req, ln, None, verbose, None, hosted_colls_timeouts, CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH)
if hosted_colls_timeouts_results:
for result in hosted_colls_timeouts_results:
if result[1] is None or result[1] is False:
                        ## these are the searches that returned no or zero results
## also print a nearest terms box, in case this is the only
## collection being searched and it returns no results?
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, no_records_found=True, limit=rg, em=em))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
else:
# these are the searches that actually returned results on time
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg, em=em))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts_timeouts:
for timeout in hosted_colls_timeouts_timeouts:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=timeout[0], ln=ln, of=of, req=req, search_timed_out=True, limit=rg, em=em))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
print_records_epilogue(req, of)
if f == "author" and of.startswith("h"):
req.write(create_similarly_named_authors_link_box(p, ln))
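# Note on the -963 sentinel used in prs_print_records above: it marks hosted
# collections whose search timed out, so that the hosted-collection templates
# can distinguish a timed-out search from a real hit count (an assumption
# about the templates' behaviour, based on the comment in
# prs_split_into_collections).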
def prs_log_query(kwargs=None, req=None, uid=None, of=None, ln=None, p=None, f=None,
colls_to_search=None, results_final_nb_total=None, em=None, **dummy):
# FIXME move query logging to signal receiver
# log query:
try:
from flask.ext.login import current_user
if req:
from flask import request
req = request
id_query = log_query(req.host,
'&'.join(map(lambda (k,v): k+'='+v, request.values.iteritems(multi=True))),
uid)
#id_query = log_query(req.remote_host, req.args, uid)
#of = request.values.get('of', 'hb')
if of.startswith("h") and id_query and (em == '' or EM_REPOSITORY["alert"] in em):
if not of in ['hcs', 'hcs2']:
# display alert/RSS teaser for non-summary formats:
display_email_alert_part = True
if current_user:
if current_user['email'] == 'guest':
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
display_email_alert_part = False
else:
if not current_user['precached_usealerts']:
display_email_alert_part = False
from flask import flash
flash(websearch_templates.tmpl_alert_rss_teaser_box_for_query(id_query, \
ln=ln, display_email_alert_part=display_email_alert_part), 'search-results-after')
except:
        # do not fail the search if logging is impossible (e.g. req is None when called from the CLI)
pass
log_query_info("ss", p, f, colls_to_search, results_final_nb_total)
def prs_search_common(kwargs=None, req=None, of=None, cc=None, ln=None, uid=None, _=None, p=None,
p1=None, p2=None, p3=None, colls_to_display=None, f=None, rg=None, sf=None,
so=None, sp=None, rm=None, ot=None, aas=None, f1=None, m1=None, op1=None,
f2=None, m2=None, op2=None, f3=None, m3=None, sc=None, pl=None,
d1y=None, d1m=None, d1d=None, d2y=None, d2m=None, d2d=None,
dt=None, jrec=None, ec=None, action=None, colls_to_search=None, wash_colls_debug=None,
verbose=None, wl=None, em=None, **dummy):
query_representation_in_cache = get_search_results_cache_key(**kwargs)
page_start(req, of, cc, aas, ln, uid, p=create_page_title_search_pattern_info(p, p1, p2, p3), em=em)
if of.startswith("h") and verbose and wash_colls_debug:
write_warning("wash_colls debugging info : %s" % wash_colls_debug, req=req)
prs_search_hosted_collections(kwargs=kwargs, **kwargs)
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action,
em
))
# WebSearch services
if jrec <= 1 and \
(em == "" and True or (EM_REPOSITORY["search_services"] in em)):
user_info = collect_user_info(req)
# display only on first search page, and only if wanted
# when 'em' param set.
for answer_relevance, answer_html in services.get_answers(
req, user_info, of, cc, colls_to_search, p, f, ln):
req.write('<div class="searchservicebox">')
req.write(answer_html)
if verbose > 8:
write_warning("Service relevance: %i" % answer_relevance, req=req)
req.write('</div>')
t1 = os.times()[4]
results_in_any_collection = intbitset()
if aas == 2 and not (p2 or p3):
## 3A add-to-search
output = prs_simple_search(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
elif aas == 1 or (p1 or p2 or p3):
## 3B - advanced search
output = prs_advanced_search(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
else:
## 3C - simple search
output = prs_simple_search(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
if len(results_in_any_collection) == 0 and not kwargs['hosted_colls_actual_or_potential_results_p']:
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return None
    # store the results of this search query in the search results cache if needed:
prs_store_results_in_cache(query_representation_in_cache, results_in_any_collection, **kwargs)
# search stage 4 and 5: intersection with collection universe and sorting/limiting
try:
output = prs_intersect_with_colls_and_apply_search_limits(results_in_any_collection, kwargs=kwargs, **kwargs)
if output is not None:
return output
except KeyboardInterrupt:
# This happens usually from the command line
# The error handling we want is different
raise
except: # no results to display
return None
t2 = os.times()[4]
cpu_time = t2 - t1
kwargs['cpu_time'] = cpu_time
## search stage 6: display results:
return prs_display_results(kwargs=kwargs, **kwargs)
def prs_intersect_with_colls_and_apply_search_limits(results_in_any_collection,
kwargs=None, req=None, of=None,
**dummy):
# search stage 4: intersection with collection universe:
results_final = {}
output = prs_intersect_results_with_collrecs(results_final, results_in_any_collection, kwargs, **kwargs)
if output is not None:
return output
# another external search if we still don't have something
if results_final == {} and not kwargs['hosted_colls_actual_or_potential_results_p']:
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
kwargs['results_final'] = results_final
raise Exception
# search stage 5: apply search option limits and restrictions:
output = prs_apply_search_limits(results_final, kwargs=kwargs, **kwargs)
kwargs['results_final'] = results_final
if output is not None:
return output
def prs_display_results(kwargs=None, results_final=None, req=None, of=None, sf=None,
so=None, sp=None, verbose=None, p=None, p1=None, p2=None, p3=None,
cc=None, ln=None, _=None, ec=None, colls_to_search=None, rm=None, cpu_time=None,
f=None, em=None, jrec=None, rg=None, **dummy
):
## search stage 6: display results:
# split result set into collections
(results_final_nb, results_final_nb_total, results_final_for_all_selected_colls) = prs_split_into_collections(kwargs=kwargs, **kwargs)
# we continue past this point only if there is a hosted collection that has timed out and might offer potential results
if results_final_nb_total == 0 and not kwargs['hosted_colls_potential_results_p']:
if of.startswith("h"):
write_warning("No match found, please enter different search terms.", req=req)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
prs_log_query(kwargs=kwargs, **kwargs)
# yes, some hits found: good!
# collection list may have changed due to not-exact-match-found policy so check it out:
for coll in results_final.keys():
if coll not in colls_to_search:
colls_to_search.append(coll)
# print results overview:
if of == "intbitset":
#return the result as an intbitset
return results_final_for_all_selected_colls
elif of == "id":
# we have been asked to return list of recIDs
recIDs = list(results_final_for_all_selected_colls)
if rm: # do we have to rank?
results_final_for_all_colls_rank_records_output = rank_records(req, rm, 0, results_final_for_all_selected_colls,
p.split() + p1.split() +
p2.split() + p3.split(), verbose, so, of, ln, kwargs['rg'], kwargs['jrec'], kwargs['f'])
if results_final_for_all_colls_rank_records_output[0]:
recIDs = results_final_for_all_colls_rank_records_output[0]
elif sf or (CFG_BIBSORT_ENABLED and SORTING_METHODS): # do we have to sort?
recIDs = sort_records(req, recIDs, sf, so, sp, verbose, of, ln)
return slice_records(recIDs, jrec, rg)
elif of.startswith("h"):
if of not in ['hcs', 'hcs2', 'hcv', 'htcv', 'tlcv']:
# added the hosted_colls_potential_results_p parameter to help print out the overview more accurately
req.write(print_results_overview(colls_to_search, results_final_nb_total, results_final_nb, cpu_time,
ln, ec, hosted_colls_potential_results_p=kwargs['hosted_colls_potential_results_p'], em=em))
kwargs['selected_external_collections_infos'] = print_external_results_overview(req, cc, [p, p1, p2, p3],
f, ec, verbose, ln, print_overview=em == "" or EM_REPOSITORY["overview"] in em)
# print number of hits found for XML outputs:
if of.startswith("x") or of == 'mobb':
req.write("<!-- Search-Engine-Total-Number-Of-Results: %s -->\n" % kwargs['results_final_nb_total'])
# print records:
if of in ['hcs', 'hcs2']:
prs_summarize_records(kwargs=kwargs, **kwargs)
elif of in ['hcv', 'htcv', 'tlcv'] and CFG_INSPIRE_SITE:
from invenio.legacy.search_engine.cvifier import cvify_records
cvify_records(results_final_for_all_selected_colls, of, req, so)
else:
prs_print_records(kwargs=kwargs, **kwargs)
# this is a copy of the prs_display_results with output parts removed, needed for external modules
def prs_rank_results(kwargs=None, results_final=None, req=None, colls_to_search=None,
sf=None, so=None, sp=None, of=None, rm=None, p=None, p1=None, p2=None, p3=None,
verbose=None, **dummy
):
## search stage 6: display results:
# split result set into collections
dummy_results_final_nb, dummy_results_final_nb_total, results_final_for_all_selected_colls = prs_split_into_collections(kwargs=kwargs, **kwargs)
# yes, some hits found: good!
# collection list may have changed due to not-exact-match-found policy so check it out:
for coll in results_final.keys():
if coll not in colls_to_search:
colls_to_search.append(coll)
# we have been asked to return list of recIDs
recIDs = list(results_final_for_all_selected_colls)
if rm: # do we have to rank?
results_final_for_all_colls_rank_records_output = rank_records(req, rm, 0, results_final_for_all_selected_colls,
p.split() + p1.split() +
p2.split() + p3.split(), verbose, so, of, field=kwargs['f'])
if results_final_for_all_colls_rank_records_output[0]:
recIDs = results_final_for_all_colls_rank_records_output[0]
elif sf or (CFG_BIBSORT_ENABLED and SORTING_METHODS): # do we have to sort?
recIDs = sort_records(req, recIDs, sf, so, sp, verbose, of)
return recIDs
def perform_request_cache(req, action="show"):
"""Manipulates the search engine cache."""
req.content_type = "text/html"
req.send_http_header()
req.write("<html>")
out = ""
out += "<h1>Search Cache</h1>"
req.write(out)
# show collection reclist cache:
out = "<h3>Collection reclist cache</h3>"
out += "- collection table last updated: %s" % get_table_update_time('collection')
out += "<br />- reclist cache timestamp: %s" % collection_reclist_cache.timestamp
out += "<br />- reclist cache contents:"
out += "<blockquote>"
for coll in collection_reclist_cache.cache.keys():
if collection_reclist_cache.cache[coll]:
out += "%s (%d)<br />" % (coll, len(collection_reclist_cache.cache[coll]))
out += "</blockquote>"
req.write(out)
# show field i18nname cache:
out = "<h3>Field I18N names cache</h3>"
out += "- fieldname table last updated: %s" % get_table_update_time('fieldname')
out += "<br />- i18nname cache timestamp: %s" % field_i18nname_cache.timestamp
out += "<br />- i18nname cache contents:"
out += "<blockquote>"
for field in field_i18nname_cache.cache.keys():
for ln in field_i18nname_cache.cache[field].keys():
out += "%s, %s = %s<br />" % (field, ln, field_i18nname_cache.cache[field][ln])
out += "</blockquote>"
req.write(out)
# show collection i18nname cache:
out = "<h3>Collection I18N names cache</h3>"
out += "- collectionname table last updated: %s" % get_table_update_time('collectionname')
out += "<br />- i18nname cache timestamp: %s" % collection_i18nname_cache.timestamp
out += "<br />- i18nname cache contents:"
out += "<blockquote>"
for coll in collection_i18nname_cache.cache.keys():
for ln in collection_i18nname_cache.cache[coll].keys():
out += "%s, %s = %s<br />" % (coll, ln, collection_i18nname_cache.cache[coll][ln])
out += "</blockquote>"
req.write(out)
req.write("</html>")
return "\n"
def perform_request_log(req, date=""):
"""Display search log information for given date."""
req.content_type = "text/html"
req.send_http_header()
req.write("<html>")
req.write("<h1>Search Log</h1>")
if date: # case A: display stats for a day
yyyymmdd = string.atoi(date)
req.write("<p><big><strong>Date: %d</strong></big><p>" % yyyymmdd)
req.write("""<table border="1">""")
req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td></tr>" % ("No.", "Time", "Pattern", "Field", "Collection", "Number of Hits"))
# read file:
p = os.popen("grep ^%d %s/search.log" % (yyyymmdd, CFG_LOGDIR), 'r')
lines = p.readlines()
p.close()
# process lines:
i = 0
for line in lines:
try:
datetime, dummy_aas, p, f, c, nbhits = line.split("#")
i += 1
req.write("<tr><td align=\"right\">#%d</td><td>%s:%s:%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>"
% (i, datetime[8:10], datetime[10:12], datetime[12:], p, f, c, nbhits))
except:
                pass # ignore any malformed log lines
req.write("</table>")
else: # case B: display summary stats per day
yyyymm01 = int(time.strftime("%Y%m01", time.localtime()))
yyyymmdd = int(time.strftime("%Y%m%d", time.localtime()))
req.write("""<table border="1">""")
req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></tr>" % ("Day", "Number of Queries"))
for day in range(yyyymm01, yyyymmdd + 1):
p = os.popen("grep -c ^%d %s/search.log" % (day, CFG_LOGDIR), 'r')
for line in p.readlines():
req.write("""<tr><td>%s</td><td align="right"><a href="%s/search/log?date=%d">%s</a></td></tr>""" %
(day, CFG_SITE_URL, day, line))
p.close()
req.write("</table>")
req.write("</html>")
return "\n"
def get_all_field_values(tag):
"""
Return all existing values stored for a given tag.
@param tag: the full tag, e.g. 909C0b
@type tag: string
@return: the list of values
@rtype: list of strings
"""
table = 'bib%02dx' % int(tag[:2])
return [row[0] for row in run_sql("SELECT DISTINCT(value) FROM %s WHERE tag=%%s" % table, (tag, ))]
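# Illustrative call (values are invented for the sketch): for a tag such as
# '980__a' the helper above queries the bib98x table and might return a list
# like ['ARTICLE', 'PREPRINT', 'THESIS'].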
def get_most_popular_field_values(recids, tags, exclude_values=None, count_repetitive_values=True, split_by=0):
"""
Analyze RECIDS and look for TAGS and return most popular values
and the frequency with which they occur sorted according to
descending frequency.
If a value is found in EXCLUDE_VALUES, then do not count it.
If COUNT_REPETITIVE_VALUES is True, then we count every occurrence
of value in the tags. If False, then we count the value only once
regardless of the number of times it may appear in a record.
(But, if the same value occurs in another record, we count it, of
course.)
@return: list of tuples containing tag and its frequency
Example:
>>> get_most_popular_field_values(range(11,20), '980__a')
[('PREPRINT', 10), ('THESIS', 7), ...]
>>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'))
[('Ellis, J', 10), ('Ellis, N', 7), ...]
>>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'), ('Ellis, J'))
[('Ellis, N', 7), ...]
"""
def _get_most_popular_field_values_helper_sorter(val1, val2):
"""Compare VAL1 and VAL2 according to, firstly, frequency, then
secondly, alphabetically."""
compared_via_frequencies = cmp(valuefreqdict[val2],
valuefreqdict[val1])
if compared_via_frequencies == 0:
return cmp(val1.lower(), val2.lower())
else:
return compared_via_frequencies
valuefreqdict = {}
## sanity check:
if not exclude_values:
exclude_values = []
if isinstance(tags, string_types):
tags = (tags,)
## find values to count:
vals_to_count = []
displaytmp = {}
if count_repetitive_values:
# counting technique A: can look up many records at once: (very fast)
for tag in tags:
vals_to_count.extend(get_fieldvalues(recids, tag, sort=False,
split_by=split_by))
else:
# counting technique B: must count record-by-record: (slow)
for recid in recids:
vals_in_rec = []
for tag in tags:
for val in get_fieldvalues(recid, tag, False):
vals_in_rec.append(val)
# do not count repetitive values within this record
# (even across various tags, so need to unify again):
dtmp = {}
for val in vals_in_rec:
dtmp[val.lower()] = 1
displaytmp[val.lower()] = val
vals_in_rec = dtmp.keys()
vals_to_count.extend(vals_in_rec)
## are we to exclude some of found values?
for val in vals_to_count:
if val not in exclude_values:
if val in valuefreqdict:
valuefreqdict[val] += 1
else:
valuefreqdict[val] = 1
## sort by descending frequency of values:
if not CFG_NUMPY_IMPORTABLE:
## original version
out = []
vals = valuefreqdict.keys()
vals.sort(_get_most_popular_field_values_helper_sorter)
for val in vals:
tmpdisplv = ''
if val in displaytmp:
tmpdisplv = displaytmp[val]
else:
tmpdisplv = val
out.append((tmpdisplv, valuefreqdict[val]))
return out
else:
f = [] # frequencies
n = [] # original names
ln = [] # lowercased names
## build lists within one iteration
for (val, freq) in iteritems(valuefreqdict):
f.append(-1 * freq)
if val in displaytmp:
n.append(displaytmp[val])
else:
n.append(val)
ln.append(val.lower())
## sort by frequency (desc) and then by lowercased name.
return [(n[i], -1 * f[i]) for i in numpy.lexsort([ln, f])]
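# Minimal standalone sketch of the numpy ordering above (assumed toy values):
#   >>> import numpy
#   >>> f = [-3, -1, -3]        # negated frequencies
#   >>> ln = ['b', 'c', 'a']    # lowercased display names
#   >>> list(numpy.lexsort([ln, f]))
#   [2, 0, 1]
# numpy.lexsort sorts on its last key first, so the negated frequencies give
# descending frequency with alphabetical tie-breaking, matching the
# non-numpy branch.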
def profile(p="", f="", c=CFG_SITE_NAME):
"""Profile search time."""
import profile as pyprofile
import pstats
pyprofile.run("perform_request_search(p='%s',f='%s', c='%s')" % (p, f, c), "perform_request_search_profile")
p = pstats.Stats("perform_request_search_profile")
p.strip_dirs().sort_stats("cumulative").print_stats()
return 0
def perform_external_collection_search_with_em(req, current_collection, pattern_list, field,
external_collection, verbosity_level=0, lang=CFG_SITE_LANG,
selected_external_collections_infos=None, em=""):
perform_external_collection_search(req, current_collection, pattern_list, field, external_collection,
verbosity_level, lang, selected_external_collections_infos,
print_overview=em == "" or EM_REPOSITORY["overview"] in em,
print_search_info=em == "" or EM_REPOSITORY["search_info"] in em,
print_see_also_box=em == "" or EM_REPOSITORY["see_also_box"] in em,
print_body=em == "" or EM_REPOSITORY["body"] in em)
@cache.memoize(timeout=5)
def get_fulltext_terms_from_search_pattern(search_pattern):
keywords = []
if search_pattern is not None:
for unit in create_basic_search_units(None, search_pattern.encode('utf-8'), None):
bsu_o, bsu_p, bsu_f, bsu_m = unit[0], unit[1], unit[2], unit[3]
if (bsu_o != '-' and bsu_f in [None, 'fulltext']):
if bsu_m == 'a' and bsu_p.startswith('%') and bsu_p.endswith('%'):
                    # remove leading and trailing `%' representing partial phrase search
keywords.append(bsu_p[1:-1])
else:
keywords.append(bsu_p)
return keywords
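# Hedged illustration (the exact units depend on create_basic_search_units):
# a pattern such as u'ellis fulltext:"%higgs boson%"' would be expected to
# yield something like ['ellis', 'higgs boson'], the surrounding '%' of the
# partial-phrase unit being stripped by the branch above.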
def check_user_can_edit_record(req, recid):
""" Check if user has authorization to modify a collection
the recid belongs to
"""
record_collections = get_all_collections_of_a_record(recid)
if not record_collections:
# Check if user has access to all collections
auth_code, auth_message = acc_authorize_action(req, 'runbibedit',
collection='')
if auth_code == 0:
return True
else:
for collection in record_collections:
auth_code, auth_message = acc_authorize_action(req, 'runbibedit',
collection=collection)
if auth_code == 0:
return True
return False
| gpl-2.0 | 3,541,788,180,805,324,000 | 45.394049 | 268 | 0.549861 | false |
mozman/ezdxf | tests/test_05_tools/test_510_byte_stream.py | 1 | 1134 | # Copyright (c) 2020, Manfred Moitzi
# License: MIT License
import pytest
import struct
from ezdxf.tools.binarydata import ByteStream
def test_init():
bs = ByteStream(b'ABCDABC\x00')
assert bs.index == 0
assert len(bs.buffer) == 8
def test_read_ps():
bs = ByteStream(b'ABCDABC\x00')
s = bs.read_padded_string()
assert s == 'ABCDABC'
assert bs.index == 8
assert bs.has_data is False
def test_read_ps_align():
bs = ByteStream(b'ABCD\x00')
s = bs.read_padded_string()
assert s == 'ABCD'
assert bs.index == 8
assert bs.has_data is False
def test_read_pus():
bs = ByteStream(b'A\x00B\x00C\x00D\x00\x00\x00')
s = bs.read_padded_unicode_string()
assert s == 'ABCD'
assert bs.index == 12
assert bs.has_data is False
def test_read_doubles():
data = struct.pack('3d', 1.0, 2.0, 3.0)
bs = ByteStream(data)
x = bs.read_struct('d')[0]
y = bs.read_struct('d')[0]
z = bs.read_struct('d')[0]
assert (x, y, z) == (1.0, 2.0, 3.0)
assert bs.index == 24
assert bs.has_data is False
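# A further illustrative check (assumed behaviour, mirroring the tests above):
# ByteStream(struct.pack('2d', 4.0, 5.0)).read_struct('2d') should return the
# tuple (4.0, 5.0) and advance bs.index by struct.calcsize('2d') == 16.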
if __name__ == '__main__':
pytest.main([__file__])
| mit | 846,370,826,801,595,800 | 21.235294 | 52 | 0.602293 | false |
alindt/Cinnamon | files/usr/lib/cinnamon-settings/modules/cs_default.py | 1 | 14784 | #!/usr/bin/env python
from SettingsWidgets import *
PREF_MEDIA_AUTORUN_NEVER = "autorun-never"
PREF_MEDIA_AUTORUN_X_CONTENT_START_APP = "autorun-x-content-start-app"
PREF_MEDIA_AUTORUN_X_CONTENT_IGNORE = "autorun-x-content-ignore"
PREF_MEDIA_AUTORUN_X_CONTENT_OPEN_FOLDER = "autorun-x-content-open-folder"
CUSTOM_ITEM_ASK = "cc-item-ask"
CUSTOM_ITEM_DO_NOTHING = "cc-item-do-nothing"
CUSTOM_ITEM_OPEN_FOLDER = "cc-item-open-folder"
MEDIA_HANDLING_SCHEMA = "org.cinnamon.desktop.media-handling"
PREF_CONTENT_TYPE = 0
PREF_GEN_CONTENT_TYPE = 1
PREF_LABEL = 2
DEF_CONTENT_TYPE = 0
DEF_LABEL = 1
DEF_HEADING = 2
preferred_app_defs = [
# for web, we need to support text/html,
# application/xhtml+xml and x-scheme-handler/https,
# hence the "*" pattern
( "x-scheme-handler/http", "x-scheme-handler/http", _("_Web") ),
( "x-scheme-handler/mailto", "x-scheme-handler/mailto", _("_Mail") ),
( "text/plain", "text", _("Text") ), #TODO: Add mnemonic once we're out of M16 release to preserve i18n for now
# 1st mimetype is to let us find apps
# 2nd mimetype is to set default handler for (so we handle all of that type, not just a specific format)
( "audio/x-vorbis+ogg", "audio", _("M_usic") ),
( "video/x-ogm+ogg", "video", _("_Video") ),
( "image/jpeg", "image", _("_Photos") )
]
removable_media_defs = [
( "x-content/audio-cdda", _("CD _audio") , _("Select an application for audio CDs")),
( "x-content/video-dvd", _("_DVD video"), _("Select an application for video DVDs") ),
( "x-content/audio-player", _("_Music player"), _("Select an application to run when a music player is connected") ),
( "x-content/image-dcf", _("_Photos"), _("Select an application to run when a camera is connected") ),
( "x-content/unix-software", _("_Software"), _("Select an application for software CDs") )
]
other_defs = [
# translators: these strings are duplicates of shared-mime-info
# strings, just here to fix capitalization of the English originals.
# If the shared-mime-info translation works for your language,
# simply leave these untranslated.
( "x-content/audio-dvd", _("audio DVD") ),
( "x-content/blank-bd", _("blank Blu-ray disc") ),
( "x-content/blank-cd", _("blank CD disc") ),
( "x-content/blank-dvd", _("blank DVD disc") ),
( "x-content/blank-hddvd", _("blank HD DVD disc") ),
( "x-content/video-bluray", _("Blu-ray video disc") ),
( "x-content/ebook-reader", _("e-book reader") ),
( "x-content/video-hddvd", _("HD DVD video disc") ),
( "x-content/image-picturecd", _("Picture CD") ),
( "x-content/video-svcd", _("Super Video CD") ),
( "x-content/video-vcd", _("Video CD") ),
( "x-content/win32-software", _("Windows software") ),
( "x-content/software", _("Software") )
]
class ColumnBox(Gtk.VBox):
def __init__(self, title, content):
super(ColumnBox, self).__init__()
label = Gtk.Label("")
label.set_markup('<b>%s\n</b>' % title)
label.set_alignment(0.5, 0.5)
self.set_homogeneous(False)
self.pack_start(label, False, False, 0)
self.pack_end(content, True, True, 0)
class ButtonTable(Gtk.Table):
def __init__(self, lines):
super(ButtonTable, self).__init__(lines, 2, False)
self.set_row_spacings(8)
self.set_col_spacings(15)
self.attach(Gtk.Label(""), 2, 3, 0, lines, Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL, 0, 0, 0)
self.row = 0
def addRow(self, label, button):
if label:
label = MnemonicLabel(label, button)
self.attach(label, 0, 1, self.row, self.row+1, Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL, 0, 0, 0)
self.attach(button, 1, 2, self.row, self.row+1, Gtk.AttachOptions.FILL, 0, 0, 0)
self.row += 1
def forgetRow(self):
self.row -= 1
class MnemonicLabel(Gtk.Label):
def __init__(self, text, widget):
super(MnemonicLabel, self).__init__("")
self.set_text_with_mnemonic(text)
self.set_alignment(1, 0.5)
self.get_style_context().add_class("dim-label")
self.set_mnemonic_widget(widget)
class DefaultAppChooserButton(Gtk.AppChooserButton):
def __init__(self, content_type, gen_content_type):
super(DefaultAppChooserButton, self).__init__(content_type=content_type)
self.content_type = content_type
self.generic_content_type = gen_content_type
self.set_show_default_item(True)
self.connect("changed", self.onChanged)
def onChanged(self, button):
info = button.get_app_info()
if info:
types = info.get_supported_types()
for t in types:
if self.generic_content_type in t:
if not info.set_as_default_for_type(t):
print "Failed to set '%s' as the default application for '%s'" % (info.get_name(), self.generic_content_type)
if self.content_type == "x-scheme-handler/http":
if info.set_as_default_for_type ("x-scheme-handler/https") == False:
print "Failed to set '%s' as the default application for '%s'" % (info.get_name(), "x-scheme-handler/https")
class CustomAppChooserButton(Gtk.AppChooserButton):
def __init__(self, media_settings, content_type, heading=None):
super(CustomAppChooserButton, self).__init__(content_type=content_type)
self.media_settings = media_settings
content_type = self.get_content_type()
self.set_show_default_item(True)
#fetch preferences for this content type
(pref_start_app, pref_ignore, pref_open_folder) = self.getPreferences()
pref_ask = not pref_start_app and not pref_ignore and not pref_open_folder
info = self.get_app_info()
#append the separator only if we have >= 1 apps in the chooser
if info:
self.append_separator()
icon = Gio.ThemedIcon.new("gtk-dialog-question")
self.append_custom_item(CUSTOM_ITEM_ASK, _("Ask what to do"), icon)
icon = Gio.ThemedIcon.new("gtk-directory")
self.append_custom_item(CUSTOM_ITEM_OPEN_FOLDER, _("Open folder"), icon)
icon = Gio.ThemedIcon.new("gtk-cancel")
self.append_custom_item(CUSTOM_ITEM_DO_NOTHING, _("Do nothing"), icon)
self.set_show_dialog_item(True)
self.set_heading(heading)
if pref_ask:
self.set_active_custom_item(CUSTOM_ITEM_ASK)
elif pref_ignore:
self.set_active_custom_item(CUSTOM_ITEM_DO_NOTHING)
elif pref_open_folder:
self.set_active_custom_item(CUSTOM_ITEM_OPEN_FOLDER)
self.connect("changed", self.onChanged)
self.connect("custom-item-activated", self.onCustomItemActivated)
def onChanged(self, button):
info = self.get_app_info()
if info:
content_type = self.get_content_type()
self.setPreferences(True, False, False)
info.set_as_default_for_type(content_type)
def onCustomItemActivated(self, button, item):
content_type = self.get_content_type()
if item == CUSTOM_ITEM_ASK:
self.setPreferences(False, False, False)
elif item == CUSTOM_ITEM_OPEN_FOLDER:
self.setPreferences(False, False, True)
elif item == CUSTOM_ITEM_DO_NOTHING:
self.setPreferences(False, True, False)
def getPreference(self, settings_key):
strv = self.media_settings.get_strv(settings_key)
return strv != None and self.get_content_type() in strv
def getPreferences(self):
pref_start_app = self.getPreference( PREF_MEDIA_AUTORUN_X_CONTENT_START_APP)
pref_ignore = self.getPreference(PREF_MEDIA_AUTORUN_X_CONTENT_IGNORE)
pref_open_folder = self.getPreference(PREF_MEDIA_AUTORUN_X_CONTENT_OPEN_FOLDER)
return (pref_start_app, pref_ignore, pref_open_folder)
def setPreference(self, pref_value, settings_key):
array = self.media_settings.get_strv(settings_key)
content_type = self.get_content_type()
array = [ v for v in array if v != content_type ]
if pref_value:
array.append(content_type)
self.media_settings.set_strv(settings_key, array)
def setPreferences(self, pref_start_app, pref_ignore, pref_open_folder):
self.setPreference(pref_start_app, PREF_MEDIA_AUTORUN_X_CONTENT_START_APP)
self.setPreference(pref_ignore, PREF_MEDIA_AUTORUN_X_CONTENT_IGNORE)
self.setPreference(pref_open_folder, PREF_MEDIA_AUTORUN_X_CONTENT_OPEN_FOLDER)
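    # Illustrative effect of the helpers above (content type taken from
    # removable_media_defs; stored values depend on the user's choice):
    # picking "Open folder" for "x-content/audio-cdda" calls
    # setPreferences(False, False, True), which removes the type from the
    # start-app and ignore lists and appends it to
    # "autorun-x-content-open-folder" in org.cinnamon.desktop.media-handling.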
class OtherTypeDialog(Gtk.Dialog):
def __init__(self, media_settings):
super(OtherTypeDialog, self).__init__(_("Other Media"),
None,
0,
(_("Close"), Gtk.ResponseType.OK))
self.set_default_size(350, 100)
self.media_settings = media_settings
list_store = Gtk.ListStore(str, str)
list_store.set_sort_column_id (1, Gtk.SortType.ASCENDING)
self.type_combo = Gtk.ComboBox.new_with_model(list_store)
self.application_combo = None
content_types = Gio.content_types_get_registered()
for content_type in content_types:
if self.acceptContentType(content_type):
list_store.append([self.getDescription(content_type), content_type])
renderer = Gtk.CellRendererText()
self.type_combo.pack_start(renderer, True)
self.type_combo.add_attribute (renderer,"text", 0)
self.type_combo.set_active(False)
table = ButtonTable(2)
table.addRow(_("_Type:"), self.type_combo)
self.table = table
self.vbox.pack_start(ColumnBox(_("Select how other media should be handled"), table), True, True, 2)
self.vbox.show()
self.type_combo.connect("changed", self.onTypeComboChanged)
def acceptContentType(self, content_type):
if not content_type.startswith("x-content/"):
return False
for d in removable_media_defs:
if Gio.content_type_is_a(content_type, d[DEF_CONTENT_TYPE]):
return False
return True
    def getDescription(self, content_type):
        description = None
        for d in other_defs:
if content_type == d[DEF_CONTENT_TYPE]:
s = d[DEF_LABEL]
if s == _(s):
description = Gio.content_type_get_description(content_type)
else:
description = s
break
if description == None:
print "Content type '%s' is missing from the info panel" % content_type
return Gio.content_type_get_description(content_type)
return description
def doShow(self, topLevel):
self.set_transient_for(topLevel)
self.set_modal(True)
self.connect("response", self.onResponse)
self.connect("delete-event", self.onDelete)
self.onTypeComboChanged(self.type_combo)
self.present()
self.show_all()
def onDelete(self, *args):
return self.hide_on_delete()
def doHide(self):
self.hide()
if self.application_combo != None:
self.application_combo.destroy()
self.application_combo = None
self.table.forgetRow()
def onResponse(self, dialog, response):
self.doHide()
def onTypeComboChanged(self, type_combo):
iter = type_combo.get_active_iter()
if not iter:
return
model = type_combo.get_model()
if not model:
return
x_content_type = model.get_value(iter, 1)
heading = model.get_value(iter, 0)
action_container = Gtk.HBox()
if self.application_combo != None:
self.application_combo.destroy()
self.table.forgetRow()
self.application_combo = CustomAppChooserButton(self.media_settings, x_content_type, heading)
self.application_combo.show()
self.table.addRow(_("_Action:"), self.application_combo)
class Module:
def __init__(self, content_box):
keywords = _("media, defaults, applications, programs, removable, browser, email, calendar, music, videos, photos, images, cd, autostart")
advanced = False
sidePage = SidePage(_("Applications & Removable Media"), "default-applications.svg", keywords, advanced, content_box)
self.sidePage = sidePage
self.name = "default"
self.category = "prefs"
hbox = Gtk.HBox()
hbox.set_homogeneous(True)
sidePage.add_widget(hbox, False)
hbox.pack_start(self.setupDefaultApps(), False, False, 0)
hbox.pack_start(self.setupMedia(), False, False, 0)
def setupDefaultApps(self):
table = ButtonTable(len(preferred_app_defs))
for d in preferred_app_defs:
table.addRow(d[PREF_LABEL], DefaultAppChooserButton(d[PREF_CONTENT_TYPE], d[PREF_GEN_CONTENT_TYPE]))
return ColumnBox(_("Default Applications"), table)
def onMoreClicked(self, button):
self.other_type_dialog.doShow(button.get_toplevel())
def setupMedia(self):
self.media_settings = Gio.Settings.new(MEDIA_HANDLING_SCHEMA)
self.other_type_dialog = OtherTypeDialog(self.media_settings)
hbox = Gtk.VBox()
hboxToggle = Gtk.VBox()
hbox.add(hboxToggle)
table = ButtonTable(len(removable_media_defs)+1)
hboxToggle.add(table)
for d in removable_media_defs:
table.addRow(d[DEF_LABEL], CustomAppChooserButton(self.media_settings, d[DEF_CONTENT_TYPE], d[DEF_HEADING]))
more = Gtk.Button.new_with_mnemonic(_("_Other Media..."))
more.connect("clicked", self.onMoreClicked)
table.addRow(None, more)
never = Gtk.CheckButton.new_with_mnemonic(_("_Never prompt or start programs on media insertion"))
hbox.add(never)
self.media_settings.bind(PREF_MEDIA_AUTORUN_NEVER, never, "active", Gio.SettingsBindFlags.DEFAULT)
self.media_settings.bind(PREF_MEDIA_AUTORUN_NEVER, hboxToggle, "sensitive", Gio.SettingsBindFlags.INVERT_BOOLEAN)
return ColumnBox(_("Select how media should be handled"), hbox)
| gpl-2.0 | -4,939,759,339,208,699,000 | 39.952909 | 150 | 0.603084 | false |
bugzPDX/airmozilla | airmozilla/manage/views/dashboard.py | 1 | 5401 | import datetime
from django.contrib.auth.models import User
from django.shortcuts import render
from django.utils import timezone
from django.db.models import Sum
from jsonview.decorators import json_view
from airmozilla.main.models import (
Event,
SuggestedEvent,
Picture,
EventRevision,
)
from airmozilla.comments.models import Comment
from .decorators import staff_required
@staff_required
def dashboard(request):
"""Management home / explanation page."""
return render(request, 'manage/dashboard.html')
@staff_required
@json_view
def dashboard_data(request):
context = {}
now = timezone.now()
today = now.replace(hour=0, minute=0, second=0, microsecond=0)
tomorrow = today + datetime.timedelta(days=1)
yesterday = today - datetime.timedelta(days=1)
this_week = today - datetime.timedelta(days=today.weekday())
next_week = this_week + datetime.timedelta(days=7)
last_week = this_week - datetime.timedelta(days=7)
this_month = today.replace(day=1)
next_month = this_month
while next_month.month == this_month.month:
next_month += datetime.timedelta(days=1)
last_month = (this_month - datetime.timedelta(days=1)).replace(day=1)
this_year = this_month.replace(month=1)
next_year = this_year.replace(year=this_year.year + 1)
last_year = this_year.replace(year=this_year.year - 1)
context['groups'] = []
def get_counts(qs, key):
counts = {}
def make_filter(gte=None, lt=None):
filter = {}
if gte is not None:
filter['%s__gte' % key] = gte
if lt is not None:
filter['%s__lt' % key] = lt
return filter
counts['today'] = qs.filter(
**make_filter(gte=today, lt=tomorrow)
).count()
counts['yesterday'] = qs.filter(
**make_filter(gte=yesterday, lt=today)).count()
counts['this_week'] = qs.filter(
**make_filter(gte=this_week, lt=next_week)).count()
counts['last_week'] = qs.filter(
**make_filter(gte=last_week, lt=this_week)).count()
counts['this_month'] = qs.filter(
**make_filter(gte=this_month, lt=next_month)).count()
counts['last_month'] = qs.filter(
**make_filter(gte=last_month, lt=this_month)).count()
counts['this_year'] = qs.filter(
**make_filter(gte=this_year, lt=next_year)).count()
counts['last_year'] = qs.filter(
**make_filter(gte=last_year, lt=this_year)).count()
counts['ever'] = qs.count()
return counts
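    # Example of the dynamic filter built by make_filter above, with
    # key='start_time' and the date boundaries computed earlier:
    #   make_filter(gte=today, lt=tomorrow)
    #   -> {'start_time__gte': today, 'start_time__lt': tomorrow}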
# Events
events = Event.objects.exclude(status=Event.STATUS_REMOVED)
counts = get_counts(events, 'start_time')
context['groups'].append({
'name': 'New Events',
'counts': counts
})
# Suggested Events
counts = get_counts(SuggestedEvent.objects.all(), 'created')
context['groups'].append({
'name': 'Requested Events',
'counts': counts
})
# Users
counts = get_counts(User.objects.all(), 'date_joined')
context['groups'].append({
'name': 'New Users',
'counts': counts
})
# Comments
counts = get_counts(Comment.objects.all(), 'created')
context['groups'].append({
'name': 'Comments',
'counts': counts
})
# Event revisions
counts = get_counts(EventRevision.objects.all(), 'created')
context['groups'].append({
'name': 'Event Revisions',
'counts': counts
})
# Pictures
counts = get_counts(Picture.objects.all(), 'created')
context['groups'].append({
'name': 'Pictures',
'counts': counts
})
def get_duration_totals(qs):
key = 'start_time'
def make_filter(gte=None, lt=None):
filter = {}
if gte is not None:
filter['%s__gte' % key] = gte
if lt is not None:
filter['%s__lt' % key] = lt
return filter
counts = {}
def sum(elements):
seconds = elements.aggregate(Sum('duration'))['duration__sum']
seconds = seconds or 0 # in case it's None
minutes = seconds / 60
hours = minutes / 60
if hours > 1:
return "%dh" % hours
elif minutes > 1:
return "%dm" % minutes
return "%ds" % seconds
counts['today'] = sum(qs.filter(**make_filter(gte=today)))
counts['yesterday'] = sum(qs.filter(
**make_filter(gte=yesterday, lt=today)))
counts['this_week'] = sum(qs.filter(**make_filter(gte=this_week)))
counts['last_week'] = sum(qs.filter(
**make_filter(gte=last_week, lt=this_week)))
counts['this_month'] = sum(qs.filter(**make_filter(gte=this_month)))
counts['last_month'] = sum(qs.filter(
**make_filter(gte=last_month, lt=this_month)))
counts['this_year'] = sum(qs.filter(**make_filter(gte=this_year)))
counts['last_year'] = sum(qs.filter(
**make_filter(gte=last_year, lt=this_year)))
counts['ever'] = sum(qs)
return counts
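    # Rough examples of the formatting done in sum() above (illustrative
    # totals): 7200 aggregated seconds -> "2h", 150 -> "2m", 45 -> "45s".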
# Exceptional
counts = get_duration_totals(Event.objects.exclude(duration__isnull=True))
context['groups'].append({
'name': 'Total Event Durations',
'counts': counts
})
return context
| bsd-3-clause | -7,759,845,208,928,086,000 | 29.514124 | 78 | 0.578967 | false |
sebastianwelsh/artifacts | tests/reader_test.py | 1 | 8292 | # -*- coding: utf-8 -*-
"""Tests for the artifact definitions readers."""
import io
import os
import unittest
from artifacts import definitions
from artifacts import errors
from artifacts import reader
class YamlArtifactsReaderTest(unittest.TestCase):
"""Class to test the YAML artifacts reader."""
def testReadFileObject(self):
"""Tests the ReadFileObject function."""
artifact_reader = reader.YamlArtifactsReader()
test_file = os.path.join('test_data', 'definitions.yaml')
with open(test_file, 'rb') as file_object:
artifact_definitions = list(artifact_reader.ReadFileObject(file_object))
self.assertEqual(len(artifact_definitions), 7)
# Artifact with file source type.
artifact_definition = artifact_definitions[0]
self.assertEqual(artifact_definition.name, 'SecurityEventLogEvtx')
expected_description = (
'Windows Security Event log for Vista or later systems.')
self.assertEqual(artifact_definition.description, expected_description)
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator, definitions.TYPE_INDICATOR_FILE)
expected_paths = sorted([
'%%environ_systemroot%%\\System32\\winevt\\Logs\\Security.evtx'])
self.assertEqual(sorted(source_type.paths), expected_paths)
self.assertEqual(len(artifact_definition.conditions), 1)
expected_condition = 'os_major_version >= 6'
self.assertEqual(artifact_definition.conditions[0], expected_condition)
self.assertEqual(len(artifact_definition.labels), 1)
self.assertEqual(artifact_definition.labels[0], 'Logs')
self.assertEqual(len(artifact_definition.supported_os), 1)
self.assertEqual(artifact_definition.supported_os[0], 'Windows')
self.assertEqual(len(artifact_definition.urls), 1)
expected_url = (
'http://www.forensicswiki.org/wiki/Windows_XML_Event_Log_(EVTX)')
self.assertEqual(artifact_definition.urls[0], expected_url)
# Artifact with Windows Registry key source type.
artifact_definition = artifact_definitions[1]
self.assertEqual(
artifact_definition.name, 'AllUsersProfileEnvironmentVariable')
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY)
expected_keys = sorted([
('HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows NT\\CurrentVersion\\'
'ProfileList\\ProfilesDirectory'),
('HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows NT\\CurrentVersion\\'
'ProfileList\\AllUsersProfile')])
self.assertEqual(sorted(source_type.keys), expected_keys)
# Artifact with Windows Registry value source type.
artifact_definition = artifact_definitions[2]
self.assertEqual(artifact_definition.name, 'CurrentControlSet')
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE)
self.assertEqual(len(source_type.key_value_pairs), 1)
key_value_pair = source_type.key_value_pairs[0]
expected_key = 'HKEY_LOCAL_MACHINE\\SYSTEM\\Select'
self.assertEqual(key_value_pair['key'], expected_key)
self.assertEqual(key_value_pair['value'], 'Current')
# Artifact with WMI query source type.
artifact_definition = artifact_definitions[3]
self.assertEqual(artifact_definition.name, 'WMIProfileUsersHomeDir')
expected_provides = sorted(['users.homedir'])
self.assertEqual(sorted(artifact_definition.provides), expected_provides)
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator, definitions.TYPE_INDICATOR_WMI_QUERY)
expected_query = (
'SELECT * FROM Win32_UserProfile WHERE SID=\'%%users.sid%%\'')
self.assertEqual(source_type.query, expected_query)
# Artifact with artifact definition source type.
artifact_definition = artifact_definitions[4]
self.assertEqual(artifact_definition.name, 'EventLogs')
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator, definitions.TYPE_INDICATOR_ARTIFACT)
# Artifact with command definition source type.
artifact_definition = artifact_definitions[5]
self.assertEqual(artifact_definition.name, 'RedhatPackagesList')
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator, definitions.TYPE_INDICATOR_COMMAND)
# Artifact with COMMAND definition collector definition.
artifact_definition = artifact_definitions[5]
self.assertEqual(artifact_definition.name, 'RedhatPackagesList')
self.assertEqual(len(artifact_definition.sources), 1)
collector_definition = artifact_definition.sources[0]
self.assertNotEqual(collector_definition, None)
self.assertEqual(
collector_definition.type_indicator,
definitions.TYPE_INDICATOR_COMMAND)
def testBadKey(self):
"""Tests top level keys are correct."""
artifact_reader = reader.YamlArtifactsReader()
file_object = io.StringIO(initial_value=u"""name: BadKey
doc: bad extra key.
sources:
- type: ARTIFACT
attributes:
names:
- 'SystemEventLogEvtx'
extra_key: 'wrong'
labels: [Logs]
supported_os: [Windows]
""")
with self.assertRaises(errors.FormatError):
_ = list(artifact_reader.ReadFileObject(file_object))
def testMissingSources(self):
"""Tests sources is present."""
artifact_reader = reader.YamlArtifactsReader()
file_object = io.StringIO(initial_value=u"""name: BadSources
doc: must have one sources.
labels: [Logs]
supported_os: [Windows]
""")
with self.assertRaises(errors.FormatError):
_ = list(artifact_reader.ReadFileObject(file_object))
def testBadSupportedOS(self):
"""Tests supported_os is checked correctly."""
artifact_reader = reader.YamlArtifactsReader()
file_object = io.StringIO(initial_value=u"""name: BadSupportedOS
doc: supported_os should be an array of strings.
sources:
- type: ARTIFACT
attributes:
names:
- 'SystemEventLogEvtx'
labels: [Logs]
supported_os: Windows
""")
with self.assertRaises(errors.FormatError):
_ = list(artifact_reader.ReadFileObject(file_object))
def testBadLabels(self):
"""Tests labels is checked correctly."""
artifact_reader = reader.YamlArtifactsReader()
file_object = io.StringIO(initial_value=u"""name: BadLabel
doc: badlabel.
sources:
- type: ARTIFACT
attributes:
names:
- 'SystemEventLogEvtx'
labels: Logs
supported_os: [Windows]
""")
with self.assertRaises(errors.FormatError):
_ = list(artifact_reader.ReadFileObject(file_object))
def testMissingDoc(self):
"""Tests doc is required."""
artifact_reader = reader.YamlArtifactsReader()
file_object = io.StringIO(initial_value=u"""name: NoDoc
sources:
- type: ARTIFACT
attributes:
names:
- 'SystemEventLogEvtx'
""")
with self.assertRaises(errors.FormatError):
_ = list(artifact_reader.ReadFileObject(file_object))
def testReadFile(self):
"""Tests the ReadFile function."""
artifact_reader = reader.YamlArtifactsReader()
test_file = os.path.join('test_data', 'definitions.yaml')
artifact_definitions = list(artifact_reader.ReadFile(test_file))
self.assertEqual(len(artifact_definitions), 7)
def testReadDirectory(self):
"""Tests the ReadDirectory function."""
artifact_reader = reader.YamlArtifactsReader()
artifact_definitions = list(artifact_reader.ReadDirectory('test_data'))
self.assertEqual(len(artifact_definitions), 7)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 2,995,640,185,578,991,000 | 33.406639 | 80 | 0.721298 | false |
uclouvain/osis | ddd/logic/application/test/use_case/write/test_renew_multiple_attributions_service.py | 1 | 9080 | # ############################################################################
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
# ############################################################################
import copy
import datetime
from decimal import Decimal
import mock
import uuid
from django.test import TestCase
from attribution.models.enums.function import Functions
from base.ddd.utils.business_validator import MultipleBusinessExceptions
from base.models.enums.vacant_declaration_type import VacantDeclarationType
from ddd.logic.application.commands import RenewMultipleAttributionsCommand
from ddd.logic.application.domain.builder.applicant_identity_builder import ApplicantIdentityBuilder
from ddd.logic.application.domain.model.applicant import Applicant
from ddd.logic.application.domain.model.application import Application, ApplicationIdentity
from ddd.logic.application.domain.model.application_calendar import ApplicationCalendar, ApplicationCalendarIdentity
from ddd.logic.application.domain.model._attribution import Attribution
from ddd.logic.application.domain.model._allocation_entity import AllocationEntity
from ddd.logic.application.domain.model.vacant_course import VacantCourse, VacantCourseIdentity
from ddd.logic.application.domain.validator.exceptions import AttributionAboutToExpireNotFound, \
VolumesAskedShouldBeLowerOrEqualToVolumeAvailable, ApplicationAlreadyExistsException
from ddd.logic.learning_unit.domain.model.learning_unit import LearningUnitIdentity
from ddd.logic.shared_kernel.academic_year.builder.academic_year_identity_builder import AcademicYearIdentityBuilder
from infrastructure.application.repository.applicant_in_memory import ApplicantInMemoryRepository
from infrastructure.application.repository.application_calendar_in_memory import ApplicationCalendarInMemoryRepository
from infrastructure.application.repository.application_in_memory import ApplicationInMemoryRepository
from infrastructure.application.repository.vacant_course_in_memory import VacantCourseInMemoryRepository
from infrastructure.messages_bus import message_bus_instance
class TestRenewMultipleAttributionsService(TestCase):
@classmethod
def setUpTestData(cls):
today = datetime.date.today()
cls.application_calendar = ApplicationCalendar(
entity_id=ApplicationCalendarIdentity(uuid=uuid.uuid4()),
authorized_target_year=AcademicYearIdentityBuilder.build_from_year(year=2019),
start_date=today - datetime.timedelta(days=5),
end_date=today + datetime.timedelta(days=10),
)
cls.attribution_about_to_expire = Attribution(
course_id=LearningUnitIdentity(
code='LDROI1200',
academic_year=AcademicYearIdentityBuilder.build_from_year(year=2018)
),
course_title="Introduction au droit",
function=Functions.HOLDER,
end_year=AcademicYearIdentityBuilder.build_from_year(year=2018),
start_year=AcademicYearIdentityBuilder.build_from_year(year=2017),
lecturing_volume=Decimal(5),
practical_volume=None,
is_substitute=False
)
cls.global_id = '123456789'
cls.applicant = Applicant(
entity_id=ApplicantIdentityBuilder.build_from_global_id(global_id=cls.global_id),
first_name="Thomas",
last_name="Durant",
attributions=[cls.attribution_about_to_expire]
)
cls.vacant_course = VacantCourse(
entity_id=VacantCourseIdentity(
code='LDROI1200',
academic_year=cls.application_calendar.authorized_target_year
),
lecturing_volume_available=Decimal(10),
practical_volume_available=Decimal(50),
title='Introduction au droit',
vacant_declaration_type=VacantDeclarationType.RESEVED_FOR_INTERNS,
is_in_team=False,
allocation_entity=AllocationEntity(code='DRT')
)
def setUp(self) -> None:
self.applicant_repository = ApplicantInMemoryRepository([self.applicant])
self.application_calendar_repository = ApplicationCalendarInMemoryRepository([self.application_calendar])
self.vacant_course_repository = VacantCourseInMemoryRepository([self.vacant_course])
self.application_repository = ApplicationInMemoryRepository([])
message_bus_patcher = mock.patch.multiple(
'infrastructure.messages_bus',
ApplicationRepository=lambda: self.application_repository,
ApplicantRepository=lambda: self.applicant_repository,
VacantCourseRepository=lambda: self.vacant_course_repository,
ApplicationCalendarRepository=lambda: self.application_calendar_repository
)
message_bus_patcher.start()
self.addCleanup(message_bus_patcher.stop)
self.message_bus = message_bus_instance
def test_assert_renewal_is_correctly_processed(self):
cmd = RenewMultipleAttributionsCommand(global_id=self.global_id, renew_codes=['LDROI1200'])
self.message_bus.invoke(cmd)
self.assertEqual(
len(self.application_repository.search(applicant_id=self.applicant.entity_id)),
1
)
def test_renewal_multiple_case_one_not_about_to_renewal_assert_applications_not_created_at_all(self):
cmd = RenewMultipleAttributionsCommand(global_id=self.global_id, renew_codes=['LDROI1200', 'LDROI2500'])
with self.assertRaises(MultipleBusinessExceptions) as cm:
self.message_bus.invoke(cmd)
exceptions_raised = cm.exception.exceptions
self.assertTrue(
any([
exception for exception in exceptions_raised
if isinstance(exception, AttributionAboutToExpireNotFound)
])
)
self.assertEqual(
len(self.application_repository.search(applicant_id=self.applicant.entity_id)),
0
)
def test_renewal_multiple_case_vacant_course_with_less_availability_than_attribution_assert_raise_exception(self):
vacant_course_with_less_availability = copy.deepcopy(self.vacant_course)
vacant_course_with_less_availability.practical_volume_available = Decimal(1)
vacant_course_with_less_availability.lecturing_volume_available = Decimal(1)
self.vacant_course_repository = VacantCourseInMemoryRepository([vacant_course_with_less_availability])
cmd = RenewMultipleAttributionsCommand(global_id=self.global_id, renew_codes=['LDROI1200'])
with self.assertRaises(MultipleBusinessExceptions) as cm:
self.message_bus.invoke(cmd)
exceptions_raised = cm.exception.exceptions
self.assertTrue(
any([
exception for exception in exceptions_raised
if isinstance(exception, VolumesAskedShouldBeLowerOrEqualToVolumeAvailable)
])
)
def test_renewal_multiple_case_already_applied_on_course_assert_raise_exception(self):
application = Application(
entity_id=ApplicationIdentity(uuid=uuid.uuid4()),
applicant_id=self.applicant.entity_id,
vacant_course_id=self.vacant_course.entity_id,
lecturing_volume=self.vacant_course.lecturing_volume_available,
practical_volume=self.vacant_course.practical_volume_available,
remark='',
course_summary='',
)
self.application_repository = ApplicationInMemoryRepository([application])
cmd = RenewMultipleAttributionsCommand(global_id=self.global_id, renew_codes=['LDROI1200'])
with self.assertRaises(MultipleBusinessExceptions) as cm:
self.message_bus.invoke(cmd)
exceptions_raised = cm.exception.exceptions
self.assertTrue(
any([
exception for exception in exceptions_raised
if isinstance(exception, ApplicationAlreadyExistsException)
])
)
| agpl-3.0 | -76,823,991,052,303,460 | 48.342391 | 118 | 0.708007 | false |
matthewgall/dnsjson.com | app.py | 1 | 5654 | #!/usr/bin/env python3
import os, logging, argparse, json, datetime
import requests
import dns.resolver
from bottle import route, request, response, redirect, hook, error, default_app, view, static_file, template
def set_content_type(fn):
def _return_type(*args, **kwargs):
if request.headers.get('Accept') == "application/json":
response.headers['Content-Type'] = 'application/json'
if request.headers.get('Accept') == "text/plain":
response.headers['Content-Type'] = 'text/plain'
if request.method != 'OPTIONS':
return fn(*args, **kwargs)
return _return_type
def enable_cors(fn):
def _enable_cors(*args, **kwargs):
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
if request.method != 'OPTIONS':
return fn(*args, **kwargs)
return _enable_cors
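# Content-negotiation sketch for the decorators above (hypothetical request):
# a GET to /example.com/A with "Accept: application/json" makes loadRecord()
# return the JSON payload, while "Accept: text/plain" (or the .txt extension)
# yields the CRLF-joined record list; enable_cors sets the
# Access-Control-Allow-* headers before dispatching to the wrapped handler.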
def resolveDomain(domain, recordType, args):
records = []
if args.doh:
try:
payload = {
'name': domain,
'type': recordType
}
data = requests.get("{}".format(args.resolver), params=payload)
for rec in data.json()['Answer']:
records.append(rec['data'])
except:
return records
return records
else:
try:
resolver = dns.resolver.Resolver()
resolver.nameservers = args.resolver.split(',')
if recordType in args.records.split(','):
lookup = resolver.resolve(domain, recordType)
for data in lookup:
if recordType in ['A', 'AAAA']:
records.append(data.address)
elif recordType in ['TXT']:
for rec in data.strings:
records.append(rec.decode("utf-8").replace('"', '').strip())
else:
records.append(str(data).replace('"', '').strip())
return records
except dns.resolver.NXDOMAIN:
return records
except dns.resolver.NoAnswer:
return records
except dns.exception.Timeout:
return records
except dns.resolver.NoNameservers:
return records
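# Illustrative call (hypothetical values): with the defaults parsed below
# (DoH disabled, resolver '8.8.8.8'), resolveDomain('example.com', 'A', args)
# would return a list of address strings such as ['93.184.216.34'], or an
# empty list on NXDOMAIN or timeout as handled above.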
@error('404')
@error('403')
def returnError(code, msg, contentType="text/plain"):
response.status = int(code)
response.content_type = contentType
return template('error')
@route('/static/<filepath:path>')
def static(filepath):
return static_file(filepath, root='views/static')
@route('/servers')
def servers():
try:
response.content_type = 'text/plain'
return "\r\n".join(args.resolver.split(","))
except:
return "Unable to open servers file."
@route('/version')
def version():
try:
dirname, filename = os.path.split(os.path.abspath(__file__))
del filename
f = open(os.getenv('VERSION_PATH', dirname + '/.git/refs/heads/master'), 'r')
content = f.read()
response.content_type = 'text/plain'
return content
except:
return "Unable to open version file."
@route('/<record>')
def route_redirect(record):
return redirect("/{}/A".format(record))
@route('/<record>/<type>')
@route('/<record>/<type>.<ext>')
@set_content_type
@enable_cors
def loadRecord(record, type='A', ext='html'):
try:
if record == "":
raise ValueError
if not ext in ["html","txt", "text", "json"]:
raise ValueError
if not type.upper() in args.records.split(','):
raise ValueError
except ValueError:
return returnError(404, "Not Found", "text/html")
if ext in ["json"]:
response.content_type = 'application/json'
if ext in ["txt", "text"]:
response.content_type = 'text/plain'
    # Resolve the requested record type for the given name using the configured resolver
data = resolveDomain(record, type.upper(), args)
if response.content_type == 'application/json':
return json.dumps({
'results': {
'name': record,
'type': type.upper(),
'records': data,
}
})
elif response.content_type == "text/plain":
return "\r\n".join(data)
else:
return template('rec', {
'name': record,
'type': type.upper(),
'records': data,
'recTypes': args.records.split(',')
})
@route('/', ('GET', 'POST'))
def index():
if request.method == "POST":
recordName = request.forms.get('recordName', '')
recordType = request.forms.get('recordType', '')
if recordName != '' and recordType in args.records.split(','):
return redirect("/{}/{}".format(recordName, recordType))
else:
return returnError(404, "We were not able to figure out what you were asking for", "text/html")
return template("home", {
'recTypes': args.records.split(',')
})
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Server settings
parser.add_argument("-i", "--host", default=os.getenv('HOST', '127.0.0.1'), help="server ip")
parser.add_argument("-p", "--port", default=os.getenv('PORT', 5000), help="server port")
# Redis settings
parser.add_argument("--redis", default=os.getenv('REDIS', 'redis://localhost:6379/0'), help="redis connection string")
# Application settings
parser.add_argument("--doh", help="use DNS-over-HTTPS and treat --resolver as DNS-over-HTTPS capable (beta)", action="store_true")
parser.add_argument("--records", default=os.getenv('RECORDS', "A,AAAA,CAA,CNAME,DS,DNSKEY,MX,NS,NSEC,NSEC3,RRSIG,SOA,TXT"), help="supported records")
parser.add_argument("--resolver", default=os.getenv('RESOLVER', '8.8.8.8'), help="resolver address")
# Verbose mode
parser.add_argument("--verbose", "-v", help="increase output verbosity", action="store_true")
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
try:
app = default_app()
app.run(host=args.host, port=args.port, server='tornado')
except:
log.error("Unable to start server on {}:{}".format(args.host, args.port)) | mit | -8,385,073,387,106,826,000 | 28.763158 | 150 | 0.67156 | false |
anirudhSK/chromium | tools/perf/benchmarks/page_cycler.py | 1 | 3123 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import page_cycler
from telemetry import test
class PageCyclerBloat(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/page_cycler/bloat.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerDhtml(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/page_cycler/dhtml.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerIntlArFaHe(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/intl_ar_fa_he.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerIntlEsFrPtBr(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/intl_es_fr_pt-BR.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerIntlHiRu(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/intl_hi_ru.json'
options = {'pageset_repeat_iters': 10}
@test.Disabled('win') # crbug.com/330909
class PageCyclerIntlJaZh(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/intl_ja_zh.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerIntlKoThVi(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/intl_ko_th_vi.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerMorejs(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/page_cycler/morejs.json'
options = {'pageset_repeat_iters': 20,
'cold_load_percent': 50}
class PageCyclerMoz(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/page_cycler/moz.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerNetsimTop10(test.Test):
"""Measures load time of the top 10 sites under simulated cable network."""
tag = 'netsim'
test = page_cycler.PageCycler
page_set = 'page_sets/top_10.json'
options = {
'cold_load_percent': 100,
'extra_wpr_args': [
'--shaping_type=proxy',
'--net=cable'
],
'pageset_repeat_iters': 5,
}
def __init__(self):
super(PageCyclerNetsimTop10, self).__init__()
# TODO: This isn't quite right.
# This option will still apply to page cyclers that run after this one.
self.test.clear_cache_before_each_run = True
class PageCyclerTop10Mobile(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/top_10_mobile.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerKeyMobileSites(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/key_mobile_sites.json'
options = {'pageset_repeat_iters': 10}
class PageCyclerToughLayoutCases(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/tough_layout_cases.json'
options = {'pageset_repeat_iters': 10}
# crbug.com/273986: This test is really flakey on xp.
# crbug.com/341843: This test is always timing out on Android.
@test.Disabled('android', 'win')
class PageCyclerTypical25(test.Test):
test = page_cycler.PageCycler
page_set = 'page_sets/typical_25.json'
options = {'pageset_repeat_iters': 10}
| bsd-3-clause | -8,354,810,265,709,558,000 | 27.390909 | 77 | 0.706372 | false |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/offline_user_data_job_service/transports/base.py | 1 | 5564 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v7.resources.types import offline_user_data_job
from google.ads.googleads.v7.services.types import offline_user_data_job_service
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-ads',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class OfflineUserDataJobServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for OfflineUserDataJobService."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/adwords',
)
def __init__(
self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.create_offline_user_data_job: gapic_v1.method.wrap_method(
self.create_offline_user_data_job,
default_timeout=None,
client_info=client_info,
),
self.get_offline_user_data_job: gapic_v1.method.wrap_method(
self.get_offline_user_data_job,
default_timeout=None,
client_info=client_info,
),
self.add_offline_user_data_job_operations: gapic_v1.method.wrap_method(
self.add_offline_user_data_job_operations,
default_timeout=None,
client_info=client_info,
),
self.run_offline_user_data_job: gapic_v1.method.wrap_method(
self.run_offline_user_data_job,
default_timeout=None,
client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError
@property
def create_offline_user_data_job(self) -> typing.Callable[
[offline_user_data_job_service.CreateOfflineUserDataJobRequest],
offline_user_data_job_service.CreateOfflineUserDataJobResponse]:
raise NotImplementedError
@property
def get_offline_user_data_job(self) -> typing.Callable[
[offline_user_data_job_service.GetOfflineUserDataJobRequest],
offline_user_data_job.OfflineUserDataJob]:
raise NotImplementedError
@property
def add_offline_user_data_job_operations(self) -> typing.Callable[
[offline_user_data_job_service.AddOfflineUserDataJobOperationsRequest],
offline_user_data_job_service.AddOfflineUserDataJobOperationsResponse]:
raise NotImplementedError
@property
def run_offline_user_data_job(self) -> typing.Callable[
[offline_user_data_job_service.RunOfflineUserDataJobRequest],
operations_pb2.Operation]:
raise NotImplementedError
__all__ = (
'OfflineUserDataJobServiceTransport',
)
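# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the generated source). A concrete
# transport subclasses the abstract base above and returns a real callable from
# each RPC property; the class, parameter names and stub attributes below are
# hypothetical and only indicate the required shape (the shipped gRPC transport
# is generated separately).
# ---------------------------------------------------------------------------
class _SketchTransport(OfflineUserDataJobServiceTransport):
    """Hypothetical subclass showing which members a real transport overrides."""

    def __init__(self, *, stub, operations_client, **kwargs) -> None:
        # These attributes must exist before the base __init__ runs, because
        # _prep_wrapped_messages() reads the RPC properties during construction.
        self._stub = stub
        self._operations_client = operations_client
        super().__init__(**kwargs)

    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        return self._operations_client

    @property
    def create_offline_user_data_job(self):
        return self._stub.CreateOfflineUserDataJob  # assumed stub attribute

    @property
    def get_offline_user_data_job(self):
        return self._stub.GetOfflineUserDataJob  # assumed stub attribute

    @property
    def add_offline_user_data_job_operations(self):
        return self._stub.AddOfflineUserDataJobOperations  # assumed stub attribute

    @property
    def run_offline_user_data_job(self):
        return self._stub.RunOfflineUserDataJob  # assumed stub attribute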
| apache-2.0 | 6,447,268,863,987,547,000 | 38.183099 | 83 | 0.651689 | false |
harshadyeola/easyengine | ee/cli/plugins/stack_upgrade.py | 1 | 12752 | from cement.core.controller import CementBaseController, expose
from cement.core import handler, hook
from ee.core.logging import Log
from ee.core.variables import EEVariables
from ee.core.aptget import EEAptGet
from ee.core.apt_repo import EERepo
from ee.core.services import EEService
from ee.core.fileutils import EEFileUtils
from ee.core.shellexec import EEShellExec
from ee.core.git import EEGit
from ee.core.download import EEDownload
import configparser
import os
class EEStackUpgradeController(CementBaseController):
class Meta:
label = 'upgrade'
stacked_on = 'stack'
stacked_type = 'nested'
description = ('Upgrade stack safely')
arguments = [
(['--all'],
dict(help='Upgrade all stack', action='store_true')),
(['--web'],
dict(help='Upgrade web stack', action='store_true')),
(['--admin'],
dict(help='Upgrade admin tools stack', action='store_true')),
(['--mail'],
dict(help='Upgrade mail server stack', action='store_true')),
(['--mailscanner'],
dict(help='Upgrade mail scanner stack', action='store_true')),
(['--nginx'],
dict(help='Upgrade Nginx stack', action='store_true')),
(['--nginxmainline'],
dict(help='Upgrade Nginx Mainline stack', action='store_true')),
(['--php'],
dict(help='Upgrade PHP stack', action='store_true')),
(['--mysql'],
dict(help='Upgrade MySQL stack', action='store_true')),
(['--hhvm'],
dict(help='Upgrade HHVM stack', action='store_true')),
(['--postfix'],
dict(help='Upgrade Postfix stack', action='store_true')),
(['--wpcli'],
dict(help='Upgrade WPCLI', action='store_true')),
(['--redis'],
dict(help='Upgrade Redis', action='store_true')),
(['--php56'],
dict(help="Upgrade to PHP5.6 from PHP5.5",
action='store_true')),
(['--no-prompt'],
dict(help="Upgrade Packages without any prompt",
action='store_true')),
]
@expose(hide=True)
def upgrade_php56(self):
if EEVariables.ee_platform_distro == "ubuntu":
if os.path.isfile("/etc/apt/sources.list.d/ondrej-php5-5_6-{0}."
"list".format(EEVariables.ee_platform_codename)):
Log.error(self, "Unable to find PHP 5.5")
else:
if not(os.path.isfile(EEVariables.ee_repo_file_path) and
EEFileUtils.grep(self, EEVariables.ee_repo_file_path,
"php55")):
Log.error(self, "Unable to find PHP 5.5")
Log.info(self, "During PHP update process non nginx-cached"
" parts of your site may remain down.")
# Check prompt
if (not self.app.pargs.no_prompt):
start_upgrade = input("Do you want to continue:[y/N]")
if start_upgrade != "Y" and start_upgrade != "y":
Log.error(self, "Not starting PHP package update")
if EEVariables.ee_platform_distro == "ubuntu":
EERepo.remove(self, ppa="ppa:ondrej/php5")
EERepo.add(self, ppa=EEVariables.ee_php_repo)
else:
EEAptGet.remove(self, ["php5-xdebug"])
EEFileUtils.searchreplace(self, EEVariables.ee_repo_file_path,
"php55", "php56")
Log.info(self, "Updating apt-cache, please wait...")
EEAptGet.update(self)
Log.info(self, "Installing packages, please wait ...")
if EEVariables.ee_platform_codename == 'trusty':
EEAptGet.install(self, EEVariables.ee_php5_6 + EEVariables.ee_php_extra)
else:
EEAptGet.install(self, EEVariables.ee_php)
if EEVariables.ee_platform_distro == "debian":
EEShellExec.cmd_exec(self, "pecl install xdebug")
with open("/etc/php5/mods-available/xdebug.ini",
encoding='utf-8', mode='a') as myfile:
myfile.write(";zend_extension=/usr/lib/php5/20131226/"
"xdebug.so\n")
EEFileUtils.create_symlink(self, ["/etc/php5/mods-available/"
"xdebug.ini", "/etc/php5/fpm/conf.d"
"/20-xedbug.ini"])
Log.info(self, "Successfully upgraded from PHP 5.5 to PHP 5.6")
@expose(hide=True)
def default(self):
# All package update
if ((not self.app.pargs.php56)):
apt_packages = []
packages = []
if ((not self.app.pargs.web) and (not self.app.pargs.nginx) and
(not self.app.pargs.php) and (not self.app.pargs.mysql) and
(not self.app.pargs.postfix) and (not self.app.pargs.hhvm) and
(not self.app.pargs.mailscanner) and (not self.app.pargs.all)
and (not self.app.pargs.wpcli) and (not self.app.pargs.redis) and (not self.app.pargs.nginxmainline)):
self.app.pargs.web = True
if self.app.pargs.all:
self.app.pargs.web = True
self.app.pargs.mail = True
if self.app.pargs.web:
if EEAptGet.is_installed(self, 'nginx-custom'):
self.app.pargs.nginx = True
elif EEAptGet.is_installed(self, 'nginx-mainline'):
self.app.pargs.nginxmainline = True
else:
Log.info(self, "Nginx is not already installed")
self.app.pargs.php = True
self.app.pargs.mysql = True
self.app.pargs.postfix = True
self.app.pargs.wpcli = True
if self.app.pargs.mail:
self.app.pargs.nginx = True
self.app.pargs.php = True
self.app.pargs.mysql = True
self.app.pargs.wpcli = True
self.app.pargs.postfix = True
if EEAptGet.is_installed(self, 'dovecot-core'):
apt_packages = apt_packages + EEVariables.ee_mail
self.app.pargs.mailscanner = True
else:
Log.info(self, "Mail server is not installed")
if self.app.pargs.nginx :
if EEAptGet.is_installed(self, 'nginx-custom'):
apt_packages = apt_packages + EEVariables.ee_nginx
else:
Log.info(self, "Nginx Stable is not already installed")
if self.app.pargs.nginxmainline:
if EEAptGet.is_installed(self, 'nginx-mainline'):
apt_packages = apt_packages + EEVariables.ee_nginx_dev
else:
Log.info(self, "Nginx Mainline is not already installed")
if self.app.pargs.php:
if EEVariables.ee_platform_codename != 'trusty':
if EEAptGet.is_installed(self, 'php5-fpm'):
apt_packages = apt_packages + EEVariables.ee_php
else:
Log.info(self, "PHP is not installed")
else:
if EEAptGet.is_installed(self, 'php5.6-fpm'):
apt_packages = apt_packages + EEVariables.ee_php5_6 + EEVariables.ee_php_extra
else:
Log.info(self, "PHP 5.6 is not installed")
if EEAptGet.is_installed(self, 'php7.0-fpm'):
apt_packages = apt_packages + EEVariables.ee_php7_0 + EEVariables.ee_php_extra
else:
Log.info(self, "PHP 7.0 is not installed")
if self.app.pargs.hhvm:
if EEAptGet.is_installed(self, 'hhvm'):
apt_packages = apt_packages + EEVariables.ee_hhvm
else:
Log.info(self, "HHVM is not installed")
if self.app.pargs.mysql:
if EEAptGet.is_installed(self, 'mariadb-server'):
apt_packages = apt_packages + EEVariables.ee_mysql
else:
Log.info(self, "MariaDB is not installed")
if self.app.pargs.postfix:
if EEAptGet.is_installed(self, 'postfix'):
apt_packages = apt_packages + EEVariables.ee_postfix
else:
Log.info(self, "Postfix is not installed")
if self.app.pargs.redis:
if EEAptGet.is_installed(self, 'redis-server'):
apt_packages = apt_packages + EEVariables.ee_redis
else:
Log.info(self, "Redis is not installed")
if self.app.pargs.wpcli:
if os.path.isfile('/usr/bin/wp'):
packages = packages + [["https://github.com/wp-cli/wp-cli/"
"releases/download/v{0}/"
"wp-cli-{0}.phar"
"".format(EEVariables.ee_wp_cli),
"/usr/bin/wp",
"WP-CLI"]]
else:
Log.info(self, "WPCLI is not installed with EasyEngine")
if self.app.pargs.mailscanner:
if EEAptGet.is_installed(self, 'amavisd-new'):
apt_packages = (apt_packages + EEVariables.ee_mailscanner)
else:
Log.info(self, "MailScanner is not installed")
if len(packages) or len(apt_packages):
Log.info(self, "During package update process non nginx-cached"
" parts of your site may remain down")
# Check prompt
if (not self.app.pargs.no_prompt):
start_upgrade = input("Do you want to continue:[y/N]")
if start_upgrade != "Y" and start_upgrade != "y":
Log.error(self, "Not starting package update")
Log.info(self, "Updating packages, please wait...")
if len(apt_packages):
# apt-get update
EEAptGet.update(self)
# Update packages
EEAptGet.install(self, apt_packages)
# Post Actions after package updates
if (set(EEVariables.ee_nginx).issubset(set(apt_packages)) or
set(EEVariables.ee_nginx_dev).issubset(set(apt_packages))):
EEService.restart_service(self, 'nginx')
if EEVariables.ee_platform_codename != 'trusty':
if set(EEVariables.ee_php).issubset(set(apt_packages)):
EEService.restart_service(self, 'php5-fpm')
else:
if set(EEVariables.ee_php5_6).issubset(set(apt_packages)):
EEService.restart_service(self, 'php5.6-fpm')
if set(EEVariables.ee_php7_0).issubset(set(apt_packages)):
EEService.restart_service(self, 'php7.0-fpm')
if set(EEVariables.ee_hhvm).issubset(set(apt_packages)):
EEService.restart_service(self, 'hhvm')
if set(EEVariables.ee_postfix).issubset(set(apt_packages)):
EEService.restart_service(self, 'postfix')
if set(EEVariables.ee_mysql).issubset(set(apt_packages)):
EEService.restart_service(self, 'mysql')
if set(EEVariables.ee_mail).issubset(set(apt_packages)):
EEService.restart_service(self, 'dovecot')
if set(EEVariables.ee_redis).issubset(set(apt_packages)):
EEService.restart_service(self, 'redis-server')
if len(packages):
if self.app.pargs.wpcli:
EEFileUtils.remove(self,['/usr/bin/wp'])
Log.debug(self, "Downloading following: {0}".format(packages))
EEDownload.download(self, packages)
if self.app.pargs.wpcli:
EEFileUtils.chmod(self, "/usr/bin/wp", 0o775)
Log.info(self, "Successfully updated packages")
# PHP 5.6 to 5.6
elif (self.app.pargs.php56):
self.upgrade_php56()
else:
self.app.args.print_help()
| mit | -3,016,983,847,023,325,700 | 44.870504 | 117 | 0.513018 | false |
noba3/KoTos | addons/plugin.video.filmibynaturex-2.5.7/mymoves/movie/Metadata.py | 1 | 2017 |
from common import XBMCInterfaceUtils, Logger
from metahandler import metahandlers # @UnresolvedImport
import sys
def retieveMovieInfoAndAddItem(request_obj, response_obj):
items = response_obj.get_item_list()
XBMCInterfaceUtils.callBackDialogProgressBar(getattr(sys.modules[__name__], '__addMovieInfo_in_item'), items, 'Retrieving MOVIE info', 'Failed to retrieve movie information, please try again later')
__metaget__ = None
def __addMovieInfo_in_item(item):
if item.get_next_action_name() == 'Movie_Streams':
year = unicode(item.get_moving_data()['movieYear'], errors='ignore').encode('utf-8')
title = unicode(item.get_moving_data()['movieTitle'], errors='ignore').encode('utf-8')
meta = None
try:
global __metaget__
if __metaget__ is None:
__metaget__ = metahandlers.MetaData()
meta = __metaget__.get_meta('movie', title, year=year)
except:
Logger.logDebug('Failed to load metahandler module')
xbmc_item = item.get_xbmc_list_item_obj()
if(meta is not None):
xbmc_item.setIconImage(meta['thumb_url'])
xbmc_item.setThumbnailImage(meta['cover_url'])
videoInfo = {'trailer_url':meta['trailer_url']}
for key, value in meta.items():
if type(value) is str:
value = unicode(value).encode('utf-8')
videoInfo[key] = value
xbmc_item.setInfo('video', videoInfo)
xbmc_item.setProperty('fanart_image', meta['backdrop_url'])
item.add_request_data('videoInfo', videoInfo)
contextMenuItems = []
contextMenuItems.append(('Movie Information', 'XBMC.Action(Info)'))
xbmc_item.addContextMenuItems(contextMenuItems, replaceItems=False)
else:
xbmc_item.setInfo('video', {'title':title, 'year':year})
item.add_request_data('videoInfo', {'title':title, 'year':year})
| gpl-2.0 | -5,643,942,562,608,663,000 | 42.847826 | 202 | 0.608825 | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/soyuz/model/archivesubscriber.py | 1 | 9106 | # Copyright 2009-2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Database class for table ArchiveSubscriber."""
__metaclass__ = type
__all__ = [
'ArchiveSubscriber',
]
from operator import itemgetter
import pytz
from storm.expr import (
And,
Desc,
Join,
LeftJoin,
)
from storm.locals import (
DateTime,
Int,
Reference,
Store,
Storm,
Unicode,
)
from storm.store import EmptyResultSet
from zope.component import getUtility
from zope.interface import implements
from lp.registry.interfaces.person import validate_person
from lp.registry.model.person import Person
from lp.registry.model.teammembership import TeamParticipation
from lp.services.database.constants import UTC_NOW
from lp.services.database.decoratedresultset import DecoratedResultSet
from lp.services.database.enumcol import DBEnum
from lp.services.identity.interfaces.emailaddress import EmailAddressStatus
from lp.services.identity.model.emailaddress import EmailAddress
from lp.soyuz.enums import ArchiveSubscriberStatus
from lp.soyuz.interfaces.archiveauthtoken import IArchiveAuthTokenSet
from lp.soyuz.interfaces.archivesubscriber import IArchiveSubscriber
from lp.soyuz.model.archiveauthtoken import ArchiveAuthToken
class ArchiveSubscriber(Storm):
"""See `IArchiveSubscriber`."""
implements(IArchiveSubscriber)
__storm_table__ = 'ArchiveSubscriber'
id = Int(primary=True)
archive_id = Int(name='archive', allow_none=False)
archive = Reference(archive_id, 'Archive.id')
registrant_id = Int(name='registrant', allow_none=False)
registrant = Reference(registrant_id, 'Person.id')
date_created = DateTime(
name='date_created', allow_none=False, tzinfo=pytz.UTC)
subscriber_id = Int(
name='subscriber', allow_none=False,
validator=validate_person)
subscriber = Reference(subscriber_id, 'Person.id')
date_expires = DateTime(
name='date_expires', allow_none=True, tzinfo=pytz.UTC)
status = DBEnum(
name='status', allow_none=False,
enum=ArchiveSubscriberStatus)
description = Unicode(name='description', allow_none=True)
date_cancelled = DateTime(
name='date_cancelled', allow_none=True, tzinfo=pytz.UTC)
cancelled_by_id = Int(name='cancelled_by', allow_none=True)
cancelled_by = Reference(cancelled_by_id, 'Person.id')
@property
def displayname(self):
"""See `IArchiveSubscriber`."""
return "%s's access to %s" % (
self.subscriber.displayname, self.archive.displayname)
def cancel(self, cancelled_by):
"""See `IArchiveSubscriber`."""
self.date_cancelled = UTC_NOW
self.cancelled_by = cancelled_by
self.status = ArchiveSubscriberStatus.CANCELLED
def getNonActiveSubscribers(self):
"""See `IArchiveSubscriber`."""
store = Store.of(self)
if self.subscriber.is_team:
# We get all the people who already have active tokens for
# this archive (for example, through separate subscriptions).
auth_token = LeftJoin(
ArchiveAuthToken,
And(ArchiveAuthToken.person_id == Person.id,
ArchiveAuthToken.archive_id == self.archive_id,
ArchiveAuthToken.date_deactivated == None))
team_participation = Join(
TeamParticipation,
TeamParticipation.personID == Person.id)
# Only return people with preferred email address set.
preferred_email = Join(
EmailAddress, EmailAddress.personID == Person.id)
# We want to get all participants who are themselves
# individuals, not teams:
non_active_subscribers = store.using(
Person, team_participation, preferred_email, auth_token).find(
(Person, EmailAddress),
EmailAddress.status == EmailAddressStatus.PREFERRED,
TeamParticipation.teamID == self.subscriber_id,
Person.teamowner == None,
# There is no existing archive auth token.
ArchiveAuthToken.person_id == None)
non_active_subscribers.order_by(Person.name)
return non_active_subscribers
else:
# Subscriber is not a team.
token_set = getUtility(IArchiveAuthTokenSet)
if token_set.getActiveTokenForArchiveAndPerson(
self.archive, self.subscriber) is not None:
# There are active tokens, so return an empty result
# set.
return EmptyResultSet()
# Otherwise return a result set containing only the
# subscriber and their preferred email address.
return store.find(
(Person, EmailAddress),
Person.id == self.subscriber_id,
EmailAddress.personID == Person.id,
EmailAddress.status == EmailAddressStatus.PREFERRED)
class ArchiveSubscriberSet:
"""See `IArchiveSubscriberSet`."""
def _getBySubscriber(self, subscriber, archive, current_only,
with_active_tokens):
"""Return all the subscriptions for a person.
:param subscriber: An `IPerson` for whom to return all
`ArchiveSubscriber` records.
:param archive: An optional `IArchive` which restricts
the results to that particular archive.
:param current_only: Whether the result should only include current
subscriptions (which is the default).
:param with_active_tokens: Indicates whether the tokens for the given
            subscriber's subscriptions should be included in the resultset.
By default the tokens are not included in the resultset.
        """
# Grab the extra Storm expressions, for this query,
# depending on the params:
extra_exprs = self._getExprsForSubscriptionQueries(
archive, current_only)
origin = [
ArchiveSubscriber,
Join(
TeamParticipation,
TeamParticipation.teamID == ArchiveSubscriber.subscriber_id)]
if with_active_tokens:
result_row = (ArchiveSubscriber, ArchiveAuthToken)
# We need a left join with ArchiveSubscriber as
# the origin:
origin.append(
LeftJoin(
ArchiveAuthToken,
And(
ArchiveAuthToken.archive_id ==
ArchiveSubscriber.archive_id,
ArchiveAuthToken.person_id == subscriber.id,
ArchiveAuthToken.date_deactivated == None)))
else:
result_row = ArchiveSubscriber
# Set the main expression to find all the subscriptions for
# which the subscriber is a direct subscriber OR is a member
# of a subscribed team.
# Note: the subscription to the owner itself will also be
# part of the join as there is a TeamParticipation entry
# showing that each person is a member of the "team" that
# consists of themselves.
store = Store.of(subscriber)
return store.using(*origin).find(
result_row,
TeamParticipation.personID == subscriber.id,
*extra_exprs).order_by(Desc(ArchiveSubscriber.date_created))
def getBySubscriber(self, subscriber, archive=None, current_only=True):
"""See `IArchiveSubscriberSet`."""
return self._getBySubscriber(subscriber, archive, current_only, False)
def getBySubscriberWithActiveToken(self, subscriber, archive=None):
"""See `IArchiveSubscriberSet`."""
return self._getBySubscriber(subscriber, archive, True, True)
def getByArchive(self, archive, current_only=True):
"""See `IArchiveSubscriberSet`."""
extra_exprs = self._getExprsForSubscriptionQueries(
archive, current_only)
store = Store.of(archive)
result = store.using(ArchiveSubscriber,
Join(Person, ArchiveSubscriber.subscriber_id == Person.id)).find(
(ArchiveSubscriber, Person),
*extra_exprs).order_by(Person.name)
return DecoratedResultSet(result, itemgetter(0))
def _getExprsForSubscriptionQueries(self, archive=None,
current_only=True):
"""Return the Storm expressions required for the parameters.
Just to keep the code DRY.
"""
extra_exprs = []
# Restrict the results to the specified archive if requested:
if archive:
extra_exprs.append(ArchiveSubscriber.archive == archive)
# Restrict the results to only those subscriptions that are current
# if requested:
if current_only:
extra_exprs.append(
ArchiveSubscriber.status == ArchiveSubscriberStatus.CURRENT)
return extra_exprs
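# Editor's illustrative sketch (not part of the original module): callers are
# expected to look the subscriber set up as a utility and iterate the result
# rows. The objects "person" and "ppa" and the interface import path below are
# assumptions, not verified against the rest of Launchpad.
#
#   from lp.soyuz.interfaces.archivesubscriber import IArchiveSubscriberSet
#
#   subscriptions = getUtility(IArchiveSubscriberSet)
#   for subscription, token in subscriptions.getBySubscriberWithActiveToken(
#           person, archive=ppa):
#       # token is None unless the person already holds an active
#       # ArchiveAuthToken for that archive (see the LeftJoin above).
#       ...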
| agpl-3.0 | 6,795,547,473,769,740,000 | 37.100418 | 78 | 0.639798 | false |
pnasrat/puppet-codereview | rietveld.py | 1 | 6634 | #!/usr/bin/python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to simplify some common AppEngine actions.
Use 'rietveld help' for a list of commands.
"""
import logging
import os
import re
import shutil
import subprocess
import sys
import zipfile
APPCFG = 'appcfg.py'
DEV_APPSERVER = 'dev_appserver.py'
RELEASE = 'release'
ZIPFILE = 'django.zip'
FILES = ["app.yaml", "index.yaml",
"__init__.py", "main.py", "settings.py", "urls.py"]
DIRS = ["static", "templates", "codereview"]
IGNORED_DIR = (".svn", "gis", "admin", "localflavor", "mysql", "mysql_old",
"oracle", "postgresql", "postgresql_psycopg2", "sqlite3",
"test")
IGNORED_EXT = (".pyc", ".pyo", ".po", ".mo")
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShell(command, print_output=False):
"""Executes a command and returns the output."""
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=use_shell)
output = ""
while True:
line = p.stdout.readline()
if not line:
break
if print_output:
print line.strip('\n')
output += line
p.wait()
p.stdout.close()
return output
def Help():
print "Available commands:"
print "help"
print "release"
print "serve"
print "serve_email"
print "serve_remote"
print "serve_remote_email"
print "update"
print "update_indexes"
print "upload"
print "vacuum_indexes"
def CreateRelease():
""" Creates a "release" subdirectory.
This is a subdirectory containing a bunch of symlinks, from which the app can
be updated. The main reason for this is to import Django from a zipfile,
which saves dramatically in upload time: statting and computing the SHA1 for
1000s of files is slow. Even if most of those files don't actually need to
be uploaded, they still add to the work done for each update.
"""
def GetDjangoFiles():
"""Return a list of Django files to send to the server.
We prune:
- .svn subdirectories for obvious reasons.
- the other directories are huge and unneeded.
- *.po and *.mo files because they are bulky and unneeded.
- *.pyc and *.pyo because they aren't used by App Engine anyway.
"""
result = []
for root, dirs, files in os.walk("django"):
dirs[:] = [d for d in dirs if d not in IGNORED_DIR]
for file in files:
unused, extension = os.path.splitext(file)
if extension in IGNORED_EXT:
continue
result.append(os.path.join(root, file))
return result
def CopyRietveldDirectory(src, dst):
"""Copies a directory used by Rietveld.
Skips ".svn" directories and ".pyc" files.
"""
for root, dirs, files in os.walk(src):
if not os.path.exists(os.path.join(dst, root)):
os.mkdir(os.path.join(dst, root))
for file in files:
unused, extension = os.path.splitext(file)
if extension in (".pyc", ".pyo"):
continue
shutil.copyfile(os.path.join(root, file), os.path.join(dst, root, file))
dirs[:] = [d for d in dirs if d not in (".svn")]
for dir in dirs:
os.mkdir(os.path.join(dst, root, dir))
# Remove old ZIPFILE file.
if os.path.exists(ZIPFILE):
os.remove(ZIPFILE)
django_files = GetDjangoFiles()
django_zip = zipfile.ZipFile(ZIPFILE, "w")
for file in django_files:
django_zip.write(file, compress_type=zipfile.ZIP_DEFLATED)
django_zip.close()
# Remove old RELEASE directory.
if sys.platform.startswith("win"):
RunShell(["rmdir", "/s", "/q", RELEASE])
else:
RunShell(["rm", "-rf", RELEASE])
# Create new RELEASE directory.
os.mkdir(RELEASE)
if sys.platform.startswith("win"):
# No symbolic links on Windows, just copy.
for x in FILES + [ZIPFILE]:
shutil.copyfile(x, os.path.join(RELEASE, x))
for x in DIRS:
CopyRietveldDirectory(x, RELEASE)
else:
# Create symbolic links.
for x in FILES + DIRS + [ZIPFILE]:
RunShell(["ln", "-s", "../" + x, os.path.join(RELEASE, x)])
def GetApplicationName():
file = open("app.yaml", "r")
result = file.read()
file.close()
APP_REGEXP = ".*?application: ([\w\-]+)"
return re.compile(APP_REGEXP, re.DOTALL).match(result).group(1)
def Update(args):
print "Updating " + GetApplicationName()
output = RunShell(["svn", "info"])
revision = re.compile(".*?\nRevision: (\d+)",
re.DOTALL).match(output).group(1)
revision_file = os.path.join("templates", "live_revision.html")
file = open(revision_file, "w")
file.write('This is <a class="novisit" '
'href="http://code.google.com/p/rietveld/">Rietveld</a> r' +
revision)
file.close()
CreateRelease()
appcfg_args = [APPCFG, "update", RELEASE] + args
# Use os.system here because input might be required, and that doesn't work
# through subprocess.Popen.
os.system(" ".join(appcfg_args))
RunShell(["svn", "revert", revision_file])
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1:
Help()
return 0
command = argv[1]
if command == "help":
Help()
elif command == "serve":
RunShell([DEV_APPSERVER, "."], True)
elif command == "serve_remote":
RunShell([DEV_APPSERVER, "--address", "0.0.0.0", "."], True)
elif command == "serve_email":
RunShell([DEV_APPSERVER, "--enable_sendmail", "."], True)
elif command == "serve_remote_email":
RunShell([DEV_APPSERVER, "--enable_sendmail", "--address", "0.0.0.0", "."],
True)
elif command == "release":
CreateRelease()
elif command in ("update", "upload"):
Update(argv[2:])
elif command == "update_indexes":
RunShell([APPCFG, "update_indexes", "."], True)
elif command == "vacuum_indexes":
RunShell([APPCFG, "vacuum_indexes", "."], True)
else:
print "Unknown command: " + command
return 2
return 0
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | -6,787,070,875,122,825,000 | 28.484444 | 80 | 0.641091 | false |
F8LEFT/ART | KDE/share/ECM/find-modules/rules_engine.py | 1 | 21090 | #!/usr/bin/env python
#=============================================================================
# Copyright 2016 by Shaheed Haque ([email protected])
# Copyright 2016 Stephen Kelly <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#=============================================================================
"""SIP file generation rules engine."""
from __future__ import print_function
from abc import *
import argparse
import gettext
import inspect
import logging
import os
import re
import sys
import textwrap
import traceback
from copy import deepcopy
from clang.cindex import CursorKind
from clang.cindex import AccessSpecifier
class HelpFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
pass
logger = logging.getLogger(__name__)
gettext.install(__name__)
_SEPARATOR = "\x00"
def _parents(container):
parents = []
parent = container.semantic_parent
while parent and parent.kind != CursorKind.TRANSLATION_UNIT:
parents.append(parent.spelling)
parent = parent.semantic_parent
if parents:
parents = "::".join(reversed(parents))
else:
parents = os.path.basename(container.translation_unit.spelling)
return parents
class Rule(object):
def __init__(self, db, rule_number, fn, pattern_zip):
self.db = db
self.rule_number = rule_number
self.fn = fn
self.usage = 0
try:
groups = ["(?P<{}>{})".format(name, pattern) for pattern, name in pattern_zip]
groups = _SEPARATOR.join(groups)
self.matcher = re.compile(groups)
except Exception as e:
groups = ["{} '{}'".format(name, pattern) for pattern, name in pattern_zip]
groups = ", ".join(groups)
raise RuntimeError(_("Bad {}: {}: {}").format(self, groups, e))
def match(self, candidate):
return self.matcher.match(candidate)
def trace_result(self, parents, item, original, modified):
fqn = parents + "::" + original["name"] + "[" + str(item.extent.start.line) + "]"
self._trace_result(fqn, original, modified)
def _trace_result(self, fqn, original, modified):
if not modified["name"]:
logger.debug(_("Rule {} suppressed {}, {}").format(self, fqn, original))
else:
delta = False
for k, v in original.iteritems():
if v != modified[k]:
delta = True
break
if delta:
logger.debug(_("Rule {} modified {}, {}->{}").format(self, fqn, original, modified))
else:
logger.warn(_("Rule {} did not modify {}, {}").format(self, fqn, original))
def __str__(self):
return "[{},{}]".format(self.rule_number, self.fn.__name__)
class AbstractCompiledRuleDb(object):
__metaclass__ = ABCMeta
def __init__(self, db, parameter_names):
self.db = db
self.compiled_rules = []
for i, raw_rule in enumerate(db()):
if len(raw_rule) != len(parameter_names) + 1:
raise RuntimeError(_("Bad raw rule {}: {}: {}").format(db.__name__, raw_rule, parameter_names))
z = zip(raw_rule[:-1], parameter_names)
self.compiled_rules.append(Rule(db, i, raw_rule[-1], z))
self.candidate_formatter = _SEPARATOR.join(["{}"] * len(parameter_names))
def _match(self, *args):
candidate = self.candidate_formatter.format(*args)
for rule in self.compiled_rules:
matcher = rule.match(candidate)
if matcher:
#
# Only use the first matching rule.
#
rule.usage += 1
return matcher, rule
return None, None
@abstractmethod
def apply(self, *args):
        raise NotImplementedError(_("Missing subclass"))
def dump_usage(self, fn):
""" Dump the usage counts."""
for rule in self.compiled_rules:
fn(self.__class__.__name__, str(rule), rule.usage)
class ContainerRuleDb(AbstractCompiledRuleDb):
"""
THE RULES FOR CONTAINERS.
These are used to customise the behaviour of the SIP generator by allowing
the declaration for any container (class, namespace, struct, union) to be
customised, for example to add SIP compiler annotations.
Each entry in the raw rule database must be a list with members as follows:
0. A regular expression which matches the fully-qualified name of the
"container" enclosing the container.
1. A regular expression which matches the container name.
2. A regular expression which matches any template parameters.
3. A regular expression which matches the container declaration.
4. A regular expression which matches any base specifiers.
5. A function.
In use, the database is walked in order from the first entry. If the regular
expressions are matched, the function is called, and no further entries are
walked. The function is called with the following contract:
def container_xxx(container, sip, matcher):
'''
Return a modified declaration for the given container.
:param container: The clang.cindex.Cursor for the container.
:param sip: A dict with the following keys:
name The name of the container.
template_parameters Any template parameters.
decl The declaration.
base_specifiers Any base specifiers.
body The body, less the outer
pair of braces.
annotations Any SIP annotations.
:param matcher: The re.Match object. This contains named
groups corresponding to the key names above
EXCEPT body and annotations.
:return: An updated set of sip.xxx values. Setting sip.name to the
empty string will cause the container to be suppressed.
'''
:return: The compiled form of the rules.
"""
def __init__(self, db):
super(ContainerRuleDb, self).__init__(db, ["parents", "container", "template_parameters", "decl", "base_specifiers"])
def apply(self, container, sip):
"""
Walk over the rules database for functions, applying the first matching transformation.
:param container: The clang.cindex.Cursor for the container.
:param sip: The SIP dict.
"""
parents = _parents(container)
matcher, rule = self._match(parents, sip["name"], sip["template_parameters"], sip["decl"], sip["base_specifiers"])
if matcher:
before = deepcopy(sip)
rule.fn(container, sip, matcher)
rule.trace_result(parents, container, before, sip)
class FunctionRuleDb(AbstractCompiledRuleDb):
"""
THE RULES FOR FUNCTIONS.
These are used to customise the behaviour of the SIP generator by allowing
the declaration for any function to be customised, for example to add SIP
compiler annotations.
Each entry in the raw rule database must be a list with members as follows:
0. A regular expression which matches the fully-qualified name of the
"container" enclosing the function.
1. A regular expression which matches the function name.
2. A regular expression which matches any template parameters.
3. A regular expression which matches the function result.
4. A regular expression which matches the function parameters (e.g.
"int a, void *b" for "int foo(int a, void *b)").
5. A function.
In use, the database is walked in order from the first entry. If the regular
expressions are matched, the function is called, and no further entries are
walked. The function is called with the following contract:
def function_xxx(container, function, sip, matcher):
'''
Return a modified declaration for the given function.
:param container: The clang.cindex.Cursor for the container.
:param function: The clang.cindex.Cursor for the function.
:param sip: A dict with the following keys:
name The name of the function.
template_parameters Any template parameters.
fn_result Result, if not a constructor.
decl The declaration.
                            prefix              Leading keywords ("static"). Separated by space,
ends with a space.
suffix Trailing keywords ("const"). Separated by space, starts with
space.
annotations Any SIP annotations.
:param matcher: The re.Match object. This contains named
groups corresponding to the key names above
EXCEPT annotations.
:return: An updated set of sip.xxx values. Setting sip.name to the
            empty string will cause the function to be suppressed.
'''
:return: The compiled form of the rules.
"""
def __init__(self, db):
super(FunctionRuleDb, self).__init__(db, ["container", "function", "template_parameters", "fn_result", "parameters"])
def apply(self, container, function, sip):
"""
Walk over the rules database for functions, applying the first matching transformation.
:param container: The clang.cindex.Cursor for the container.
:param function: The clang.cindex.Cursor for the function.
:param sip: The SIP dict.
"""
parents = _parents(function)
matcher, rule = self._match(parents, sip["name"], ", ".join(sip["template_parameters"]), sip["fn_result"], ", ".join(sip["parameters"]))
if matcher:
before = deepcopy(sip)
rule.fn(container, function, sip, matcher)
rule.trace_result(parents, function, before, sip)
class ParameterRuleDb(AbstractCompiledRuleDb):
"""
THE RULES FOR FUNCTION PARAMETERS.
These are used to customise the behaviour of the SIP generator by allowing
the declaration for any parameter in any function to be customised, for
example to add SIP compiler annotations.
Each entry in the raw rule database must be a list with members as follows:
0. A regular expression which matches the fully-qualified name of the
"container" enclosing the function enclosing the parameter.
1. A regular expression which matches the function name enclosing the
parameter.
2. A regular expression which matches the parameter name.
3. A regular expression which matches the parameter declaration (e.g.
"int foo").
4. A regular expression which matches the parameter initialiser (e.g.
"Xyz:MYCONST + 42").
5. A function.
In use, the database is walked in order from the first entry. If the regular
expressions are matched, the function is called, and no further entries are
walked. The function is called with the following contract:
def parameter_xxx(container, function, parameter, sip, init, matcher):
'''
Return a modified declaration and initialiser for the given parameter.
:param container: The clang.cindex.Cursor for the container.
:param function: The clang.cindex.Cursor for the function.
:param parameter: The clang.cindex.Cursor for the parameter.
:param sip: A dict with the following keys:
name The name of the function.
decl The declaration.
init Any initialiser.
annotations Any SIP annotations.
:param matcher: The re.Match object. This contains named
groups corresponding to the key names above
EXCEPT annotations.
:return: An updated set of sip.xxx values.
'''
:return: The compiled form of the rules.
"""
def __init__(self, db):
super(ParameterRuleDb, self).__init__(db, ["container", "function", "parameter", "decl", "init"])
def apply(self, container, function, parameter, sip):
"""
Walk over the rules database for parameters, applying the first matching transformation.
:param container: The clang.cindex.Cursor for the container.
:param function: The clang.cindex.Cursor for the function.
:param parameter: The clang.cindex.Cursor for the parameter.
:param sip: The SIP dict.
"""
parents = _parents(function)
matcher, rule = self._match(parents, function.spelling, sip["name"], sip["decl"], sip["init"])
if matcher:
before = deepcopy(sip)
rule.fn(container, function, parameter, sip, matcher)
rule.trace_result(parents, parameter, before, sip)
class VariableRuleDb(AbstractCompiledRuleDb):
"""
THE RULES FOR VARIABLES.
These are used to customise the behaviour of the SIP generator by allowing
the declaration for any variable to be customised, for example to add SIP
compiler annotations.
Each entry in the raw rule database must be a list with members as follows:
0. A regular expression which matches the fully-qualified name of the
"container" enclosing the variable.
1. A regular expression which matches the variable name.
2. A regular expression which matches the variable declaration (e.g.
"int foo").
3. A function.
In use, the database is walked in order from the first entry. If the regular
expressions are matched, the function is called, and no further entries are
walked. The function is called with the following contract:
def variable_xxx(container, variable, sip, matcher):
'''
Return a modified declaration for the given variable.
:param container: The clang.cindex.Cursor for the container.
:param variable: The clang.cindex.Cursor for the variable.
:param sip: A dict with the following keys:
name The name of the variable.
decl The declaration.
annotations Any SIP annotations.
:param matcher: The re.Match object. This contains named
groups corresponding to the key names above
EXCEPT annotations.
:return: An updated set of sip.xxx values. Setting sip.name to the
            empty string will cause the variable to be suppressed.
'''
:return: The compiled form of the rules.
"""
def __init__(self, db):
super(VariableRuleDb, self).__init__(db, ["container", "variable", "decl"])
def apply(self, container, variable, sip):
"""
Walk over the rules database for variables, applying the first matching transformation.
:param container: The clang.cindex.Cursor for the container.
:param variable: The clang.cindex.Cursor for the variable.
:param sip: The SIP dict.
"""
parents = _parents(variable)
matcher, rule = self._match(parents, sip["name"], sip["decl"])
if matcher:
before = deepcopy(sip)
rule.fn(container, variable, sip, matcher)
rule.trace_result(parents, variable, before, sip)
class RuleSet(object):
"""
To implement your own binding, create a subclass of RuleSet, also called
RuleSet in your own Python module. Your subclass will expose the raw rules
    along with other ancillary data exposed through the subclass methods.
You then simply run the SIP generation and SIP compilation programs passing
    in the name of your rules file.
"""
__metaclass__ = ABCMeta
@abstractmethod
def container_rules(self):
"""
Return a compiled list of rules for containers.
:return: A ContainerRuleDb instance
"""
        raise NotImplementedError(_("Missing subclass implementation"))
@abstractmethod
def function_rules(self):
"""
Return a compiled list of rules for functions.
:return: A FunctionRuleDb instance
"""
        raise NotImplementedError(_("Missing subclass implementation"))
@abstractmethod
def parameter_rules(self):
"""
Return a compiled list of rules for function parameters.
:return: A ParameterRuleDb instance
"""
        raise NotImplementedError(_("Missing subclass implementation"))
@abstractmethod
def variable_rules(self):
"""
Return a compiled list of rules for variables.
:return: A VariableRuleDb instance
"""
        raise NotImplementedError(_("Missing subclass implementation"))
def dump_unused(self):
"""Usage statistics, to identify unused rules."""
def dumper(db_name, rule, usage):
if usage:
logger.info(_("Rule {}::{} used {} times".format(db_name, rule, usage)))
else:
logger.warn(_("Rule {}::{} unused".format(db_name, rule)))
for db in [self.container_rules(), self.function_rules(), self.parameter_rules(),
self.variable_rules()]:
db.dump_usage(dumper)
def container_discard(container, sip, matcher):
sip["name"] = ""
def function_discard(container, function, sip, matcher):
sip["name"] = ""
def parameter_transfer_to_parent(container, function, parameter, sip, matcher):
if function.is_static_method():
sip["annotations"].add("Transfer")
else:
sip["annotations"].add("TransferThis")
def param_rewrite_mode_t_as_int(container, function, parameter, sip, matcher):
sip["decl"] = sip["decl"].replace("mode_t", "unsigned int")
def return_rewrite_mode_t_as_int(container, function, sip, matcher):
sip["fn_result"] = "unsigned int"
def variable_discard(container, variable, sip, matcher):
sip["name"] = ""
def parameter_strip_class_enum(container, function, parameter, sip, matcher):
sip["decl"] = sip["decl"].replace("class ", "").replace("enum ", "")
def function_discard_impl(container, function, sip, matcher):
if function.extent.start.column == 1:
sip["name"] = ""
def rules(project_rules):
"""
Constructor.
:param project_rules: The rules file for the project.
"""
import imp
imp.load_source("project_rules", project_rules)
#
# Statically prepare the rule logic. This takes the rules provided by the user and turns them into code.
#
return getattr(sys.modules["project_rules"], "RuleSet")()
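# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module). A project
# rules file, as loaded by rules() above, defines raw-rule callables plus a
# RuleSet subclass; a real rules file would "import rules_engine" and qualify
# these names. The helper below is inert (never called) and every pattern
# ("MyLib.*", "Internal.*", "operator.*", "d_ptr") is hypothetical; it only
# shows the shape the rule databases documented above expect.
# ---------------------------------------------------------------------------
def _example_project_ruleset():
    def _container_rules():
        return [
            # parents, container, template params, decl, base specifiers, action
            ["MyLib.*", "Internal.*", ".*", ".*", ".*", container_discard],
        ]

    def _function_rules():
        return [
            # container, function, template params, fn result, parameters, action
            ["MyLib.*", "operator.*", ".*", ".*", ".*", function_discard],
        ]

    def _parameter_rules():
        return [
            # container, function, parameter, decl, init, action
            ["MyLib.*", ".*", ".*", ".*mode_t.*", ".*", param_rewrite_mode_t_as_int],
        ]

    def _variable_rules():
        return [
            # container, variable, decl, action
            ["MyLib.*", "d_ptr", ".*", variable_discard],
        ]

    class ExampleRuleSet(RuleSet):
        """In a real rules file this subclass must itself be named RuleSet."""

        def __init__(self):
            self._containers = ContainerRuleDb(_container_rules)
            self._functions = FunctionRuleDb(_function_rules)
            self._parameters = ParameterRuleDb(_parameter_rules)
            self._variables = VariableRuleDb(_variable_rules)

        def container_rules(self):
            return self._containers

        def function_rules(self):
            return self._functions

        def parameter_rules(self):
            return self._parameters

        def variable_rules(self):
            return self._variables

    return ExampleRuleSet()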
| gpl-3.0 | 3,596,388,408,542,231,000 | 39.095057 | 144 | 0.600853 | false |
bsmithyman/pymatsolver | pymatsolver/Tests/test_Triangle.py | 1 | 1715 | import unittest
import numpy as np, scipy.sparse as sp
TOL = 1e-12
class TestMumps(unittest.TestCase):
def setUp(self):
n = 50
nrhs = 20
self.A = sp.rand(n, n, 0.4) + sp.identity(n)
self.sol = np.ones((n, nrhs))
self.rhsU = sp.triu(self.A) * self.sol
self.rhsL = sp.tril(self.A) * self.sol
def test_directLower(self):
from pymatsolver import ForwardSolver
ALinv = ForwardSolver(sp.tril(self.A))
X = ALinv * self.rhsL
x = ALinv * self.rhsL[:,0]
self.assertLess(np.linalg.norm(self.sol-X,np.inf), TOL)
self.assertLess(np.linalg.norm(self.sol[:,0]-x,np.inf), TOL)
def test_directLower_1(self):
from pymatsolver import BackwardSolver
AUinv = BackwardSolver(sp.triu(self.A))
X = AUinv * self.rhsU
x = AUinv * self.rhsU[:,0]
self.assertLess(np.linalg.norm(self.sol-X,np.inf), TOL)
self.assertLess(np.linalg.norm(self.sol[:,0]-x,np.inf), TOL)
def test_directLower_python(self):
from pymatsolver import _ForwardSolver
ALinv = _ForwardSolver(sp.tril(self.A))
X = ALinv * self.rhsL
x = ALinv * self.rhsL[:,0]
self.assertLess(np.linalg.norm(self.sol-X,np.inf), TOL)
self.assertLess(np.linalg.norm(self.sol[:,0]-x,np.inf), TOL)
def test_directLower_1_python(self):
from pymatsolver import _BackwardSolver
AUinv = _BackwardSolver(sp.triu(self.A))
X = AUinv * self.rhsU
x = AUinv * self.rhsU[:,0]
self.assertLess(np.linalg.norm(self.sol-X,np.inf), TOL)
self.assertLess(np.linalg.norm(self.sol[:,0]-x,np.inf), TOL)
if __name__ == '__main__':
unittest.main()
| mit | -503,422,981,200,351,500 | 34 | 68 | 0.602915 | false |
ric2b/Vivaldi-browser | chromium/components/policy/tools/make_policy_zip.py | 1 | 2188 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a zip archive with policy template files.
"""
import optparse
import sys
import zipfile
def add_files_to_zip(zip_file, base_dir, file_list):
"""Pack a list of files into a zip archive, that is already opened for
writing.
Args:
zip_file: An object representing the zip archive.
base_dir: Base path of all the files in the real file system.
file_list: List of absolute file paths to add. Must start with base_dir.
The base_dir is stripped in the zip file entries.
"""
if (base_dir[-1] != '/'):
base_dir += '/'
for file_path in file_list:
assert file_path.startswith(base_dir)
zip_file.write(file_path, file_path[len(base_dir):])
return 0
def main(argv):
"""Pack a list of files into a zip archive.
Args:
output: The file path of the zip archive.
base_dir: Base path of input files.
languages: Comma-separated list of languages, e.g. en-US,de.
add: List of files to include in the archive. The language placeholder
${lang} is expanded into one file for each language.
"""
parser = optparse.OptionParser()
parser.add_option("--output", dest="output")
parser.add_option("--base_dir", dest="base_dir")
parser.add_option("--languages", dest="languages")
parser.add_option("--add", action="append", dest="files", default=[])
options, args = parser.parse_args(argv[1:])
# Process file list, possibly expanding language placeholders.
_LANG_PLACEHOLDER = "${lang}"
languages = filter(bool, options.languages.split(','))
file_list = []
for file_to_add in options.files:
if (_LANG_PLACEHOLDER in file_to_add):
for lang in languages:
file_list.append(file_to_add.replace(_LANG_PLACEHOLDER, lang))
else:
file_list.append(file_to_add)
zip_file = zipfile.ZipFile(options.output, 'w', zipfile.ZIP_DEFLATED)
try:
return add_files_to_zip(zip_file, options.base_dir, file_list)
finally:
zip_file.close()
if '__main__' == __name__:
sys.exit(main(sys.argv))
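# Editor's illustrative note (not part of the original tool). The ${lang}
# placeholder in an --add entry is expanded once per entry in --languages, so a
# hypothetical invocation such as
#
#   python make_policy_zip.py --output policy_templates.zip \
#       --base_dir out/gen/policy --languages en-US,de \
#       --add 'out/gen/policy/html/${lang}/policy_templates.html' \
#       --add out/gen/policy/VERSION
#
# packs VERSION once plus one policy_templates.html per language, storing each
# entry with the base_dir prefix stripped.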
| bsd-3-clause | -7,656,171,030,158,575,000 | 31.656716 | 76 | 0.679159 | false |
Solewer/vino-cave | vinocave/settings.py | 1 | 3114 | """
Django settings for vinocave project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vj#grw^2zlpbh^w&0aed2ac_q51p_s#kiw-f*x6(^u_xy_f-jk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'vinocave.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'vinocave.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| mit | -9,186,155,754,209,220,000 | 24.735537 | 91 | 0.685613 | false |
toothris/toothris | src/bprofile.py | 1 | 5189 | # Copyright 2008, 2015 Oleg Plakhotniuk
#
# This file is part of Toothris.
#
# Toothris is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Toothris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Toothris. If not, see <http://www.gnu.org/licenses/>.
# LIBS
import pygame
# CONSTS
MEASURES_MIN_COUNT = 200
class Profiler :
def __init__ ( self, name ) :
self.name = name
self.time_min = 999999
self.time_max = 0
self.time_total = 0
self.measures = - MEASURES_MIN_COUNT
self.clock = pygame.time.Clock ()
self.measuring = False
self.profilers = {}
def begin ( self ) :
if self.measuring :
raise RuntimeError ( "trying to start already started profiler" )
self.clock.tick ()
self.measuring = True
def end ( self ) :
if not self.measuring :
raise RuntimeError ( "trying to stop not started profiler" )
self.clock.tick ()
self.measuring = False
self.measures += 1
if self.measures > 0 :
self.time_total += self.clock.get_time ()
self.time_min = min ( self.time_min, self.clock.get_time () )
self.time_max = max ( self.time_max, self.clock.get_time () )
def time_avg ( self ) :
return float ( self.time_total ) / max ( self.measures, 1 )
root_profilers = {}
stack_profilers = []
def begin ( name ) :
global stack_profiler
global root_profilers
if not isinstance ( name, type ( "" ) ) :
raise RuntimeError ( "string name expected" )
if name == "" :
raise RuntimeError ( "name must not be empty" )
if len ( stack_profilers ) > 0 :
profilers = stack_profilers [ len ( stack_profilers ) - 1 ].profilers
else :
profilers = root_profilers
if name in profilers :
profiler = profilers [ name ]
else :
profiler = Profiler ( name )
profilers [ name ] = profiler
profiler.begin ()
stack_profilers.append ( profiler )
def end ( name ) :
global stack_profilers
if not isinstance ( name, type ( "" ) ) :
raise RuntimeError ( "string name expected" )
if len ( stack_profilers ) == 0 :
raise RuntimeError ( "no profiler currently running" )
if name == "" :
raise RuntimeError ( "name must not be empty" )
last_profiler = stack_profilers [ len ( stack_profilers ) - 1 ]
if name != last_profiler.name :
raise RuntimeError ( "trying to stop profiler " + name + \
" before profiler " + last_profiler.name )
stack_profilers.pop ().end ()
def stats_profilers ( profilers, indent = 0 ) :
if len ( profilers ) == 0 :
return
def padded_str ( value, max_len = 0, left_padding = True ) :
if isinstance ( value, type ( "" ) ) :
str_value = value
elif isinstance ( value, type ( 0 ) ) :
str_value = str ( value )
elif isinstance ( value, type ( 0.0 ) ) :
str_value = "%(number).2f" % { "number" : value }
spaces = max ( 0, max_len - len ( str_value ) )
if left_padding :
return " " * spaces + str_value
else :
return str_value + " " * spaces
longest_name = max ( [ len ( padded_str ( p.name ) ) for p in profilers.values () ] )
longest_min = max ( [ len ( padded_str ( p.time_min ) ) for p in profilers.values () ] )
longest_max = max ( [ len ( padded_str ( p.time_max ) ) for p in profilers.values () ] )
longest_avg = max ( [ len ( padded_str ( p.time_avg() ) ) for p in profilers.values () ] )
longest_msr = max ( [ len ( padded_str ( p.measures ) ) for p in profilers.values () ] )
names = profilers.keys ()
names.sort ()
for name in names :
profiler = profilers [ name ]
if profiler.measures > 0 :
print " " * 4 * indent + padded_str ( profiler.name , longest_name, False ) + \
" : min = " + padded_str ( profiler.time_min , longest_min ) + \
" max = " + padded_str ( profiler.time_max , longest_max ) + \
" avg = " + padded_str ( profiler.time_avg(), longest_avg ) + \
" frames = " + padded_str ( profiler.measures , longest_msr )
else :
print " " * 4 * indent + padded_str ( profiler.name , longest_name, False ) + \
" : not enough frames to profile ( " + str ( -profiler.measures ) + " left )"
stats_profilers ( profiler.profilers, indent + 1 )
def stats () :
print "profilers stats:"
stats_profilers ( root_profilers )
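# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module). Typical nested
# usage from a game loop; the section names ("frame", "logic", "render") are
# hypothetical. Nested begin()/end() pairs build the child-profiler tree, and
# stats() only reports a section once it has collected MEASURES_MIN_COUNT
# measures (earlier calls count towards the warm-up).
# ---------------------------------------------------------------------------
def example_usage () :
    for unused in range ( MEASURES_MIN_COUNT + 10 ) :
        begin ( "frame" )
        begin ( "logic" )
        # ... update game state here ...
        end ( "logic" )
        begin ( "render" )
        # ... draw here ...
        end ( "render" )
        end ( "frame" )
    stats ()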
| gpl-3.0 | 8,399,911,232,193,724,000 | 31.841772 | 99 | 0.56639 | false |