| repo_name (string, 5–92 chars) | path (string, 4–232 chars) | copies (19 classes) | size (string, 4–7 chars) | content (string, 721–1.04M chars) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
mhri/mhri | motion_renderer/src/fake_renderer.py | 1 | 5896 | #!/usr/bin/env python
#-*- encoding: utf8 -*-
import json
import rospy
import actionlib
import random
import re
from mhri_msgs.msg import RenderItemAction, RenderItemResult, RenderItemFeedback
from mhri_msgs.srv import GetInstalledGestures, GetInstalledGesturesResponse
class FakeMotionRender:
def __init__(self):
rospy.init_node('fake_renderer', anonymous=True)
try:
topic_name = rospy.get_param('~action_name')
        except KeyError:
            print('[ERROR] Missing required parameter (~action_name)...')
            quit()
if 'render_gesture' in rospy.get_name():
self.GetInstalledGesturesService = rospy.Service(
"get_installed_gestures",
GetInstalledGestures,
self.handle_get_installed_gestures
)
self.motion_list = {
'neutral': ['neutral_motion1'],
'encourge': ['encourge_motion1'],
'attention': ['attention_motion1'],
'consolation': ['consolation_motion1'],
'greeting': ['greeting_motion1'],
'waiting': ['waiting_motion1'],
'advice': ['advice_motion1'],
'praise': ['praise_motion1'],
'command': ['command_motion1'],
}
self.server = actionlib.SimpleActionServer(
topic_name, RenderItemAction, self.execute_callback, False)
self.server.start()
rospy.loginfo('[%s] initialized...' % rospy.get_name())
rospy.spin()
def handle_get_installed_gestures(self, req):
result = json.dumps(self.motion_list)
return GetInstalledGesturesResponse(result)
def execute_callback(self, goal):
rospy.loginfo('\033[95m%s\033[0m rendering requested...' % rospy.get_name())
result = RenderItemResult()
feedback = RenderItemFeedback()
success = True
loop_count = 0
if 'render_gesture' in rospy.get_name():
(gesture_category, gesture_item) = goal.data.split('=')
if gesture_category == 'pointing':
parse_data = json.loads(gesture_item)
rospy.loginfo('\033[94m[%s]\033[0m rendering pointing to xyz:%s, frame_id [%s]...'%(rospy.get_name(),
parse_data['xyz'], parse_data['frame_id']))
elif gesture_category == 'gesture':
(cmd, item_name) = gesture_item.split(':')
if cmd == 'tag':
match = re.search(r'\[(.+?)\]', item_name)
if match:
item_name = item_name.replace(match.group(0), '')
emotion = match.group(1)
                        # Prefer an emotion-specific nested entry if one
                        # exists; the KeyError/TypeError handler below falls
                        # back to the flat per-gesture motion list.
                        try:
rospy.loginfo('\033[94m[%s]\033[0m rendering gesture cmd [%s], name [%s]...'%(rospy.get_name(),
cmd,
self.motion_list[item_name][emotion][random.randint(0, len(self.motion_list[item_name]) - 1)]))
except (KeyError, TypeError):
rospy.loginfo('\033[94m[%s]\033[0m rendering gesture cmd [%s], name [%s]...'%(rospy.get_name(),
cmd,
self.motion_list[item_name][random.randint(0, len(self.motion_list[item_name]) - 1)]))
else:
try:
rospy.loginfo('\033[94m[%s]\033[0m rendering gesture cmd [%s], name [%s]...'%(rospy.get_name(),
cmd,
self.motion_list[item_name][random.randint(0, len(self.motion_list[item_name]) - 1)]))
except KeyError:
rospy.logwarn('\033[94m[%s]\033[0m rendering gesture cmd [%s], name [%s]...'%(rospy.get_name(),
cmd,
self.motion_list['neutral'][random.randint(0, len(self.motion_list['neutral']) - 1)]))
elif cmd == 'play':
find_result = False
for k, v in self.motion_list.items():
if item_name in v:
find_result = True
if find_result:
rospy.loginfo('\033[94m[%s]\033[0m rendering gesture cmd [%s], name [%s]...'%(rospy.get_name(),
cmd, item_name))
else:
rospy.logwarn('\033[94m[%s]\033[0m rendering gesture cmd [%s], name [%s]...'%(rospy.get_name(),
cmd,
self.motion_list['neutral'][random.randint(0, len(self.motion_list['neutral']) - 1)]))
loop_count = 40
if 'render_speech' in rospy.get_name():
rospy.loginfo('\033[94m[%s]\033[0m rendering speech [%s]...'%(rospy.get_name(), goal.data))
loop_count = 40
if 'render_facial_expression' in rospy.get_name():
rospy.loginfo('\033[94m[%s]\033[0m rendering expression [%s]...'%(rospy.get_name(), goal.data))
loop_count = 5
while not rospy.is_shutdown():
if self.server.is_preempt_requested():
self.server.set_preempted()
success = False
break
feedback.is_rendering = True
self.server.publish_feedback(feedback)
rospy.sleep(0.1)
loop_count = loop_count - 1
if loop_count == 0:
break
if success:
result.result = True
self.server.set_succeeded(result)
rospy.loginfo('\033[95m%s\033[0m rendering completed...' % rospy.get_name())
else:
rospy.loginfo('\033[95m%s\033[0m rendering canceled...' % rospy.get_name())
if __name__ == '__main__':
m = FakeMotionRender()
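# --- Usage sketch (our addition, not part of the original file) ---
# A minimal action client for this fake renderer. We assume the server node
# was launched with ~action_name set to 'render_gesture' and that the
# generated RenderItemGoal carries its payload in a single string field
# `data`, as the callback above implies; both are assumptions, not facts
# confirmed by the original source.
#
#   import rospy
#   import actionlib
#   from mhri_msgs.msg import RenderItemAction, RenderItemGoal
#
#   rospy.init_node('fake_renderer_client')
#   client = actionlib.SimpleActionClient('render_gesture', RenderItemAction)
#   client.wait_for_server()
#   client.send_goal(RenderItemGoal(data='gesture=play:greeting_motion1'))
#   client.wait_for_result()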
| apache-2.0 | -2,385,697,753,688,501,000 | 40.230769 | 127 | 0.501696 | false |
google/tf-quant-finance | tf_quant_finance/experimental/local_volatility/local_volatility_model.py | 1 | 18620 | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Local Volatility Model."""
import functools
import tensorflow.compat.v2 as tf
from tf_quant_finance import black_scholes
from tf_quant_finance import datetime
from tf_quant_finance import math
from tf_quant_finance.experimental.pricing_platform.framework.market_data import volatility_surface
from tf_quant_finance.models import generic_ito_process
interpolation_2d = math.interpolation.interpolation_2d
def _dupire_local_volatility(time, spot_price, initial_spot_price,
implied_volatility_surface, discount_factor_fn,
dividend_yield):
"""Constructs local volatility function using Dupire's formula.
Args:
time: A real `Tensor` of shape compatible with `spot_price` specifying the
times at which local volatility function is computed.
spot_price: A real `Tensor` specifying the underlying price at which local
volatility function is computed.
initial_spot_price: A real `Tensor` of shape compatible with `spot_price`
specifying the underlying spot price at t=0.
implied_volatility_surface: A Python callable which implements the
interpolation of market implied volatilities. The callable should have the
interface `implied_volatility_surface(strike, expiry_times)` which takes
real `Tensor`s corresponding to option strikes and time to expiry and
returns a real `Tensor` containing the corresponding market implied
volatility. The shape of `strike` is `(n,dim)` where `dim` is the
      dimensionality of the local volatility process and `expiry_times` is a
      scalar `Tensor`. The output from the callable is a `Tensor` of shape
      `(n, dim)` containing the interpolated implied volatilities.
discount_factor_fn: A python callable accepting one real `Tensor` argument
time t. It should return a `Tensor` specifying the discount factor to time
t.
dividend_yield: A real `Tensor` of shape compatible with `spot_price`
specifying the (continuously compounded) dividend yield.
Returns:
A real `Tensor` of same shape as `spot_price` containing the local
    volatility computed at `(spot_price, time)` using Dupire's
construction of local volatility.
"""
dtype = time.dtype
risk_free_rate_fn = _get_risk_free_rate_from_discount_factor(
discount_factor_fn)
risk_free_rate = tf.convert_to_tensor(risk_free_rate_fn(time), dtype=dtype)
def _option_price(expiry_time, strike):
discount_factors = tf.convert_to_tensor(
discount_factor_fn(expiry_time), dtype=dtype)
vols = implied_volatility_surface(strike=strike, expiry_times=expiry_time)
c_k_t = black_scholes.option_price(
volatilities=vols,
strikes=strike,
expiries=expiry_time,
spots=initial_spot_price,
dividend_rates=dividend_yield,
discount_factors=discount_factors,
dtype=dtype)
return c_k_t
dcdk_fn = lambda x: _option_price(time, x)
dcdt_fn = lambda x: _option_price(x, spot_price)
d2cdk2_fn = lambda x: math.fwd_gradient(dcdk_fn, x)
# TODO(b/173568116): Replace gradients of call prices with imp vol gradients.
numerator = (
math.fwd_gradient(dcdt_fn, time) + (risk_free_rate - dividend_yield) *
spot_price * math.fwd_gradient(dcdk_fn, spot_price) +
dividend_yield * _option_price(time, spot_price))
denominator = math.fwd_gradient(d2cdk2_fn, spot_price) * spot_price**2
# we use relu for safety so that we do not take the square root of
# negative real `Tensors`.
local_volatility_squared = tf.nn.relu(
2 * tf.math.divide_no_nan(numerator, denominator))
return tf.math.sqrt(local_volatility_squared)
class LocalVolatilityModel(generic_ito_process.GenericItoProcess):
r"""Local volatility model for smile modeling.
Local volatility (LV) model specifies that the dynamics of an asset is
governed by the following stochastic differential equation:
```None
dS(t) / S(t) = mu(t, S(t)) dt + sigma(t, S(t)) * dW(t)
```
where `mu(t, S(t))` is the drift and `sigma(t, S(t))` is the instantaneous
volatility. The local volatility function `sigma(t, S(t))` is state dependent
and is computed by calibrating against a given implied volatility surface
  `sigma_iv(T, K)` using Dupire's formula [1]:
```
sigma(T,K)^2 = 2 * (dC(T,K)/dT + (r - q)K dC(T,K)/dK + qC(T,K)) /
(K^2 d2C(T,K)/dK2)
```
where the derivatives above are the partial derivatives. The LV model provides
a flexible framework to model any (arbitrage free) volatility surface.
#### Example: Simulation of local volatility process.
```python
import numpy as np
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
dtype = tf.float64
dim = 2
year = dim * [[2021, 2022]]
month = dim * [[1, 1]]
day = dim * [[1, 1]]
expiries = tff.datetime.dates_from_year_month_day(year, month, day)
valuation_date = [(2020, 1, 1)]
expiry_times = tff.datetime.daycount_actual_365_fixed(
start_date=valuation_date, end_date=expiries, dtype=dtype)
strikes = dim * [[[0.1, 0.9, 1.0, 1.1, 3], [0.1, 0.9, 1.0, 1.1, 3]]]
iv = dim * [[[0.135, 0.13, 0.1, 0.11, 0.13],
[0.135, 0.13, 0.1, 0.11, 0.13]]]
spot = dim * [1.0]
risk_free_rate = [0.02]
r = tf.convert_to_tensor(risk_free_rate, dtype=dtype)
df = lambda t: tf.math.exp(-r * t)
lv = tff.models.LocalVolatilityModel.from_market_data(
      dim, valuation_date, expiries, strikes, iv, spot, df, [0.0], dtype=dtype)
num_samples = 10000
paths = lv.sample_paths(
[1.0, 1.5, 2.0],
num_samples=num_samples,
initial_state=spot,
time_step=0.1,
random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
seed=[1, 2])
  # paths.shape = (10000, 3, 2)
  ```
#### References:
[1]: Iain J. Clark. Foreign exchange option pricing - A Practitioner's
guide. Chapter 5, Section 5.3.2. 2011.
"""
def __init__(self,
dim,
risk_free_rate=None,
dividend_yield=None,
local_volatility_fn=None,
corr_matrix=None,
dtype=None,
name=None):
"""Initializes the Local volatility model.
Args:
dim: A Python scalar which corresponds to the number of underlying assets
comprising the model.
risk_free_rate: One of the following: (a) An optional real `Tensor` of
shape compatible with `[dim]` specifying the (continuously compounded)
risk free interest rate. (b) A python callable accepting one real
`Tensor` argument time t returning a `Tensor` of shape compatible with
`[dim]`. If the underlying is an FX rate, then use this input to specify
the domestic interest rate.
Default value: `None` in which case the input is set to Zero.
dividend_yield: A real `Tensor` of shape compatible with `spot_price`
        specifying the (continuously compounded) dividend yield.
If the underlying is an FX rate, then use this input to specify the
foreign interest rate.
Default value: `None` in which case the input is set to Zero.
local_volatility_fn: A Python callable which returns instantaneous
volatility as a function of state and time. The function must accept a
scalar `Tensor` corresponding to time 't' and a real `Tensor` of shape
`[num_samples, dim]` corresponding to the underlying price (S) as inputs
and return a real `Tensor` of shape `[num_samples, dim]` containing the
local volatility computed at (S,t).
corr_matrix: A `Tensor` of shape `[dim, dim]` and the same `dtype` as
`risk_free_rate`. Corresponds to the instantaneous correlation between
the underlying assets.
dtype: The default dtype to use when converting values to `Tensor`s.
Default value: `None` which means that default dtypes inferred by
TensorFlow are used.
name: Python string. The name to give to the ops created by this class.
Default value: `None` which maps to the default name
`local_volatility_model`.
"""
self._name = name or "local_volatility_model"
self._local_volatility_fn = local_volatility_fn
with tf.name_scope(self._name):
self._dtype = dtype
risk_free_rate = risk_free_rate or [0.0]
dividend_yield = dividend_yield or [0.0]
self._domestic_rate = _convert_to_tensor_fn(risk_free_rate, dtype,
"risk_free_rate")
self._foreign_rate = _convert_to_tensor_fn(dividend_yield, dtype,
"dividend_yield")
corr_matrix = corr_matrix or tf.eye(dim, dim, dtype=self._dtype)
self._rho = tf.convert_to_tensor(
corr_matrix, dtype=self._dtype, name="rho")
self._sqrt_rho = tf.linalg.cholesky(self._rho)
# TODO(b/173286140): Simulate using X(t)=log(S(t))
def _vol_fn(t, state):
"""Volatility function of LV model."""
lv = self._local_volatility_fn(t, state)
diffusion = tf.expand_dims(state * lv, axis=-1)
return diffusion * self._sqrt_rho
# Drift function
def _drift_fn(t, state):
"""Drift function of LV model."""
domestic_rate = self._domestic_rate(t)
foreign_rate = self._foreign_rate(t)
return (domestic_rate - foreign_rate) * state
super(LocalVolatilityModel, self).__init__(
dim, _drift_fn, _vol_fn, dtype, name)
def local_volatility_fn(self):
"""Local volatility function."""
return self._local_volatility_fn
@classmethod
def from_market_data(cls,
dim,
valuation_date,
expiry_dates,
strikes,
implied_volatilities,
spot,
discount_factor_fn,
dividend_yield=None,
dtype=None,
name=None):
"""Creates a `LocalVolatilityModel` from market data.
Args:
dim: A Python scalar which corresponds to the number of underlying assets
comprising the model.
valuation_date: A `DateTensor` specifying the valuation (or settlement)
date for the market data.
expiry_dates: A `DateTensor` of shape `(dim, num_expiries)` containing the
expiry dates on which the implied volatilities are specified.
strikes: A `Tensor` of real dtype and shape `(dim, num_expiries,
num_strikes)`specifying the strike prices at which implied volatilities
are specified.
implied_volatilities: A `Tensor` of real dtype and shape `(dim,
num_expiries, num_strikes)` specifying the implied volatilities.
spot: A real `Tensor` of shape `(dim,)` specifying the underlying spot
price on the valuation date.
discount_factor_fn: A python callable accepting one real `Tensor` argument
time t. It should return a `Tensor` specifying the discount factor to
time t.
dividend_yield: A real `Tensor` of shape compatible with `spot_price`
        specifying the (continuously compounded) dividend yield. If the
underlying is an FX rate, then use this input to specify the foreign
interest rate.
Default value: `None` in which case the input is set to Zero.
dtype: The default dtype to use when converting values to `Tensor`s.
Default value: `None` which means that default dtypes inferred by
TensorFlow are used.
name: Python string. The name to give to the ops created by this class.
Default value: `None` which maps to the default name `from_market_data`.
Returns:
An instance of `LocalVolatilityModel` constructed using the input data.
"""
name = name or "from_market_data"
with tf.name_scope(name):
spot = tf.convert_to_tensor(spot, dtype=dtype)
dtype = dtype or spot.dtype
dividend_yield = dividend_yield or [0.0]
dividend_yield = tf.convert_to_tensor(dividend_yield, dtype=dtype)
risk_free_rate_fn = _get_risk_free_rate_from_discount_factor(
discount_factor_fn)
valuation_date = datetime.convert_to_date_tensor(valuation_date)
expiry_dates = datetime.convert_to_date_tensor(expiry_dates)
expiry_times = (
tf.cast(valuation_date.days_until(expiry_dates), dtype=dtype) / 365.0)
strikes = tf.convert_to_tensor(strikes, dtype=dtype)
implied_volatilities = tf.convert_to_tensor(
implied_volatilities, dtype=dtype)
def _log_forward_moneyness(times, strikes):
# log_fwd_moneyness = log(strike/(spot*exp((r-d)*times)))
risk_free_rate = tf.squeeze(risk_free_rate_fn(times))
log_forward_moneyness = tf.math.log(
tf.math.divide_no_nan(strikes, tf.reshape(
spot, [dim, 1, 1]))) - tf.expand_dims(
(risk_free_rate - dividend_yield) * times, axis=-1)
return log_forward_moneyness
interpolator = interpolation_2d.Interpolation2D(
expiry_times,
_log_forward_moneyness(expiry_times, strikes),
implied_volatilities,
dtype=dtype)
def _log_moneyness_2d_interpolator(times, strikes):
risk_free_rate = risk_free_rate_fn(times)
log_forward_moneyness = tf.math.log(
strikes / spot) - (risk_free_rate - dividend_yield) * times
moneyness_transposed = tf.transpose(log_forward_moneyness)
times = tf.broadcast_to(times, moneyness_transposed.shape)
return tf.transpose(
interpolator.interpolate(times, moneyness_transposed))
vs = volatility_surface.VolatilitySurface(
valuation_date,
expiry_dates,
strikes,
implied_volatilities,
interpolator=_log_moneyness_2d_interpolator,
dtype=dtype)
local_volatility_fn = functools.partial(
_dupire_local_volatility,
initial_spot_price=spot,
discount_factor_fn=discount_factor_fn,
dividend_yield=dividend_yield,
implied_volatility_surface=vs.volatility)
return LocalVolatilityModel(
dim,
risk_free_rate=risk_free_rate_fn,
dividend_yield=dividend_yield,
local_volatility_fn=local_volatility_fn,
dtype=dtype)
@classmethod
def from_volatility_surface(cls,
dim,
spot,
implied_volatility_surface,
discount_factor_fn,
dividend_yield=None,
dtype=None,
name=None):
"""Creates a `LocalVolatilityModel` from implied volatility data.
Args:
dim: A Python scalar which corresponds to the number of underlying assets
comprising the model.
spot: A real `Tensor` of shape `(dim,)` specifying the underlying spot
price on the valuation date.
implied_volatility_surface: Either an instance of
`processed_market_data.VolatilitySurface` or a Python object containing
the implied volatility market data. If the input is a Python object,
then the object must implement a function `volatility(strike,
expiry_times)` which takes real `Tensor`s corresponding to option
strikes and time to expiry and returns a real `Tensor` containing the
corresponding market implied volatility.
The shape of `strike` is `(n,dim)` where `dim` is the dimensionality of
        the local volatility process and `expiry_times` is a scalar `Tensor`.
        The output from the callable is a `Tensor` of shape `(n, dim)`
        containing the interpolated implied volatilities.
discount_factor_fn: A python callable accepting one real `Tensor` argument
time t. It should return a `Tensor` specifying the discount factor to
time t.
dividend_yield: A real `Tensor` of shape compatible with `spot_price`
        specifying the (continuously compounded) dividend yield.
If the underlying is an FX rate, then use this input to specify the
foreign interest rate.
Default value: `None` in which case the input is set to Zero.
dtype: The default dtype to use when converting values to `Tensor`s.
Default value: `None` which means that default dtypes inferred by
TensorFlow are used.
name: Python string. The name to give to the ops created by this class.
Default value: `None` which maps to the default name
`from_volatility_surface`.
Returns:
An instance of `LocalVolatilityModel` constructed using the input data.
"""
name = name or "from_volatility_surface"
with tf.name_scope(name):
dividend_yield = dividend_yield or [0.0]
dividend_yield = tf.convert_to_tensor(dividend_yield, dtype=dtype)
risk_free_rate_fn = _get_risk_free_rate_from_discount_factor(
discount_factor_fn)
local_volatility_fn = functools.partial(
_dupire_local_volatility,
initial_spot_price=spot,
discount_factor_fn=discount_factor_fn,
dividend_yield=dividend_yield,
implied_volatility_surface=implied_volatility_surface.volatility)
return LocalVolatilityModel(
dim,
risk_free_rate=risk_free_rate_fn,
dividend_yield=dividend_yield,
local_volatility_fn=local_volatility_fn,
dtype=dtype)
def _convert_to_tensor_fn(x, dtype, name):
if callable(x):
return x
else:
return lambda t: tf.convert_to_tensor(x, dtype, name=name)
def _get_risk_free_rate_from_discount_factor(discount_factor_fn):
"""Returns r(t) given a discount factor function."""
def risk_free_rate_fn(t):
logdf = lambda x: -tf.math.log(discount_factor_fn(x))
return math.fwd_gradient(
logdf, t, unconnected_gradients=tf.UnconnectedGradients.ZERO)
return risk_free_rate_fn
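# Worked sketch (our addition): for a flat continuously-compounded rate r,
# discount_factor_fn(t) = exp(-r * t), so -log(df(t)) = r * t and its forward
# gradient with respect to t recovers r(t) = r. A quick check, assuming tf
# and float64 inputs:
#
#   df = lambda t: tf.math.exp(-0.02 * t)
#   rate_fn = _get_risk_free_rate_from_discount_factor(df)
#   rate_fn(tf.constant([1.0, 2.0], dtype=tf.float64))  # ~[0.02, 0.02]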
| apache-2.0 | 8,897,452,932,536,462,000 | 41.804598 | 99 | 0.65247 | false |
jgroszko/django-albums | albums/models.py | 1 | 8578 | import os.path
import datetime
from django.conf import settings
from django.db import models
from django.db.models import signals
from djangoratings.fields import RatingField
from appearances.models import Appearance
from favorites.models import Favorite
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.utils.translation import ugettext_lazy as _
import tagging
from tagging_ext.models import TagAutocompleteField
from django.conf import settings
import albums.tasks as tasks
class AlbumItem(models.Model):
title = models.CharField(max_length=1000)
description = models.TextField(null=True, blank=True)
created = models.DateTimeField()
edited = models.DateTimeField()
def __unicode__(self):
return self.title
def can_edit(self, user):
if user.is_staff:
return True
elif(Album.objects.filter(id=self.id).count() == 1):
return self.album.is_owner(user)
else:
return self.parent.album.is_owner(user)
def albumitem_save(sender, instance, **kwargs):
if(instance.id is None):
instance.created = datetime.datetime.now()
instance.edited = datetime.datetime.now()
class AlbumConvertableItem(AlbumItem):
slug = models.SlugField(unique=False,
verbose_name='ID', help_text=_('A unique id for this item\'s URL. Only alphanumeric characters allowed.'))
def get_directory(self):
''' /albums/##/## '''
return os.path.join('albums', str(self.parent.id), str(self.id))
def get_preview_path(self, filename):
''' /site_media/albums/##/## '''
result = os.path.join(settings.ALBUMS_UPLOAD_TO, self.get_directory(), filename)
os.mkdir(os.path.dirname(result))
return result
preview = models.FileField(upload_to=get_preview_path, null=True, verbose_name="File")
preview_ready = models.BooleanField(default=False, null=False)
submitter = models.ForeignKey(User, blank=False)
parent = models.ForeignKey('Album', related_name='children', null=True)
rating = RatingField(range=5, can_change_vote=True)
tags = TagAutocompleteField(help_text=_("Comma or space separated"))
appearances = generic.GenericRelation(Appearance)
favorites = generic.GenericRelation(Favorite)
allow_ratings = models.BooleanField(default=True)
allow_comments = models.BooleanField(default=True)
class Meta:
unique_together = (('slug', 'parent'),)
def _resized_path(self, size):
d, f = os.path.split(self.preview.name)
return os.path.join(d, 'resized', str(size), f)
def thumbnail(self, size=80):
try:
resized_path = self._resized_path(size)
except AttributeError:
return os.path.join(settings.MEDIA_URL,
"failed.png")
if(self.preview_ready):
return "%s://%s/%s" % (settings.DEFAULT_HTTP_PROTOCOL,
settings.ALBUMS_AWS_CF_DOWNLOAD_DOMAIN,
resized_path)
else:
return os.path.join(settings.MEDIA_URL,
"waiting.png")
def get_next(self):
try:
return AlbumConvertableItem.objects.filter(
parent=self.parent,created__gt=self.created
).order_by(
'created'
)[0:1].get()
except AlbumConvertableItem.DoesNotExist:
return None
def get_previous(self):
try:
return AlbumConvertableItem.objects.filter(
parent=self.parent,created__lt=self.created
).order_by(
'-created'
)[0:1].get()
except AlbumConvertableItem.DoesNotExist:
return None
@models.permalink
def get_absolute_url(self):
return ('albums.views.albumitem', [self.parent.slug, self.slug])
def delete(self):
# Make sure the album doesn't get blasted if we're the highlight
if(self.parent.album.highlight is not None and
self.parent.album.highlight.id == self.id):
self.parent.highlight = self.get_next()
self.parent.save()
tasks.albumitem_delete_directory.delay(self.get_directory())
return super(AlbumConvertableItem, self).delete()
def __unicode__(self):
return self.title
def get_tags(self):
return tagging.models.Tag.objects.get_for_object(self)
tagging.register(AlbumConvertableItem, tag_descriptor_attr='tagss')
class Image(AlbumConvertableItem):
pass
signals.pre_save.connect(albumitem_save, sender=Image)
def generate_image_thumbs(sender, instance, **kwargs):
if(kwargs['created']):
tasks.albumitem_generate_thumbnails.delay(instance, settings.ALBUMS_THUMBSIZES)
signals.post_save.connect(generate_image_thumbs, sender=Image)
class Video(AlbumConvertableItem):
def get_video_path(self, filename):
return os.path.join(self.get_directory(), filename)
video = models.FileField(upload_to=get_video_path)
video_ready = models.BooleanField(default=False, null=False)
def converted_path(self, format='mp4', extension='mp4', add_extension=True):
d, f = os.path.split(self.video.name)
f = os.path.splitext(os.path.basename(f))[0]
if(add_extension):
f += "." + extension
return os.path.join(d, format, f)
def converted_format(self, extension):
return self.video.storage.exists(self.converted_path(extension))
duration = models.IntegerField(null=True)
def duration_str(self):
result = str(datetime.timedelta(seconds=self.duration))
if result[:3] == "0:0":
return result[3:]
if result[:2] == "0:":
return result[2:]
else:
return result
signals.pre_save.connect(albumitem_save, sender=Video)
class AlbumManager(models.Manager):
def browse(self):
return self.filter(is_profile=False)
def make_slug_unique(self, slug):
if(self.filter(slug=slug).count() == 0):
return slug
else:
nums = [int(x.slug[len(slug)+1:]) for x in self.filter(slug__regex="^%s-\d+$" % slug)]
if(len(nums) == 0):
return "%s-0" % slug
else:
nums.sort()
return "%s-%d" % (slug, nums[-1]+1)
def create_profile_album(self, user):
profile_album = Album()
profile_album.title = "%s's Profile Pictures" % user.username
profile_album.is_profile = True
profile_album.slug = self.make_slug_unique(user.username)
profile_album.save()
profile_album.owners.add(user)
profile_album.save()
return profile_album
class Album(AlbumItem):
slug = models.SlugField(unique=True,
verbose_name='ID', help_text='A unique id for this item\'s URL. Only alphanumeric characters allowed.')
highlight = models.ForeignKey(AlbumConvertableItem, related_name='highlight_parent', null=True, blank=True)
owners = models.ManyToManyField(User, blank=False)
is_profile = models.BooleanField(blank=False, default=False)
def is_owner(self, user):
try:
self.owners.get(id=user.id)
return True
except User.DoesNotExist:
return False
def can_delete(self, user):
return ((not self.is_profile) and
(user.is_staff or
self.is_owner(user)))
def can_add_video(self, user):
return ((not self.is_profile) and
(user.is_staff or
self.is_owner(user)))
@models.permalink
def get_absolute_url(self):
return ('albums.views.album', [self.slug])
def thumbnail(self, size=80):
if self.highlight is not None:
return self.highlight.thumbnail(size)
else:
return None
def preview_ready(self):
return (self.highlight is not None and
self.highlight.preview_ready)
def delete(self):
for child in self.children.all():
child.delete()
super(Album, self).delete()
objects = AlbumManager()
def bump_highlight(self):
if self.highlight:
self.highlight = self.highlight.get_next()
self.save()
else:
self.highlight = self.children.all()[0]
self.save()
signals.pre_save.connect(albumitem_save, sender=Album)
| gpl-3.0 | 8,582,124,832,705,695,000 | 31.24812 | 134 | 0.619025 | false |
googleads/google-ads-python | google/ads/googleads/v8/services/types/customer_user_access_invitation_service.py | 1 | 3729 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.resources.types import (
customer_user_access_invitation,
)
__protobuf__ = proto.module(
package="google.ads.googleads.v8.services",
marshal="google.ads.googleads.v8",
manifest={
"GetCustomerUserAccessInvitationRequest",
"MutateCustomerUserAccessInvitationRequest",
"CustomerUserAccessInvitationOperation",
"MutateCustomerUserAccessInvitationResponse",
"MutateCustomerUserAccessInvitationResult",
},
)
class GetCustomerUserAccessInvitationRequest(proto.Message):
r"""Request message for
[CustomerUserAccessInvitation.GetCustomerUserAccessInvitation][]
Attributes:
resource_name (str):
Required. Resource name of the access
invitation.
"""
resource_name = proto.Field(proto.STRING, number=1,)
class MutateCustomerUserAccessInvitationRequest(proto.Message):
r"""Request message for
[CustomerUserAccessInvitation.MutateCustomerUserAccessInvitation][]
Attributes:
customer_id (str):
Required. The ID of the customer whose access
invitation is being modified.
operation (google.ads.googleads.v8.services.types.CustomerUserAccessInvitationOperation):
Required. The operation to perform on the
access invitation
"""
customer_id = proto.Field(proto.STRING, number=1,)
operation = proto.Field(
proto.MESSAGE,
number=2,
message="CustomerUserAccessInvitationOperation",
)
class CustomerUserAccessInvitationOperation(proto.Message):
r"""A single operation (create or remove) on customer user access
invitation.
Attributes:
create (google.ads.googleads.v8.resources.types.CustomerUserAccessInvitation):
Create operation: No resource name is
expected for the new access invitation.
remove (str):
Remove operation: A resource name for the revoke invitation
is expected, in this format:
``customers/{customer_id}/customerUserAccessInvitations/{invitation_id}``
"""
create = proto.Field(
proto.MESSAGE,
number=1,
oneof="operation",
message=customer_user_access_invitation.CustomerUserAccessInvitation,
)
remove = proto.Field(proto.STRING, number=2, oneof="operation",)
class MutateCustomerUserAccessInvitationResponse(proto.Message):
r"""Response message for access invitation mutate.
Attributes:
result (google.ads.googleads.v8.services.types.MutateCustomerUserAccessInvitationResult):
Result for the mutate.
"""
result = proto.Field(
proto.MESSAGE,
number=1,
message="MutateCustomerUserAccessInvitationResult",
)
class MutateCustomerUserAccessInvitationResult(proto.Message):
r"""The result for the access invitation mutate.
Attributes:
resource_name (str):
Returned for successful operations.
"""
resource_name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -2,776,112,629,645,454,000 | 30.601695 | 97 | 0.698847 | false |
google-research/google-research | cold_posterior_flax/cifar10/models/conv_layers.py | 1 | 17044 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Variants of Conv Modules."""
from flax import nn
from flax.nn import initializers
from jax import lax
import jax.numpy as jnp
import numpy as onp
class ConvFixedScale(nn.Module):
"""Convolutional layer that uses Weight Norm [1] with the scale fixed at 1.
[1] Salimans, T., & Kingma, D. P. (2016, February 25). Weight Normalization:
A Simple Reparameterization to Accelerate Training of Deep Neural Networks.
arXiv [cs.LG]. http://arxiv.org/abs/1602.07868.
"""
def apply(self,
inputs,
features,
kernel_size,
strides=None,
padding='SAME',
lhs_dilation=None,
rhs_dilation=None,
feature_group_count=1,
bias=True,
dtype=jnp.float32,
precision=None,
kernel_init=nn.linear.default_kernel_init,
bias_init=initializers.zeros,
compensate_padding=True):
"""Applies a convolution to the inputs.
Args:
inputs: input data with dimensions (batch, spatial_dims..., features).
features: number of convolution filters.
kernel_size: shape of the convolutional kernel.
strides: a sequence of `n` integers, representing the inter-window
strides.
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
of `n` `(low, high)` integer pairs that give the padding to apply before
and after each spatial dimension.
lhs_dilation: `None`, or a sequence of `n` integers, giving the dilation
factor to apply in each spatial dimension of `lhs`. LHS dilation is also
known as transposed convolution.
rhs_dilation: `None`, or a sequence of `n` integers, giving the dilation
factor to apply in each spatial dimension of `rhs`. RHS dilation is also
known as atrous convolution.
feature_group_count: integer, default 1. If specified divides the input
features into groups.
bias: whether to add a bias to the output (default: True).
dtype: the dtype of the computation (default: float32).
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
kernel_init: initializer for the convolutional kernel.
bias_init: initializer for the bias.
compensate_padding: Renormalize output based on introduced zero padding.
Returns:
The convolved data.
"""
inputs = jnp.asarray(inputs, dtype)
if strides is None:
strides = (1,) * (inputs.ndim - 2)
in_features = inputs.shape[-1]
assert in_features % feature_group_count == 0
kernel_shape = kernel_size + (in_features // feature_group_count, features)
kernel_unnorm = self.param('kernel', kernel_shape, kernel_init)
kernel_unnorm = jnp.asarray(kernel_unnorm, dtype)
kernel_unnorm = jnp.reshape(
kernel_unnorm,
(-1, features),
)
kernel = kernel_unnorm / (
jnp.linalg.norm(kernel_unnorm, axis=0, keepdims=True) + 1e-5)
kernel = jnp.reshape(kernel, kernel_shape)
# pylint: disable=protected-access
dimension_numbers = nn.linear._conv_dimension_numbers(inputs.shape)
# pylint: enable=protected-access
y = lax.conv_general_dilated(
inputs,
kernel,
strides,
padding,
lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation,
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
precision=precision)
if bias:
bias = self.param('bias', (features,), bias_init)
bias = jnp.asarray(bias, dtype)
y = y + bias
if compensate_padding:
y = padding_compensate(inputs, kernel_size, lhs_dilation, padding,
precision, rhs_dilation, strides, y)
return y
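# Illustrative check (our addition): the weight-norm step above is just an L2
# normalization of each output filter after flattening, so every column of the
# flattened kernel ends up with (approximately) unit norm:
#
#   k = jnp.ones((3, 3, 8, 16))                # (kh, kw, in, out)
#   flat = k.reshape(-1, 16)
#   normed = flat / (jnp.linalg.norm(flat, axis=0, keepdims=True) + 1e-5)
#   jnp.linalg.norm(normed, axis=0)            # ~ones of shape (16,)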
class ConvLearnedScale(nn.Module):
"""Convolutional layer that uses Weight Norm [1].
[1] Salimans, T., & Kingma, D. P. (2016, February 25). Weight Normalization:
A Simple Reparameterization to Accelerate Training of Deep Neural Networks.
arXiv [cs.LG]. http://arxiv.org/abs/1602.07868.
Convolution Module wrapping lax.conv_general_dilated.
"""
def apply(self,
inputs,
features,
kernel_size,
strides=None,
padding='SAME',
lhs_dilation=None,
rhs_dilation=None,
feature_group_count=1,
bias=True,
dtype=jnp.float32,
precision=None,
kernel_init=nn.linear.default_kernel_init,
bias_init=initializers.zeros,
scale_init=initializers.ones,
compensate_padding=True):
"""Applies a convolution to the inputs.
Args:
inputs: input data with dimensions (batch, spatial_dims..., features).
features: number of convolution filters.
kernel_size: shape of the convolutional kernel.
strides: a sequence of `n` integers, representing the inter-window
strides.
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
of `n` `(low, high)` integer pairs that give the padding to apply before
and after each spatial dimension.
lhs_dilation: `None`, or a sequence of `n` integers, giving the dilation
factor to apply in each spatial dimension of `lhs`. LHS dilation is also
known as transposed convolution.
rhs_dilation: `None`, or a sequence of `n` integers, giving the dilation
factor to apply in each spatial dimension of `rhs`. RHS dilation is also
known as atrous convolution.
feature_group_count: integer, default 1. If specified divides the input
features into groups.
bias: whether to add a bias to the output (default: True).
dtype: the dtype of the computation (default: float32).
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
kernel_init: initializer for the convolutional kernel.
bias_init: initializer for the bias.
scale_init: initializer for the scale.
compensate_padding: Renormalize output based on introduced zero padding.
Returns:
The convolved data.
"""
inputs = jnp.asarray(inputs, dtype)
if strides is None:
strides = (1,) * (inputs.ndim - 2)
in_features = inputs.shape[-1]
assert in_features % feature_group_count == 0
kernel_shape = kernel_size + (in_features // feature_group_count, features)
kernel_unnorm = self.param('kernel', kernel_shape, kernel_init)
kernel_unnorm = jnp.asarray(kernel_unnorm, dtype)
kernel_unnorm = jnp.reshape(
kernel_unnorm,
(-1, features),
)
kernel = kernel_unnorm / (
jnp.linalg.norm(kernel_unnorm, axis=0, keepdims=True) + 1e-5)
scale = self.param('scale', (features,), scale_init)
kernel *= scale.reshape((-1, features))
kernel = jnp.reshape(kernel, kernel_shape)
# pylint: disable=protected-access
dimension_numbers = nn.linear._conv_dimension_numbers(inputs.shape)
# pylint: enable=protected-access
y = lax.conv_general_dilated(
inputs,
kernel,
strides,
padding,
lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation,
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
precision=precision)
if bias:
bias = self.param('bias', (features,), bias_init)
bias = jnp.asarray(bias, dtype)
y = y + bias
if compensate_padding:
y = padding_compensate(inputs, kernel_size, lhs_dilation, padding,
precision, rhs_dilation, strides, y)
return y
class Conv(nn.Module):
"""Plain Convolution Module that supports padding compensation."""
def apply(self,
inputs,
features,
kernel_size,
strides=None,
padding='SAME',
input_dilation=None,
kernel_dilation=None,
feature_group_count=1,
bias=True,
dtype=jnp.float32,
precision=None,
kernel_init=nn.linear.default_kernel_init,
bias_init=initializers.zeros,
compensate_padding=True):
"""Applies a convolution to the inputs.
Args:
inputs: input data with dimensions (batch, spatial_dims..., features).
features: number of convolution filters.
kernel_size: shape of the convolutional kernel.
strides: a sequence of `n` integers, representing the inter-window
strides.
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
of `n` `(low, high)` integer pairs that give the padding to apply before
and after each spatial dimension.
input_dilation: `None`, or a sequence of `n` integers, giving the dilation
factor to apply in each spatial dimension of `inputs`. Convolution with
input dilation `d` is equivalent to transposed convolution with stride
`d`.
kernel_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of the convolution
kernel. Convolution with kernel dilation is also known as 'atrous
convolution'.
feature_group_count: integer, default 1. If specified divides the input
features into groups.
bias: whether to add a bias to the output (default: True).
dtype: the dtype of the computation (default: float32).
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
kernel_init: initializer for the convolutional kernel.
bias_init: initializer for the bias.
compensate_padding: Renormalize output based on introduced zero padding.
Returns:
The convolved data.
"""
inputs = jnp.asarray(inputs, dtype)
if strides is None:
strides = (1,) * (inputs.ndim - 2)
in_features = inputs.shape[-1]
assert in_features % feature_group_count == 0
kernel_shape = kernel_size + (in_features // feature_group_count, features)
kernel = self.param('kernel', kernel_shape, kernel_init)
kernel = jnp.asarray(kernel, dtype)
# pylint: disable=protected-access
dimension_numbers = nn.linear._conv_dimension_numbers(inputs.shape)
# pylint: enable=protected-access
y = lax.conv_general_dilated(
inputs,
kernel,
strides,
padding,
lhs_dilation=input_dilation,
rhs_dilation=kernel_dilation,
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
precision=precision)
if bias:
bias = self.param('bias', (features,), bias_init)
bias = jnp.asarray(bias, dtype)
y = y + bias
if compensate_padding:
y = padding_compensate(inputs, kernel_size, input_dilation, padding,
precision, kernel_dilation, strides, y)
return y
def padding_compensate(inputs, kernel_size, lhs_dilation, padding, precision,
rhs_dilation, strides, y):
"""Divide inputs by the expected reduction in std-dev induced by zero padding."""
if padding != 'VALID':
# Figure out input count by conv:
ones = jnp.ones((1, inputs.shape[1], inputs.shape[2], 1))
ones_kernel = jnp.ones(kernel_size + (1, 1))
count = lax.conv_general_dilated(
ones,
ones_kernel,
strides,
padding,
lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation,
# pylint: disable=protected-access
dimension_numbers=nn.linear._conv_dimension_numbers(ones.shape),
# pylint: enable=protected-access
feature_group_count=1,
precision=precision)
var = count / (onp.prod(kernel_size))
var_avg = jnp.mean(var)
std_var = jnp.sqrt(var_avg)
y /= std_var
return y
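# Intuition sketch (our addition): with 'SAME' padding, border outputs see
# fewer real inputs than interior ones, which shrinks their variance. The
# all-ones convolution above counts the real taps; e.g. a 3x3 kernel at an
# image corner sees only 4 of its 9 taps:
#
#   ones = jnp.ones((1, 8, 8, 1))
#   count = lax.conv_general_dilated(
#       ones, jnp.ones((3, 3, 1, 1)), (1, 1), 'SAME',
#       dimension_numbers=('NHWC', 'HWIO', 'NHWC'))
#   # count[0, 0, 0, 0] == 4.0, while interior entries equal 9.0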
class ConvWS(nn.Module):
"""Convolution Module using weight standardization [1].
- [1] Qiao, S., Wang, H., Liu, C., Shen, W., & Yuille, A. (2019, March 25).
Micro-Batch Training with Batch-Channel Normalization and Weight
Standardization. arXiv [cs.CV]. http://arxiv.org/abs/1903.10520.
"""
def apply(self,
inputs,
features,
kernel_size,
strides=None,
padding='SAME',
lhs_dilation=None,
rhs_dilation=None,
feature_group_count=1,
bias=True,
dtype=jnp.float32,
precision=None,
kernel_init=nn.linear.default_kernel_init,
bias_init=initializers.zeros,
kaiming_scaling=True,
compensate_padding=True):
"""Applies a convolution to the inputs.
Args:
inputs: input data with dimensions (batch, spatial_dims..., features).
features: number of convolution filters.
kernel_size: shape of the convolutional kernel.
strides: a sequence of `n` integers, representing the inter-window
strides.
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
of `n` `(low, high)` integer pairs that give the padding to apply before
and after each spatial dimension.
lhs_dilation: `None`, or a sequence of `n` integers, giving the dilation
factor to apply in each spatial dimension of `lhs`. LHS dilation is also
known as transposed convolution.
rhs_dilation: `None`, or a sequence of `n` integers, giving the dilation
factor to apply in each spatial dimension of `rhs`. RHS dilation is also
known as atrous convolution.
feature_group_count: integer, default 1. If specified divides the input
features into groups.
bias: whether to add a bias to the output (default: True).
dtype: the dtype of the computation (default: float32).
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
kernel_init: initializer for the convolutional kernel.
bias_init: initializer for the bias.
kaiming_scaling: Scale kernel according to Kaiming initialization scaling.
compensate_padding: Renormalize output based on introduced zero padding.
Returns:
The convolved data.
"""
inputs = jnp.asarray(inputs, dtype)
if strides is None:
strides = (1,) * (inputs.ndim - 2)
in_features = inputs.shape[-1]
assert in_features % feature_group_count == 0
input_channels = in_features // feature_group_count
kernel_shape = kernel_size + (input_channels, features)
kernel_unnorm = self.param('kernel', kernel_shape, kernel_init)
kernel_unnorm = jnp.asarray(kernel_unnorm, dtype)
# Normalize mean.
kernel = kernel_unnorm - jnp.mean(
kernel_unnorm, keepdims=True, axis=[0, 1, 2])
# Normalize stdev.
std_estimate = (
jnp.sqrt(jnp.mean(kernel**2, keepdims=True, axis=[0, 1, 2]) + 1e-5))
# Sample estimate compensation:
kernel = kernel / std_estimate
# Normalize by number of inputs:
if kaiming_scaling:
kernel = kernel / jnp.sqrt(int(input_channels * onp.prod(kernel_size)))
# pylint: disable=protected-access
dimension_numbers = nn.linear._conv_dimension_numbers(inputs.shape)
# pylint: enable=protected-access
y = lax.conv_general_dilated(
inputs,
kernel,
strides,
padding,
lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation,
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
precision=precision)
if bias:
bias = self.param('bias', (features,), bias_init)
bias = jnp.asarray(bias, dtype)
y = y + bias
if compensate_padding:
y = padding_compensate(inputs, kernel_size, lhs_dilation, padding,
precision, rhs_dilation, strides, y)
return y
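# Sanity sketch (our addition): weight standardization above gives each
# flattened filter zero mean and roughly unit std before the optional Kaiming
# rescaling. Assuming `from jax import random`:
#
#   k = random.normal(random.PRNGKey(0), (3, 3, 8, 16))
#   k = k - jnp.mean(k, axis=(0, 1, 2), keepdims=True)
#   k = k / jnp.sqrt(jnp.mean(k**2, axis=(0, 1, 2), keepdims=True) + 1e-5)
#   # per-filter mean ~0 and mean-square ~1 over axes (0, 1, 2)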
| apache-2.0 | -6,611,590,661,728,791,000 | 36.624724 | 83 | 0.648791 | false |
iandees/all-the-places | locations/spiders/whitecastle.py | 1 | 3336 | # -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
class WhiteCastleSpider(scrapy.Spider):
name = "whitecastle"
allowed_domains = ["www.whitecastle.com"]
timeregex = re.compile('^([0-9:]+)(AM|PM)$')
start_urls = (
'https://www.whitecastle.com/api/location/search?form=%7B%22origin%22%3A%7B%22latitude%22%3A40.75368539999999%2C%22longitude%22%3A-73.9991637%7D%2C%22count%22%3A9999999%2C%22skip%22%3A0%2C%22targets%22%3A%5B%22Castle%22%5D%7D',
)
def normalize_time(self, time_str):
time, ampm = self.timeregex.search(time_str).groups()
hour, minute = time.split(':')
hour = int(hour)
if ampm == 'PM' and hour != 12:
hour = hour + 12
return '%02d:%s' % (hour, minute)
def normalize_dayrange(self, dayrange):
replacements = [
["Monday", "Mo"],
["Tuesday", "Tu"],
["Wednesday", "We"],
["Thursday", "Th"],
["Friday", "Fr"],
["Saturday", "Sa"],
["Sunday", "Su"],
["Mon", "Mo"],
["Tue", "Tu"],
["Wed", "We"],
["Thu", "Th"],
["Fri", "Fr"],
["Sat", "Sa"],
["Sun", "Su"],
[" - ", "-"]
]
for r in replacements:
dayrange = dayrange.replace(r[0], r[1])
return dayrange
def store_hours(self, timetable):
if (len(timetable) == 1):
hours = timetable[0]
if (hours['dayOfWeek'] == 'Sun - Sat') and \
('24 Hours' in hours['formattedTitles'][0]):
return '24/7'
opening_hours = ""
for time in timetable:
dayrange = time['dayOfWeek']
dayrange = self.normalize_dayrange(dayrange)
times = time['formattedTitles'][0]
if ('24 Hours' in times):
hours = "00:00-24:00"
else:
opentime, closetime = times.split(' - ')
hours = '{}-{}'.format(
self.normalize_time(opentime),
self.normalize_time(closetime),
)
opening_hours += '{dayrange} {hours}; '.format(dayrange=dayrange, hours=hours)
return opening_hours[:-2]
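    # Worked example (our addition, hypothetical input shaped like the API
    # rows this spider parses): a timetable of
    #   [{'dayOfWeek': 'Mon - Fri', 'formattedTitles': ['10:00AM - 11:00PM']}]
    # normalizes to the OSM-style string 'Mo-Fr 10:00-23:00'.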
def parse(self, response):
data = json.loads(response.body_as_unicode())
for store in data:
properties = {
"ref": store.get('id'),
"name": store.get('name'),
"addr_full": store.get('address'),
"city": store.get('city'),
"state": store.get('state'),
"postcode": store.get('zip'),
"phone": store.get('telephone'),
}
if store.get('url'):
properties['website'] = 'https://www.whitecastle.com' + store.get('url')
if store.get('latitude'): properties['lat'] = float(store.get('latitude'))
if store.get('longitude'): properties['lon'] = float(store.get('longitude'))
if store.get('timetable'):
opening_hours = self.store_hours(store.get('timetable'))
if opening_hours: properties['opening_hours'] = opening_hours
yield GeojsonPointItem(**properties)
| mit | -8,006,970,155,435,999,000 | 33.391753 | 235 | 0.4994 | false |
alicexpp/stock_algorithm_merge | driver.py | 1 | 11179 | #coding=utf-8
from __future__ import with_statement
import sys
from pyke import knowledge_engine
from pyke import krb_compiler
from pyke import krb_traceback
from pyke import goal
import ibm_db
import ibm_db_dbi
import datetime
import time
import recommend_area
import BaseClass
import random
import area_coordinate_trans
Area_information = {'A1C': BaseClass.AREA(), 'A2C': BaseClass.AREA(), 'A3C': BaseClass.AREA(), 'A4C': BaseClass.AREA(),
'A5C': BaseClass.AREA(), 'A6C': BaseClass.AREA(), 'A7C': BaseClass.AREA(), 'A1S': BaseClass.AREA(),
'A2S': BaseClass.AREA(), 'A3S': BaseClass.AREA(), 'A4S': BaseClass.AREA(), 'A5S': BaseClass.AREA()}
engine = knowledge_engine.engine(__file__)
# Activate the rule base
engine.activate('fc_area_recommend')
# Check whether an area is full and recommend a placement area
def fc_test(coil_kind, external_diameter, width, status1=1,Flag='0'):
fc_goal = goal.compile('coil_area.move_area($coil_kind,$area,$status)')
try:
with fc_goal.prove(engine, coil_kind=coil_kind, status=status1) as gen:
for vars, plan in gen:
                # Read this stock area's information from the database.
                # Maximum length of the current area:
                Max_Length = select_data('UACS_STOCK_INFO', vars['area'])[0]
                # Maximum width of the current area:
                Max_Width = select_data('UACS_STOCK_INFO', vars['area'])[1]
                # Current capacity ratio of the area:
                Current_Ratio = select_data('UACS_STOCK_INFO', vars['area'])[2]
                # Capacity ratio after this coil would be placed:
                Cal_Capacity = Current_Ratio + (external_diameter * width) / (Max_Length * Max_Width)
                # print "if the coil is placed in %s, the capacity ratio becomes %f" % (vars['area'], Cal_Capacity)
                if Cal_Capacity < 1 and Flag == '0':
                    print "%s should be placed in %s" % (coil_kind, vars['area'])
return vars['area']
if Cal_Capacity>=1 or Flag=='1':
if Flag=='1':
print "the saddle of %s area is full" % (vars['area'])
else:
print "the %s area is full" % (vars['area'])
status_n = status1 + 1
return fc_test(coil_kind,external_diameter,width,status1=status_n)
return "null"
except:
print "something err"
krb_traceback.print_exc()
sys.exit()
# Connect to the database
conn = ibm_db.connect("DRIVER = {IBM DB2 ODBC DRIVER}; DATABASE=UACSDB0; HOSTNAME=10.25.101.8;PORT=50000;PROTOCOL=TCPIP;UID=UACSAPP;PWD=UACSAPP;","","")
conn_ibm_dbi=ibm_db_dbi.connect("DRIVER={IBM DB2 ODBC DRIVER};DATABASE=UACSDB0;HOSTNAME=10.25.101.8;PORT=50000;PROTOCOL=TCPIP;UID=UACSAPP;PWD=UACSAPP;","","")
if conn:
print "connect db2 successed"
# Read each stock area's current and maximum capacity from the database
def select_data(table_name, area_name):
sql="SELECT * FROM %s WHERE STOCK_NAME='%s'"% (table_name,area_name)
stmt = ibm_db.exec_immediate(conn,sql)
row = ibm_db.fetch_assoc(stmt)
return row['MAX_LENGTH'], row['MAX_WIDTH'], row['CURRENT_RATIO']
# After a coil is placed, increment the area's current count in the database
def update_current(table_name, area_name):
old_result=select_data(table_name,area_name)
new_current=old_result[0]+1
update_sql="UPDATE %s SET CURRENT_NO='%d' WHERE STOCK_NAME='%s'"%(table_name,new_current,area_name)
ibm_db.exec_immediate(conn,update_sql)
ibm_db.commit(conn)
return new_current
# Coils are stacked row by row
def select_position(area, row_number, column_number, current_num):
    # row index i
    i = current_num / column_number + 1
    # column index j
    j = current_num % column_number
    while j == 0:
        # an exact multiple lands on the last column of the previous row
        i = i - 1
        j = column_number
    print 'the coil should be put in %s, row %d, column %d' % (area, i, j)
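# Worked example (our addition): with column_number=4, the 8th coil gives
# i = 8 / 4 + 1 = 3 and j = 8 % 4 = 0; the loop above then corrects this to
# row 2, column 4, i.e. the last slot of the second row.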
# Function interface called from C++
def place_position(coil_information):
begin = datetime.datetime.now().microsecond
begin1=time.time()
area_name = fc_test(coil_information)
update_current('UACS_STOCK_INFORMATION_TEST', area_name)
end = datetime.datetime.now().microsecond
end1=time.time()
re=float(end-begin)
print "python程序执行时间为:%f ms" % (re/1000.0)
# Count the number of free coil positions in an area
def count_area_num(table_name, area_name):
select_sql = "SELECT COUNT(*) FROM %s WHERE PLACEMENT_STATUS='0'AND STOCK_NUM='%s'"%(table_name,area_name)
stmt = ibm_db.exec_immediate(conn, select_sql)
    # row is returned as a dict
    row = ibm_db.fetch_assoc(stmt)
    # Return the number of free coil positions in this area
    return row['1']
# Find the position record with the smallest free position number
def find_min_region(table_name, area_name):
select_sql = "SELECT MIN(REGION_NUM) FROM %s WHERE PLACEMENT_STATUS='0'AND STOCK_NUM='%s'"%(table_name,area_name)
stmt = ibm_db.exec_immediate(conn, select_sql)
    # row is returned as a dict
    row = ibm_db.fetch_assoc(stmt)
return row['1']
# Update the smallest free position's status from 0 (free) to 1 (occupied)
def update_min_region(table_name, area_name):
region_num=find_min_region(table_name,area_name)
update_sql = "UPDATE %s SET PLACEMENT_STATUS ='%d' WHERE REGION_NUM='%s'" % (table_name, 1, region_num)
ibm_db.exec_immediate(conn, update_sql)
ibm_db.commit(conn)
return region_num
# After a coil is placed, update the area's capacity ratio
def update_area_ratio(table_name, area_name, new_ratio):
update_ratio = "UPDATE %s SET CURRENT_RATIO = '%f' WHERE STOCK_NAME = '%s' "%(table_name, new_ratio, area_name)
ibm_db.exec_immediate(conn, update_ratio)
ibm_db.commit(conn)
return area_name
# Read the coil occupancy status of an area from the stock map database
def read_stock_status(table_name, area_name):
    sql = "SELECT * FROM %s WHERE STOCK_NAME = '%s'" % (table_name, area_name)
    c = conn_ibm_dbi.cursor()
    c.execute(sql)
    rows = c.fetchall()
    return rows
# First pick a recommended position, then recommend the corresponding area
def recommend_stock_position(table_name, coil_information, external_diameter, width, Flag = '0'):
area_name = fc_test(coil_information, float(external_diameter), float(width),Flag=Flag)
Max_Length = select_data(table_name, area_name)[0]
Max_Width = select_data(table_name, area_name)[1]
Current_Capacity = select_data(table_name, area_name)[2]
print "current storage_capacity is:", Current_Capacity
center_x = 1100
center_y = 1050
while float(width) / 2 > center_x:
center_x = center_x + 2200
while float(external_diameter) / 2 > center_y:
center_y = center_y + 600
print "start center_x:", center_x
print "start center_y:", center_y
    # steel_information is the coil's rect in sub-area (relative) coordinates
steel_information = BaseClass.RECT(llp=BaseClass.POINT(center_x - float(width) / 2,
center_y - float(external_diameter) / 2),
length=float(external_diameter),
width=float(width))
    # Get the steel_list of the current area; each area has its own list.
    # Saddle occupancy is read from the database first and appended to
    # new_steel_list; this covers saddle coordinates of the whole area.
exist_steel_lists = read_stock_status('UACS_STOCK_STATUS_TEST', area_name)
new_steel_list = []
for item in exist_steel_lists:
CENTER_X = item[1]
CENTER_Y = item[2]
        # Convert X_CENTER (absolute depot coordinates) to sub-area coordinates
        center_x_exist = area_coordinate_trans.absolute_to_relative(area_name, CENTER_X, CENTER_Y)[0]
        # Convert Y_CENTER (absolute depot coordinates) to sub-area coordinates
        center_y_exist = area_coordinate_trans.absolute_to_relative(area_name, CENTER_X, CENTER_Y)[1]
external_diameter_exist = item[4]
width_exist = item[5]
steel_exist = BaseClass.RECT(llp=BaseClass.POINT(center_x_exist-width_exist/2.,
                                                         center_y_exist - external_diameter_exist / 2.),
length = float(external_diameter_exist),
width = float(width_exist))
new_steel_list.append(steel_exist)
# recommend_area.paint_exit_rect(new_steel_list)
# recommend_area.show_all_rect(area_name,Max_Length,Max_Width)
# print "%s 库区中的钢卷个数为 %d" % (area_name,len(new_steel_list))
# print "%s 库区中现有的钢卷为:"% area_name
# print new_steel_list
# recommend_area.paint_exit_rect(new_steel_list)
# recommend_area.show_all_rect(area_name,Max_Length,Max_Width)
recommend_result = recommend_area.find_suit_pos(steel_information, new_steel_list,
Max_Length, Max_Width, area_name, Current_Capacity)
if recommend_result != False:
new_storage_capacity = recommend_result[0]
recommend_saddle_rect = recommend_result[1]
update_area_ratio('UACS_STOCK_INFO', area_name, new_storage_capacity)
print "after place coil the storage_capacity is:", new_storage_capacity
# print "the coil should put in %s area" % area_name
        # Recommended saddle coordinates
saddle_center_x = recommend_area.output_coordinate_x(recommend_saddle_rect)
saddle_center_y = recommend_area.output_coordinate_y(recommend_saddle_rect)
        # Update the stock status database
# print area_name,center_x,center_y,coil_information,external_diameter,width
update_stock_status="INSERT INTO UACS_STOCK_STATUS_TEST(STOCK_NAME,X_CENTER,Y_CENTER,COIL_KIND_NAME," \
"COIL_OUT_LENGTH,COIL_WIDTH) VALUES('%s','%.2f',%.2f,'%s',%d,%d)"%\
(area_name,saddle_center_x,saddle_center_y,coil_information,external_diameter,width)
ibm_db.exec_immediate(conn, update_stock_status)
return area_name
else:
        # Flag marks the case where the capacity ratio is still below 1 but no
        # saddle position is free, so fc_test also needs Flag in its decision.
Flag = '1'
return recommend_stock_position(table_name, coil_information, external_diameter, width,Flag=Flag)
if __name__ == "__main__":
while True:
        # external_diameter = raw_input("Enter the coil outer diameter: ")
        external_diameter = random.randint(1000, 1200)
        print "coil outer diameter:", external_diameter
        # width = raw_input("Enter the coil width: ")
        width = random.randint(1300, 2000)
        print "coil width:", width
steel_kind_list = ["back_closed_coil","hot_closed_coil","finished_product","back_coil","hot_coil",
"2030","back_retreat_coil","hot_retreat_coil","back_return_coil"]
steel_name=random.sample(steel_kind_list,1)[0]
print "钢卷种类:",steel_name
recommend_stock_position('UACS_STOCK_INFO', steel_name, float(external_diameter),float(width))
# recommend_stock_position('UACS_STOCK_INFO', 'hot_coil', float(external_diameter), float(width))
| gpl-3.0 | -3,430,614,603,294,191,000 | 41.311203 | 158 | 0.620967 | false |
sammyshj/stem | test/integ/version.py | 1 | 2009 | """
Tests that the stem.version functions can handle the tor instance we're
running with.
"""
import unittest
import stem.prereq
import stem.util.system
import stem.version
import test.runner
from test.runner import require_controller
class TestVersion(unittest.TestCase):
def test_get_system_tor_version(self):
"""
Basic verification checks for the get_system_tor_version() function.
"""
if not stem.util.system.is_available('tor'):
test.runner.skip(self, "(tor isn't in our path)")
return
# Since tor is in our path we should expect to be able to get the version
# that way, though this might not belong to our test instance (if we're
# running against a specific tor binary).
stem.version.get_system_tor_version()
# try running against a command that exists, but isn't tor
self.assertRaises(IOError, stem.version.get_system_tor_version, 'ls')
# try running against a command that doesn't exist
self.assertRaises(IOError, stem.version.get_system_tor_version, 'blarg')
@require_controller
def test_get_system_tor_version_value(self):
"""
Checks that the get_system_tor_version() provides the same value as our
test instance provides.
"""
runner = test.runner.get_runner()
system_tor_version = stem.version.get_system_tor_version(runner.get_tor_command())
self.assertEqual(runner.get_tor_version(), system_tor_version)
@require_controller
def test_getinfo_version_parsing(self):
"""
Issues a 'GETINFO version' query to our test instance and makes sure that
we can parse it.
"""
control_socket = test.runner.get_runner().get_tor_socket()
control_socket.send('GETINFO version')
version_response = control_socket.recv()
control_socket.close()
# the getinfo response looks like...
# 250-version=0.2.3.10-alpha-dev (git-65420e4cb5edcd02)
# 250 OK
tor_version = list(version_response)[0]
tor_version = tor_version[8:].split(' ', 1)[0]
stem.version.Version(tor_version)
| lgpl-3.0 | 171,768,461,828,418,270 | 29.439394 | 86 | 0.699353 | false |
otfbot/otfbot | otfbot/plugins/ircClient/flyspray.py | 1 | 3756 | # This file is part of OtfBot.
#
# OtfBot is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# OtfBot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OtfBot; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# (c) 2009 by Alexander Schier
#
"""
Provide a command to information for a bug in a Flyspray bug tracker
"""
from otfbot.lib import chatMod
from otfbot.lib.pluginSupport.decorators import callback
import urllib2
import re
from HTMLParser import HTMLParser
from HTMLParser import HTMLParseError
class versionIDExtractor(HTMLParser):
    in_version_select = False
    in_option = False
    current_value = ""
    versionID = ""
    version = ""
def __init__(self, version):
HTMLParser.__init__(self)
self.version = version
def handle_starttag(self, tag, attrs):
if tag == "select" and ("name", "due[]") in attrs:
self.in_version_select = True
elif self.in_version_select and tag == "option":
            # remember this option's value attribute; it holds the version ID
            self.current_value = dict(attrs).get("value", "")
            self.in_option = True
def handle_endtag(self, tag):
if tag == "select":
self.in_version_select = False
elif tag == "option":
self.in_option = False
    def handle_data(self, data):
        # the option text is the version name; report the remembered value
        if self.in_option and data == self.version:
            self.versionID = self.current_value
def get_result(self):
return self.versionID
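# Usage sketch (assumes flyspray's HTML: a <select name="due[]"> whose
# <option value="ID">NAME</option> entries list the versions):
#   parser = versionIDExtractor("1.0")
#   parser.feed(html)
#   version_id = parser.get_result()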
class Plugin(chatMod.chatMod):
def __init__(self, bot):
self.bot = bot
self.config = bot.root.getServiceNamed("config")
@callback
def command(self, user, channel, command, options):
if not self.config:
self.bot.sendmsg(channel, "I have no config and want to cry.")
return
if command == "flyspray" or command == "fs":
bugid = 0
try:
bugid = int(options)
except ValueError:
self.bot.sendmsg(channel, "Bug ID must be an integer")
return
url = self.config.get("url", "", "flyspray", self.bot.network, channel)
if url:
#TODO: Blocking
handle = urllib2.urlopen(url + "index.php?do=details&task_id=%d" % bugid)
if handle.geturl() == url:
self.bot.sendmsg(channel, "Invalid Bug ID")
return
elif handle.getcode() != 200:
self.bot.sendmsg(channel, "Unknown Error")
return
title = re.match(".*<title>([^<]*)</title>.*", handle.read(), re.S + re.I)
if title:
title = title.group(1)
self.bot.sendmsg(channel, title)
else:
self.bot.sendmsg(channel, "Error parsing the flyspray page")
if command == "fs-ver":
parser = versionIDExtractor(options)
try:
url = self.config.get("url", "", "flyspray", self.bot.network, channel)
if url:
parser.feed(urllib2.urlopen(url).read())
self.bot.sendmsg(channel, parser.get_result())
except HTMLParseError:
pass
| gpl-2.0 | 6,403,037,351,548,345,000 | 33.777778 | 90 | 0.575612 | false |
anomen-s/programming-challenges | rmo/programming/captcha/solve.py | 1 | 2050 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# break simple CAPTCHA
import re
import base64
import urllib.request, urllib.error, urllib.parse
import os.path
import http.cookiejar
import subprocess
from PIL import Image
from PIL import ImageDraw
URL1 = 'http://challenge01.root-me.org/programmation/ch8/'
def main():
    ''' fetch the challenge page, solve its captcha and post the answer '''
cj = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
urllib.request.install_opener(opener)
inputhtml= urllib.request.urlopen(URL1).readlines()
print(cj)
imgdata = parse(inputhtml)
writedata('img.png', imgdata)
ocrfix()
password = ocrdecode()
print (password)
postdata = post_data(password)
print(postdata)
responsehtml= urllib.request.urlopen(URL1, postdata).readlines()
resultlines = list(map(lambda x: x.decode("utf-8"), responsehtml))
for r in resultlines:
print(r)
def post_data(password):
data = {}
data['cametu'] = password
pdata = urllib.parse.urlencode(data)
return bytes(pdata, 'utf-8')
def parse(data):
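    ''' extract and decode the base64 captcha image from the page '''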
print ('*****************')
lines = list(map(lambda x: x.decode("utf-8"), data))
print (lines)
print ('*****************')
p1 = re.compile('base64,([^"]+)')
m1 = p1.search(lines[0])
result = m1.group(1)
decoded = base64.b64decode(result)
print(result)
return decoded
def ocrdecode():
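    ''' run gocr on the cleaned image and return the decoded string '''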
try:
result = subprocess.check_output(['gocr','img-clean.png'])
except subprocess.CalledProcessError as e:
if e.returncode > 1:
print (e.output)
exit(e.returncode)
return False
return result.decode("utf-8").strip()
def writedata(filename, data):
with open(filename, 'wb') as f:
data = f.write(data)
def ocrfix():
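    ''' whiten all black pixels (presumably noise lines) so gocr sees only the glyphs '''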
BLACK = (0,0,0)
WHITE =(255,255,255)
im = Image.open('img.png')
px = im.load()
for x in range(250):
for y in range(50):
if (px[x,y] == BLACK):
px[x,y] = WHITE
im.save('img-clean.png')
if __name__ =='__main__':
main()
| gpl-2.0 | 2,905,215,987,433,998,000 | 19.5 | 78 | 0.634634 | false |
dims/neutron | neutron/services/qos/qos_plugin.py | 1 | 7479 | # Copyright (c) 2015 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import exceptions as n_exc
from neutron.db import api as db_api
from neutron.db import db_base_plugin_common
from neutron.extensions import qos
from neutron.objects.qos import policy as policy_object
from neutron.objects.qos import rule as rule_object
from neutron.objects.qos import rule_type as rule_type_object
from neutron.services.qos.notification_drivers import manager as driver_mgr
from neutron.services.qos import qos_consts
class QoSPlugin(qos.QoSPluginBase):
"""Implementation of the Neutron QoS Service Plugin.
This class implements a Quality of Service plugin that
provides quality of service parameters over ports and
networks.
"""
supported_extension_aliases = ['qos']
def __init__(self):
super(QoSPlugin, self).__init__()
self.notification_driver_manager = (
driver_mgr.QosServiceNotificationDriverManager())
@db_base_plugin_common.convert_result_to_dict
def create_policy(self, context, policy):
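        """Persist a new QoS policy and notify the notification drivers."""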
policy = policy_object.QosPolicy(context, **policy['policy'])
policy.create()
self.notification_driver_manager.create_policy(context, policy)
return policy
@db_base_plugin_common.convert_result_to_dict
def update_policy(self, context, policy_id, policy):
policy = policy_object.QosPolicy(context, **policy['policy'])
policy.id = policy_id
policy.update()
self.notification_driver_manager.update_policy(context, policy)
return policy
def delete_policy(self, context, policy_id):
policy = policy_object.QosPolicy(context)
policy.id = policy_id
self.notification_driver_manager.delete_policy(context, policy)
policy.delete()
def _get_policy_obj(self, context, policy_id):
obj = policy_object.QosPolicy.get_by_id(context, policy_id)
if obj is None:
raise n_exc.QosPolicyNotFound(policy_id=policy_id)
return obj
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_policy(self, context, policy_id, fields=None):
return self._get_policy_obj(context, policy_id)
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_policies(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
        filters = filters or dict()
        return policy_object.QosPolicy.get_objects(context, **filters)
#TODO(QoS): Consider adding a proxy catch-all for rules, so
# we capture the API function call, and just pass
# the rule type as a parameter removing lots of
# future code duplication when we have more rules.
@db_base_plugin_common.convert_result_to_dict
def create_policy_bandwidth_limit_rule(self, context, policy_id,
bandwidth_limit_rule):
# make sure we will have a policy object to push resource update
with db_api.autonested_transaction(context.session):
# first, validate that we have access to the policy
policy = self._get_policy_obj(context, policy_id)
rule = rule_object.QosBandwidthLimitRule(
context, qos_policy_id=policy_id,
**bandwidth_limit_rule['bandwidth_limit_rule'])
rule.create()
policy.reload_rules()
self.notification_driver_manager.update_policy(context, policy)
return rule
@db_base_plugin_common.convert_result_to_dict
def update_policy_bandwidth_limit_rule(self, context, rule_id, policy_id,
bandwidth_limit_rule):
# make sure we will have a policy object to push resource update
with db_api.autonested_transaction(context.session):
# first, validate that we have access to the policy
policy = self._get_policy_obj(context, policy_id)
# check if the rule belong to the policy
policy.get_rule_by_id(rule_id)
rule = rule_object.QosBandwidthLimitRule(
context, **bandwidth_limit_rule['bandwidth_limit_rule'])
rule.id = rule_id
rule.update()
policy.reload_rules()
self.notification_driver_manager.update_policy(context, policy)
return rule
def delete_policy_bandwidth_limit_rule(self, context, rule_id, policy_id):
# make sure we will have a policy object to push resource update
with db_api.autonested_transaction(context.session):
# first, validate that we have access to the policy
policy = self._get_policy_obj(context, policy_id)
rule = policy.get_rule_by_id(rule_id)
rule.delete()
policy.reload_rules()
self.notification_driver_manager.update_policy(context, policy)
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_policy_bandwidth_limit_rule(self, context, rule_id,
policy_id, fields=None):
# make sure we have access to the policy when fetching the rule
with db_api.autonested_transaction(context.session):
# first, validate that we have access to the policy
self._get_policy_obj(context, policy_id)
rule = rule_object.QosBandwidthLimitRule.get_by_id(
context, rule_id)
if not rule:
raise n_exc.QosRuleNotFound(policy_id=policy_id, rule_id=rule_id)
return rule
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_policy_bandwidth_limit_rules(self, context, policy_id,
filters=None, fields=None,
sorts=None, limit=None,
marker=None, page_reverse=False):
# make sure we have access to the policy when fetching rules
with db_api.autonested_transaction(context.session):
# first, validate that we have access to the policy
self._get_policy_obj(context, policy_id)
filters = filters or dict()
filters[qos_consts.QOS_POLICY_ID] = policy_id
return rule_object.QosBandwidthLimitRule.get_objects(context,
**filters)
# TODO(QoS): enforce rule types when accessing rule objects
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
    def get_rule_types(self, context, filters=None, fields=None,
                       sorts=None, limit=None,
                       marker=None, page_reverse=False):
        filters = filters or dict()
return rule_type_object.QosRuleType.get_objects(**filters)
| apache-2.0 | -729,154,042,581,872,900 | 45.166667 | 78 | 0.64407 | false |
Yelp/synapse-tools | src/synapse_tools/haproxy/qdisc_tool.py | 1 | 4938 | # -*- coding: utf-8 -*-
""" Command line interface for working with qdiscs """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import subprocess
import sys
import argparse
from synapse_tools.haproxy.qdisc_util import check_setup
from synapse_tools.haproxy.qdisc_util import clear
from synapse_tools.haproxy.qdisc_util import manage_plug
from synapse_tools.haproxy.qdisc_util import needs_setup
from synapse_tools.haproxy.qdisc_util import setup
from synapse_tools.haproxy.qdisc_util import stat
from pwd import getpwnam
log = logging.getLogger(__name__)
# We run haproxy on localhost (yocalhost share's the lo interface)
INTERFACE_NAME = 'lo'
# Traffic comes from the yocalhost IP
SOURCE_IP = '169.254.255.254'
# Log format for logging to console
CONSOLE_FORMAT = '%(asctime)s - %(name)-12s: %(levelname)-8s %(message)s'
def stat_cmd(
args: argparse.Namespace,
) -> int:
return stat(INTERFACE_NAME)
def check_setup_cmd(
args: argparse.Namespace,
) -> int:
return check_setup(INTERFACE_NAME)
def manage_plug_cmd(
args: argparse.Namespace,
) -> int:
if args.action == 'plug':
manage_plug(INTERFACE_NAME, enable_plug=True)
elif args.action == 'unplug':
manage_plug(INTERFACE_NAME, enable_plug=False)
else:
return 1
return 0
def needs_setup_cmd(
args: argparse.Namespace,
) -> int:
return needs_setup(INTERFACE_NAME)
def setup_cmd(
args: argparse.Namespace,
) -> int:
return setup(INTERFACE_NAME, SOURCE_IP)
def clear_cmd(
args: argparse.Namespace,
) -> int:
return clear(INTERFACE_NAME, SOURCE_IP)
def drop_perms() -> None:
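    """Drop root privileges to the invoking sudo user (or nobody)."""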
user = getpwnam(os.environ.get('SUDO_USER', 'nobody'))
uid = user.pw_uid
gid = user.pw_gid
os.setgroups([])
os.setgid(gid)
os.setuid(uid)
def protect_call_cmd(
args: argparse.Namespace,
) -> int:
if os.getuid() != 0:
print('Only root can execute protected binaries')
return 1
try:
try:
manage_plug(INTERFACE_NAME, enable_plug=True)
except Exception:
# If we fail to plug, it is no big deal, we might
# drop some traffic but let's not fail to run the
# command
log.exception('Failed to enable plug')
subprocess.check_call(
[args.cmd] + args.args,
preexec_fn=drop_perms
)
finally:
# Netlink comms can be unreliable according to the manpage,
# so do some retries to ensure we really turn off the plug
# It would be really bad if we do not turn off the plug
for i in range(3):
try:
manage_plug(INTERFACE_NAME, enable_plug=False)
break
except Exception:
log.exception('Failed to disable plug, try #%d' % i)
return 0
def parse_options() -> argparse.Namespace:
parser = argparse.ArgumentParser(epilog=(
'Setup QoS queueing disciplines for haproxy'
))
parser.add_argument('--verbose', '-v', action='store_true')
subparsers = parser.add_subparsers()
stat_parser = subparsers.add_parser(
'stat', help='Show current qdisc and iptables setup')
stat_parser.set_defaults(func=stat_cmd)
check_parser = subparsers.add_parser(
'check', help='Check qdisc and iptables are as expected')
check_parser.set_defaults(func=check_setup_cmd)
needs_setup_parser = subparsers.add_parser(
'needs_setup', help='Check if qdisc and iptables need setup')
needs_setup_parser.set_defaults(func=needs_setup_cmd)
setup_parser = subparsers.add_parser(
'setup', help='Setup the qdisc')
setup_parser.set_defaults(func=setup_cmd)
clear_parser = subparsers.add_parser(
'clear', help='Clear the qdisc and iptables')
clear_parser.set_defaults(func=clear_cmd)
plug_parser = subparsers.add_parser(
'manage_plug', help='Manage the plug lane')
plug_parser.add_argument(
'action', choices=('plug', 'unplug'),
help='Plug or unplug traffic on the plug qdisc')
plug_parser.set_defaults(func=manage_plug_cmd)
protect_parser = subparsers.add_parser(
'protect', help='Run a command while network traffic is blocked')
protect_parser.add_argument(
dest='cmd', help='Command to run while traffic is blocked')
protect_parser.add_argument(
'args', nargs=argparse.REMAINDER)
protect_parser.set_defaults(func=protect_call_cmd)
return parser.parse_args()
def setup_logging(
args: argparse.Namespace,
) -> None:
if args.verbose:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(level=level, format=CONSOLE_FORMAT)
def main() -> None:
args = parse_options()
setup_logging(args)
sys.exit(args.func(args))
if __name__ == '__main__':
main()
| apache-2.0 | -6,004,412,169,259,848,000 | 26.281768 | 73 | 0.654516 | false |
3dfxsoftware/cbss-addons | purchase_order_discount/__init__.py | 1 | 1061 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase_order_discount | gpl-2.0 | -6,035,257,839,058,129,000 | 45.173913 | 78 | 0.611687 | false |
hreeder/r2bot-eve-plugins | Reddit.py | 1 | 2752 | import praw
from collections import deque
from requests.exceptions import HTTPError
from errbot import BotPlugin, botcmd
class Reddit(BotPlugin):
def activate(self):
super(Reddit, self).activate()
self.reddit = praw.Reddit(user_agent='example')
        if not self.config:
            # errbot leaves self.config as None until configured
            self.config = {'banned_subreddits': [u'popping', u'spacedicks']}
self.shown = deque('', 50)
def get_configuration_template(self):
return {'banned_subreddits': [u'popping']}
@botcmd(split_args_with=' ')
def subreddit(self, msg, args):
subreddit = str(args[0]).lower().replace("%", "")
if unicode(subreddit.lower()) in self.config['banned_subreddits']:
return "NOPE NOT GOING ANYWHERE NEAR THAT, NERD."
try:
return self.get_from_subreddit(subreddit)
except:
return "Something Bad Happened, I wasn't able to get anything for you. Sorry!"
@botcmd
def butt(self, msg, args):
return self.get_from_subreddit("butts")
@botcmd
def boobs(self, msg, args):
return self.get_from_subreddit("boobs")
@botcmd
def cats(self, msg, args):
return self.get_from_subreddit("cats")
def get_from_subreddit(self, subreddit):
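        """Return an IRC-ready line for a hot submission not shown recently."""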
subreddit = self.reddit.get_subreddit(subreddit)
over18 = False
try:
over18 = subreddit.over18
except (HTTPError, praw.errors.InvalidSubreddit), e:
return "I was unable to find the subreddit '%s'" % subreddit.display_name
limit = 20
submissions = subreddit.get_hot(limit=limit)
shown_count = 1
for submission in submissions:
if submission.id not in self.shown and not submission.stickied:
output = "[/r/" + subreddit.display_name + "] "
if over18 or submission.over_18:
output += ":nws: NSFW | "
if submission.is_self:
output += submission.title + \
" | Comments: http://redd.it/" + submission.id
else:
output += submission.title + " | " + submission.url + \
" | Comments: http://redd.it/" + submission.id
if over18 or submission.over_18:
output += " | NSFW :nws:"
self.shown.append(submission.id)
return output
elif submission.id in self.shown:
shown_count += 1
if shown_count == limit:
return "That's enough of that subreddit for now. I've pulled all I'm going to right now. Please stop spamming <3"
return "Something Bad Happened, I wasn't able to get anything for you. Sorry!"
| mit | -6,187,730,865,565,259,000 | 31.761905 | 125 | 0.574855 | false |
gsnbng/erpnext | erpnext/hr/doctype/leave_application/test_leave_application.py | 1 | 19635 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from erpnext.hr.doctype.leave_application.leave_application import LeaveDayBlockedError, OverlapError, NotAnOptionalHoliday, get_leave_balance_on
from frappe.permissions import clear_user_permissions_for_doctype
from frappe.utils import add_days, nowdate, now_datetime, getdate, add_months
from erpnext.hr.doctype.leave_type.test_leave_type import create_leave_type
from erpnext.hr.doctype.leave_allocation.test_leave_allocation import create_leave_allocation
test_dependencies = ["Leave Allocation", "Leave Block List"]
_test_records = [
{
"company": "_Test Company",
"doctype": "Leave Application",
"employee": "_T-Employee-00001",
"from_date": "2013-05-01",
"description": "_Test Reason",
"leave_type": "_Test Leave Type",
"posting_date": "2013-01-02",
"to_date": "2013-05-05"
},
{
"company": "_Test Company",
"doctype": "Leave Application",
"employee": "_T-Employee-00002",
"from_date": "2013-05-01",
"description": "_Test Reason",
"leave_type": "_Test Leave Type",
"posting_date": "2013-01-02",
"to_date": "2013-05-05"
},
{
"company": "_Test Company",
"doctype": "Leave Application",
"employee": "_T-Employee-00001",
"from_date": "2013-01-15",
"description": "_Test Reason",
"leave_type": "_Test Leave Type LWP",
"posting_date": "2013-01-02",
"to_date": "2013-01-15"
}
]
class TestLeaveApplication(unittest.TestCase):
def setUp(self):
for dt in ["Leave Application", "Leave Allocation", "Salary Slip", "Leave Ledger Entry"]:
frappe.db.sql("DELETE FROM `tab%s`" % dt) #nosec
@classmethod
def setUpClass(cls):
set_leave_approver()
def tearDown(self):
frappe.set_user("Administrator")
def _clear_roles(self):
frappe.db.sql("""delete from `tabHas Role` where parent in
("[email protected]", "[email protected]", "[email protected]")""")
def _clear_applications(self):
frappe.db.sql("""delete from `tabLeave Application`""")
def get_application(self, doc):
application = frappe.copy_doc(doc)
application.from_date = "2013-01-01"
application.to_date = "2013-01-05"
return application
def test_overwrite_attendance(self):
'''check attendance is automatically created on leave approval'''
make_allocation_record()
application = self.get_application(_test_records[0])
application.status = 'Approved'
application.from_date = '2018-01-01'
application.to_date = '2018-01-03'
application.insert()
application.submit()
attendance = frappe.get_all('Attendance', ['name', 'status', 'attendance_date'],
dict(attendance_date=('between', ['2018-01-01', '2018-01-03']), docstatus=("!=", 2)))
# attendance created for all 3 days
self.assertEqual(len(attendance), 3)
# all on leave
self.assertTrue(all([d.status == 'On Leave' for d in attendance]))
# dates
dates = [d.attendance_date for d in attendance]
for d in ('2018-01-01', '2018-01-02', '2018-01-03'):
self.assertTrue(getdate(d) in dates)
def test_block_list(self):
self._clear_roles()
from frappe.utils.user import add_role
add_role("[email protected]", "HR User")
clear_user_permissions_for_doctype("Employee")
frappe.db.set_value("Department", "_Test Department - _TC",
"leave_block_list", "_Test Leave Block List")
make_allocation_record()
application = self.get_application(_test_records[0])
application.insert()
application.status = "Approved"
self.assertRaises(LeaveDayBlockedError, application.submit)
frappe.set_user("[email protected]")
# clear other applications
frappe.db.sql("delete from `tabLeave Application`")
application = self.get_application(_test_records[0])
self.assertTrue(application.insert())
def test_overlap(self):
self._clear_roles()
self._clear_applications()
from frappe.utils.user import add_role
add_role("[email protected]", "Employee")
frappe.set_user("[email protected]")
make_allocation_record()
application = self.get_application(_test_records[0])
application.insert()
application = self.get_application(_test_records[0])
self.assertRaises(OverlapError, application.insert)
def test_overlap_with_half_day_1(self):
self._clear_roles()
self._clear_applications()
from frappe.utils.user import add_role
add_role("[email protected]", "Employee")
frappe.set_user("[email protected]")
make_allocation_record()
# leave from 1-5, half day on 3rd
application = self.get_application(_test_records[0])
application.half_day = 1
application.half_day_date = "2013-01-03"
application.insert()
# Apply again for a half day leave on 3rd
application = self.get_application(_test_records[0])
application.from_date = "2013-01-03"
application.to_date = "2013-01-03"
application.half_day = 1
application.half_day_date = "2013-01-03"
application.insert()
# Apply again for a half day leave on 3rd
application = self.get_application(_test_records[0])
application.from_date = "2013-01-03"
application.to_date = "2013-01-03"
application.half_day = 1
application.half_day_date = "2013-01-03"
self.assertRaises(OverlapError, application.insert)
def test_overlap_with_half_day_2(self):
self._clear_roles()
self._clear_applications()
from frappe.utils.user import add_role
add_role("[email protected]", "Employee")
frappe.set_user("[email protected]")
make_allocation_record()
# leave from 1-5, no half day
application = self.get_application(_test_records[0])
application.insert()
# Apply again for a half day leave on 1st
application = self.get_application(_test_records[0])
application.half_day = 1
application.half_day_date = application.from_date
self.assertRaises(OverlapError, application.insert)
def test_overlap_with_half_day_3(self):
self._clear_roles()
self._clear_applications()
from frappe.utils.user import add_role
add_role("[email protected]", "Employee")
frappe.set_user("[email protected]")
make_allocation_record()
# leave from 1-5, half day on 5th
application = self.get_application(_test_records[0])
application.half_day = 1
application.half_day_date = "2013-01-05"
application.insert()
# Apply leave from 4-7, half day on 5th
application = self.get_application(_test_records[0])
application.from_date = "2013-01-04"
application.to_date = "2013-01-07"
application.half_day = 1
application.half_day_date = "2013-01-05"
self.assertRaises(OverlapError, application.insert)
# Apply leave from 5-7, half day on 5th
application = self.get_application(_test_records[0])
application.from_date = "2013-01-05"
application.to_date = "2013-01-07"
application.half_day = 1
application.half_day_date = "2013-01-05"
application.insert()
def test_optional_leave(self):
leave_period = get_leave_period()
today = nowdate()
holiday_list = 'Test Holiday List for Optional Holiday'
if not frappe.db.exists('Holiday List', holiday_list):
frappe.get_doc(dict(
doctype = 'Holiday List',
holiday_list_name = holiday_list,
from_date = add_months(today, -6),
to_date = add_months(today, 6),
holidays = [
dict(holiday_date = today, description = 'Test')
]
)).insert()
employee = get_employee()
frappe.db.set_value('Leave Period', leave_period.name, 'optional_holiday_list', holiday_list)
leave_type = 'Test Optional Type'
if not frappe.db.exists('Leave Type', leave_type):
frappe.get_doc(dict(
leave_type_name = leave_type,
doctype = 'Leave Type',
is_optional_leave = 1
)).insert()
allocate_leaves(employee, leave_period, leave_type, 10)
date = add_days(today, - 1)
leave_application = frappe.get_doc(dict(
doctype = 'Leave Application',
employee = employee.name,
company = '_Test Company',
description = "_Test Reason",
leave_type = leave_type,
from_date = date,
to_date = date,
))
# can only apply on optional holidays
self.assertRaises(NotAnOptionalHoliday, leave_application.insert)
leave_application.from_date = today
leave_application.to_date = today
leave_application.status = "Approved"
leave_application.insert()
leave_application.submit()
# check leave balance is reduced
self.assertEqual(get_leave_balance_on(employee.name, leave_type, today), 9)
def test_leaves_allowed(self):
employee = get_employee()
leave_period = get_leave_period()
frappe.delete_doc_if_exists("Leave Type", "Test Leave Type", force=1)
leave_type = frappe.get_doc(dict(
leave_type_name = 'Test Leave Type',
doctype = 'Leave Type',
max_leaves_allowed = 5
)).insert()
date = add_days(nowdate(), -7)
allocate_leaves(employee, leave_period, leave_type.name, 5)
leave_application = frappe.get_doc(dict(
doctype = 'Leave Application',
employee = employee.name,
leave_type = leave_type.name,
description = "_Test Reason",
from_date = date,
to_date = add_days(date, 2),
company = "_Test Company",
docstatus = 1,
status = "Approved"
))
leave_application.submit()
leave_application = frappe.get_doc(dict(
doctype = 'Leave Application',
employee = employee.name,
leave_type = leave_type.name,
description = "_Test Reason",
from_date = add_days(date, 4),
to_date = add_days(date, 8),
company = "_Test Company",
docstatus = 1,
status = "Approved"
))
self.assertRaises(frappe.ValidationError, leave_application.insert)
def test_applicable_after(self):
employee = get_employee()
leave_period = get_leave_period()
frappe.delete_doc_if_exists("Leave Type", "Test Leave Type", force=1)
leave_type = frappe.get_doc(dict(
leave_type_name = 'Test Leave Type',
doctype = 'Leave Type',
applicable_after = 15
)).insert()
date = add_days(nowdate(), -7)
frappe.db.set_value('Employee', employee.name, "date_of_joining", date)
allocate_leaves(employee, leave_period, leave_type.name, 10)
leave_application = frappe.get_doc(dict(
doctype = 'Leave Application',
employee = employee.name,
leave_type = leave_type.name,
description = "_Test Reason",
from_date = date,
to_date = add_days(date, 4),
company = "_Test Company",
docstatus = 1,
status = "Approved"
))
self.assertRaises(frappe.ValidationError, leave_application.insert)
frappe.delete_doc_if_exists("Leave Type", "Test Leave Type 1", force=1)
leave_type_1 = frappe.get_doc(dict(
leave_type_name = 'Test Leave Type 1',
doctype = 'Leave Type'
)).insert()
allocate_leaves(employee, leave_period, leave_type_1.name, 10)
leave_application = frappe.get_doc(dict(
doctype = 'Leave Application',
employee = employee.name,
leave_type = leave_type_1.name,
description = "_Test Reason",
from_date = date,
to_date = add_days(date, 4),
company = "_Test Company",
docstatus = 1,
status = "Approved"
))
self.assertTrue(leave_application.insert())
frappe.db.set_value('Employee', employee.name, "date_of_joining", "2010-01-01")
def test_max_continuous_leaves(self):
employee = get_employee()
leave_period = get_leave_period()
frappe.delete_doc_if_exists("Leave Type", "Test Leave Type", force=1)
leave_type = frappe.get_doc(dict(
leave_type_name = 'Test Leave Type',
doctype = 'Leave Type',
max_leaves_allowed = 15,
max_continuous_days_allowed = 3
)).insert()
date = add_days(nowdate(), -7)
allocate_leaves(employee, leave_period, leave_type.name, 10)
leave_application = frappe.get_doc(dict(
doctype = 'Leave Application',
employee = employee.name,
leave_type = leave_type.name,
description = "_Test Reason",
from_date = date,
to_date = add_days(date, 4),
company = "_Test Company",
docstatus = 1,
status = "Approved"
))
self.assertRaises(frappe.ValidationError, leave_application.insert)
def test_leave_balance_near_allocaton_expiry(self):
employee = get_employee()
leave_type = create_leave_type(
leave_type_name="_Test_CF_leave_expiry",
is_carry_forward=1,
expire_carry_forwarded_leaves_after_days=90)
leave_type.submit()
create_carry_forwarded_allocation(employee, leave_type)
self.assertEqual(get_leave_balance_on(employee.name, leave_type.name, nowdate(), add_days(nowdate(), 8)), 21)
def test_earned_leaves_creation(self):
leave_period = get_leave_period()
employee = get_employee()
leave_type = 'Test Earned Leave Type'
if not frappe.db.exists('Leave Type', leave_type):
frappe.get_doc(dict(
leave_type_name = leave_type,
doctype = 'Leave Type',
is_earned_leave = 1,
earned_leave_frequency = 'Monthly',
rounding = 0.5,
max_leaves_allowed = 6
)).insert()
leave_policy = frappe.get_doc({
"doctype": "Leave Policy",
"leave_policy_details": [{"leave_type": leave_type, "annual_allocation": 6}]
}).insert()
frappe.db.set_value("Employee", employee.name, "leave_policy", leave_policy.name)
allocate_leaves(employee, leave_period, leave_type, 0, eligible_leaves = 12)
from erpnext.hr.utils import allocate_earned_leaves
i = 0
while(i<14):
allocate_earned_leaves()
i += 1
self.assertEqual(get_leave_balance_on(employee.name, leave_type, nowdate()), 6)
# validate earned leaves creation without maximum leaves
frappe.db.set_value('Leave Type', leave_type, 'max_leaves_allowed', 0)
i = 0
while(i<6):
allocate_earned_leaves()
i += 1
self.assertEqual(get_leave_balance_on(employee.name, leave_type, nowdate()), 9)
# test to not consider current leave in leave balance while submitting
def test_current_leave_on_submit(self):
employee = get_employee()
leave_type = 'Sick leave'
allocation = frappe.get_doc(dict(
doctype = 'Leave Allocation',
employee = employee.name,
leave_type = leave_type,
from_date = '2018-10-01',
to_date = '2018-10-10',
new_leaves_allocated = 1
))
allocation.insert(ignore_permissions=True)
allocation.submit()
leave_application = frappe.get_doc(dict(
doctype = 'Leave Application',
employee = employee.name,
leave_type = leave_type,
description = "_Test Reason",
from_date = '2018-10-02',
to_date = '2018-10-02',
company = '_Test Company',
status = 'Approved',
leave_approver = '[email protected]'
))
self.assertTrue(leave_application.insert())
leave_application.submit()
self.assertEqual(leave_application.docstatus, 1)
def test_creation_of_leave_ledger_entry_on_submit(self):
employee = get_employee()
leave_type = create_leave_type(leave_type_name = 'Test Leave Type 1')
leave_type.save()
leave_allocation = create_leave_allocation(employee=employee.name, employee_name=employee.employee_name,
leave_type=leave_type.name)
leave_allocation.submit()
leave_application = frappe.get_doc(dict(
doctype = 'Leave Application',
employee = employee.name,
leave_type = leave_type.name,
from_date = add_days(nowdate(), 1),
to_date = add_days(nowdate(), 4),
description = "_Test Reason",
company = "_Test Company",
docstatus = 1,
status = "Approved"
))
leave_application.submit()
leave_ledger_entry = frappe.get_all('Leave Ledger Entry', fields='*', filters=dict(transaction_name=leave_application.name))
		self.assertEqual(leave_ledger_entry[0].employee, leave_application.employee)
		self.assertEqual(leave_ledger_entry[0].leave_type, leave_application.leave_type)
		self.assertEqual(leave_ledger_entry[0].leaves, leave_application.total_leave_days * -1)
# check if leave ledger entry is deleted on cancellation
leave_application.cancel()
self.assertFalse(frappe.db.exists("Leave Ledger Entry", {'transaction_name':leave_application.name}))
def test_ledger_entry_creation_on_intermediate_allocation_expiry(self):
employee = get_employee()
leave_type = create_leave_type(
leave_type_name="_Test_CF_leave_expiry",
is_carry_forward=1,
expire_carry_forwarded_leaves_after_days=90)
leave_type.submit()
create_carry_forwarded_allocation(employee, leave_type)
leave_application = frappe.get_doc(dict(
doctype = 'Leave Application',
employee = employee.name,
leave_type = leave_type.name,
from_date = add_days(nowdate(), -3),
to_date = add_days(nowdate(), 7),
description = "_Test Reason",
company = "_Test Company",
docstatus = 1,
status = "Approved"
))
leave_application.submit()
leave_ledger_entry = frappe.get_all('Leave Ledger Entry', '*', filters=dict(transaction_name=leave_application.name))
		self.assertEqual(len(leave_ledger_entry), 2)
		self.assertEqual(leave_ledger_entry[0].employee, leave_application.employee)
		self.assertEqual(leave_ledger_entry[0].leave_type, leave_application.leave_type)
		self.assertEqual(leave_ledger_entry[0].leaves, -9)
		self.assertEqual(leave_ledger_entry[1].leaves, -2)
def test_leave_application_creation_after_expiry(self):
# test leave balance for carry forwarded allocation
employee = get_employee()
leave_type = create_leave_type(
leave_type_name="_Test_CF_leave_expiry",
is_carry_forward=1,
expire_carry_forwarded_leaves_after_days=90)
leave_type.submit()
create_carry_forwarded_allocation(employee, leave_type)
		self.assertEqual(get_leave_balance_on(employee.name, leave_type.name, add_days(nowdate(), -85), add_days(nowdate(), -84)), 0)
def create_carry_forwarded_allocation(employee, leave_type):
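	"""Make two allocations so the second one carries forward leaves about to expire."""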
# initial leave allocation
leave_allocation = create_leave_allocation(
leave_type="_Test_CF_leave_expiry",
employee=employee.name,
employee_name=employee.employee_name,
from_date=add_months(nowdate(), -24),
to_date=add_months(nowdate(), -12),
carry_forward=0)
leave_allocation.submit()
leave_allocation = create_leave_allocation(
leave_type="_Test_CF_leave_expiry",
employee=employee.name,
employee_name=employee.employee_name,
from_date=add_days(nowdate(), -84),
to_date=add_days(nowdate(), 100),
carry_forward=1)
leave_allocation.submit()
def make_allocation_record(employee=None, leave_type=None):
allocation = frappe.get_doc({
"doctype": "Leave Allocation",
"employee": employee or "_T-Employee-00001",
"leave_type": leave_type or "_Test Leave Type",
"from_date": "2013-01-01",
"to_date": "2019-12-31",
"new_leaves_allocated": 30
})
allocation.insert(ignore_permissions=True)
allocation.submit()
def get_employee():
return frappe.get_doc("Employee", "_T-Employee-00001")
def set_leave_approver():
employee = get_employee()
dept_doc = frappe.get_doc("Department", employee.department)
dept_doc.append('leave_approvers', {
'approver': '[email protected]'
})
dept_doc.save(ignore_permissions=True)
def get_leave_period():
leave_period_name = frappe.db.exists({
"doctype": "Leave Period",
"company": "_Test Company"
})
if leave_period_name:
return frappe.get_doc("Leave Period", leave_period_name[0][0])
else:
return frappe.get_doc(dict(
name = 'Test Leave Period',
doctype = 'Leave Period',
from_date = add_months(nowdate(), -6),
to_date = add_months(nowdate(), 6),
company = "_Test Company",
is_active = 1
)).insert()
def allocate_leaves(employee, leave_period, leave_type, new_leaves_allocated, eligible_leaves=0):
allocate_leave = frappe.get_doc({
"doctype": "Leave Allocation",
"__islocal": 1,
"employee": employee.name,
"employee_name": employee.employee_name,
"leave_type": leave_type,
"from_date": leave_period.from_date,
"to_date": leave_period.to_date,
"new_leaves_allocated": new_leaves_allocated,
"docstatus": 1
}).insert()
allocate_leave.submit() | agpl-3.0 | -2,843,470,968,118,408,000 | 30.317384 | 145 | 0.695493 | false |
mardiros/creds | creds/views/user.py | 1 | 3471 | import asyncio
import logging
import colander
from pyramid.httpexceptions import HTTPNoContent, HTTPNotFound, HTTPBadRequest
from pyramid_aiorest import resource_config, ioschema
from aiorm import orm
from ..models import User, UserGroup, Group
log = logging.getLogger(__name__)
class PostUserParams(colander.MappingSchema):
username = colander.SchemaNode(colander.String(), location='json')
email = colander.SchemaNode(colander.String(), location='json')
password = colander.SchemaNode(colander.String(), location='json')
@colander.instantiate(name='groups', missing=colander.drop,
location='json')
class Groups(colander.SequenceSchema):
_ = colander.SchemaNode(colander.String())
class PostUserReturn(colander.MappingSchema):
location = colander.SchemaNode(colander.String(), location='header')
status_code = colander.SchemaNode(colander.Integer(),
location='status_code',
default=HTTPNoContent.code)
class GetUserParams(colander.MappingSchema):
username = colander.SchemaNode(colander.String(), location='matchdict')
class GetUserReturn(colander.MappingSchema):
username = colander.SchemaNode(colander.String(), location='json')
email = colander.SchemaNode(colander.String(), location='json')
status = colander.SchemaNode(colander.String(), location='json')
@colander.instantiate(name='groups', location='json')
class Groups(colander.SequenceSchema):
_ = colander.SchemaNode(colander.String())
@resource_config(resource_name='user')
class UserResource:
@asyncio.coroutine
@ioschema(request_schema=PostUserParams(),
response_schema=PostUserReturn())
def collection_post(self, request):
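        """Create a user, creating any missing groups, and return its Location."""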
params = request.yards
transaction = request.transaction['creds']
user = yield from User.by_username(transaction,
request.yards['username'])
if user:
raise HTTPBadRequest(explanation='Duplicate username')
user = User(username=params['username'], email=params['email'])
user.password = params['password']
yield from orm.Insert(user).run(transaction)
for group_name in params.get('groups', []):
group = yield from Group.by_name(transaction, group_name)
if not group:
group = Group(name=group_name)
yield from orm.Insert(group).run(transaction)
yield from orm.Insert(UserGroup(user_id=user.id,
group_id=group.id)
).run(transaction)
log.info('User {username} created with id {user_id}'
''.format(username=user.username,
user_id=user.id))
return {'location': request.route_path('resource_user',
username=user.username)}
@asyncio.coroutine
@ioschema(request_schema=GetUserParams(),
response_schema=GetUserReturn())
def get(self, request):
user = yield from User.by_username(request.transaction['creds'],
request.yards['username'])
if not user:
raise HTTPNotFound()
userdict = user.to_dict()
userdict['groups'] = [group.name for group in (yield from user.groups)]
return userdict
| bsd-3-clause | -6,032,499,691,041,492,000 | 38 | 79 | 0.624604 | false |
madumlao/oxAuth | Server/integrations.deprecatred/oneid/OneIdExternalAuthenticator.py | 1 | 9545 | # oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
# Copyright (c) 2016, Gluu
#
# Author: Yuriy Movchan
#
import json
from java.util import Arrays
from oneid import OneID
from org.apache.http.entity import ContentType
from org.xdi.model.custom.script.type.auth import PersonAuthenticationType
from org.xdi.oxauth.security import Identity
from org.xdi.oxauth.service import UserService, AuthenticationService
from org.xdi.oxauth.service.net import HttpService
from org.xdi.service.cdi.util import CdiUtil
from org.xdi.util import ArrayHelper
from org.xdi.util import StringHelper
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, configurationAttributes):
print "OneId. Initialization"
print "OneId. Initialized successfully"
return True
def destroy(self, configurationAttributes):
print "OneId. Destroy"
print "OneId. Destroyed successfully"
return True
def getApiVersion(self):
return 1
def isValidAuthenticationMethod(self, usageType, configurationAttributes):
return True
def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
return None
def authenticate(self, configurationAttributes, requestParameters, step):
identity = CdiUtil.bean(Identity)
userService = CdiUtil.bean(UserService)
authenticationService = CdiUtil.bean(AuthenticationService)
httpService = CdiUtil.bean(HttpService)
server_flag = configurationAttributes.get("oneid_server_flag").getValue2()
callback_attrs = configurationAttributes.get("oneid_callback_attrs").getValue2()
creds_file = configurationAttributes.get("oneid_creds_file").getValue2()
# Create OneID
authn = OneID(server_flag)
# Set path to credentials file
authn.creds_file = creds_file
if (step == 1):
print "OneId. Authenticate for step 1"
# Find OneID request
json_data_array = requestParameters.get("json_data")
if ArrayHelper.isEmpty(json_data_array):
print "OneId. Authenticate for step 1. json_data is empty"
return False
request = json_data_array[0]
print "OneId. Authenticate for step 1. request: " + request
if (StringHelper.isEmptyString(request)):
return False
authn.set_credentials()
# Validate request
http_client = httpService.getHttpsClientDefaulTrustStore()
auth_data = httpService.encodeBase64(authn.api_id + ":" + authn.api_key)
http_response = httpService.executePost(http_client, authn.helper_server + "/validate", auth_data, request, ContentType.APPLICATION_JSON)
validation_content = httpService.convertEntityToString(httpService.getResponseContent(http_response))
print "OneId. Authenticate for step 1. validation_content: " + validation_content
if (StringHelper.isEmptyString(validation_content)):
return False
validation_resp = json.loads(validation_content)
print "OneId. Authenticate for step 1. validation_resp: " + str(validation_resp)
if (not authn.success(validation_resp)):
return False
response = json.loads(request)
for x in validation_resp:
response[x] = validation_resp[x]
oneid_user_uid = response['uid']
print "OneId. Authenticate for step 1. oneid_user_uid: " + oneid_user_uid
# Check if the is user with specified oneid_user_uid
find_user_by_uid = userService.getUserByAttribute("oxExternalUid", "oneid:" + oneid_user_uid)
if (find_user_by_uid == None):
print "OneId. Authenticate for step 1. Failed to find user"
print "OneId. Authenticate for step 1. Setting count steps to 2"
identity.setWorkingParameter("oneid_count_login_steps", 2)
identity.setWorkingParameter("oneid_user_uid", oneid_user_uid)
return True
found_user_name = find_user_by_uid.getUserId()
print "OneId. Authenticate for step 1. found_user_name: " + found_user_name
identity = CdiUtil.bean(Identity)
credentials = identity.getCredentials()
credentials.setUsername(found_user_name)
credentials.setUser(find_user_by_uid)
print "OneId. Authenticate for step 1. Setting count steps to 1"
identity.setWorkingParameter("oneid_count_login_steps", 1)
return True
elif (step == 2):
print "OneId. Authenticate for step 2"
sessionAttributes = identity.getSessionId().getSessionAttributes()
if (sessionAttributes == None) or not sessionAttributes.containsKey("oneid_user_uid"):
print "OneId. Authenticate for step 2. oneid_user_uid is empty"
return False
oneid_user_uid = sessionAttributes.get("oneid_user_uid")
passed_step1 = StringHelper.isNotEmptyString(oneid_user_uid)
if (not passed_step1):
return False
identity = CdiUtil.bean(Identity)
credentials = identity.getCredentials()
user_name = credentials.getUsername()
passed_step1 = StringHelper.isNotEmptyString(user_name)
if (not passed_step1):
return False
identity = CdiUtil.bean(Identity)
credentials = identity.getCredentials()
user_name = credentials.getUsername()
user_password = credentials.getPassword()
logged_in = False
if (StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password)):
logged_in = authenticationService.authenticate(user_name, user_password)
if (not logged_in):
return False
# Check if there is user which has oneid_user_uid
# Avoid mapping OneID account to more than one IDP account
find_user_by_uid = userService.getUserByAttribute("oxExternalUid", "oneid:" + oneid_user_uid)
if (find_user_by_uid == None):
# Add oneid_user_uid to user one id UIDs
find_user_by_uid = userService.addUserAttribute(user_name, "oxExternalUid", "oneid:" + oneid_user_uid)
if (find_user_by_uid == None):
print "OneId. Authenticate for step 2. Failed to update current user"
return False
return True
else:
found_user_name = find_user_by_uid.getUserId()
print "OneId. Authenticate for step 2. found_user_name: " + found_user_name
if StringHelper.equals(user_name, found_user_name):
return True
return False
else:
return False
def prepareForStep(self, configurationAttributes, requestParameters, step):
identity = CdiUtil.bean(Identity)
authenticationService = CdiUtil.bean(AuthenticationService)
server_flag = configurationAttributes.get("oneid_server_flag").getValue2()
callback_attrs = configurationAttributes.get("oneid_callback_attrs").getValue2()
creds_file = configurationAttributes.get("oneid_creds_file").getValue2()
# Create OneID
authn = OneID(server_flag)
# Set path to credentials file
authn.creds_file = creds_file
if (step == 1):
print "OneId. Prepare for step 1"
facesContext = CdiUtil.bean(FacesContext)
request = facesContext.getExternalContext().getRequest()
validation_page = request.getContextPath() + "/postlogin?" + "request_uri=&" + authenticationService.parametersAsString()
print "OneId. Prepare for step 1. validation_page: " + validation_page
oneid_login_button = authn.draw_signin_button(validation_page, callback_attrs, True)
print "OneId. Prepare for step 1. oneid_login_button: " + oneid_login_button
identity.setWorkingParameter("oneid_login_button", oneid_login_button)
identity.setWorkingParameter("oneid_script_header", authn.script_header)
identity.setWorkingParameter("oneid_form_script", authn.oneid_form_script)
return True
elif (step == 2):
print "OneId. Prepare for step 2"
return True
else:
return False
def getExtraParametersForStep(self, configurationAttributes, step):
if (step == 2):
return Arrays.asList("oneid_user_uid")
return None
def getCountAuthenticationSteps(self, configurationAttributes):
identity = CdiUtil.bean(Identity)
if (identity.isSetWorkingParameter("oneid_count_login_steps")):
return identity.getWorkingParameter("oneid_count_login_steps")
return 2
def getPageForStep(self, configurationAttributes, step):
if (step == 1):
return "/auth/oneid/oneidlogin.xhtml"
return "/auth/oneid/oneidpostlogin.xhtml"
def logout(self, configurationAttributes, requestParameters):
return True
| mit | 6,092,213,488,643,009,000 | 39.444915 | 149 | 0.636459 | false |
klausman/lib_users | fd_users.py | 1 | 4464 | #!/usr/bin/python -tt
"""
Libusers - a script that finds users of files that have been deleted/replaced
"""
# Released under the GPL-2
# -*- coding: utf8 -*-
import argparse
import sys
import glob
import fnmatch
import os
from collections import defaultdict
from lib_users_util import common
DELSUFFIX = " (deleted)"
PERMWARNING = """Warning: Some files could not be read.\n"""
PERMWARNINGUID0 = """\
Warning: Some files could not be read. Note that fd_users has to be run as
root to get a full list of deleted in-use libraries.\n"""
__version__ = "0.14"
def get_deleted_files(fddir, ign_patterns, ign_literals):
"""
Get list of deleted files listed in fddir.
Args:
fddir: name of the the FD infor directory, typically something like
/proc/12345/fd/
ign_patterns: List of globs for files to ignore
ign_literals: List of fixed strings to ignore
Returns:
List of deleted files.
"""
deletedfds = []
literals = set(ign_literals)
allfds = glob.glob(os.path.join(fddir, "*"))
for onefd in allfds:
# We can't use os.path.exists() since that simply does not work
# correctly on /proc files (broken links look like working ones).
target = os.readlink(onefd)
if target.endswith(DELSUFFIX):
actual_target = target[:-len(DELSUFFIX)]
if actual_target in literals:
continue
if match_any(actual_target, ign_patterns):
continue
deletedfds.append(actual_target)
return deletedfds
def match_any(name, patterns):
"""Return if name matches any of the patterns (globs)"""
for pattern in patterns:
if fnmatch.fnmatch(name, pattern):
return True
return False
def main(argv):
"""Main program"""
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version',
version='%%(prog)s %s' % (__version__))
parser.add_argument("-m", "--machine-readable", action="store_true",
help="Output machine readable info")
parser.add_argument("-s", "--showfiles", action="store_true",
help="In human readable mode, show deleted files")
parser.add_argument("-S", "--services", action="store_true",
help="Try to find systemd services for lib users")
parser.add_argument("-i", "--ignore-pattern", default=[],
metavar="GLOB", action='append',
help="Ignore deleted files matching %(metavar)s. "
"Can be specified multiple times.")
parser.add_argument("-I", "--ignore-literal", default=[],
metavar="LITERAL", action='append',
help="Ignore deleted files named %(metavar)s. "
"Can be specified multiple times.")
options = parser.parse_args(argv)
options.showitems = options.showfiles
users = defaultdict(lambda: (set(), set()))
read_failure = False
for fddir in glob.glob(common.FDPROCFSPAT):
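        # skip our own fd directories so we never report ourselves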
if (fddir.startswith("/proc/self/fd") or
fddir.startswith("/proc/thread-self/fd") or
fddir.startswith("/proc/%s/fd" % (os.getpid()))):
continue
try:
pid = os.path.normpath(fddir).split("/")[2]
except IndexError:
# This happens if the filenames look different
# than we expect (e.g. the user changed common.FDPROCFSPAT)
pid = "unknown"
try:
deletedfiles = get_deleted_files(fddir, options.ignore_pattern,
options.ignore_literal)
except IOError:
read_failure = True
continue
if deletedfiles:
argv = common.get_progargs(pid)
if not argv:
continue
users[argv][0].add(pid)
users[argv][1].update(deletedfiles)
if read_failure:
if os.geteuid() == 0:
sys.stderr.write(PERMWARNING)
else:
sys.stderr.write(PERMWARNINGUID0)
if len(users) > 0:
if options.machine_readable:
print(common.fmt_machine(users))
else:
print(common.fmt_human(users, options))
if options.services:
print()
print(common.get_services(users))
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-2.0 | -1,691,117,874,858,462,500 | 32.818182 | 77 | 0.578629 | false |
dreamlayers/st220x | libst2205/st2205.py | 1 | 2242 | from ctypes import *
from PIL import Image
# Handle definition for the st2205_* routines
class st2205_handle(Structure):
_fields_ = [("fd", c_int),
("width", c_uint),
("height", c_uint),
("bpp", c_int),
("proto", c_int),
("buff", POINTER(c_char)),
("oldpix", POINTER(c_ubyte)),
("offx", c_int),
("offy", c_int)
]
l = cdll.LoadLibrary("libst2205.so.2")
st2205_open = CFUNCTYPE(POINTER(st2205_handle), c_char_p) \
(("st2205_open", l))
st2205_close = CFUNCTYPE(None, POINTER(st2205_handle)) \
(("st2205_close", l))
st2205_send_data = CFUNCTYPE(None, POINTER(st2205_handle), POINTER(c_ubyte)) \
(("st2205_send_data", l))
st2205_send_partial = CFUNCTYPE(None, POINTER(st2205_handle), POINTER(c_ubyte),
c_int, c_int, c_int, c_int) \
(("st2205_send_partial", l))
st2205_backlight = CFUNCTYPE(None, POINTER(st2205_handle), c_int) \
(("st2205_backlight", l))
st2205_lcd_sleep = CFUNCTYPE(None, POINTER(st2205_handle), c_int) \
(("st2205_lcd_sleep", l))
class ST2205:
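    """Thin object wrapper around a libst2205 display handle.

    Usage sketch (the default device path below comes from __init__ and may
    differ on your system):
        frame = ST2205()
        img = frame.get_image()   # PIL image matching the panel size
        # ...draw on img with PIL...
        frame.update()            # push the whole frame
        frame.close()
    """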
def __init__(self, dev = '/dev/disk/by-id/usb-SITRONIX_MULTIMEDIA-0:0'):
self.h = st2205_open(dev)
self.i = None
assert self.h
def close(self):
st2205_close(self.h)
self.h = None
def backlight(self, on):
st2205_backlight(self.h, 1 if on else 0)
def lcd_sleep(self, sleep):
st2205_lcd_sleep(self.h, 1 if sleep else 0)
def get_image(self):
if self.i is None:
self.i = Image.new("RGB",
(self.h.contents.width, self.h.contents.height),
"black")
return self.i
def update(self):
st2205_send_data(self.h, cast(c_char_p(self.i.tobytes()),
POINTER(c_ubyte)))
def update_part(self, xs, ys, xe, ye):
st2205_send_partial(self.h, cast(c_char_p(self.i.tobytes()),
POINTER(c_ubyte)),
xs, ys, xe, ye)
| gpl-3.0 | 3,926,800,652,122,235,400 | 31.970588 | 79 | 0.501338 | false |
eroicaleo/LearningPython | mitx6001/Pset4/ProblemSet4/ps4b/ps4b.py | 1 | 5917 | #!/usr/bin/env python
from ps4a import *
import time
#
#
# Problem #6: Computer chooses a word
#
#
def compChooseWord(hand, wordList, n):
"""
Given a hand and a wordList, find the word that gives
the maximum value score, and return it.
This word should be calculated by considering all the words
in the wordList.
If no words in the wordList can be made from the hand, return None.
hand: dictionary (string -> int)
wordList: list (string)
n: integer (HAND_SIZE; i.e., hand size required for additional points)
returns: string or None
"""
maxScore = 0
maxWord = None
for word in wordList:
newHand = dict(hand)
for c in word:
if c in newHand and newHand[c] > 0:
newHand[c] = newHand[c] - 1
else:
break
else:
s = getWordScore(word, n)
if s > maxScore:
maxScore = s
maxWord = word
return maxWord
# BEGIN PSEUDOCODE <-- Remove this comment when you code this function; do your coding within the pseudocode (leaving those comments in-place!)
# Create a new variable to store the maximum score seen so far (initially 0)
# Create a new variable to store the best word seen so far (initially None)
# For each word in the wordList
# If you can construct the word from your hand
# (hint: you can use isValidWord, or - since you don't really need to test if the word is in the wordList - you can make a similar function that omits that test)
# Find out how much making that word is worth
# If the score for that word is higher than your best score
# Update your best score, and best word accordingly
# return the best word you found.
#
# Problem #7: Computer plays a hand
#
def compPlayHand(hand, wordList, n):
"""
Allows the computer to play the given hand, following the same procedure
as playHand, except instead of the user choosing a word, the computer
chooses it.
1) The hand is displayed.
2) The computer chooses a word.
3) After every valid word: the word and the score for that word is
displayed, the remaining letters in the hand are displayed, and the
computer chooses another word.
4) The sum of the word scores is displayed when the hand finishes.
5) The hand finishes when the computer has exhausted its possible
choices (i.e. compChooseWord returns None).
hand: dictionary (string -> int)
wordList: list (string)
n: integer (HAND_SIZE; i.e., hand size required for additional points)
"""
score = 0
while True:
if calculateHandlen(hand) == 0:
print 'Total score: %d points.\n' % score
break
print 'Current hand: ',
displayHand(hand)
guess = compChooseWord(hand, wordList, n)
if guess == None:
print 'Total score: %d points.\n' % score
break
s = getWordScore(guess, n)
score += s
print '"%s" earned %d points. Total: %d points' % (guess, s, score)
hand = updateHand(hand, guess)
#
# Problem #8: Playing a game
#
#
def playGame(wordList):
"""
Allow the user to play an arbitrary number of hands.
1) Asks the user to input 'n' or 'r' or 'e'.
* If the user inputs 'e', immediately exit the game.
* If the user inputs anything that's not 'n', 'r', or 'e', keep asking them again.
2) Asks the user to input a 'u' or a 'c'.
* If the user inputs anything that's not 'c' or 'u', keep asking them again.
3) Switch functionality based on the above choices:
* If the user inputted 'n', play a new (random) hand.
* Else, if the user inputted 'r', play the last hand again.
* If the user inputted 'u', let the user play the game
with the selected hand, using playHand.
* If the user inputted 'c', let the computer play the
game with the selected hand, using compPlayHand.
4) After the computer or user has played the hand, repeat from step 1
wordList: list (string)
"""
# TO DO... <-- Remove this comment when you code this function
n = HAND_SIZE
hand = dict()
while True:
print 'Enter n to deal a new hand, r to replay the last hand, or e to end game: ',
userInput = raw_input()
if userInput in ['n', 'r']:
hand = dealHand(n) if userInput == 'n' else hand
if len(hand) == 0:
print 'You have not played a hand yet. Please play a new hand first!\n'
continue
while True:
print 'Enter u to have yourself play, c to have the computer play: ',
userInput = raw_input()
if userInput == 'u':
playHand(hand, wordList, n)
break
elif userInput == 'c':
compPlayHand(hand, wordList, n)
break
else:
print 'Invalid command.\n'
elif userInput == 'e':
break
else:
print 'Invalid command.'
# wordList = loadWords()
# print compChooseWord({'a': 1, 'p': 2, 's': 1, 'e': 1, 'l': 1}, wordList, 6)
# print compChooseWord({'a': 2, 'c': 1, 'b': 1, 't': 1}, wordList, 5)
# print compChooseWord({'a': 2, 'e': 2, 'i': 2, 'm': 2, 'n': 2, 't': 2}, wordList, 12)
# print compChooseWord({'x': 2, 'z': 2, 'q': 2, 'n': 2, 't': 2}, wordList, 12)
# compPlayHand({'a': 1, 'p': 2, 's': 1, 'e': 1, 'l': 1}, wordList, 6)
# compPlayHand({'a': 2, 'c': 1, 'b': 1, 't': 1}, wordList, 5)
# compPlayHand({'a': 2, 'e': 2, 'i': 2, 'm': 2, 'n': 2, 't': 2}, wordList, 12)
#
# Build data structures used for entire session and play game
#
if __name__ == '__main__':
wordList = loadWords()
playGame(wordList)
| mit | 5,405,827,712,816,336,000 | 31.690608 | 169 | 0.584249 | false |
ryfeus/lambda-packs | pytorch/source/torch/backends/cudnn/rnn.py | 1 | 1683 | import torch.cuda
import torch.backends.cudnn as cudnn
def get_cudnn_mode(mode):
if mode == 'RNN_RELU':
return cudnn.CUDNN_RNN_RELU
elif mode == 'RNN_TANH':
return cudnn.CUDNN_RNN_TANH
elif mode == 'LSTM':
return cudnn.CUDNN_LSTM
elif mode == 'GRU':
return cudnn.CUDNN_GRU
else:
raise Exception("Unknown mode: {}".format(mode))
# NB: We don't actually need this class anymore (in fact, we could serialize the
# dropout state for even better reproducibility), but it is kept for backwards
# compatibility for old models.
class Unserializable(object):
def __init__(self, inner):
self.inner = inner
def get(self):
return self.inner
def __getstate__(self):
# Note: can't return {}, because python2 won't call __setstate__
# if the value evaluates to False
return "<unserializable>"
def __setstate__(self, state):
self.inner = None
def init_dropout_state(dropout, train, dropout_seed, dropout_state):
dropout_desc_name = 'desc_' + str(torch.cuda.current_device())
dropout_p = dropout if train else 0
if (dropout_desc_name not in dropout_state) or (dropout_state[dropout_desc_name].get() is None):
if dropout_p == 0:
dropout_state[dropout_desc_name] = Unserializable(None)
else:
dropout_state[dropout_desc_name] = Unserializable(torch._cudnn_init_dropout_state(
dropout_p,
train,
dropout_seed,
self_ty=torch.uint8,
device=torch.device('cuda')))
dropout_ts = dropout_state[dropout_desc_name].get()
return dropout_ts
| mit | 2,364,469,853,383,140,400 | 31.365385 | 100 | 0.621509 | false |
ProjectQ-Framework/ProjectQ | projectq/setups/decompositions/crz2cxandrz.py | 1 | 1546 | # -*- coding: utf-8 -*-
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Registers a decomposition for controlled z-rotation gates.
It uses 2 z-rotations and 2 C^n NOT gates to achieve this gate.
"""
from projectq.cengines import DecompositionRule
from projectq.meta import get_control_count
from projectq.ops import NOT, Rz, C
def _decompose_CRz(cmd): # pylint: disable=invalid-name
"""Decompose the controlled Rz gate (into CNOT and Rz)."""
qubit = cmd.qubits[0]
ctrl = cmd.control_qubits
gate = cmd.gate
n_controls = get_control_count(cmd)
Rz(0.5 * gate.angle) | qubit
C(NOT, n_controls) | (ctrl, qubit)
Rz(-0.5 * gate.angle) | qubit
C(NOT, n_controls) | (ctrl, qubit)
def _recognize_CRz(cmd): # pylint: disable=invalid-name
"""Recognize the controlled Rz gate."""
return get_control_count(cmd) >= 1
#: Decomposition rules
all_defined_decomposition_rules = [DecompositionRule(Rz, _decompose_CRz, _recognize_CRz)]
| apache-2.0 | 1,042,759,315,601,558,500 | 33.355556 | 89 | 0.707633 | false |
igor-rangel7l/igorrangelteste.repository | script.module.urlresolver/lib/urlresolver/plugins/nowvideo.py | 1 | 2485 | """
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
class NowvideoResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "nowvideo"
domains = ['nowvideo.eu', 'nowvideo.ch', 'nowvideo.sx', 'nowvideo.co', 'nowvideo.li']
pattern = '(?://|\.)(nowvideo\.(?:eu|ch|sx|co|li))/(?:video/|embed\.php\?v=)([A-Za-z0-9]+)'
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
html = self.net.http_GET(web_url).content
r = re.search('flashvars.filekey=(.+?);', html)
if r:
r = r.group(1)
try: filekey = re.compile('\s+%s="(.+?)"' % r).findall(html)[-1]
except: filekey = r
player_url = 'http://www.nowvideo.sx/api/player.api.php?key=%s&file=%s' % (filekey, media_id)
html = self.net.http_GET(player_url).content
r = re.search('url=(.+?)&', html)
if r:
stream_url = r.group(1)
else:
raise UrlResolver.ResolverError('File Not Found or removed')
return stream_url
def get_url(self, host, media_id):
return 'http://embed.nowvideo.sx/embed.php?v=%s' % media_id
def get_host_and_id(self, url):
r = re.search(self.pattern, url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
return re.search(self.pattern, url) or self.name in host
| gpl-2.0 | 7,151,518,445,339,459,000 | 33.513889 | 105 | 0.624547 | false |
lukaskubis/forepycast | test/__init__.py | 1 | 1819 | import os
import pickle
import unittest
import darksky
import requests
class TestPickle(unittest.TestCase):
""" Forecast pickling """
@classmethod
def setUpClass(cls):
def mock_request_get(*args, **kwargs):
response = type('Response', (object,), {})
response.headers = {}
response.status_code = 200
with open('./test/response.json', 'r') as fixture:
response.text = fixture.read()
return response
cls.request_get = requests.get
requests.get = mock_request_get
@classmethod
def tearDownClass(cls):
os.system('find . -name "*.pickle" -exec rm {} \;')
requests.get = cls.request_get
def test_pickle(self):
location = -77.843906, 166.686520 # McMurdo station, antarctica
# This doesn't actually hit the API since we mocked out the request lib
forecast = darksky.forecast('test_key', *location)
# Make sure we got the right data, via our mock
self.assertEqual(forecast.currently.temperature, -23.58)
# Ensure pickling by actually pickling
with open('./forecast.pickle', 'wb') as outfile:
pickle.dump(forecast, outfile)
# Check that the file exists
self.assertTrue(os.path.exists('./forecast.pickle'))
def test_unpickle(self):
# Check that the previous test, which writes out the pickle, succeeded
self.assertTrue(os.path.exists('./forecast.pickle'))
# Load the pickle file
with open('./forecast.pickle', 'rb') as infile:
forecast = pickle.load(infile)
# Make sure it loaded right
self.assertTrue(forecast)
self.assertEqual(forecast.currently.temperature, -23.58)
if __name__ == '__main__':
unittest.main()
| mit | 4,226,306,309,789,645,300 | 28.819672 | 79 | 0.615723 | false |
pculture/unisubs | task_settings.py | 1 | 1696 | # Amara, universalsubtitles.org
#
# Copyright (C) 2014 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
"""task_settings -- settings for periodic tasks."""
from datetime import timedelta
# Tasks that we schedule using rq-schedule
REPEATING_JOBS = [
{
'job': 'auth.tasks.expire_login_tokens',
'crontab': dict(minute=10, hour=23),
},
{
'job': 'teams.tasks.add_videos_notification_daily',
'crontab': dict(minute=0, hour=23),
},
{
'job': 'teams.tasks.add_videos_notification_hourly',
'crontab': dict(minute=0),
},
{
'job': 'teams.tasks.expire_tasks',
'crontab': dict(minute=0, hour=7),
},
{
'job': 'videos.tasks.cleanup',
'crontab': dict(hour=3, day_of_week=1),
},
{
'job': 'externalsites.tasks.retry_failed_sync',
'period': timedelta(seconds=10),
},
{
'job': 'notifications.tasks.prune_notification_history',
'crontab': dict(hour=0),
},
]
__all__ = ['REPEATING_JOBS']
| agpl-3.0 | 4,040,941,370,891,340,300 | 29.836364 | 74 | 0.648585 | false |
Michal-Fularz/codingame_solutions | codingame_solutions/Clash_of_Code/reverse.py | 1 | 3723 | __author__ = 'Amin'
import sys
import math
# Test 1 -
# Provided Input: Abcde fghij klmno pqrs tuv wxyz
# Expected Output: true
# Test 2 -
# Provided Input: this sentence does not have what it should
# Expected Output: false
# Test 3 -
# Provided Input: Portez ce vieux whisky au juge blond qui fume
# Expected Output: true
# Test 4 -
# Provided Input: abcde ghijklmnopqrstuvwxyz
# Expected Output: false
# Test 5 -
# Provided Input: abcde fghij klmno pqrs tuv wxyy
# Expected Output: false
def upper_letter():
s = input()
flag_upper = False
for c in s:
if c.isupper():
flag_upper = True
if flag_upper:
print("true")
else:
print("false")
# Test 1 -
# Provided Input: 169 104
# Expected Output: 13
# Test 2 -
# Provided Input: 100 250
# Expected Output: 50
# Test 3 -
# Provided Input: 1 1
# Expected Output: 1
# Test 4 -
# Provided Input: 1000000 5
# Expected Output: 5
# Test 5 -
# Provided Input: 104711 104717
# Expected Output: 1
# Test 6 -
# Provided Input: 98304 65536
# Expected Output: 32768
def _gcd(m, n):
while True: # petla, czyli "wroc do kroku", tylko ze oznaczone u celu, a nie na poczatku skoku. W pythonie nie ma goto (prawie...;))
r = m % n # przypisanie reszty
if not r: # jesli r rowne 0 to
return n # zwroc n
m, n = n, r # w przeciwnym przypadku przypisz co trzeba i powtorz
def greatest_common_divisor():
a, b = [int(i) for i in input().split()]
m = _gcd(a, b)
print(m)
# Test 1 -
# Provided Input: 5 2
# Expected Output: 1 2 4 8 16
def print_powers():
n, r = [int(i) for i in input().split()]
l = []
for i in range(n):
l.append(pow(r, i))
p = ""
for k in l:
p += str(k) + " "
print(p[:-1])
# convert int values to 0-1 representation
def _convert_to_bin_string(x):
l = []
flag_c = True
v = x
while flag_c:
if v > 1:
if v % 2 == 1:
l.append(1)
else:
l.append(0)
v = v // 2
else:
if v == 1:
l.append(1)
else:
l.append(0)
flag_c = False
r = ""
for i in reversed(l):
r += str(i)
return r
def print_as_binary():
n = int(input())
for i in range(n):
x = int(input())
# three different ways
#print(_convert_to_bin_string(x))
#print(bin(x)[2:])
print("{0:b}".format(x))
def _convert_to_int(c):
if c == "F":
return 15
elif c == "E":
return 14
elif c == "D":
return 13
elif c == "C":
return 12
elif c == "B":
return 11
elif c == "A":
return 10
else:
return int(c)
# you are provided with a number in hexadecimal format and should convert it to decimal value
def hex_string_to_int_value():
number = input()
print("number: " + str(number), file=sys.stderr)
r=0
for i, c in enumerate(reversed(number)):
v = _convert_to_int(c)
r += 16**i * v
print(r)
# you get n words separated by new line characters
# print them in following order:
# line 0
# line 2
# line 4
# line 1
# line 3
# line 5
# line 6
def mixed_lines():
n = int(input())
lines = []
for i in range(0, n):
lines.append(input())
r = ""
r += lines[0] + "\n"
for i in range(2, len(lines), 2):
r += lines[i]
for i in range(1, len(lines), 2):
r += lines[i]
r += lines[-1]
print(r)
if __name__ == "__main__":
upper_letter()
greatest_common_divisor()
print_powers()
print_as_binary()
hex_string_to_int_value()
mixed_lines()
| mit | -8,887,315,492,864,588,000 | 17.994898 | 137 | 0.541767 | false |
Debatrix/cycas | ui.py | 1 | 6547 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'untitled.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
import sys
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1000, 700)
self.setWindowTitle("图标")
self.setWindowIcon(QtGui.QIcon(r'ooopic_1459042536.ico'))
with open('cycas.qss', 'r') as q:#挂载样式表
self.setStyleSheet(q.read())#挂载样式表
self.pushButton_1 = QtWidgets.QPushButton(MainWindow)
self.pushButton_1.setGeometry(QtCore.QRect(310, 440, 81, 51))
self.pushButton_1.setObjectName("pushButton_1")
self.pushButton_2 = QtWidgets.QPushButton(MainWindow)
self.pushButton_2.setGeometry(QtCore.QRect(460, 440, 81, 51))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_3 = QtWidgets.QPushButton(MainWindow)
self.pushButton_3.setGeometry(QtCore.QRect(610, 440, 81, 51))
self.pushButton_3.setObjectName("pushButton_3")
self.pushButton_4 = QtWidgets.QPushButton(MainWindow)
self.pushButton_4.setGeometry(QtCore.QRect(310, 510, 81, 51))
self.pushButton_4.setObjectName("pushButton_4")
self.pushButton_5 = QtWidgets.QPushButton(MainWindow)
self.pushButton_5.setGeometry(QtCore.QRect(460, 510, 81, 51))
self.pushButton_5.setObjectName("pushButton_5")
self.pushButton_6 = QtWidgets.QPushButton(MainWindow)
self.pushButton_6.setGeometry(QtCore.QRect(610, 510, 81, 51))
self.pushButton_6.setObjectName("pushButton_6")
self.lineEdit = QtWidgets.QLineEdit(MainWindow)
self.lineEdit.setGeometry(QtCore.QRect(130, 380, 121, 31))
self.lineEdit.setObjectName("lineEdit")
self.lineEdit_2 = QtWidgets.QLineEdit(MainWindow)
self.lineEdit_2.setGeometry(QtCore.QRect(130, 430, 121, 31))
self.lineEdit_2.setObjectName("lineEdit_2")
self.lineEdit_3 = QtWidgets.QLineEdit(MainWindow)
self.lineEdit_3.setGeometry(QtCore.QRect(130, 480, 121, 31))
self.lineEdit_3.setObjectName("lineEdit_3")
self.timeEdit = QtWidgets.QTimeEdit(MainWindow)
self.timeEdit.setGeometry(QtCore.QRect(30, 590, 221, 51))
self.timeEdit.setObjectName("timeEdit")
self.dateEdit = QtWidgets.QDateEdit(MainWindow)
self.dateEdit.setGeometry(QtCore.QRect(29, 541, 221, 41))
self.dateEdit.setObjectName("dateEdit")
self.label_1 = QtWidgets.QLabel(MainWindow)
self.label_1.setGeometry(QtCore.QRect(40, 380, 61, 31))
self.label_1.setObjectName("label_1")
self.label_2 = QtWidgets.QLabel(MainWindow)
self.label_2.setGeometry(QtCore.QRect(40, 430, 61, 31))
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(MainWindow)
self.label_3.setGeometry(QtCore.QRect(40, 480, 61, 31))
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(MainWindow)#显示图片
self.label_4.setGeometry(QtCore.QRect(30, 30, 241, 221))
self.label_4.setText("")
self.label_4.setObjectName("label_4")
pic = QtGui.QPixmap(r'npuc.png').scaled(self.label_4.width(), self.label_4.height())
self.label_4.setPixmap(pic)#显示图片
self.label_5 = QtWidgets.QLabel(MainWindow)
self.label_5.setGeometry(QtCore.QRect(730, 30, 231, 601,))
self.label_5.setObjectName("label_5")
#self.label_5.adjustSize()
self.label_5.setAlignment(Qt.AlignLeft | Qt.AlignTop)#对齐命令
#self.label_5.setMargin(50)#设置外边距
#self.label_5.setWordWrap(True)#换行命令
#self.label_5.resize(50, 50)#换行命令
self.label = QtWidgets.QLabel(MainWindow)
self.label.setGeometry(QtCore.QRect(310, 30, 381, 381))
self.label.setText("")
pic = QtGui.QPixmap(r'npuc.png').scaled(self.label.width(), self.label.height())
self.label.setPixmap(pic)
self.label.setObjectName("label")
self.menuBar = QtWidgets.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 1000, 23))
self.menuBar.setObjectName("menuBar")
self.menu = QtWidgets.QMenu(self.menuBar)
self.menu.setObjectName("menu")
self.menu_2 = QtWidgets.QMenu(self.menuBar)
self.menu_2.setObjectName("menu_2")
self.mainToolBar = QtWidgets.QToolBar(MainWindow)
self.mainToolBar.setObjectName("mainToolBar")
self.statusBar = QtWidgets.QStatusBar(MainWindow)
self.statusBar.setObjectName("statusBar")
self.video = QtWidgets.QAction(MainWindow)
self.video.setObjectName("video")
self.picture = QtWidgets.QAction(MainWindow)
self.picture.setObjectName("picture")
self.menu.addAction(self.video)
self.menu.addAction(self.picture)
self.menuBar.addAction(self.menu.menuAction())
self.menuBar.addAction(self.menu_2.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.pushButton_1.clicked.connect(self.paly)
# self.pushButton_2.clicked.connect(self.openimage2)
# self.pushButton_3.clicked.connect(self.list)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "监控识别"))
self.pushButton_1.setText(_translate("MainWindow", "停止"))
self.pushButton_2.setText(_translate("MainWindow", "播放"))
self.pushButton_3.setText(_translate("MainWindow", "暂停"))
self.pushButton_4.setText(_translate("MainWindow", "获取系统时间"))
self.pushButton_5.setText(_translate("MainWindow", "截取图片"))
self.pushButton_6.setText(_translate("MainWindow", "截取帧数时间"))
self.menu.setTitle(_translate("MainWindow", "菜单"))
self.menu_2.setTitle(_translate("MainWindow", "工具"))
self.video.setText(_translate("MainWindow", "打开视频"))
self.picture.setText(_translate("MainWindow", "打开图片"))
self.label_1.setText(_translate("MainWindow", "播放速度"))
self.label_2.setText(_translate("MainWindow", "精确度"))
self.label_3.setText(_translate("MainWindow", "抽帧数"))
| apache-2.0 | 5,716,910,754,664,739,000 | 48.434109 | 92 | 0.671005 | false |
avanzosc/avanzosc6.1 | avanzosc_invoice_duplicate/invoice.py | 1 | 1506 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Avanzosc - Avanced Open Source Consulting
# Copyright (C) 2011 - 2012 Avanzosc <http://www.avanzosc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from osv import fields, osv
class account_invoice(osv.osv):
_inherit = 'account.invoice'
def copy(self, cr, uid, id, default=None, context=None):
default['invoice_number'] = False
res = super(account_invoice, self).copy(cr, uid, id, default, context)
self.pool.get('account.invoice').write(cr,uid,res,{'invoice_number':False})
name =False
self.pool.get('account.invoice')._number(cr, uid, [res], name, context)
return res
account_invoice() | agpl-3.0 | -959,162,032,762,465,700 | 40.861111 | 83 | 0.608898 | false |
mozilla/django-tidings | tidings/utils.py | 1 | 4656 | from zlib import crc32
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import EmailMessage
from django.template import Context, loader
from django.urls import reverse as django_reverse
from django.utils.module_loading import import_string
from .compat import next, string_types
def collate(*iterables, **kwargs):
"""Return an iterable ordered collation of the already-sorted items
from each of ``iterables``, compared by kwarg ``key``.
If ``reverse=True`` is passed, iterables must return their results in
descending order rather than ascending.
"""
key = kwargs.pop('key', lambda a: a)
reverse = kwargs.pop('reverse', False)
min_or_max = max if reverse else min
rows = [iter(iterable) for iterable in iterables if iterable]
next_values = {}
by_key = []
def gather_next_value(row, index):
try:
next_value = next(row)
except StopIteration:
pass
else:
next_values[index] = next_value
by_key.append((key(next_value), index))
for index, row in enumerate(rows):
gather_next_value(row, index)
while by_key:
key_value, index = min_or_max(by_key)
by_key.remove((key_value, index))
next_value = next_values.pop(index)
yield next_value
gather_next_value(rows[index], index)
def hash_to_unsigned(data):
"""If ``data`` is a string or unicode string, return an unsigned 4-byte int
hash of it. If ``data`` is already an int that fits those parameters,
return it verbatim.
If ``data`` is an int outside that range, behavior is undefined at the
moment. We rely on the ``PositiveIntegerField`` on
:class:`~tidings.models.WatchFilter` to scream if the int is too long for
the field.
We use CRC32 to do the hashing. Though CRC32 is not a good general-purpose
hash function, it has no collisions on a dictionary of 38,470 English
words, which should be fine for the small sets that :class:`WatchFilters
<tidings.models.WatchFilter>` are designed to enumerate. As a bonus, it is
fast and available as a built-in function in some DBs. If your set of
filter values is very large or has different CRC32 distribution properties
than English words, you might want to do your own hashing in your
:class:`~tidings.events.Event` subclass and pass ints when specifying
filter values.
"""
if isinstance(data, string_types):
# Return a CRC32 value identical across Python versions and platforms
# by stripping the sign bit as on
# http://docs.python.org/library/zlib.html.
return crc32(data.encode('utf-8')) & 0xffffffff
else:
return int(data)
def emails_with_users_and_watches(
subject, template_path, vars, users_and_watches,
from_email=settings.TIDINGS_FROM_ADDRESS, **extra_kwargs):
"""Return iterable of EmailMessages with user and watch values substituted.
A convenience function for generating emails by repeatedly rendering a
Django template with the given ``vars`` plus a ``user`` and ``watches`` key
for each pair in ``users_and_watches``
:arg template_path: path to template file
:arg vars: a map which becomes the Context passed in to the template
:arg extra_kwargs: additional kwargs to pass into EmailMessage constructor
"""
template = loader.get_template(template_path)
context = Context(vars)
for u, w in users_and_watches:
context['user'] = u
# Arbitrary single watch for compatibility with 0.1
# TODO: remove.
context['watch'] = w[0]
context['watches'] = w
yield EmailMessage(subject,
template.render(context),
from_email,
[u.email],
**extra_kwargs)
def import_from_setting(setting_name, fallback):
"""Return the resolution of an import path stored in a Django setting.
:arg setting_name: The name of the setting holding the import path
:arg fallback: An alternate object to use if the setting is empty or
doesn't exist
Raise ImproperlyConfigured if a path is given that can't be resolved.
"""
path = getattr(settings, setting_name, None)
if path:
try:
return import_string(path)
except ImportError:
raise ImproperlyConfigured('%s: No such path.' % path)
else:
return fallback
# Here to be imported by others:
reverse = import_from_setting('TIDINGS_REVERSE', django_reverse) # no QA
| bsd-3-clause | 8,167,331,132,899,926,000 | 34.541985 | 79 | 0.664519 | false |
aureooms/checkio | home/04-count-neighbours.py | 1 | 1235 | def count_neighbours( grid , row , col ) :
left = max( 0 , row - 1 )
right = min( row + 2 , len( grid ) )
top = max( 0 , col - 1 )
bottom = min( col + 2 , len( grid[0] ) )
return sum( grid[i][j] for i in range( left , right ) for j in range( top , bottom ) if i != row or j != col )
if __name__ == '__main__':
#These "asserts" using only for self-checking and not necessary for auto-testing
assert count_neighbours(((1, 0, 0, 1, 0),
(0, 1, 0, 0, 0),
(0, 0, 1, 0, 1),
(1, 0, 0, 0, 0),
(0, 0, 1, 0, 0),), 1, 2) == 3, "1st example"
assert count_neighbours(((1, 0, 0, 1, 0),
(0, 1, 0, 0, 0),
(0, 0, 1, 0, 1),
(1, 0, 0, 0, 0),
(0, 0, 1, 0, 0),), 0, 0) == 1, "2nd example"
assert count_neighbours(((1, 1, 1),
(1, 1, 1),
(1, 1, 1),), 0, 2) == 3, "Dense corner"
assert count_neighbours(((0, 0, 0),
(0, 1, 0),
(0, 0, 0),), 1, 1) == 0, "Single"
| agpl-3.0 | 2,556,771,150,453,167,000 | 43.107143 | 114 | 0.351417 | false |
ttfseiko/openerp-trunk | openerp/addons/account/report/report_vat.py | 1 | 11569 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.web import http
from openerp.addons.web.http import request
from common_report_header import common_report_header
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import xlwt
class tax_report(http.Controller, common_report_header):
@http.route(['/report/account.report_vat'], type='http', auth='user', website=True, multilang=True)
def report_account_tax(self, **data):
report_obj = request.registry['report']
self.cr, self.uid, self.pool = request.cr, request.uid, request.registry
data = report_obj.eval_params(data)
res = {}
self.period_ids = []
period_obj = self.pool.get('account.period')
self.display_detail = data['form']['display_detail']
res['periods'] = ''
res['fiscalyear'] = data['form'].get('fiscalyear_id', False)
if data['form'].get('period_from', False) and data['form'].get('period_to', False):
self.period_ids = period_obj.build_ctx_periods(self.cr, self.uid, data['form']['period_from'], data['form']['period_to'])
docargs = {
'fiscalyear': self._get_fiscalyear(data),
'account': self._get_account(data),
'based_on': self._get_basedon(data),
'period_from': self.get_start_period(data),
'period_to': self.get_end_period(data),
'taxlines': self._get_lines(self._get_basedon(data), company_id=data['form']['company_id']),
}
return request.registry['report'].render(self.cr, self.uid, [], 'account.report_vat', docargs)
def _get_basedon(self, form):
return form['form']['based_on']
def _get_lines(self, based_on, company_id=False, parent=False, level=0, context=None):
period_list = self.period_ids
res = self._get_codes(based_on, company_id, parent, level, period_list, context=context)
if period_list:
res = self._add_codes(based_on, res, period_list, context=context)
else:
self.cr.execute ("select id from account_fiscalyear")
fy = self.cr.fetchall()
self.cr.execute ("select id from account_period where fiscalyear_id = %s",(fy[0][0],))
periods = self.cr.fetchall()
for p in periods:
period_list.append(p[0])
res = self._add_codes(based_on, res, period_list, context=context)
i = 0
top_result = []
while i < len(res):
res_dict = { 'code': res[i][1].code,
'name': res[i][1].name,
'debit': 0,
'credit': 0,
'tax_amount': res[i][1].sum_period,
'type': 1,
'level': res[i][0],
'pos': 0
}
top_result.append(res_dict)
res_general = self._get_general(res[i][1].id, period_list, company_id, based_on, context=context)
ind_general = 0
while ind_general < len(res_general):
res_general[ind_general]['type'] = 2
res_general[ind_general]['pos'] = 0
res_general[ind_general]['level'] = res_dict['level']
top_result.append(res_general[ind_general])
ind_general+=1
i+=1
return top_result
def _get_general(self, tax_code_id, period_list, company_id, based_on, context=None):
if not self.display_detail:
return []
res = []
obj_account = self.pool.get('account.account')
periods_ids = tuple(period_list)
if based_on == 'payments':
self.cr.execute('SELECT SUM(line.tax_amount) AS tax_amount, \
SUM(line.debit) AS debit, \
SUM(line.credit) AS credit, \
COUNT(*) AS count, \
account.id AS account_id, \
account.name AS name, \
account.code AS code \
FROM account_move_line AS line, \
account_account AS account, \
account_move AS move \
LEFT JOIN account_invoice invoice ON \
(invoice.move_id = move.id) \
WHERE line.state<>%s \
AND line.tax_code_id = %s \
AND line.account_id = account.id \
AND account.company_id = %s \
AND move.id = line.move_id \
AND line.period_id IN %s \
AND ((invoice.state = %s) \
OR (invoice.id IS NULL)) \
GROUP BY account.id,account.name,account.code', ('draft', tax_code_id,
company_id, periods_ids, 'paid',))
else:
self.cr.execute('SELECT SUM(line.tax_amount) AS tax_amount, \
SUM(line.debit) AS debit, \
SUM(line.credit) AS credit, \
COUNT(*) AS count, \
account.id AS account_id, \
account.name AS name, \
account.code AS code \
FROM account_move_line AS line, \
account_account AS account \
WHERE line.state <> %s \
AND line.tax_code_id = %s \
AND line.account_id = account.id \
AND account.company_id = %s \
AND line.period_id IN %s\
AND account.active \
GROUP BY account.id,account.name,account.code', ('draft', tax_code_id,
company_id, periods_ids,))
res = self.cr.dictfetchall()
i = 0
while i<len(res):
res[i]['account'] = obj_account.browse(self.cr, self.uid, res[i]['account_id'], context=context)
i+=1
return res
def _get_codes(self, based_on, company_id, parent=False, level=0, period_list=None, context=None):
obj_tc = self.pool.get('account.tax.code')
ids = obj_tc.search(self.cr, self.uid, [('parent_id','=',parent),('company_id','=',company_id)], order='sequence', context=context)
res = []
for code in obj_tc.browse(self.cr, self.uid, ids, {'based_on': based_on}):
res.append(('.'*2*level, code))
res += self._get_codes(based_on, company_id, code.id, level+1, context=context)
return res
def _add_codes(self, based_on, account_list=None, period_list=None, context=None):
if account_list is None:
account_list = []
if period_list is None:
period_list = []
res = []
obj_tc = self.pool.get('account.tax.code')
for account in account_list:
ids = obj_tc.search(self.cr, self.uid, [('id','=', account[1].id)], context=context)
sum_tax_add = 0
for period_ind in period_list:
for code in obj_tc.browse(self.cr, self.uid, ids, {'period_id':period_ind,'based_on': based_on}):
sum_tax_add = sum_tax_add + code.sum_period
code.sum_period = sum_tax_add
res.append((account[0], code))
return res
def _get_currency(self, form, context=None):
return self.pool.get('res.company').browse(self.cr, self.uid, form['company_id'], context=context).currency_id.name
def sort_result(self, accounts, context=None):
result_accounts = []
ind=0
old_level=0
while ind<len(accounts):
#
account_elem = accounts[ind]
#
#
# we will now check if the level is lower than the previous level, in this case we will make a subtotal
if (account_elem['level'] < old_level):
bcl_current_level = old_level
bcl_rup_ind = ind - 1
while (bcl_current_level >= int(accounts[bcl_rup_ind]['level']) and bcl_rup_ind >= 0 ):
res_tot = { 'code': accounts[bcl_rup_ind]['code'],
'name': '',
'debit': 0,
'credit': 0,
'tax_amount': accounts[bcl_rup_ind]['tax_amount'],
'type': accounts[bcl_rup_ind]['type'],
'level': 0,
'pos': 0
}
if res_tot['type'] == 1:
# on change le type pour afficher le total
res_tot['type'] = 2
result_accounts.append(res_tot)
bcl_current_level = accounts[bcl_rup_ind]['level']
bcl_rup_ind -= 1
old_level = account_elem['level']
result_accounts.append(account_elem)
ind+=1
return result_accounts
@http.route(['/report/account.report_vat_xls'], type='http', auth='user', website=True, multilang=True)
def report_account_tax_xls(self, **data):
report_obj = request.registry['report']
self.cr, self.uid, self.pool = request.cr, request.uid, request.registry
data = report_obj.eval_params(data)
res = {}
self.period_ids = []
period_obj = self.pool.get('account.period')
self.display_detail = data['form']['display_detail']
res['periods'] = ''
res['fiscalyear'] = data['form'].get('fiscalyear_id', False)
if data['form'].get('period_from', False) and data['form'].get('period_to', False):
self.period_ids = period_obj.build_ctx_periods(self.cr, self.uid, data['form']['period_from'], data['form']['period_to'])
content = ''
lines = self._get_lines(self._get_basedon(data), company_id=data['form']['company_id'])
if lines:
xls = StringIO.StringIO()
xls_workbook = xlwt.Workbook()
vat_sheet = xls_workbook.add_sheet('report_vat')
for x in range(0, len(lines)):
for y in range(0, len(lines[0])):
vat_sheet.write(x, y, lines[x].values()[y])
xls_workbook.save(xls)
xls.seek(0)
content = xls.read()
response = request.make_response(content, headers=[
('Content-Type', 'application/vnd.ms-excel'),
('Content-Disposition', 'attachment; filename=report_vat.xls;')
])
return response
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,585,930,821,350,867,000 | 41.690037 | 139 | 0.522604 | false |
prometheanfire/portage | pym/portage/versions.py | 1 | 16223 | # versions.py -- core Portage functionality
# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import unicode_literals
__all__ = [
'best', 'catpkgsplit', 'catsplit',
'cpv_getkey', 'cpv_getversion', 'cpv_sort_key', 'pkgcmp', 'pkgsplit',
'ververify', 'vercmp'
]
import re
import sys
import warnings
if sys.hexversion < 0x3000000:
_unicode = unicode
else:
_unicode = str
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.repository.config:_gen_valid_repo',
'portage.util:cmp_sort_key',
)
from portage import _unicode_decode
from portage.eapi import _get_eapi_attrs
from portage.exception import InvalidData
from portage.localization import _
_unknown_repo = "__unknown__"
# \w is [a-zA-Z0-9_]
# PMS 3.1.3: A slot name may contain any of the characters [A-Za-z0-9+_.-].
# It must not begin with a hyphen or a dot.
_slot = r'([\w+][\w+.-]*)'
# 2.1.1 A category name may contain any of the characters [A-Za-z0-9+_.-].
# It must not begin with a hyphen or a dot.
_cat = r'[\w+][\w+.-]*'
# 2.1.2 A package name may contain any of the characters [A-Za-z0-9+_-].
# It must not begin with a hyphen,
# and must not end in a hyphen followed by one or more digits.
_pkg = {
"dots_disallowed_in_PN": r'[\w+][\w+-]*?',
"dots_allowed_in_PN": r'[\w+][\w+.-]*?',
}
_v = r'(cvs\.)?(\d+)((\.\d+)*)([a-z]?)((_(pre|p|beta|alpha|rc)\d*)*)'
_rev = r'\d+'
_vr = _v + '(-r(' + _rev + '))?'
_cp = {
"dots_disallowed_in_PN": '(' + _cat + '/' + _pkg['dots_disallowed_in_PN'] + '(-' + _vr + ')?)',
"dots_allowed_in_PN": '(' + _cat + '/' + _pkg['dots_allowed_in_PN'] + '(-' + _vr + ')?)',
}
_cpv = {
"dots_disallowed_in_PN": '(' + _cp['dots_disallowed_in_PN'] + '-' + _vr + ')',
"dots_allowed_in_PN": '(' + _cp['dots_allowed_in_PN'] + '-' + _vr + ')',
}
_pv = {
"dots_disallowed_in_PN": '(?P<pn>' + _pkg['dots_disallowed_in_PN'] + '(?P<pn_inval>-' + _vr + ')?)' + '-(?P<ver>' + _v + ')(-r(?P<rev>' + _rev + '))?',
"dots_allowed_in_PN": '(?P<pn>' + _pkg['dots_allowed_in_PN'] + '(?P<pn_inval>-' + _vr + ')?)' + '-(?P<ver>' + _v + ')(-r(?P<rev>' + _rev + '))?',
}
ver_regexp = re.compile("^" + _vr + "$")
suffix_regexp = re.compile("^(alpha|beta|rc|pre|p)(\\d*)$")
suffix_value = {"pre": -2, "p": 0, "alpha": -4, "beta": -3, "rc": -1}
endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
_slot_re_cache = {}
def _get_slot_re(eapi_attrs):
cache_key = eapi_attrs.slot_operator
slot_re = _slot_re_cache.get(cache_key)
if slot_re is not None:
return slot_re
if eapi_attrs.slot_operator:
slot_re = _slot + r'(/' + _slot + r')?'
else:
slot_re = _slot
slot_re = re.compile('^' + slot_re + '$', re.VERBOSE | re.UNICODE)
_slot_re_cache[cache_key] = slot_re
return slot_re
_pv_re_cache = {}
def _get_pv_re(eapi_attrs):
cache_key = eapi_attrs.dots_in_PN
pv_re = _pv_re_cache.get(cache_key)
if pv_re is not None:
return pv_re
if eapi_attrs.dots_in_PN:
pv_re = _pv['dots_allowed_in_PN']
else:
pv_re = _pv['dots_disallowed_in_PN']
pv_re = re.compile(r'^' + pv_re + r'$', re.VERBOSE | re.UNICODE)
_pv_re_cache[cache_key] = pv_re
return pv_re
def ververify(myver, silent=1):
if ver_regexp.match(myver):
return True
else:
if not silent:
print(_("!!! syntax error in version: %s") % myver)
return False
def vercmp(ver1, ver2, silent=1):
"""
Compare two versions
Example usage:
>>> from portage.versions import vercmp
>>> vercmp('1.0-r1','1.2-r3')
negative number
>>> vercmp('1.3','1.2-r3')
positive number
>>> vercmp('1.0_p3','1.0_p3')
0
@param pkg1: version to compare with (see ver_regexp in portage.versions.py)
@type pkg1: string (example: "2.1.2-r3")
@param pkg2: version to compare againts (see ver_regexp in portage.versions.py)
@type pkg2: string (example: "2.1.2_rc5")
@rtype: None or float
@return:
1. positive if ver1 is greater than ver2
2. negative if ver1 is less than ver2
3. 0 if ver1 equals ver2
4. None if ver1 or ver2 are invalid (see ver_regexp in portage.versions.py)
"""
if ver1 == ver2:
return 0
match1 = ver_regexp.match(ver1)
match2 = ver_regexp.match(ver2)
# checking that the versions are valid
if not match1 or not match1.groups():
if not silent:
print(_("!!! syntax error in version: %s") % ver1)
return None
if not match2 or not match2.groups():
if not silent:
print(_("!!! syntax error in version: %s") % ver2)
return None
# shortcut for cvs ebuilds (new style)
if match1.group(1) and not match2.group(1):
return 1
elif match2.group(1) and not match1.group(1):
return -1
# building lists of the version parts before the suffix
# first part is simple
list1 = [int(match1.group(2))]
list2 = [int(match2.group(2))]
# this part would greatly benefit from a fixed-length version pattern
if match1.group(3) or match2.group(3):
vlist1 = match1.group(3)[1:].split(".")
vlist2 = match2.group(3)[1:].split(".")
for i in range(0, max(len(vlist1), len(vlist2))):
# Implcit .0 is given a value of -1, so that 1.0.0 > 1.0, since it
# would be ambiguous if two versions that aren't literally equal
# are given the same value (in sorting, for example).
if len(vlist1) <= i or len(vlist1[i]) == 0:
list1.append(-1)
list2.append(int(vlist2[i]))
elif len(vlist2) <= i or len(vlist2[i]) == 0:
list1.append(int(vlist1[i]))
list2.append(-1)
# Let's make life easy and use integers unless we're forced to use floats
elif (vlist1[i][0] != "0" and vlist2[i][0] != "0"):
list1.append(int(vlist1[i]))
list2.append(int(vlist2[i]))
# now we have to use floats so 1.02 compares correctly against 1.1
else:
# list1.append(float("0."+vlist1[i]))
# list2.append(float("0."+vlist2[i]))
# Since python floats have limited range, we multiply both
# floating point representations by a constant so that they are
# transformed into whole numbers. This allows the practically
# infinite range of a python int to be exploited. The
# multiplication is done by padding both literal strings with
# zeros as necessary to ensure equal length.
max_len = max(len(vlist1[i]), len(vlist2[i]))
list1.append(int(vlist1[i].ljust(max_len, "0")))
list2.append(int(vlist2[i].ljust(max_len, "0")))
# and now the final letter
# NOTE: Behavior changed in r2309 (between portage-2.0.x and portage-2.1).
# The new behavior is 12.2.5 > 12.2b which, depending on how you look at,
# may seem counter-intuitive. However, if you really think about it, it
# seems like it's probably safe to assume that this is the behavior that
# is intended by anyone who would use versions such as these.
if len(match1.group(5)):
list1.append(ord(match1.group(5)))
if len(match2.group(5)):
list2.append(ord(match2.group(5)))
for i in range(0, max(len(list1), len(list2))):
if len(list1) <= i:
return -1
elif len(list2) <= i:
return 1
elif list1[i] != list2[i]:
a = list1[i]
b = list2[i]
rval = (a > b) - (a < b)
return rval
# main version is equal, so now compare the _suffix part
list1 = match1.group(6).split("_")[1:]
list2 = match2.group(6).split("_")[1:]
for i in range(0, max(len(list1), len(list2))):
# Implicit _p0 is given a value of -1, so that 1 < 1_p0
if len(list1) <= i:
s1 = ("p","-1")
else:
s1 = suffix_regexp.match(list1[i]).groups()
if len(list2) <= i:
s2 = ("p","-1")
else:
s2 = suffix_regexp.match(list2[i]).groups()
if s1[0] != s2[0]:
a = suffix_value[s1[0]]
b = suffix_value[s2[0]]
rval = (a > b) - (a < b)
return rval
if s1[1] != s2[1]:
# it's possible that the s(1|2)[1] == ''
# in such a case, fudge it.
try:
r1 = int(s1[1])
except ValueError:
r1 = 0
try:
r2 = int(s2[1])
except ValueError:
r2 = 0
rval = (r1 > r2) - (r1 < r2)
if rval:
return rval
# the suffix part is equal to, so finally check the revision
if match1.group(10):
r1 = int(match1.group(10))
else:
r1 = 0
if match2.group(10):
r2 = int(match2.group(10))
else:
r2 = 0
rval = (r1 > r2) - (r1 < r2)
return rval
def pkgcmp(pkg1, pkg2):
"""
Compare 2 package versions created in pkgsplit format.
Example usage:
>>> from portage.versions import *
>>> pkgcmp(pkgsplit('test-1.0-r1'),pkgsplit('test-1.2-r3'))
-1
>>> pkgcmp(pkgsplit('test-1.3'),pkgsplit('test-1.2-r3'))
1
@param pkg1: package to compare with
@type pkg1: list (example: ['test', '1.0', 'r1'])
@param pkg2: package to compare againts
@type pkg2: list (example: ['test', '1.0', 'r1'])
@rtype: None or integer
@return:
1. None if package names are not the same
2. 1 if pkg1 is greater than pkg2
3. -1 if pkg1 is less than pkg2
4. 0 if pkg1 equals pkg2
"""
if pkg1[0] != pkg2[0]:
return None
return vercmp("-".join(pkg1[1:]), "-".join(pkg2[1:]))
def _pkgsplit(mypkg, eapi=None):
"""
@param mypkg: pv
@return:
1. None if input is invalid.
2. (pn, ver, rev) if input is pv
"""
m = _get_pv_re(_get_eapi_attrs(eapi)).match(mypkg)
if m is None:
return None
if m.group('pn_inval') is not None:
# package name appears to have a version-like suffix
return None
rev = m.group('rev')
if rev is None:
rev = '0'
rev = 'r' + rev
return (m.group('pn'), m.group('ver'), rev)
_cat_re = re.compile('^%s$' % _cat, re.UNICODE)
_missing_cat = 'null'
def catpkgsplit(mydata, silent=1, eapi=None):
"""
Takes a Category/Package-Version-Rev and returns a list of each.
@param mydata: Data to split
@type mydata: string
@param silent: suppress error messages
@type silent: Boolean (integer)
@rype: list
@return:
1. If each exists, it returns [cat, pkgname, version, rev]
2. If cat is not specificed in mydata, cat will be "null"
3. if rev does not exist it will be '-r0'
"""
try:
return mydata.cpv_split
except AttributeError:
pass
mysplit = mydata.split('/', 1)
p_split=None
if len(mysplit)==1:
cat = _missing_cat
p_split = _pkgsplit(mydata, eapi=eapi)
elif len(mysplit)==2:
cat = mysplit[0]
if _cat_re.match(cat) is not None:
p_split = _pkgsplit(mysplit[1], eapi=eapi)
if not p_split:
return None
retval = (cat, p_split[0], p_split[1], p_split[2])
return retval
class _pkg_str(_unicode):
"""
This class represents a cpv. It inherits from str (unicode in python2) and
has attributes that cache results for use by functions like catpkgsplit and
cpv_getkey which are called frequently (especially in match_from_list).
Instances are typically created in dbapi.cp_list() or the Atom contructor,
and propagate from there. Generally, code that pickles these objects will
manually convert them to a plain unicode object first.
"""
def __new__(cls, cpv, metadata=None, settings=None, eapi=None,
repo=None, slot=None):
return _unicode.__new__(cls, cpv)
def __init__(self, cpv, metadata=None, settings=None, eapi=None,
repo=None, slot=None):
if not isinstance(cpv, _unicode):
# Avoid TypeError from _unicode.__init__ with PyPy.
cpv = _unicode_decode(cpv)
_unicode.__init__(cpv)
if metadata is not None:
self.__dict__['_metadata'] = metadata
slot = metadata.get('SLOT', slot)
repo = metadata.get('repository', repo)
eapi = metadata.get('EAPI', eapi)
if settings is not None:
self.__dict__['_settings'] = settings
if eapi is not None:
self.__dict__['eapi'] = eapi
self.__dict__['cpv_split'] = catpkgsplit(cpv, eapi=eapi)
if self.cpv_split is None:
raise InvalidData(cpv)
self.__dict__['cp'] = self.cpv_split[0] + '/' + self.cpv_split[1]
if self.cpv_split[-1] == "r0" and cpv[-3:] != "-r0":
self.__dict__['version'] = "-".join(self.cpv_split[2:-1])
else:
self.__dict__['version'] = "-".join(self.cpv_split[2:])
# for match_from_list introspection
self.__dict__['cpv'] = self
if slot is not None:
eapi_attrs = _get_eapi_attrs(eapi)
slot_match = _get_slot_re(eapi_attrs).match(slot)
if slot_match is None:
# Avoid an InvalidAtom exception when creating SLOT atoms
self.__dict__['slot'] = '0'
self.__dict__['sub_slot'] = '0'
self.__dict__['slot_invalid'] = slot
else:
if eapi_attrs.slot_operator:
slot_split = slot.split("/")
self.__dict__['slot'] = slot_split[0]
if len(slot_split) > 1:
self.__dict__['sub_slot'] = slot_split[1]
else:
self.__dict__['sub_slot'] = slot_split[0]
else:
self.__dict__['slot'] = slot
self.__dict__['sub_slot'] = slot
if repo is not None:
repo = _gen_valid_repo(repo)
if not repo:
repo = _unknown_repo
self.__dict__['repo'] = repo
def __setattr__(self, name, value):
raise AttributeError("_pkg_str instances are immutable",
self.__class__, name, value)
@property
def stable(self):
try:
return self._stable
except AttributeError:
try:
metadata = self._metadata
settings = self._settings
except AttributeError:
raise AttributeError('stable')
if not settings.local_config:
# Since repoman uses different config instances for
# different profiles, our local instance does not
# refer to the correct profile.
raise AssertionError('invalid context')
stable = settings._isStable(self)
self.__dict__['_stable'] = stable
return stable
def pkgsplit(mypkg, silent=1, eapi=None):
"""
@param mypkg: either a pv or cpv
@return:
1. None if input is invalid.
2. (pn, ver, rev) if input is pv
3. (cp, ver, rev) if input is a cpv
"""
catpsplit = catpkgsplit(mypkg, eapi=eapi)
if catpsplit is None:
return None
cat, pn, ver, rev = catpsplit
if cat is _missing_cat and '/' not in mypkg:
return (pn, ver, rev)
else:
return (cat + '/' + pn, ver, rev)
def cpv_getkey(mycpv, eapi=None):
"""Calls catpkgsplit on a cpv and returns only the cp."""
try:
return mycpv.cp
except AttributeError:
pass
mysplit = catpkgsplit(mycpv, eapi=eapi)
if mysplit is not None:
return mysplit[0] + '/' + mysplit[1]
warnings.warn("portage.versions.cpv_getkey() " + \
"called with invalid cpv: '%s'" % (mycpv,),
DeprecationWarning, stacklevel=2)
myslash = mycpv.split("/", 1)
mysplit = _pkgsplit(myslash[-1], eapi=eapi)
if mysplit is None:
return None
mylen = len(myslash)
if mylen == 2:
return myslash[0] + "/" + mysplit[0]
else:
return mysplit[0]
def cpv_getversion(mycpv, eapi=None):
"""Returns the v (including revision) from an cpv."""
try:
return mycpv.version
except AttributeError:
pass
cp = cpv_getkey(mycpv, eapi=eapi)
if cp is None:
return None
return mycpv[len(cp+"-"):]
def cpv_sort_key(eapi=None):
"""
Create an object for sorting cpvs, to be used as the 'key' parameter
in places like list.sort() or sorted(). This calls catpkgsplit() once for
each cpv and caches the result. If a given cpv is invalid or two cpvs
have different category/package names, then plain string (> and <)
comparison is used.
@rtype: key object for sorting
@return: object for use as the 'key' parameter in places like
list.sort() or sorted()
"""
split_cache = {}
def cmp_cpv(cpv1, cpv2):
split1 = split_cache.get(cpv1, False)
if split1 is False:
split1 = None
try:
split1 = cpv1.cpv
except AttributeError:
try:
split1 = _pkg_str(cpv1, eapi=eapi)
except InvalidData:
pass
split_cache[cpv1] = split1
split2 = split_cache.get(cpv2, False)
if split2 is False:
split2 = None
try:
split2 = cpv2.cpv
except AttributeError:
try:
split2 = _pkg_str(cpv2, eapi=eapi)
except InvalidData:
pass
split_cache[cpv2] = split2
if split1 is None or split2 is None or split1.cp != split2.cp:
return (cpv1 > cpv2) - (cpv1 < cpv2)
return vercmp(split1.version, split2.version)
return cmp_sort_key(cmp_cpv)
def catsplit(mydep):
return mydep.split("/", 1)
def best(mymatches, eapi=None):
"""Accepts None arguments; assumes matches are valid."""
if not mymatches:
return ""
if len(mymatches) == 1:
return mymatches[0]
bestmatch = mymatches[0]
try:
v2 = bestmatch.version
except AttributeError:
v2 = _pkg_str(bestmatch, eapi=eapi).version
for x in mymatches[1:]:
try:
v1 = x.version
except AttributeError:
v1 = _pkg_str(x, eapi=eapi).version
if vercmp(v1, v2) > 0:
bestmatch = x
v2 = v1
return bestmatch
| gpl-2.0 | -8,129,053,868,656,408,000 | 27.815275 | 152 | 0.636812 | false |
ddico/odoo | addons/base_address_city/models/res_partner.py | 3 | 3521 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from lxml import etree
from odoo import api, models, fields
from odoo.tools.translate import _
class Partner(models.Model):
_inherit = 'res.partner'
country_enforce_cities = fields.Boolean(related='country_id.enforce_cities', readonly=True)
city_id = fields.Many2one('res.city', string='City of Address')
@api.onchange('city_id')
def _onchange_city_id(self):
if self.city_id:
self.city = self.city_id.name
self.zip = self.city_id.zipcode
self.state_id = self.city_id.state_id
@api.model
def _fields_view_get_address(self, arch):
arch = super(Partner, self)._fields_view_get_address(arch)
# render the partner address accordingly to address_view_id
doc = etree.fromstring(arch)
if doc.xpath("//field[@name='city_id']"):
return arch
replacement_xml = """
<div>
<field name="country_enforce_cities" invisible="1"/>
<field name="parent_id" invisible="1"/>
<field name='city' placeholder="%(placeholder)s" class="o_address_city"
attrs="{
'invisible': [('country_enforce_cities', '=', True), '|', ('city_id', '!=', False), ('city', 'in', ['', False ])],
'readonly': [('type', '=', 'contact')%(parent_condition)s]
}"
/>
<field name='city_id' placeholder="%(placeholder)s" string="%(placeholder)s" class="o_address_city"
context="{'default_country_id': country_id,
'default_name': city,
'default_zipcode': zip,
'default_state_id': state_id}"
domain="[('country_id', '=', country_id)]"
attrs="{
'invisible': [('country_enforce_cities', '=', False)],
'readonly': [('type', '=', 'contact')%(parent_condition)s]
}"
/>
</div>
"""
replacement_data = {
'placeholder': _('City'),
}
def _arch_location(node):
in_subview = False
view_type = False
parent = node.getparent()
while parent is not None and (not view_type or not in_subview):
if parent.tag == 'field':
in_subview = True
elif parent.tag in ['list', 'tree', 'kanban', 'form']:
view_type = parent.tag
parent = parent.getparent()
return {
'view_type': view_type,
'in_subview': in_subview,
}
for city_node in doc.xpath("//field[@name='city']"):
location = _arch_location(city_node)
replacement_data['parent_condition'] = ''
if location['view_type'] == 'form' or not location['in_subview']:
replacement_data['parent_condition'] = ", ('parent_id', '!=', False)"
replacement_formatted = replacement_xml % replacement_data
for replace_node in etree.fromstring(replacement_formatted).getchildren():
city_node.addprevious(replace_node)
parent = city_node.getparent()
parent.remove(city_node)
arch = etree.tostring(doc, encoding='unicode')
return arch
| agpl-3.0 | 1,858,012,566,167,103,000 | 39.471264 | 138 | 0.510934 | false |
docwhite/appleseed | scripts/oslextractmeta.py | 1 | 16465 | #!/usr/bin/python
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2015-2016 Hans Hoogenboom, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import json
import glob
import os.path
import sys
import argparse
try:
from ConfigParser import ConfigParser
except:
from configparser import ConfigParser
import datetime
import getpass
import subprocess
FileTypes = {'.oso' : "openshadinglanguage"}
# metadata according to the OSL specification
_shaderTypes = ["surface", "displacement", "light", "volume", "shader"]
_dataTypes = ["int", "float", "point", "vector", "normal", "color", "matrix", "string", "void"]
_shaderKeys = ["name", "label", "type", "help", "url", "value", "page", "widget", "units"]
# These osl parameters are not part of the official shadinglanguage but more guidelines as how to
# make up the interface of the shader inside a 3rd party program. Not yet decided what to do with it...
#_parmWidgets = ["number", "string", "boolean", "checkBox", "popup", "mapper", "filename", "null"]
#_parmInteger = ["min", "max", "sensitivity", "slider"]
#_parmFloat = _parmInteger + ["digits"]
#_parmSlider = ["slidermin", "slidermax", "slidercenter", "sliderexponent"]
#_parmKeyword = ["output"]
#----------------------------------------------------------
# Functions to sanitize olsinfo output
#----------------------------------------------------------
def _error(msg, crash=False):
sys.stderr.write(msg)
sys.stderr.write('\n')
if crash:
sys.exit(1)
return False
def _fatalError(msg):
_error(msg,True)
def _formatVal(st):
value = st.replace('"','',2)
value = value.strip()
return value
def _getKeyValue(st):
signPos = st.index('=')
value = st[signPos+1:]
key = st[:signPos-1]
key = key.split()
key = key[-1].strip()
return (key, value)
#----------------------------------------------------------
# File handling
#----------------------------------------------------------
def isValidFile(filename, filetypes):
(head, tail) = os.path.splitext(filename)
return (os.path.isfile(filename) and tail in filetypes)
def isValidExtension(fp, filetypes):
return (os.path.splitext(fp)[1] in filetypes)
def createFileList(filetypes, osl_cfg, recursive=False, args=None, pathfile=None):
    """Build the list of shader files to process: combine paths given on
    the command line, in an optional paths file and in the config file,
    expand environment variables and globs, and walk directories
    (recursively when requested)."""
    filelist = list()
# files/dirs from external file
if pathfile:
        for path in pathfile:
            try:
                fp = open(path)
                for line in fp:
                    filelist.append(line.strip())
                fp.close()
            except IOError:
                _error("Could not read from file %s" % path)
# files/dirs from command line arguments
if args:
for arg in args:
filelist.append(arg)
# files/dirs from config file
osl_dir = osl_cfg.get('settings', 'osldir')
if len(osl_dir) > 0:
osldir_list = osl_dir.split(',')
for arg in osldir_list:
filelist.append(arg)
# expand vars
args_expanded = list()
for arg in filelist:
args_expanded.append(os.path.expandvars(arg))
# clear filelist and glob
filelist = list()
for arg in args_expanded:
filelist.extend([x for x in glob.iglob(arg)])
# split files from directories
dirlist = list()
dirlist = [x for x in filelist if os.path.isdir(x)]
filelist[:] = [x for x in filelist if isValidFile(x, filetypes)]
# travel directories and add shader files to filelist
for directory in dirlist:
if recursive:
for dirpath, dirnames, filenames in os.walk(directory):
for filename in filenames:
(head, tail) = os.path.splitext(filename)
if tail in filetypes:
filelist.append(os.path.join(dirpath, filename))
else:
dirpath, dirnames, filenames = next(os.walk(directory))
for filename in filenames:
(head, tail) = os.path.splitext(filename)
if tail in filetypes:
filelist.append(os.path.join(dirpath, filename))
# clear duplicate entries, do not care for order
filelist = list (set(filelist))
# if there are no files/paths quit
if len(filelist) < 1:
_fatalError("No files or directories found, exiting.")
return filelist
#----------------------------------------------------------
# Functions for parsing *.oso files
#----------------------------------------------------------
def parseOslInfo(compiledShader, osl_cfg):
    """Run oslinfo on a compiled shader and parse its output into a dict
    describing the shader and its parameters; returns False if the output
    cannot be parsed."""
    oslpath = osl_cfg.get('settings', 'oslpath')
if os.path.isfile(oslpath):
cmd = str(oslpath) + ' -v %s' % compiledShader
else:
cmd = 'oslinfo -v %s' % compiledShader
cmd = cmd.split()
try:
fp = subprocess.check_output(cmd)
except subprocess.CalledProcessError as fp_ret:
_fatalError("Could not run oslinfo, exiting.\nReturncode: %s" % fp_ret.returncode)
# check if output of oslinfo is correct
# if false skip shader and write error message to console
lines = fp.splitlines()
if not lines:
_error('Missing shader definition for %s' % compiledShader)
return False
count = 0
shaderDef = lines[ count ]
args = shaderDef.split()
# tempShader stores all the data
tempShader = dict()
# store the order in which oslinfo outputs its data
# and separate the parameters from general shader data
parmlist = list()
if args[0] not in _shaderTypes:
_error("Not a valid shader type: %s" % args[0])
return False
else:
tempShader['type'] = _formatVal(args[0])
tempShader['name'] = _formatVal(args[1])
tempShader['hasMetaData'] = False
tempShader['hasParmHelp'] = False
# parse the rest of the file to get parameters
# number of entries in lines
length = len(lines) - 1
# lines iterator
count = 1
while True:
line = lines[ count ]
if not line:
_error("No more lines to read, invalid shader %s?" % compiledShader)
break
args = line.split()
# find parameter name
if args[0] not in ["Default", "metadata:"]: # or args[0] == "export":
tempparm = dict()
if len(args) < 3:
tempparm['name'] = _formatVal(args[0])
tempparm['type'] = _formatVal(args[1])
else:
tempparm['output'] = True
tempparm['name'] = _formatVal(args[0])
tempparm['type'] = _formatVal(args[2])
condition = True
widget = str()
while condition:
# read next line
count += 1
if count > length:
break
line = lines[count]
parmargs = line.split()
if parmargs[0] == "Default":
tempparm['value'] = _formatVal(' '.join(parmargs[2:]))
elif parmargs[0] == "metadata:":
(key, value) = _getKeyValue(line)
value = _formatVal(value)
if key != 'widget':
tempparm[key] = value
else:
widget = value
else:
condition = False
# move one line back
count -= 1
if len(widget) > 0 and 'widget' not in tempparm:
tempparm['widget'] = widget
tempShader[tempparm['name']] = tempparm
parmlist.append(tempparm['name'])
if 'help' in tempparm:
tempShader['hasParmHelp'] = True
# we didn't find a parameter yet, so there must be some general stuff
else:
if args[0] == "metadata:":
(key, value) = _getKeyValue(line)
value = _formatVal(value)
tempparm[key] = value
tempShader['hasMetaData'] = True
if count > length:
break
else:
count += 1
# parsed all lines
tempShader['parmlist'] = parmlist
return tempShader
def parseShaderInfo(compiledShader, FileTypes, osl_cfg):
(name, extension) = os.path.splitext(compiledShader)
shaderUI = None
if extension == '.oso':
shaderUI = parseOslInfo(compiledShader, osl_cfg)
if not shaderUI:
_error("Could not process %s" % compiledShader)
return None
else:
compShader = dict()
compShader['name'] = shaderUI['name']
compShader['path'] = compiledShader
compShader['mtime'] = str(os.path.getmtime(compiledShader))
compShader['ctime'] = str(datetime.datetime.now())
compShader['language']= FileTypes[extension]
# holds the output of parseOslInfo (the actual shader metadata/ui)
compShader['ui'] = shaderUI
return compShader
#----------------------------------------------------------
# Functions for handling the shader dictionary
#----------------------------------------------------------
def getNumberOfShaders(jsonFile):
return len(jsonFile['shaders'])
def cleanJsonShaders(jsonDict):
num_del = 0
    for shaderpath in list(jsonDict.keys()):
if not os.path.isfile(shaderpath):
del jsonDict[shaderpath]
num_del += 1
return (num_del, jsonDict)
def existsJsonShader(jsonFile, shaderName):
    # jsonFile['shaders'] is keyed by file path, so look through the values
    for shader in jsonFile['shaders'].values():
        if shader['name'] == shaderName:
            return True
    return False
def writeJsonHeader(filename, numElements):
headerDict = dict()
headerDict['creator'] = getpass.getuser()
headerDict['creation date'] = str(datetime.datetime.now())
headerDict['name'] = os.path.basename(filename)
headerDict['elements'] = numElements
headerDict['last update'] = str(datetime.datetime.now())
return headerDict
def updateJsonHeader(jsonFile, numElements):
headerDict = jsonFile
headerDict['last update'] = str(datetime.datetime.now())
headerDict['elements'] = numElements
return headerDict
def cli():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description = '''
oslextractmetadata stores the user interface and metadata of a
compiled OSL (openshadinglanguage) shader(s) into a JSON file.
The JSON dictionary consists of a 'header' and a 'shader' part.
    jsondict['shaders'] will return a dictionary with all shaders. The
    user interface of the shader is stored as a sub-dictionary; the
    metadata can be retrieved using the 'ui' key on the elements, e.g.:
for x in jsondict['shaders'].values():
print x['ui']
''')
parser.add_argument('-i', '--input', nargs='+', action='store', dest='files', metavar='compiled shaders',help='List of file(s) to parse.')
parser.add_argument('-v', '--verbose', action='store_true', dest='verbosity', help='Increase output verbosity.')
parser.add_argument('-o', '--output', nargs=1, action='store', dest='output', required=True, metavar='output file', help="Store shader UI in file.")
parser.add_argument('-f', '--file', nargs='+', action='store', dest='read_file', metavar='file', help="Read file paths from file(s).")
parser.add_argument('-U', '--update', action='store_true', dest='update', help="Update existing shader file.")
parser.add_argument('-O', '--overwrite', action='store_true', dest='overwrite', help="Overwrite existing files.")
parser.add_argument('-c', '--clean', action='store_true', dest='clean', help="Clean file, remove non existant shaders.")
parser.add_argument('-r', '--recursive', action='store_true', dest='recursive', help="Add directories recursively.")
args = parser.parse_args()
# user input checks
output = args.output[0]
existingFile = os.path.exists(output)
if not existingFile:
args.overwrite = False
args.update = False
args.clean = False
if args.overwrite:
args.update = False
args.clean = False
return (args, output, existingFile)
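# Example invocations (illustrative sketch; the flags are defined above):
#   python oslextractmeta.py -r -i /path/to/shaders -o shaders.json
#   python oslextractmeta.py -U -o shaders.json -f shaderpaths.txt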
#----------------------------------------------------------
# Main body
#----------------------------------------------------------
def main():
(args, output, existingFile) = cli()
# read configuration file
cfg_defaults = {'oslpath' : '/usr/bin/oslinfo'}
osl_cfg = ConfigParser(cfg_defaults)
osl_cfg.read('oslextractmeta.conf')
# create list of files specified on cli or read from file
files = createFileList(FileTypes, osl_cfg, args.recursive, args.files, args.read_file)
# parse files for shader metadata
shaders = dict()
for shaderfile in files:
if args.verbosity:
print("Processing file %s" % shaderfile)
shaderUI = parseShaderInfo(shaderfile, FileTypes, osl_cfg)
if shaderUI:
shaders[shaderUI['path']] = shaderUI
jsonDict = dict()
# retrieve existing values in case of updating or cleaning
if existingFile and not args.overwrite:
with open(output, 'r') as fp:
try:
jsonDict = json.load(fp)
except:
_fatalError("JSON object could not be decoded.")
# create/update/clean json shader and header dictionaries
changes = 0
if args.clean:
(changes, jsonDict['shaders']) = cleanJsonShaders(jsonDict['shaders'])
if args.verbosity:
print("Removed %s shaders." % changes)
if args.update:
changes = len(shaders)
jsonDict['shaders'].update(shaders)
if args.verbosity:
print("%s shaders updated." % changes)
if args.overwrite:
changes = len(shaders)
jsonDict['header'] = writeJsonHeader(output, changes)
jsonDict['shaders'] = shaders
if args.verbosity:
print("%s shaders added to %s" % (changes, output))
# only adding new shaders
else:
temp_changes = changes
        if 'shaders' in jsonDict:
existing_keys = jsonDict['shaders'].keys()
for key in shaders:
if key not in existing_keys:
jsonDict['shaders'][key] = shaders[key]
changes += 1
else:
jsonDict['shaders'] = shaders
changes = len(shaders)
if args.verbosity:
added_shaders = changes - temp_changes
print("Added %s shaders." % added_shaders)
# write to file shaders to file if changed
if existingFile and changes:
with open(output, 'w') as fp:
fp.seek(0)
fp.truncate()
jsonDict['header'] = updateJsonHeader(jsonDict['header'], len(jsonDict['shaders']))
json.dump(jsonDict, fp)
elif not existingFile and changes:
with open(output, 'w') as fp:
jsonDict['header'] = writeJsonHeader(output, len(shaders))
json.dump(jsonDict, fp)
elif args.verbosity:
print("No shaders found for adding to %s, exiting." % output)
return 0
# call main function
if __name__ == "__main__":
main()
| mit | -7,828,603,925,393,885,000 | 34.949782 | 152 | 0.586456 | false |
Neurosim-lab/netpyne | doc/build.py | 1 | 4489 | """
This is a script to build the NetPyNE html documentation
All steps should be executed from netpyne/doc
The following are required:
1) Sphinx documentation generator: https://www.sphinx-doc.org/en/master/
2) Sphinx RTD Theme
3) Autodoc summary table: https://autodocsumm.readthedocs.io/en/latest/index.html
4) Wheel packager: https://wheel.readthedocs.io/en/stable/
5) Twine packager: https://twine.readthedocs.io/en/latest/
Which can be installed with:
python3 -m pip install -U sphinx
python3 -m pip install -U sphinx_rtd_theme
python3 -m pip install -U autodocsumm
python3 -m pip install -U wheel
python3 -m pip install -U twine
Here are the steps to release a new version of NetPyNE
(step 10 is completed by executing this file):
1) Go through Pull Requests and merge acceptable ones into Development
2) Ensure all tests pass after the last commit
3) Ensure all new features are described in the documentation
4) Update CHANGES.md
5) Update the __init__.py version number
6) Update the version number in the Sphinx documentation (netpyne/doc/source/conf.py)
7) Commit with the message “VERSION #.#.#”
8) Create a Pull Request from Development to Master
8a) Title it “PR from development to master - VERSION #.#.#”
8b) Ensure the Pull Request passes all tests
8c) Merge the Pull Request
9) Start a new Release on GitHub
9a) Title it and tag it “v#.#.#”
9b) Copy the text in CHANGES.md into the Release description
9c) Publish the Release
10) Rebuild the documentation (execute build.py to accomplish these steps)
10a) It will delete the old build directory
10b) It will delete any old .rst files except those listed
10c) It will generate new .rst files for the API (package index)
10d) It will fix the Package Index file
10e) It will build the html files
11) Post the documentation
11a) ssh [email protected] "rm -r ~/public_html"
11b) scp -r build [email protected]:///home/gkaue9v7ctjf/public_html
11c) ssh [email protected] "cp -r ~/redirect_html/. ~/public_html/"
12) Update PYPI (pip) with the latest release
12a) cd netpyne
12b) python3 setup.py bdist_wheel --universal
12c) python setup.py upload_via_twine
Username: salvadord
13) Announce the new release
13a) New release announcement text:
NetPyNE v#.#.# is now available. For a complete list of changes and bug fixes see: https://github.com/Neurosim-lab/netpyne/releases/tag/v#.#.#
See here for instructions to install or update to the latest version: http://www.netpyne.org/install.html
13b) Announce on NEURON forum:
https://www.neuron.yale.edu/phpBB/viewtopic.php?f=45&t=3685&sid=9c380fe3a835babd47148c81ae71343e
13c) Announce to Google group:
https://groups.google.com/forum/#!forum/netpyne-mailing
13d) Announce on Slack in #netpyne channel
13e) Announce on Twitter
Username: _netpyne_
14) Bask in the glory that is NetPyNE
"""
import shutil
import os
# Delete the build directory to start with a blank slate
print('Deleting build directory.')
shutil.rmtree('build', ignore_errors=True)
# All .rst files but those listed here will be deleted during this process
keep = ['about.rst', 'advanced.rst', 'index.rst', 'install.rst', 'reference.rst', 'tutorial.rst']
print('Deleting old .rst files.')
for file in os.listdir('source'):
if file.endswith('.rst') and file not in keep:
os.remove(os.path.join('source', file))
# Generate new .rst files for the API (package index)
print('Generating new index .rst files.')
os.system('sphinx-apidoc -f -e -M -T --templatedir=source/apidoc/ -o source/ ../netpyne')
# -f -- force overwriting
# -e -- put each module documentation on its own page
# -M -- put module documentation before submodules
# -T -- do not create a table of contents file
# --templatedir=source/apidoc/ -- use our custom templates
# -o source/ -- where to put the output
# ../netpyne -- the module we want to document
# sphinx-apidoc produces a file called "netpyne" that we want to call "Package Index"
print('Fixing Package Index file.')
os.system('mv source/netpyne.rst source/package_index.rst')
with open('source/package_index.rst') as f:
lines = f.readlines()
lines[0] = 'Package Index\n'
lines[1] = '=============\n'
with open('source/package_index.rst', 'w') as f:
f.writelines(lines)
# Generate the html files
print('Building html files.')
os.system('sphinx-build source ./build')
| mit | 3,677,396,400,044,773,400 | 41.638095 | 151 | 0.722359 | false |
h-mayorquin/g_node_data_analysis_205 | 4_day/parameter_search.py | 1 | 1085 | import numpy as np
from load_data import X, Y, Xtest
from sklearn.linear_model import LogisticRegression
from sklearn.grid_search import GridSearchCV
from scipy.io import savemat
from sklearn.cross_validation import StratifiedKFold
def return_best_model(X, Y, N, C, penalties):
"""
Returns the best model for X data and Y targets
"""
skf = StratifiedKFold(Y, N)
# We define the logistic regression
lg = LogisticRegression()
param_grid = [{'C': C, 'penalty': penalties}]
rsearch = GridSearchCV(estimator=lg, param_grid=param_grid, cv=skf)
rsearch.fit(X, Y)
g
return rsearch.best_estimator_, rsearch.best_score_, rsearch
Y = Y.astype('int')
C = np.arange(0.05, 2, 0.05)
C = C.tolist()
penalties = ['l1', 'l2']
N = 3
best_model, best_score, rsearch = return_best_model(X, Y, N, C, penalties)
print best_score
Ydata = rsearch.predict(Xtest)
dict_to_save = {'Y': Ydata}
file = 'ramon_labels.mat'
savemat(file, dict_to_save)
| bsd-2-clause | 7,063,958,237,785,864,000 | 22.586957 | 71 | 0.685714 | false |
onexeno/plow | lib/python/plow/gui/panels/nodes.py | 1 | 8167 | import os
import logging
from datetime import datetime
from functools import partial
import plow.client
from plow.gui import constants
from plow.gui.manifest import QtCore, QtGui
from plow.gui.panels import Panel
from plow.gui.event import EventManager
from plow.gui.common import models
from plow.gui.common.widgets import TableWidget, ResourceDelegate
from plow.gui.util import formatDuration
NODE_STATES = {}
for a in dir(plow.client.NodeState):
if a.startswith('_'):
continue
val = getattr(plow.client.NodeState, a)
NODE_STATES[val] = a
ObjectRole = QtCore.Qt.UserRole + 1
LOGGER = logging.getLogger(__name__)
class NodePanel(Panel):
def __init__(self, name="Nodes", parent=None):
Panel.__init__(self, name, "Nodes", parent)
self.setAttr("refreshSeconds", 10)
self.setWidget(NodeWidget(self.attrs, self))
self.setWindowTitle(name)
def init(self):
titleBar = self.titleBarWidget()
titleBar.addAction(QtGui.QIcon(":/images/locked.png"),
"Lock Selected Clusters",
partial(self.__setNodesLocked, True))
titleBar.addAction(QtGui.QIcon(":/images/unlocked.png"),
"Unlock Selected Clusters",
partial(self.__setNodesLocked, False))
def refresh(self):
self.widget().refresh()
def __setNodesLocked(self, locked):
try:
for node in self.widget().getSelectedNodes():
node.lock(locked)
finally:
self.refresh()
class NodeWidget(QtGui.QWidget):
def __init__(self, attrs, parent=None):
super(NodeWidget, self).__init__(parent)
self.__attrs = attrs
layout = QtGui.QVBoxLayout(self)
layout.setContentsMargins(4,0,4,4)
self.__model = model = NodeModel(self)
self.__proxy = proxy = models.AlnumSortProxyModel(self)
proxy.setSourceModel(model)
self.__view = view = TableWidget(self)
view.setModel(proxy)
view.sortByColumn(0, QtCore.Qt.AscendingOrder)
layout.addWidget(view)
view.setColumnWidth(0, 150)
view.setColumnWidth(model.HEADERS.index('Locked'), 60)
view.setColumnWidth(model.HEADERS.index('Cores (Total)'), 90)
view.setColumnWidth(model.HEADERS.index('Cores (Idle)'), 90)
view.setColumnHidden(model.HEADERS.index('Ram (Total)'), True)
view.setColumnHidden(model.HEADERS.index('Swap (Total)'), True)
view.setItemDelegateForColumn(model.HEADERS.index('Ram (Free)'),
ResourceDelegate(parent=self))
view.setItemDelegateForColumn(model.HEADERS.index('Swap (Free)'),
ResourceDelegate(warn=.75, critical=.25, parent=self))
view.doubleClicked.connect(self.__itemDoubleClicked)
def model(self):
return self.proxyModel().sourceModel()
def setModel(self, model):
        try:
            self.proxyModel().sourceModel().deleteLater()
        except AttributeError:
            # no source model has been set yet
            pass
self.proxyModel().setSourceModel(model)
def refresh(self):
self.__model.refresh()
def getSelectedNodes(self):
rows = self.__view.selectionModel().selectedRows()
return [index.data(ObjectRole) for index in rows]
def __itemDoubleClicked(self, index):
uid = index.data(ObjectRole).id
EventManager.emit("NODE_OF_INTEREST", uid)
class NodeModel(QtCore.QAbstractTableModel):
HEADERS = [
"Name", "Cluster",
"State", "Locked", "Cores (Total)", "Cores (Idle)",
"Ram (Total)", "Ram (Free)", "Swap (Total)",
"Swap (Free)", "Ping", "Uptime"
]
HEADER_CALLBACKS = {
0 : lambda n: n.name,
1 : lambda n: n.clusterName,
2 : lambda n: NODE_STATES.get(n.state, ''),
3 : lambda n: str(bool(n.locked)),
4 : lambda n: n.totalCores,
5 : lambda n: n.idleCores,
6 : lambda n: n.system.totalRamMb,
7 : lambda n: n.system.freeRamMb,
8 : lambda n: n.system.totalSwapMb,
9 : lambda n: n.system.freeSwapMb,
10: lambda n: formatDuration(n.updatedTime),
11: lambda n: formatDuration(n.bootTime),
}
def __init__(self, parent=None):
super(NodeModel, self).__init__(parent)
self.__items = []
def hasChildren(self, parent):
return False
def reload(self):
nodes = plow.client.get_nodes()
self.setNodeList(nodes)
def refresh(self):
if not self.__items:
self.reload()
return
rows = self.__index
colCount = self.columnCount()
parent = QtCore.QModelIndex()
nodes = plow.client.get_nodes()
nodes_ids = set()
to_add = set()
# Update
for node in nodes:
nodes_ids.add(node.id)
if node.id in self.__index:
row = rows[node.id]
self.__items[row] = node
start = self.index(row, 0)
end = self.index(row, colCount-1)
self.dataChanged.emit(start, end)
LOGGER.debug("updating %s %s", node.id, node.name)
else:
to_add.add(node)
# Add new
if to_add:
size = len(to_add)
start = len(self.__items)
end = start + size - 1
self.beginInsertRows(parent, start, end)
self.__items.extend(to_add)
self.endInsertRows()
LOGGER.debug("adding %d new nodes", size)
# Remove
to_remove = set(self.__index.iterkeys()).difference(nodes_ids)
for row, old_id in sorted(((rows[old_id], old_id) for old_id in to_remove), reverse=True):
self.beginRemoveRows(parent, row, row)
node = self.__items.pop(row)
self.endRemoveRows()
LOGGER.debug("removing %s %s", old_id, node.name)
self.__index = dict((n.id, row) for row, n in enumerate(self.__items))
def rowCount(self, parent):
return len(self.__items)
def columnCount(self, parent=None):
return len(self.HEADERS)
def data(self, index, role):
if not index.isValid():
return
row = index.row()
col = index.column()
node = self.__items[row]
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.ToolTipRole:
return self.HEADER_CALLBACKS[col](node)
elif role == QtCore.Qt.UserRole:
if col == 7:
return node.system.freeRamMb / float(node.system.totalRamMb)
elif col == 9:
return node.system.freeSwapMb / float(node.system.totalSwapMb)
else:
return self.HEADER_CALLBACKS[col](node)
elif role == QtCore.Qt.TextAlignmentRole:
if col != 0:
return QtCore.Qt.AlignCenter
elif role == QtCore.Qt.BackgroundRole:
if node.state == plow.client.NodeState.DOWN:
return constants.RED
if node.locked:
return constants.BLUE
return None
elif role == ObjectRole:
return node
def headerData(self, section, orientation, role):
if role == QtCore.Qt.TextAlignmentRole:
if section == 0:
return QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter
else:
return QtCore.Qt.AlignCenter
if role != QtCore.Qt.DisplayRole:
return None
if orientation == QtCore.Qt.Vertical:
return section
return self.HEADERS[section]
def nodeFromIndex(self, idx):
if not idx.isValid():
return None
node = self.__items[idx.row()]
return node
def setNodeList(self, nodeList):
self.beginResetModel()
self.__items = nodeList
self.__index = dict((n.id, row) for row, n in enumerate(nodeList))
self.endResetModel()
| apache-2.0 | -7,333,923,241,697,357,000 | 29.588015 | 98 | 0.566548 | false |
Azure/azure-sdk-for-python | sdk/storagepool/azure-mgmt-storagepool/azure/mgmt/storagepool/operations/_operations.py | 1 | 4855 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~storage_pool_management.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.StoragePoolOperationListResult"]
"""Gets a list of StoragePool operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StoragePoolOperationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~storage_pool_management.models.StoragePoolOperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StoragePoolOperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-04-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('StoragePoolOperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.StoragePool/operations'} # type: ignore
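# Usage sketch (illustrative only; the client object and its `operations`
# attribute come from the generated service client, not from this file):
#
#     for operation in client.operations.list():
#         print(operation)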
| mit | 8,943,583,677,242,620,000 | 43.136364 | 133 | 0.644284 | false |
CARocha/estudiocafe | estudiocafe/settings.py | 1 | 4877 | # Django settings for estudiocafe project.
from local_settings import *
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.environ.get('MEDIA_ROOT',os.path.join(SITE_ROOT, 'media'))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.environ.get('STATIC_ROOT',os.path.join(SITE_ROOT, 'static'))
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths
os.path.join(SITE_ROOT,'static_media/'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '&sh49-5ri=6y8z%f(^i2_e@+2-^6(_)$!%6n!=ip2&_3!^k0^c'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
ROOT_URLCONF = 'estudiocafe.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'estudiocafe.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(SITE_ROOT,'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'encuesta',
'produccion_finca',
'vulnerabilidades_finca',
'produccion_cafe_finca',
'roya',
'lugar',
'south',
'geoposition',
'sorl.thumbnail',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| mit | 4,594,011,873,983,549,000 | 33.104895 | 88 | 0.697765 | false |
spuddybuddy/dotfiles | bin/chrome.py | 1 | 1414 | #!/usr/bin/python3
import getopt
import os
import sys
import chrome_common
# Where the Chrome binary lives
CHROME_PATHS = {
"stable": "/opt/google/chrome",
"beta": "/opt/google/chrome-beta",
"dev": "/opt/google/chrome-unstable"
}
# Where Chrome profile data lives, relative to $HOME
CHROME_USER_DIRS = {
"stable": ".config/google-chrome",
"beta": ".config/google-chrome-beta",
"dev": ".config/google-chrome-unstable"
}
def RunChromeChannel(channel, extra_args):
home = os.getenv("HOME")
chrome_common.RunChrome(os.path.join(CHROME_PATHS[channel], "chrome"),
channel,
chrome_common.CHROME_FEATURES,
os.path.join(home, CHROME_USER_DIRS[channel]),
[
"--enable-logging",
"--also-log-to-stderr",
"--no-proxy-server",
"--show-component-extension-options",
],
extra_args)
def main(argv):
try:
channel = "stable"
opts, extra_args = getopt.getopt(argv,"c:",["channel="])
for opt, value in opts:
if opt in ("-c", "--channel"):
channel = value
    if channel not in CHROME_PATHS:
      raise getopt.GetoptError("unknown channel: %s" % channel)
    RunChromeChannel(channel, extra_args)
  except getopt.GetoptError:
    print(sys.argv[0] + " [-c stable|beta|dev]")
    sys.exit(2)
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-3.0 | 6,546,616,115,252,384,000 | 24.25 | 72 | 0.582744 | false |
freaxmind/miage-m1 | csi/FreeMAP/utilisateurs/views.py | 1 | 1863 | # -*- coding: utf-8 -*-
from django.shortcuts import render_to_response, redirect
from django.template.context import RequestContext
def index(request):
from gestion.models import Constitution, TypeProduit
from datetime import datetime
week_number = datetime.today().isocalendar()[1]
constitutions = Constitution.par_periode(week_number)
produits = []
for c in constitutions:
type_produit = TypeProduit.objects.get(pk=c.type_produit)
produits.append([type_produit, c.quantite])
var = {'produits': produits,}
return render_to_response('index.html', var,
context_instance=RequestContext(request))
def bravo(request):
return render_to_response('bravo.html',
context_instance=RequestContext(request))
def abonnement(request):
from utilisateurs.forms import AbonnementForm
from gestion.models import Panier, Abonnement, Client
from datetime import datetime
week_number = datetime.today().isocalendar()[1]
nb_paniers, trimestre = Panier.get_nb_by_periode(week_number)
if request.method == 'POST':
form = AbonnementForm(request.POST)
if form.is_valid():
paniers_par_semaine = form.cleaned_data['paniers']
jour = form.cleaned_data['jour']
client = form.save()
abonnement = Abonnement()
abonnement.trimestre = trimestre
abonnement.client = client
abonnement.paniers = paniers_par_semaine
abonnement.jour = jour
abonnement.save()
return bravo(request)
else:
form = AbonnementForm()
var = {'form': form, 'nb': nb_paniers+1, 'trimestre': trimestre}
return render_to_response('abonnement.html', var,
context_instance=RequestContext(request))
| gpl-3.0 | 8,481,809,654,422,840,000 | 33.5 | 71 | 0.640365 | false |
blechta/fenapack | fenapack/_field_split_utils.py | 1 | 2709 | # Copyright (C) 2015-2017 Jan Blechta
#
# This file is part of FENaPack.
#
# FENaPack is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FENaPack is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
"""Compiled extensions for fieldsplit modules"""
from dolfin import compile_cpp_code
import petsc4py
import os
__all__ = ['dofmap_dofs_is', 'SubfieldBC']
dofmap_dofs_is_cpp_code = """
#include <pybind11/pybind11.h>
#include <pybind11/eigen.h>
namespace py = pybind11;
#include <vector>
#include <petscis.h>
#include <dolfin/fem/GenericDofMap.h>
#include <dolfin/la/PETScObject.h>
#include <dolfin/log/log.h>
#include "petsc_casters.h"
IS dofmap_dofs_is(const dolfin::GenericDofMap& dofmap)
{
PetscErrorCode ierr;
const std::vector<dolfin::la_index> dofs = dofmap.dofs();
IS is;
dolfin_assert(dofmap.index_map());
ierr = ISCreateGeneral(dofmap.index_map()->mpi_comm(), dofs.size(),
dofs.data(), PETSC_COPY_VALUES, &is);
if (ierr != 0)
dolfin::PETScObject::petsc_error(ierr, "field_split.py", "ISCreateGeneral");
return is;
}
PYBIND11_MODULE(SIGNATURE, m)
{
m.def("dofmap_dofs_is", &dofmap_dofs_is);
}
namespace pybind11
{
namespace detail
{
PETSC_CASTER_MACRO(IS, is);
}
}
"""
# Load and wrap compiled function dofmap_dofs_is
path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
module_dofs = compile_cpp_code(dofmap_dofs_is_cpp_code,
include_dirs=[path, petsc4py.get_include()])
def dofmap_dofs_is(dofmap):
"""Converts DofMap::dofs() to IS.
This function is intended to circumvent NumPy which would be
involved in code like::
iset = PETSc.IS().createGeneral(dofmap.dofs(),
comm=dofmap.index_map().mpi_comm())
"""
iset = module_dofs.dofmap_dofs_is(dofmap)
iset.decRef()
assert iset.getRefCount() == 1
return iset
dofmap_dofs_is.__doc__ += module_dofs.dofmap_dofs_is.__doc__
# Load compiled class SubfieldBC
path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
code = open(os.path.join(path, "SubfieldBC.h")).read()
module_bc = compile_cpp_code(code, include_dirs=[path, petsc4py.get_include()])
SubfieldBC = module_bc.SubfieldBC
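if __name__ == "__main__":
    # Minimal self-check sketch (assumes a working FEniCS/DOLFIN install):
    # build a small P1 function space and convert its dofmap to a PETSc IS.
    from dolfin import UnitSquareMesh, FunctionSpace
    V = FunctionSpace(UnitSquareMesh(4, 4), "P", 1)
    iset = dofmap_dofs_is(V.dofmap())
    print("IS local size:", iset.getLocalSize())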
| lgpl-3.0 | 1,074,199,007,870,595,700 | 29.1 | 80 | 0.688077 | false |
marrow/interface | test/test_schema.py | 1 | 7649 | # encoding: utf-8
"""Test the schema objects."""
from unittest import TestCase
from marrow.interface import schema
class TestAttributeSuccesses(TestCase):
foo = 27
basic = schema.Attribute()
basic.__name__ = 'basic'
value = schema.Attribute(value=27)
value.__name__ = 'foo'
exact = schema.Attribute()
exact.__name__ = 'exact'
exact.exact = exact
validator = schema.Attribute(validator=lambda v: 20 < v < 30)
validator.__name__ = 'foo'
def test_basic_repr(self):
self.assertEqual(repr(self.basic), "Attribute(basic)")
def test_basic_call(self):
self.assertTrue(self.basic(self))
def test_value_call(self):
self.assertTrue(self.value(self))
def test_exact_call(self):
self.assertTrue(self.exact(self))
def test_validator_call(self):
self.assertTrue(self.validator(self))
class TestAttributeFailure(TestCase):
foo = 42
basic = schema.Attribute()
basic.__name__ = 'bar'
value = schema.Attribute(value=27)
value.__name__ = 'foo'
exact = schema.Attribute()
exact.__name__ = 'exact'
exact.exact = None
validator = schema.Attribute(validator=lambda v: 20 < v < 30)
validator.__name__ = 'foo'
def test_basic_call(self):
self.assertFalse(self.basic(self))
def test_value_call(self):
self.assertFalse(self.value(self))
def test_exact_call(self):
self.assertFalse(self.exact(self))
def test_validator_call(self):
self.assertFalse(self.validator(self))
class TestProperty(TestCase):
foo = 27
bar = "baz"
good = schema.Property(type=int)
good.__name__ = 'foo'
bad = schema.Property(type=int)
bad.__name__ = 'bar'
def test_property_success(self):
self.assertTrue(self.good(self))
def test_property_failure(self):
self.assertFalse(self.bad(self))
class TestClassProperty(TestCase):
foo = 27
good = schema.ClassProperty()
good.__name__ = 'foo'
bad = schema.ClassProperty()
bad.__name__ = 'bar'
def __init__(self, *args, **kw):
super(TestClassProperty, self).__init__(*args, **kw)
self.bar = 42
def test_class_property_success(self):
self.assertTrue(self.good(self))
def test_class_property_failure(self):
self.assertFalse(self.bad(self))
class TestInstanceProperty(TestCase):
foo = 27
bar = 42
good1 = schema.InstanceProperty()
good1.__name__ = 'bar'
good2 = schema.InstanceProperty()
good2.__name__ = 'baz'
bad = schema.InstanceProperty()
bad.__name__ = 'foo'
def __init__(self, *args, **kw):
super(TestInstanceProperty, self).__init__(*args, **kw)
self.bar = 27
self.baz = 42
def test_instance_property_override_success(self):
self.assertTrue(self.good1(self))
def test_instance_property_unique_success(self):
self.assertTrue(self.good2(self))
def test_instance_property_failure(self):
self.assertFalse(self.bad(self))
class BaseCallables:
foo = "foo"
def callable1(self, arg1, arg2=None):
pass
@classmethod
def callable2(cls, *args, **kw):
pass
@staticmethod
def callable3():
pass
class TestCallableBasics(TestCase, BaseCallables):
good = schema.Callable()
good.__name__ = 'callable1'
bad = schema.Callable()
bad.__name__ = 'foo'
notdictionary = object()
error = schema.Callable()
error.__name__ = '__getitem__'
def test_callable_base_success(self):
self.assertTrue(self.good(self))
def test_callable_base_failure(self):
self.assertFalse(self.bad(self))
def test_callable_introspect_fail(self):
self.assertFalse(self.error(self.notdictionary))
class TestCallableArgspecSuccess(TestCase, BaseCallables):
# like=None, args=None, optional=None, names=None, vargs=None, kwargs=None
args = schema.Callable(args=1)
optional = schema.Callable(optional=1)
names = schema.Callable(names=('arg1', 'arg2'))
args.__name__ = optional.__name__ = names.__name__ = 'callable1'
args.skip = optional.skip = names.skip = 1
vargs = schema.Callable(vargs=True)
kwargs = schema.Callable(kwargs=True)
vargs.__name__ = kwargs.__name__ = 'callable2'
vargs.skip = kwargs.skip = 1
like_basic = schema.Callable(like=BaseCallables.callable1)
like_basic.__name__ = 'callable1'
like_variable = schema.Callable(like=BaseCallables.callable2)
like_variable.__name__ = 'callable2'
like_override = schema.Callable(like=BaseCallables.callable1, args=2)
like_override.__name__ = 'callable1'
def test_callable_args(self):
self.assertTrue(self.args(self))
def test_callable_optional(self):
self.assertTrue(self.optional(self))
def test_callable_names(self):
self.assertTrue(self.names(self))
def test_callable_vargs(self):
self.assertTrue(self.vargs(self))
def test_callable_kwargs(self):
self.assertTrue(self.kwargs(self))
def test_callable_like_basic(self):
self.assertTrue(self.like_basic(self))
def test_callable_like_variable(self):
self.assertTrue(self.like_variable(self))
def test_callable_like_override(self):
self.assertTrue(self.like_override(self))
class TestCallableArgspecFailures(TestCase, BaseCallables):
# like=None, args=None, optional=None, names=None, vargs=None, kwargs=None
args = schema.Callable(args=1)
optional = schema.Callable(optional=1)
names = schema.Callable(names=('arg1', 'arg2'))
args.__name__ = optional.__name__ = names.__name__ = 'callable2'
args.skip = optional.skip = names.skip = 1
vargs = schema.Callable(vargs=True)
kwargs = schema.Callable(kwargs=True)
vargs.__name__ = kwargs.__name__ = 'callable1'
vargs.skip = kwargs.skip = 1
like_basic = schema.Callable(like=BaseCallables.callable1)
like_basic.__name__ = 'callable2'
like_variable = schema.Callable(like=BaseCallables.callable2)
like_variable.__name__ = 'callable1'
def test_callable_args(self):
self.assertFalse(self.args(self))
def test_callable_optional(self):
self.assertFalse(self.optional(self))
def test_callable_names(self):
self.assertFalse(self.names(self))
def test_callable_vargs(self):
self.assertFalse(self.vargs(self))
def test_callable_kwargs(self):
self.assertFalse(self.kwargs(self))
def test_callable_like_basic(self):
self.assertFalse(self.like_basic(self))
def test_callable_like_variable(self):
self.assertFalse(self.like_variable(self))
class TestMethod(TestCase, BaseCallables):
good1 = schema.Method()
good1.__name__ = 'callable1'
good2 = schema.Method()
good2.__name__ = 'callable1'
bad = schema.Method()
bad.__name__ = 'callable3'
def test_method_success(self):
self.assertTrue(self.good1(self))
def test_class_method_success(self):
self.assertTrue(self.good2(self))
def test_method_failure(self):
self.assertFalse(self.bad(self))
class TestClassMethod(TestCase, BaseCallables):
good = schema.ClassMethod()
good.__name__ = 'callable2'
bad1 = schema.ClassMethod()
bad1.__name__ = 'callable1'
bad2 = schema.ClassMethod()
bad2.__name__ = 'callable3'
def test_class_method_success(self):
self.assertTrue(self.good(self))
def test_method_failure(self):
self.assertFalse(self.bad1(self))
def test_static_method_failure(self):
self.assertFalse(self.bad2(self))
class TestStaticMethod(TestCase, BaseCallables):
good = schema.StaticMethod()
good.__name__ = 'callable3'
bad1 = schema.StaticMethod()
bad1.__name__ = 'callable1'
bad2 = schema.StaticMethod()
bad2.__name__ = 'callable2'
invalid = schema.StaticMethod(args=1)
invalid.__name__ = 'callable3'
def test_static_method_success(self):
self.assertTrue(self.good(self))
def test_method_failure(self):
self.assertFalse(self.bad1(self))
def test_class_method_failure(self):
self.assertFalse(self.bad2(self))
def test_static_method_parent_failure(self):
self.assertFalse(self.invalid(self))
| mit | -6,076,662,451,609,924,000 | 22.391437 | 75 | 0.702576 | false |
CurryBoy/ProtoML-Deprecated | protoml/feature/tests/test_feature.py | 1 | 4375 | try:
from ..feature import Feature
except (ImportError, ValueError):
import imp
Feature = imp.load_source("feature", "../feature.py").Feature
import numpy as np
import pandas as pd
NUM_RANDOM_TESTS = 10
ROWS = 5
COLUMNS = 5
def test_as_matrix():
df = np.arange(ROWS * COLUMNS).reshape(ROWS, COLUMNS)
foo = Feature()
foo._data_frame = pd.DataFrame(df)
assert np.all(foo.as_matrix() == df)
def test_random_as_matrix():
for count in range(NUM_RANDOM_TESTS):
df = np.random.randn(ROWS, COLUMNS)
foo = Feature()
foo._data_frame = pd.DataFrame(df)
assert np.all(foo.as_matrix() == df)
def compare_data_frames(df1, df2):
return np.all(df1.as_matrix() == df2.as_matrix())
def test_add_data():
df = pd.DataFrame(np.arange(ROWS * COLUMNS).reshape(ROWS, COLUMNS))
foo = Feature()
foo.add_data(df)
assert compare_data_frames(foo, df)
foo.add_data()
assert compare_data_frames(foo, df)
foo.add_data([])
assert compare_data_frames(foo, df)
foo.add_data(None)
assert compare_data_frames(foo, df)
foo.add_data(df)
assert not compare_data_frames(foo, df)
def test_random_add_data():
for count in range(NUM_RANDOM_TESTS):
df = pd.DataFrame(np.random.randn(ROWS, COLUMNS))
foo = Feature()
foo.add_data(df)
assert compare_data_frames(foo, df)
foo.add_data()
assert compare_data_frames(foo, df)
foo.add_data([])
assert compare_data_frames(foo, df)
foo.add_data(None)
assert compare_data_frames(foo, df)
foo.add_data(df)
assert not compare_data_frames(foo, df)
def make_feature(random=True):
if random:
temp = np.random.randn(10, 4)
else:
temp = np.arange(ROWS * COLUMNS).reshape(ROWS, COLUMNS)
foo = Feature()
foo.add_data(temp)
return foo
def test__data_frame(random=False):
df = make_feature(random)
foo = Feature()
foo.add_data(df)
assert compare_data_frames(foo._data_frame, df)
foo.add_data()
assert compare_data_frames(foo._data_frame, df)
foo.add_data([])
assert compare_data_frames(foo._data_frame, df)
foo.add_data(None)
assert compare_data_frames(foo._data_frame, df)
foo.add_data(df)
assert not compare_data_frames(foo._data_frame, df)
def test_random__data_frame():
for count in range(NUM_RANDOM_TESTS):
test__data_frame(True)
def test___init__(random=False):
df = make_feature(random)
foo = Feature()
foo.add_data(df)
foo2 = Feature(df)
assert compare_data_frames(foo, foo2)
# test get item here
def test_rename(random=False):
df = make_feature(random)
foo = Feature()
foo.add_data(df)
assert compare_data_frames(foo, df)
assert compare_data_frames(foo["\\d"], df)
foo.rename("\\d", "boo")
assert compare_data_frames(foo["boo"], df)
assert compare_data_frames(foo["bo"], df)
assert compare_data_frames(foo["b"], df)
def test_random_rename():
for count in range(NUM_RANDOM_TESTS):
test_rename(True)
def test_delete(random=False):
df = make_feature(random)
foo = Feature(df)
foo.delete("[^0]")
assert compare_data_frames(foo, pd.DataFrame(df.ix[:, 0]))
foo.delete("0")
foo.add_data(df)
assert compare_data_frames(foo, df)
def test_random_delete():
for count in range(NUM_RANDOM_TESTS):
test_delete(True)
def test_hash(random=False):
df = pd.DataFrame(make_feature(random)._data_frame)
foo = Feature(df)
foo2 = Feature(df.as_matrix())
assert foo.hash() == foo2.hash()
def test_random_hash():
for count in range(NUM_RANDOM_TESTS):
test_hash(True)
##### insert stuff here
def test_example_1():
foo = make_feature()
foo.add_data(foo)
foo["bara"] = np.arange(10)
foo.add_transforms([("nums", "bara", lambda x: 2 * x), ("arr", "\\d", lambda x: 0 * x, 1)])
# should have first column 0 to 9, second column double the first, and the rest 0's
assert np.sum(foo.fit_transform()) == 135
foo._ftransforms = list()
foo.add_transforms([("hello", "arr|bara", None, True)])
assert np.sum(foo.fit_transform()) == 90
assert np.all(foo.fit_transform().flatten() == 2 * np.arange(10))
if __name__ == "__main__":
foo = make_feature(False)
print foo
foo[1] = 2 * foo["1"]
print foo.columns[5]
| bsd-3-clause | -3,868,092,748,690,401,300 | 25.355422 | 95 | 0.622857 | false |
andarms/pyweek24 | src/bootstrap.py | 1 | 1850 | import os
import pygame as pg
SCREEN_SIZE = (1024, 640)
ORIGINAL_CAPTION = "Game"
# init pygame and create the window
pg.init()
pg.font.init()
os.environ['SDL_VIDEO_CENTERED'] = "TRUE"
pg.display.set_caption(ORIGINAL_CAPTION)
SCREEN = pg.display.set_mode(SCREEN_SIZE)
SCREEN_RECT = SCREEN.get_rect()
# load all the assets
def load_all_gfx(directory, colorkey=(0, 0, 0), accept=(".png", ".jpg", ".bmp")):
"""
Load all graphics with extensions in the accept argument. If alpha
transparency is found in the image the image will be converted using
convert_alpha(). If no alpha transparency is detected image will be
converted using convert() and colorkey will be set to colorkey.
"""
graphics = {}
for pic in os.listdir(directory):
name,ext = os.path.splitext(pic)
if ext.lower() in accept:
img = pg.image.load(os.path.join(directory, pic))
if img.get_alpha():
img = img.convert_alpha()
else:
img = img.convert()
img.set_colorkey(colorkey)
graphics[name]=img
return graphics
def load_all_music(directory, accept=(".wav", ".mp3", ".ogg", ".mdi")):
"""
Create a dictionary of paths to music files in given directory
if their extensions are in accept.
"""
songs = {}
for song in os.listdir(directory):
name,ext = os.path.splitext(song)
if ext.lower() in accept:
songs[name] = os.path.join(directory, song)
return songs
def load_all_fonts(directory, accept=(".ttf",)):
"""
Create a dictionary of paths to font files in given directory
if their extensions are in accept.
"""
return load_all_music(directory, accept)
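# Usage sketch: each loader returns a dict keyed by file basename (without
# extension), so a hypothetical assets/graphics/player.png would be
# reachable as:
#   gfx = load_all_gfx(os.path.join("assets", "graphics"))
#   player_img = gfx["player"]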
GFX = load_all_gfx(os.path.join("assets", "graphics"))
FONTS = load_all_fonts(os.path.join("assets", "fonts")) | mit | -2,816,071,308,563,415,600 | 30.372881 | 75 | 0.634595 | false |
nyu-devops/lab-flask-tdd | tests/factories.py | 1 | 1157 | # Copyright 2016, 2019 John J. Rofrano. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test Factory to make fake objects for testing
"""
import factory
from factory.fuzzy import FuzzyChoice
from service.models import Pet, Gender
class PetFactory(factory.Factory):
"""Creates fake pets that you don't have to feed"""
class Meta:
model = Pet
id = factory.Sequence(lambda n: n)
name = factory.Faker("first_name")
category = FuzzyChoice(choices=["dog", "cat", "bird", "fish"])
available = FuzzyChoice(choices=[True, False])
gender = FuzzyChoice(choices=[Gender.Male, Gender.Female, Gender.Unknown])
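if __name__ == "__main__":
    # Quick demonstration sketch (run inside the project so that
    # service.models imports): the factory builds unsaved Pet instances
    # with randomized fields, and any attribute can be overridden per call.
    pet = PetFactory()
    print(pet.name, pet.category, pet.available, pet.gender)
    fido = PetFactory(name="Fido", available=True)
    print(fido.name, fido.available)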
| apache-2.0 | 4,718,903,675,179,706,000 | 34.060606 | 78 | 0.731201 | false |
OpenPathView/batchPanoMaker | opv_import/model/lot_partition.py | 1 | 1176 | # coding: utf-8
# Copyright (C) 2017 Open Path View, Maison Du Libre
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
# Contributors: Benjamin BERNARD <[email protected]>
# Email: [email protected]
# Description: Represent a lot partition, a set a lot with start and end indexes.
from typing import NamedTuple, List
from opv_import.model import Lot
LotPartition = NamedTuple('LotPartition', [
('ref_lot', Lot),
('lots', List[Lot]),
('start_imgset_index', int),
('start_meta_index', int),
('break_reason', str),
('number_of_good_associations', int)
])
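if __name__ == "__main__":
    # Minimal construction sketch with hypothetical values; real Lot
    # instances come from the OPV import pipeline, and a NamedTuple does
    # not enforce the annotated types at runtime.
    partition = LotPartition(
        ref_lot=None, lots=[], start_imgset_index=0, start_meta_index=0,
        break_reason="end-of-data", number_of_good_associations=0)
    print(partition.break_reason, partition.number_of_good_associations)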
| gpl-3.0 | -1,035,541,762,079,809,900 | 39.551724 | 81 | 0.735544 | false |
z411/weabot | markdown.py | 1 | 83700 | #!/usr/bin/env python
# Copyright (c) 2007-2008 ActiveState Corp.
# License: MIT (http://www.opensource.org/licenses/mit-license.php)
r"""A fast and complete Python implementation of Markdown.
[from http://daringfireball.net/projects/markdown/]
> Markdown is a text-to-HTML filter; it translates an easy-to-read /
> easy-to-write structured text format into HTML. Markdown's text
> format is most similar to that of plain text email, and supports
> features such as headers, *emphasis*, code blocks, blockquotes, and
> links.
>
> Markdown's syntax is designed not as a generic markup language, but
> specifically to serve as a front-end to (X)HTML. You can use span-level
> HTML tags anywhere in a Markdown document, and you can use block level
> HTML tags (like <div> and <table> as well).
Module usage:
>>> import markdown2
>>> markdown2.markdown("*boo!*") # or use `html = markdown_path(PATH)`
u'<p><em>boo!</em></p>\n'
>>> markdowner = Markdown()
>>> markdowner.convert("*boo!*")
u'<p><em>boo!</em></p>\n'
>>> markdowner.convert("**boom!**")
u'<p><strong>boom!</strong></p>\n'
This implementation of Markdown implements the full "core" syntax plus a
number of extras (e.g., code syntax coloring, footnotes) as described on
<http://code.google.com/p/python-markdown2/wiki/Extras>.
"""
cmdln_desc = """A fast and complete Python implementation of Markdown, a
text-to-HTML conversion tool for web writers.
Supported extras (see -x|--extras option below):
* code-friendly: Disable _ and __ for em and strong.
* code-color: Pygments-based syntax coloring of <code> sections.
* cuddled-lists: Allow lists to be cuddled to the preceding paragraph.
* footnotes: Support footnotes as in use on daringfireball.net and
implemented in other Markdown processors (tho not in Markdown.pl v1.0.1).
* html-classes: Takes a dict mapping html tag names (lowercase) to a
string to use for a "class" tag attribute. Currently only supports
"pre" and "code" tags. Add an issue if you require this for other tags.
* pyshell: Treats unindented Python interactive shell sessions as <code>
blocks.
* link-patterns: Auto-link given regex patterns in text (e.g. bug number
references, revision number references).
* xml: Passes one-liner processing instructions and namespaced XML tags.
"""
# Dev Notes:
# - There is already a Python markdown processor
# (http://www.freewisdom.org/projects/python-markdown/).
# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm
# not yet sure if there implications with this. Compare 'pydoc sre'
# and 'perldoc perlre'.
__version_info__ = (1, 0, 1, 17) # first three nums match Markdown.pl
__version__ = '1.0.1.17'
__author__ = "Trent Mick"
import os
import sys
from pprint import pprint
import re
import logging
try:
from hashlib import md5
except ImportError:
from md5 import md5
import optparse
from random import random, randint
import codecs
from urllib import quote
#---- Python version compat
if sys.version_info[:2] < (2,4):
from sets import Set as set
def reversed(sequence):
for i in sequence[::-1]:
yield i
def _unicode_decode(s, encoding, errors='xmlcharrefreplace'):
return unicode(s, encoding, errors)
else:
def _unicode_decode(s, encoding, errors='strict'):
return s.decode(encoding, errors)
#---- globals
DEBUG = False
log = logging.getLogger("markdown")
DEFAULT_TAB_WIDTH = 4
try:
import uuid
except ImportError:
SECRET_SALT = str(randint(0, 1000000))
else:
SECRET_SALT = str(uuid.uuid4())
def _hash_ascii(s):
#return md5(s).hexdigest() # Markdown.pl effectively does this.
return 'md5-' + md5(SECRET_SALT + s).hexdigest()
def _hash_text(s):
return 'md5-' + md5(SECRET_SALT + s.encode("utf-8")).hexdigest()
# Table of hash values for escaped characters:
g_escape_table = dict([(ch, _hash_ascii(ch))
for ch in '\\`*_{}[]()>#+-.!'])
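# Note on g_escape_table: each value is a stable token of the form 'md5-<hex>'
# (e.g. g_escape_table['*'] -> 'md5-...'), which stands in for the literal
# character until _unescape_special_chars() swaps it back at the end.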
#---- exceptions
class MarkdownError(Exception):
pass
#---- public api
def markdown_path(path, encoding="utf-8",
html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
safe_mode=None, extras=None, link_patterns=None,
use_file_vars=False):
fp = codecs.open(path, 'r', encoding)
text = fp.read()
fp.close()
return Markdown(html4tags=html4tags, tab_width=tab_width,
safe_mode=safe_mode, extras=extras,
link_patterns=link_patterns,
use_file_vars=use_file_vars).convert(text)
def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
safe_mode=None, extras=None, link_patterns=None,
use_file_vars=False):
return Markdown(html4tags=html4tags, tab_width=tab_width,
safe_mode=safe_mode, extras=extras,
link_patterns=link_patterns,
use_file_vars=use_file_vars).convert(text)
class Markdown(object):
# The dict of "extras" to enable in processing -- a mapping of
# extra name to argument for the extra. Most extras do not have an
# argument, in which case the value is None.
#
# This can be set via (a) subclassing and (b) the constructor
# "extras" argument.
extras = None
urls = None
titles = None
html_blocks = None
html_spans = None
html_removed_text = "[HTML_REMOVED]" # for compat with markdown.py
# Used to track when we're inside an ordered or unordered list
# (see _ProcessListItems() for details):
list_level = 0
_ws_only_line_re = re.compile(r"^[ \t]+$", re.M)
def __init__(self, html4tags=False, tab_width=4, safe_mode=None,
extras=None, link_patterns=None, use_file_vars=False):
if html4tags:
self.empty_element_suffix = ">"
else:
self.empty_element_suffix = " />"
self.tab_width = tab_width
# For compatibility with earlier markdown2.py and with
# markdown.py's safe_mode being a boolean,
# safe_mode == True -> "replace"
if safe_mode is True:
self.safe_mode = "replace"
else:
self.safe_mode = safe_mode
if self.extras is None:
self.extras = {}
elif not isinstance(self.extras, dict):
self.extras = dict([(e, None) for e in self.extras])
if extras:
if not isinstance(extras, dict):
extras = dict([(e, None) for e in extras])
self.extras.update(extras)
assert isinstance(self.extras, dict)
if "toc" in self.extras and not "header-ids" in self.extras:
self.extras["header-ids"] = None # "toc" implies "header-ids"
self._instance_extras = self.extras.copy()
self.link_patterns = link_patterns
self.use_file_vars = use_file_vars
self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)
def reset(self):
self.urls = {}
self.titles = {}
self.html_blocks = {}
self.html_spans = {}
self.list_level = 0
self.extras = self._instance_extras.copy()
if "footnotes" in self.extras:
self.footnotes = {}
self.footnote_ids = []
if "header-ids" in self.extras:
self._count_from_header_id = {} # no `defaultdict` in Python 2.4
def convert(self, text):
"""Convert the given text."""
# Main function. The order in which other subs are called here is
# essential. Link and image substitutions need to happen before
# _EscapeSpecialChars(), so that any *'s or _'s in the <a>
# and <img> tags get encoded.
# Clear the global hashes. If we don't clear these, you get conflicts
# from other articles when generating a page which contains more than
# one article (e.g. an index page that shows the N most recent
# articles):
self.reset()
if not isinstance(text, unicode):
#TODO: perhaps shouldn't presume UTF-8 for string input?
text = unicode(text, 'utf-8')
if self.use_file_vars:
# Look for emacs-style file variable hints.
emacs_vars = self._get_emacs_vars(text)
if "markdown-extras" in emacs_vars:
splitter = re.compile("[ ,]+")
for e in splitter.split(emacs_vars["markdown-extras"]):
if '=' in e:
ename, earg = e.split('=', 1)
try:
earg = int(earg)
except ValueError:
pass
else:
ename, earg = e, None
self.extras[ename] = earg
# Standardize line endings:
text = re.sub("\r\n|\r", "\n", text)
# Make sure $text ends with a couple of newlines:
text += "\n\n"
# Convert all tabs to spaces.
text = self._detab(text)
# Strip any lines consisting only of spaces and tabs.
# This makes subsequent regexen easier to write, because we can
# match consecutive blank lines with /\n+/ instead of something
# contorted like /[ \t]*\n+/ .
text = self._ws_only_line_re.sub("", text)
if self.safe_mode:
text = self._hash_html_spans(text)
# Turn block-level HTML blocks into hash entries
text = self._hash_html_blocks(text, raw=True)
# Strip link definitions, store in hashes.
if "footnotes" in self.extras:
# Must do footnotes first because an unlucky footnote defn
# looks like a link defn:
# [^4]: this "looks like a link defn"
text = self._strip_footnote_definitions(text)
text = self._strip_link_definitions(text)
text = self._run_block_gamut(text)
if "footnotes" in self.extras:
text = self._add_footnotes(text)
text = self._unescape_special_chars(text)
if self.safe_mode:
text = self._unhash_html_spans(text)
#text += "\n"
rv = UnicodeWithAttrs(text)
if "toc" in self.extras:
rv._toc = self._toc
return rv
_emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE)
# This regular expression is intended to match blocks like this:
# PREFIX Local Variables: SUFFIX
# PREFIX mode: Tcl SUFFIX
# PREFIX End: SUFFIX
# Some notes:
# - "[ \t]" is used instead of "\s" to specifically exclude newlines
# - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
# not like anything other than Unix-style line terminators.
_emacs_local_vars_pat = re.compile(r"""^
(?P<prefix>(?:[^\r\n|\n|\r])*?)
[\ \t]*Local\ Variables:[\ \t]*
(?P<suffix>.*?)(?:\r\n|\n|\r)
(?P<content>.*?\1End:)
""", re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
def _get_emacs_vars(self, text):
"""Return a dictionary of emacs-style local variables.
Parsing is done loosely according to this spec (and according to
some in-practice deviations from this):
http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
"""
emacs_vars = {}
SIZE = pow(2, 13) # 8kB
# Search near the start for a '-*-'-style one-liner of variables.
head = text[:SIZE]
if "-*-" in head:
match = self._emacs_oneliner_vars_pat.search(head)
if match:
emacs_vars_str = match.group(1)
assert '\n' not in emacs_vars_str
emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';')
if s.strip()]
if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]:
# While not in the spec, this form is allowed by emacs:
# -*- Tcl -*-
# where the implied "variable" is "mode". This form
# is only allowed if there are no other variables.
emacs_vars["mode"] = emacs_var_strs[0].strip()
else:
for emacs_var_str in emacs_var_strs:
try:
variable, value = emacs_var_str.strip().split(':', 1)
except ValueError:
log.debug("emacs variables error: malformed -*- "
"line: %r", emacs_var_str)
continue
# Lowercase the variable name because Emacs allows "Mode"
# or "mode" or "MoDe", etc.
emacs_vars[variable.lower()] = value.strip()
tail = text[-SIZE:]
if "Local Variables" in tail:
match = self._emacs_local_vars_pat.search(tail)
if match:
prefix = match.group("prefix")
suffix = match.group("suffix")
lines = match.group("content").splitlines(0)
#print "prefix=%r, suffix=%r, content=%r, lines: %s"\
# % (prefix, suffix, match.group("content"), lines)
# Validate the Local Variables block: proper prefix and suffix
# usage.
for i, line in enumerate(lines):
if not line.startswith(prefix):
log.debug("emacs variables error: line '%s' "
"does not use proper prefix '%s'"
% (line, prefix))
return {}
# Don't validate suffix on last line. Emacs doesn't care,
# neither should we.
if i != len(lines)-1 and not line.endswith(suffix):
log.debug("emacs variables error: line '%s' "
"does not use proper suffix '%s'"
% (line, suffix))
return {}
# Parse out one emacs var per line.
continued_for = None
for line in lines[:-1]: # no var on the last line ("PREFIX End:")
if prefix: line = line[len(prefix):] # strip prefix
if suffix: line = line[:-len(suffix)] # strip suffix
line = line.strip()
if continued_for:
variable = continued_for
if line.endswith('\\'):
line = line[:-1].rstrip()
else:
continued_for = None
emacs_vars[variable] += ' ' + line
else:
try:
variable, value = line.split(':', 1)
except ValueError:
log.debug("local variables error: missing colon "
"in local variables entry: '%s'" % line)
continue
# Do NOT lowercase the variable name, because Emacs only
# allows "mode" (and not "Mode", "MoDe", etc.) in this block.
value = value.strip()
if value.endswith('\\'):
value = value[:-1].rstrip()
continued_for = variable
else:
continued_for = None
emacs_vars[variable] = value
# Unquote values.
for var, val in emacs_vars.items():
if len(val) > 1 and (val.startswith('"') and val.endswith('"')
or val.startswith('"') and val.endswith('"')):
emacs_vars[var] = val[1:-1]
return emacs_vars
# Cribbed from a post by Bart Lateur:
# <http://www.nntp.perl.org/group/perl.macperl.anyperl/154>
_detab_re = re.compile(r'(.*?)\t', re.M)
def _detab_sub(self, match):
g1 = match.group(1)
return g1 + (' ' * (self.tab_width - len(g1) % self.tab_width))
def _detab(self, text):
r"""Remove (leading?) tabs from a file.
>>> m = Markdown()
>>> m._detab("\tfoo")
' foo'
>>> m._detab(" \tfoo")
' foo'
>>> m._detab("\t foo")
' foo'
>>> m._detab(" foo")
' foo'
>>> m._detab(" foo\n\tbar\tblam")
' foo\n bar blam'
"""
if '\t' not in text:
return text
return self._detab_re.subn(self._detab_sub, text)[0]
_block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del'
_strict_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_a,
re.X | re.M)
_block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math'
_liberal_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
.*</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_b,
re.X | re.M)
def _hash_html_block_sub(self, match, raw=False):
html = match.group(1)
if raw and self.safe_mode:
html = self._sanitize_html(html)
key = _hash_text(html)
self.html_blocks[key] = html
return "\n\n" + key + "\n\n"
def _hash_html_blocks(self, text, raw=False):
"""Hashify HTML blocks
We only want to do this for block-level HTML tags, such as headers,
lists, and tables. That's because we still want to wrap <p>s around
"paragraphs" that are wrapped in non-block-level tags, such as anchors,
phrase emphasis, and spans. The list of tags we're looking for is
hard-coded.
@param raw {boolean} indicates if these are raw HTML blocks in
the original source. It makes a difference in "safe" mode.
"""
if '<' not in text:
return text
# Pass `raw` value into our calls to self._hash_html_block_sub.
hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)
# First, look for nested blocks, e.g.:
# <div>
# <div>
# tags for inner block must be indented.
# </div>
# </div>
#
# The outermost tags must start at the left margin for this to match, and
# the inner nested divs must be indented.
# We need to do this before the next, more liberal match, because the next
# match will start at the first `<div>` and stop at the first `</div>`.
text = self._strict_tag_block_re.sub(hash_html_block_sub, text)
# Now match more liberally, simply from `\n<tag>` to `</tag>\n`
text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)
# Special case just for <hr />. It was easier to make a special
# case than to make the other regex more complicated.
if "<hr" in text:
_hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
text = _hr_tag_re.sub(hash_html_block_sub, text)
# Special case for standalone HTML comments:
if "<!--" in text:
start = 0
while True:
# Delimiters for next comment block.
try:
start_idx = text.index("<!--", start)
except ValueError, ex:
break
try:
end_idx = text.index("-->", start_idx) + 3
except ValueError, ex:
break
# Start position for next comment block search.
start = end_idx
# Validate whitespace before comment.
if start_idx:
# - Up to `tab_width - 1` spaces before start_idx.
for i in range(self.tab_width - 1):
if text[start_idx - 1] != ' ':
break
start_idx -= 1
if start_idx == 0:
break
# - Must be preceded by 2 newlines or hit the start of
# the document.
if start_idx == 0:
pass
elif start_idx == 1 and text[0] == '\n':
start_idx = 0 # to match minute detail of Markdown.pl regex
elif text[start_idx-2:start_idx] == '\n\n':
pass
else:
break
# Validate whitespace after comment.
# - Any number of spaces and tabs.
while end_idx < len(text):
if text[end_idx] not in ' \t':
break
end_idx += 1
# - Must be following by 2 newlines or hit end of text.
if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
continue
# Escape and hash (must match `_hash_html_block_sub`).
html = text[start_idx:end_idx]
if raw and self.safe_mode:
html = self._sanitize_html(html)
key = _hash_text(html)
self.html_blocks[key] = html
text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]
if "xml" in self.extras:
# Treat XML processing instructions and namespaced one-liner
# tags as if they were block HTML tags. E.g., if standalone
# (i.e. are their own paragraph), the following do not get
# wrapped in a <p> tag:
# <?foo bar?>
#
# <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/>
_xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)
text = _xml_oneliner_re.sub(hash_html_block_sub, text)
return text
def _strip_link_definitions(self, text):
# Strips link definitions from text, stores the URLs and titles in
# hash references.
less_than_tab = self.tab_width - 1
# Link defs are in the form:
# [id]: url "optional title"
_link_def_re = re.compile(r"""
^[ ]{0,%d}\[(.+)\]: # id = \1
[ \t]*
\n? # maybe *one* newline
[ \t]*
<?(.+?)>? # url = \2
[ \t]*
(?:
\n? # maybe one newline
[ \t]*
(?<=\s) # lookbehind for whitespace
['"(]
([^\n]*) # title = \3
['")]
[ \t]*
)? # title is optional
(?:\n+|\Z)
""" % less_than_tab, re.X | re.M | re.U)
return _link_def_re.sub(self._extract_link_def_sub, text)
def _extract_link_def_sub(self, match):
id, url, title = match.groups()
key = id.lower() # Link IDs are case-insensitive
self.urls[key] = self._encode_amps_and_angles(url)
if title:
            self.titles[key] = title.replace('"', '&quot;')
return ""
def _extract_footnote_def_sub(self, match):
id, text = match.groups()
text = _dedent(text, skip_first_line=not text.startswith('\n')).strip()
normed_id = re.sub(r'\W', '-', id)
# Ensure footnote text ends with a couple newlines (for some
# block gamut matches).
self.footnotes[normed_id] = text + "\n\n"
return ""
def _strip_footnote_definitions(self, text):
"""A footnote definition looks like this:
[^note-id]: Text of the note.
May include one or more indented paragraphs.
Where,
- The 'note-id' can be pretty much anything, though typically it
is the number of the footnote.
- The first paragraph may start on the next line, like so:
[^note-id]:
Text of the note.
"""
less_than_tab = self.tab_width - 1
footnote_def_re = re.compile(r'''
^[ ]{0,%d}\[\^(.+)\]: # id = \1
[ \t]*
( # footnote text = \2
# First line need not start with the spaces.
(?:\s*.*\n+)
(?:
(?:[ ]{%d} | \t) # Subsequent lines must be indented.
.*\n+
)*
)
# Lookahead for non-space at line-start, or end of doc.
(?:(?=^[ ]{0,%d}\S)|\Z)
''' % (less_than_tab, self.tab_width, self.tab_width),
re.X | re.M)
return footnote_def_re.sub(self._extract_footnote_def_sub, text)
_hr_res = [
re.compile(r"^[ ]{0,2}([ ]?\*[ ]?){3,}[ \t]*$", re.M),
re.compile(r"^[ ]{0,2}([ ]?\-[ ]?){3,}[ \t]*$", re.M),
re.compile(r"^[ ]{0,2}([ ]?\_[ ]?){3,}[ \t]*$", re.M),
]
def _run_block_gamut(self, text):
# These are all the transformations that form block-level
# tags like paragraphs, headers, and list items.
#text = self._do_headers(text)
# Do Horizontal Rules:
#hr = "\n<hr"+self.empty_element_suffix+"\n"
#for hr_re in self._hr_res:
# text = hr_re.sub(hr, text)
text = self._do_lists(text)
if "pyshell" in self.extras:
text = self._prepare_pyshell_blocks(text)
text = self._do_code_blocks(text)
text = self._do_block_quotes(text)
# We already ran _HashHTMLBlocks() before, in Markdown(), but that
# was to escape raw HTML in the original Markdown source. This time,
# we're escaping the markup we've just created, so that we don't wrap
# <p> tags around block-level tags.
text = self._hash_html_blocks(text)
text = self._form_paragraphs(text)
return text
def _pyshell_block_sub(self, match):
lines = match.group(0).splitlines(0)
_dedentlines(lines)
indent = ' ' * self.tab_width
s = ('\n' # separate from possible cuddled paragraph
+ indent + ('\n'+indent).join(lines)
+ '\n\n')
return s
def _prepare_pyshell_blocks(self, text):
"""Ensure that Python interactive shell sessions are put in
code blocks -- even if not properly indented.
"""
if ">>>" not in text:
return text
less_than_tab = self.tab_width - 1
_pyshell_block_re = re.compile(r"""
^([ ]{0,%d})>>>[ ].*\n # first line
^(\1.*\S+.*\n)* # any number of subsequent lines
^\n # ends with a blank line
""" % less_than_tab, re.M | re.X)
return _pyshell_block_re.sub(self._pyshell_block_sub, text)
def _run_span_gamut(self, text):
# These are all the transformations that occur *within* block-level
# tags like paragraphs, headers, and list items.
        #text = self._do_code_spans(text) - the AA!
text = self._escape_special_chars(text)
# Process anchor and image tags.
text = self._do_links(text)
# Make links out of things like `<http://example.com/>`
# Must come after _do_links(), because you can use < and >
# delimiters in inline links like [this](<url>).
#text = self._do_auto_links(text)
if "link-patterns" in self.extras:
text = self._do_link_patterns(text)
text = self._encode_amps_and_angles(text)
text = self._do_italics_and_bold(text)
# Do hard breaks:
text = re.sub(r"\n", "<br%s" % self.empty_element_suffix, text)
return text
# "Sorta" because auto-links are identified as "tag" tokens.
_sorta_html_tokenize_re = re.compile(r"""
(
# tag
</?
(?:\w+) # tag name
(?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))* # attributes
\s*/?>
|
# auto-link (e.g., <http://www.activestate.com/>)
<\w+[^>]*>
|
<!--.*?--> # comment
|
<\?.*?\?> # processing instruction
)
""", re.X)
def _escape_special_chars(self, text):
# Python markdown note: the HTML tokenization here differs from
# that in Markdown.pl, hence the behaviour for subtle cases can
# differ (I believe the tokenizer here does a better job because
# it isn't susceptible to unmatched '<' and '>' in HTML tags).
# Note, however, that '>' is not allowed in an auto-link URL
# here.
escaped = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup:
# Within tags/HTML-comments/auto-links, encode * and _
# so they don't conflict with their use in Markdown for
# italics and strong. We're replacing each such
# character with its corresponding MD5 checksum value;
# this is likely overkill, but it should prevent us from
# colliding with the escape values by accident.
escaped.append(token.replace('*', g_escape_table['*'])
.replace('_', g_escape_table['_']))
else:
escaped.append(self._encode_backslash_escapes(token))
is_html_markup = not is_html_markup
return ''.join(escaped)
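    # Note on the tokenizer above: re.split() with a capturing group alternates
    # plain text and matched markup, which is why is_html_markup flips each
    # iteration. A minimal sketch:
    #     _sorta_html_tokenize_re.split('a <b>c</b> d')
    #     # -> ['a ', '<b>', 'c', '</b>', ' d']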
def _hash_html_spans(self, text):
# Used for safe_mode.
def _is_auto_link(s):
if ':' in s and self._auto_link_re.match(s):
return True
elif '@' in s and self._auto_email_link_re.match(s):
return True
return False
tokens = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup and not _is_auto_link(token):
sanitized = self._sanitize_html(token)
key = _hash_text(sanitized)
self.html_spans[key] = sanitized
tokens.append(key)
else:
tokens.append(token)
is_html_markup = not is_html_markup
return ''.join(tokens)
def _unhash_html_spans(self, text):
for key, sanitized in self.html_spans.items():
text = text.replace(key, sanitized)
return text
def _sanitize_html(self, s):
if self.safe_mode == "replace":
return self.html_removed_text
elif self.safe_mode == "escape":
replacements = [
                ('&', '&amp;'),
                ('<', '&lt;'),
                ('>', '&gt;'),
]
for before, after in replacements:
s = s.replace(before, after)
return s
else:
raise MarkdownError("invalid value for 'safe_mode': %r (must be "
"'escape' or 'replace')" % self.safe_mode)
_tail_of_inline_link_re = re.compile(r'''
# Match tail of: [text](/url/) or [text](/url/ "title")
\( # literal paren
[ \t]*
(?P<url> # \1
<.*?>
|
.*?
)
[ \t]*
( # \2
(['"]) # quote char = \3
(?P<title>.*?)
\3 # matching quote
)? # title is optional
\)
''', re.X | re.S)
_tail_of_reference_link_re = re.compile(r'''
# Match tail of: [text][id]
[ ]? # one optional space
(?:\n[ ]*)? # one optional newline followed by spaces
\[
(?P<id>.*?)
\]
''', re.X | re.S)
def _do_links(self, text):
"""Turn Markdown link shortcuts into XHTML <a> and <img> tags.
This is a combination of Markdown.pl's _DoAnchors() and
_DoImages(). They are done together because that simplified the
approach. It was necessary to use a different approach than
Markdown.pl because of the lack of atomic matching support in
Python's regex engine used in $g_nested_brackets.
"""
MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24
# `anchor_allowed_pos` is used to support img links inside
# anchors, but not anchors inside anchors. An anchor's start
# pos must be `>= anchor_allowed_pos`.
anchor_allowed_pos = 0
curr_pos = 0
while True: # Handle the next link.
# The next '[' is the start of:
# - an inline anchor: [text](url "title")
# - a reference anchor: [text][id]
# - an inline img: 
# - a reference img: ![text][id]
# - a footnote ref: [^id]
# (Only if 'footnotes' extra enabled)
# - a footnote defn: [^id]: ...
# (Only if 'footnotes' extra enabled) These have already
# been stripped in _strip_footnote_definitions() so no
# need to watch for them.
# - a link definition: [id]: url "title"
# These have already been stripped in
# _strip_link_definitions() so no need to watch for them.
# - not markup: [...anything else...
try:
start_idx = text.index('[', curr_pos)
except ValueError:
break
text_length = len(text)
# Find the matching closing ']'.
# Markdown.pl allows *matching* brackets in link text so we
# will here too. Markdown.pl *doesn't* currently allow
# matching brackets in img alt text -- we'll differ in that
# regard.
bracket_depth = 0
for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,
text_length)):
ch = text[p]
if ch == ']':
bracket_depth -= 1
if bracket_depth < 0:
break
elif ch == '[':
bracket_depth += 1
else:
# Closing bracket not found within sentinel length.
# This isn't markup.
curr_pos = start_idx + 1
continue
link_text = text[start_idx+1:p]
# Possibly a footnote ref?
if "footnotes" in self.extras and link_text.startswith("^"):
normed_id = re.sub(r'\W', '-', link_text[1:])
if normed_id in self.footnotes:
self.footnote_ids.append(normed_id)
result = '<sup class="footnote-ref" id="fnref-%s">' \
'<a href="#fn-%s">%s</a></sup>' \
% (normed_id, normed_id, len(self.footnote_ids))
text = text[:start_idx] + result + text[p+1:]
else:
# This id isn't defined, leave the markup alone.
curr_pos = p+1
continue
# Now determine what this is by the remainder.
p += 1
if p == text_length:
return text
# Inline anchor or img?
if text[p] == '(': # attempt at perf improvement
match = self._tail_of_inline_link_re.match(text, p)
if match:
# Handle an inline anchor or img.
#is_img = start_idx > 0 and text[start_idx-1] == "!"
#if is_img:
# start_idx -= 1
is_img = False
url, title = match.group("url"), match.group("title")
if url and url[0] == '<':
url = url[1:-1] # '<url>' -> 'url'
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', g_escape_table['*']) \
.replace('_', g_escape_table['_'])
if title:
title_str = ' title="%s"' \
% title.replace('*', g_escape_table['*']) \
.replace('_', g_escape_table['_']) \
.replace('"', '"')
else:
title_str = ''
if is_img:
result = '<img src="%s" alt="%s"%s%s' \
                            % (url.replace('"', '&quot;'),
                               link_text.replace('"', '&quot;'),
title_str, self.empty_element_suffix)
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
elif start_idx >= anchor_allowed_pos:
result_head = '<a href="%s"%s>' % (url, title_str)
result = '%s%s</a>' % (result_head, link_text)
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
continue
# Reference anchor or img?
else:
match = self._tail_of_reference_link_re.match(text, p)
if match:
# Handle a reference-style anchor or img.
#is_img = start_idx > 0 and text[start_idx-1] == "!"
#if is_img:
# start_idx -= 1
is_img = False
link_id = match.group("id").lower()
if not link_id:
link_id = link_text.lower() # for links like [this][]
if link_id in self.urls:
url = self.urls[link_id]
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', g_escape_table['*']) \
.replace('_', g_escape_table['_'])
title = self.titles.get(link_id)
if title:
title = title.replace('*', g_escape_table['*']) \
.replace('_', g_escape_table['_'])
title_str = ' title="%s"' % title
else:
title_str = ''
if is_img:
result = '<img src="%s" alt="%s"%s%s' \
                                % (url.replace('"', '&quot;'),
                                   link_text.replace('"', '&quot;'),
title_str, self.empty_element_suffix)
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
elif start_idx >= anchor_allowed_pos:
result = '<a href="%s"%s>%s</a>' \
% (url, title_str, link_text)
result_head = '<a href="%s"%s>' % (url, title_str)
result = '%s%s</a>' % (result_head, link_text)
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
else:
# This id isn't defined, leave the markup alone.
curr_pos = match.end()
continue
# Otherwise, it isn't markup.
curr_pos = start_idx + 1
return text
def header_id_from_text(self, text, prefix):
"""Generate a header id attribute value from the given header
HTML content.
This is only called if the "header-ids" extra is enabled.
Subclasses may override this for different header ids.
"""
header_id = _slugify(text)
if prefix:
header_id = prefix + '-' + header_id
if header_id in self._count_from_header_id:
self._count_from_header_id[header_id] += 1
header_id += '-%s' % self._count_from_header_id[header_id]
else:
self._count_from_header_id[header_id] = 1
return header_id
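    # Illustrative behaviour, assuming the "header-ids" extra is set to "doc":
    # a header "Hello, World!" yields id "doc-hello-world", and a second
    # identical header yields "doc-hello-world-2".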
_toc = None
def _toc_add_entry(self, level, id, name):
if self._toc is None:
self._toc = []
self._toc.append((level, id, name))
_setext_h_re = re.compile(r'^(.+)[ \t]*\n(=+|-+)[ \t]*\n+', re.M)
def _setext_h_sub(self, match):
n = {"=": 1, "-": 2}[match.group(2)[0]]
demote_headers = self.extras.get("demote-headers")
if demote_headers:
n = min(n + demote_headers, 6)
header_id_attr = ""
if "header-ids" in self.extras:
header_id = self.header_id_from_text(match.group(1),
prefix=self.extras["header-ids"])
header_id_attr = ' id="%s"' % header_id
html = self._run_span_gamut(match.group(1))
if "toc" in self.extras:
self._toc_add_entry(n, header_id, html)
return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n)
_atx_h_re = re.compile(r'''
^(\#{1,6}) # \1 = string of #'s
[ \t]*
(.+?) # \2 = Header text
[ \t]*
(?<!\\) # ensure not an escaped trailing '#'
\#* # optional closing #'s (not counted)
\n+
''', re.X | re.M)
def _atx_h_sub(self, match):
n = len(match.group(1))
demote_headers = self.extras.get("demote-headers")
if demote_headers:
n = min(n + demote_headers, 6)
header_id_attr = ""
if "header-ids" in self.extras:
header_id = self.header_id_from_text(match.group(2),
prefix=self.extras["header-ids"])
header_id_attr = ' id="%s"' % header_id
html = self._run_span_gamut(match.group(2))
if "toc" in self.extras:
self._toc_add_entry(n, header_id, html)
return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n)
def _do_headers(self, text):
# Setext-style headers:
# Header 1
# ========
#
# Header 2
# --------
text = self._setext_h_re.sub(self._setext_h_sub, text)
# atx-style headers:
# # Header 1
# ## Header 2
# ## Header 2 with closing hashes ##
# ...
# ###### Header 6
text = self._atx_h_re.sub(self._atx_h_sub, text)
return text
_marker_ul_chars = '*+-'
_marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars
_marker_ul = '(?:[%s])' % _marker_ul_chars
_marker_ol = r'(?:\d+\.)'
def _list_sub(self, match):
lst = match.group(1)
lst_type = match.group(3) in self._marker_ul_chars and "ul" or "ol"
result = self._process_list_items(lst)
if self.list_level:
return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type)
else:
return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type)
def _do_lists(self, text):
# Form HTML ordered (numbered) and unordered (bulleted) lists.
for marker_pat in (self._marker_ul, self._marker_ol):
# Re-usable pattern to match any entire ul or ol list:
less_than_tab = self.tab_width - 1
whole_list = r'''
( # \1 = whole list
( # \2
[ ]{0,%d}
(%s) # \3 = first list item marker
[ \t]+
)
(?:.+?)
( # \4
\Z
|
\n{2,}
(?=\S)
(?! # Negative lookahead for another list item marker
[ \t]*
%s[ \t]+
)
)
)
''' % (less_than_tab, marker_pat, marker_pat)
# We use a different prefix before nested lists than top-level lists.
# See extended comment in _process_list_items().
#
# Note: There's a bit of duplication here. My original implementation
# created a scalar regex pattern as the conditional result of the test on
# $g_list_level, and then only ran the $text =~ s{...}{...}egmx
# substitution once, using the scalar as the pattern. This worked,
# everywhere except when running under MT on my hosting account at Pair
# Networks. There, this caused all rebuilds to be killed by the reaper (or
# perhaps they crashed, but that seems incredibly unlikely given that the
# same script on the same server ran fine *except* under MT. I've spent
# more time trying to figure out why this is happening than I'd like to
# admit. My only guess, backed up by the fact that this workaround works,
        # is that Perl optimizes the substitution when it can figure out that the
# pattern will never change, and when this optimization isn't on, we run
        # afoul of the reaper. Thus, the slightly redundant code below that uses two
# static s/// patterns rather than one conditional pattern.
if self.list_level:
sub_list_re = re.compile("^"+whole_list, re.X | re.M | re.S)
text = sub_list_re.sub(self._list_sub, text)
else:
list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list,
re.X | re.M | re.S)
text = list_re.sub(self._list_sub, text)
return text
_list_item_re = re.compile(r'''
(\n)? # leading line = \1
(^[ \t]*) # leading whitespace = \2
(?P<marker>%s) [ \t]+ # list marker = \3
((?:.+?) # list item text = \4
(\n{1,2})) # eols = \5
(?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+))
''' % (_marker_any, _marker_any),
re.M | re.X | re.S)
_last_li_endswith_two_eols = False
def _list_item_sub(self, match):
item = match.group(4)
leading_line = match.group(1)
leading_space = match.group(2)
if leading_line or "\n\n" in item or self._last_li_endswith_two_eols:
item = self._run_block_gamut(self._outdent(item))
else:
# Recursion for sub-lists:
item = self._do_lists(self._outdent(item))
if item.endswith('\n'):
item = item[:-1]
item = self._run_span_gamut(item)
self._last_li_endswith_two_eols = (len(match.group(5)) == 2)
return "<li>%s</li>\n" % item
def _process_list_items(self, list_str):
# Process the contents of a single ordered or unordered list,
# splitting it into individual list items.
# The $g_list_level global keeps track of when we're inside a list.
# Each time we enter a list, we increment it; when we leave a list,
# we decrement. If it's zero, we're not in a list anymore.
#
# We do this because when we're not inside a list, we want to treat
# something like this:
#
# I recommend upgrading to version
# 8. Oops, now this line is treated
# as a sub-list.
#
# As a single paragraph, despite the fact that the second line starts
# with a digit-period-space sequence.
#
# Whereas when we're inside a list (or sub-list), that line will be
# treated as the start of a sub-list. What a kludge, huh? This is
# an aspect of Markdown's syntax that's hard to parse perfectly
# without resorting to mind-reading. Perhaps the solution is to
# change the syntax rules such that sub-lists must start with a
# starting cardinal number; e.g. "1." or "a.".
self.list_level += 1
self._last_li_endswith_two_eols = False
list_str = list_str.rstrip('\n') + '\n'
list_str = self._list_item_re.sub(self._list_item_sub, list_str)
self.list_level -= 1
return list_str
def _get_pygments_lexer(self, lexer_name):
try:
from pygments import lexers, util
except ImportError:
return None
try:
return lexers.get_lexer_by_name(lexer_name)
except util.ClassNotFound:
return None
def _color_with_pygments(self, codeblock, lexer, **formatter_opts):
import pygments
import pygments.formatters
class HtmlCodeFormatter(pygments.formatters.HtmlFormatter):
def _wrap_code(self, inner):
"""A function for use in a Pygments Formatter which
wraps in <code> tags.
"""
yield 0, "<code>"
for tup in inner:
yield tup
yield 0, "</code>"
def wrap(self, source, outfile):
"""Return the source with a code, pre, and div."""
return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
formatter = HtmlCodeFormatter(cssclass="codehilite", **formatter_opts)
return pygments.highlight(codeblock, lexer, formatter)
def _code_block_sub(self, match):
codeblock = match.group(1)
codeblock = self._outdent(codeblock)
codeblock = self._detab(codeblock)
codeblock = codeblock.lstrip('\n') # trim leading newlines
codeblock = codeblock.rstrip() # trim trailing whitespace
if "code-color" in self.extras and codeblock.startswith(":::"):
lexer_name, rest = codeblock.split('\n', 1)
lexer_name = lexer_name[3:].strip()
lexer = self._get_pygments_lexer(lexer_name)
codeblock = rest.lstrip("\n") # Remove lexer declaration line.
if lexer:
formatter_opts = self.extras['code-color'] or {}
colored = self._color_with_pygments(codeblock, lexer,
**formatter_opts)
return "\n\n%s\n\n" % colored
codeblock = self._encode_code(codeblock)
pre_class_str = self._html_class_str_from_tag("pre")
code_class_str = self._html_class_str_from_tag("code")
return "\n\n<pre%s><code%s>%s\n</code></pre>\n\n" % (
pre_class_str, code_class_str, codeblock)
def _html_class_str_from_tag(self, tag):
"""Get the appropriate ' class="..."' string (note the leading
space), if any, for the given tag.
"""
if "html-classes" not in self.extras:
return ""
try:
html_classes_from_tag = self.extras["html-classes"]
except TypeError:
return ""
else:
if tag in html_classes_from_tag:
return ' class="%s"' % html_classes_from_tag[tag]
return ""
def _do_code_blocks(self, text):
"""Process Markdown `<pre><code>` blocks."""
code_block_re = re.compile(r'''
(?:\n\n|\A)
( # $1 = the code block -- one or more lines, starting with a space/tab
(?:
(?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces
.*\n+
)+
)
((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc
''' % (self.tab_width, self.tab_width),
re.M | re.X)
return code_block_re.sub(self._code_block_sub, text)
# Rules for a code span:
# - backslash escapes are not interpreted in a code span
    # - to include one or a run of more backticks the delimiters must
# be a longer run of backticks
# - cannot start or end a code span with a backtick; pad with a
# space and that space will be removed in the emitted HTML
# See `test/tm-cases/escapes.text` for a number of edge-case
# examples.
_code_span_re = re.compile(r'''
(?<!\\)
(`+) # \1 = Opening run of `
(?!`) # See Note A test/tm-cases/escapes.text
(.+?) # \2 = The code block
(?<!`)
\1 # Matching closer
(?!`)
''', re.X | re.S)
def _code_span_sub(self, match):
c = match.group(2).strip(" \t")
c = self._encode_code(c)
return "<code>%s</code>" % c
def _do_code_spans(self, text):
# * Backtick quotes are used for <code></code> spans.
#
# * You can use multiple backticks as the delimiters if you want to
# include literal backticks in the code span. So, this input:
#
# Just type ``foo `bar` baz`` at the prompt.
#
# Will translate to:
#
# <p>Just type <code>foo `bar` baz</code> at the prompt.</p>
#
# There's no arbitrary limit to the number of backticks you
    #     can use as delimiters. If you need three consecutive backticks
# in your code, use four for delimiters, etc.
#
# * You can use spaces to get literal backticks at the edges:
#
# ... type `` `bar` `` ...
#
# Turns to:
#
# ... type <code>`bar`</code> ...
return self._code_span_re.sub(self._code_span_sub, text)
def _encode_code(self, text):
"""Encode/escape certain characters inside Markdown code runs.
The point is that in code, these characters are literals,
and lose their special Markdown meanings.
"""
replacements = [
# Encode all ampersands; HTML entities are not
# entities within a Markdown code span.
            ('&', '&amp;'),
            # Do the angle bracket song and dance:
            ('<', '&lt;'),
            ('>', '&gt;'),
# Now, escape characters that are magic in Markdown:
('*', g_escape_table['*']),
('_', g_escape_table['_']),
('{', g_escape_table['{']),
('}', g_escape_table['}']),
('[', g_escape_table['[']),
(']', g_escape_table[']']),
('\\', g_escape_table['\\']),
]
for before, after in replacements:
text = text.replace(before, after)
return text
_strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
_em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
#_spoiler_re = re.compile(r"###(?=\S)(.+?[*_]*)(?<=\S)###", re.S)
_code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
_code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)
def _do_italics_and_bold(self, text):
# <strong> must go first:
if "code-friendly" in self.extras:
text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text)
text = self._code_friendly_em_re.sub(r"<em>\1</em>", text)
else:
text = self._strong_re.sub(r"<strong>\2</strong>", text)
text = self._em_re.sub(r"<em>\2</em>", text)
#text = self._spoiler_re.sub("<del>\\1</del>", text)
return text
_block_quote_re = re.compile(r'''
( # Wrap whole match in \1
(
^[ \t]*>[^>] # '>' at the start of a line
.+\n # rest of the first line
\n* # blanks
)+
)
''', re.M | re.X)
    _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M)
_html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
def _dedent_two_spaces_sub(self, match):
return re.sub(r'(?m)^ ', '', match.group(1))
def _block_quote_sub(self, match):
bq = match.group(1)
#bq = self._bq_one_level_re.sub('', bq) # trim one level of quoting
bq = self._ws_only_line_re.sub('', bq) # trim whitespace-only lines
bq = bq.strip('\n')
bq = self._run_span_gamut(bq)
#bq = self._run_block_gamut(bq) # recurse
bq = re.sub('(?m)^', ' ', bq)
# These leading spaces screw with <pre> content, so we need to fix that:
bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)
return "<blockquote>\n%s\n</blockquote>\n\n" % bq
def _do_block_quotes(self, text):
if '>' not in text:
return text
return self._block_quote_re.sub(self._block_quote_sub, text)
def _form_paragraphs(self, text):
# Strip leading and trailing lines:
text = text.strip('\n')
# Wrap <p> tags.
grafs = []
for i, graf in enumerate(re.split(r"\n{2,}", text)):
if graf in self.html_blocks:
# Unhashify HTML blocks
grafs.append(self.html_blocks[graf])
else:
cuddled_list = None
if "cuddled-lists" in self.extras:
# Need to put back trailing '\n' for `_list_item_re`
# match at the end of the paragraph.
li = self._list_item_re.search(graf + '\n')
# Two of the same list marker in this paragraph: a likely
# candidate for a list cuddled to preceding paragraph
# text (issue 33). Note the `[-1]` is a quick way to
# consider numeric bullets (e.g. "1." and "2.") to be
# equal.
if (li and len(li.group(2)) <= 3 and li.group("next_marker")
and li.group("marker")[-1] == li.group("next_marker")[-1]):
start = li.start()
cuddled_list = self._do_lists(graf[start:]).rstrip("\n")
assert cuddled_list.startswith("<ul>") or cuddled_list.startswith("<ol>")
graf = graf[:start]
# Wrap <p> tags.
graf = self._run_span_gamut(graf)
grafs.append("<p>" + graf.lstrip(" \t") + "</p>")
if cuddled_list:
grafs.append(cuddled_list)
return "\n\n".join(grafs)
def _add_footnotes(self, text):
if self.footnotes:
footer = [
'<div class="footnotes">',
'<hr' + self.empty_element_suffix,
'<ol>',
]
for i, id in enumerate(self.footnote_ids):
if i != 0:
footer.append('')
footer.append('<li id="fn-%s">' % id)
footer.append(self._run_block_gamut(self.footnotes[id]))
backlink = ('<a href="#fnref-%s" '
'class="footnoteBackLink" '
'title="Jump back to footnote %d in the text.">'
                            '&#8617;</a>' % (id, i+1))
if footer[-1].endswith("</p>"):
footer[-1] = footer[-1][:-len("</p>")] \
+ ' ' + backlink + "</p>"
else:
footer.append("\n<p>%s</p>" % backlink)
footer.append('</li>')
footer.append('</ol>')
footer.append('</div>')
return text + '\n\n' + '\n'.join(footer)
else:
return text
# Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
# http://bumppo.net/projects/amputator/
_ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
_naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
_naked_gt_re = re.compile(r'''(?<![a-z?!/'"-])>''', re.I)
def _encode_amps_and_angles(self, text):
# Smart processing for ampersands and angle brackets that need
# to be encoded.
        text = self._ampersand_re.sub('&amp;', text)
        # Encode naked <'s
        text = self._naked_lt_re.sub('&lt;', text)
        # Encode naked >'s
        # Note: Other markdown implementations (e.g. Markdown.pl, PHP
        # Markdown) don't do this.
        text = self._naked_gt_re.sub('&gt;', text)
return text
def _encode_backslash_escapes(self, text):
for ch, escape in g_escape_table.items():
text = text.replace("\\"+ch, escape)
return text
_auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
def _auto_link_sub(self, match):
g1 = match.group(1)
return '<a href="%s">%s</a>' % (g1, g1)
_auto_email_link_re = re.compile(r"""
<
(?:mailto:)?
(
[-.\w]+
\@
[-\w]+(\.[-\w]+)*\.[a-z]+
)
>
""", re.I | re.X | re.U)
def _auto_email_link_sub(self, match):
return self._encode_email_address(
self._unescape_special_chars(match.group(1)))
def _do_auto_links(self, text):
text = self._auto_link_re.sub(self._auto_link_sub, text)
text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
return text
def _encode_email_address(self, addr):
# Input: an email address, e.g. "[email protected]"
#
# Output: the email address as a mailto link, with each character
# of the address encoded as either a decimal or hex entity, in
# the hopes of foiling most address harvesting spam bots. E.g.:
#
# <a href="mailto:foo@e
# xample.com">foo
# @example.com</a>
#
# Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
# mailing list: <http://tinyurl.com/yu7ue>
chars = [_xml_encode_email_char_at_random(ch)
for ch in "mailto:" + addr]
# Strip the mailto: from the visible part.
addr = '<a href="%s">%s</a>' \
% (''.join(chars), ''.join(chars[7:]))
return addr
def _do_link_patterns(self, text):
"""Caveat emptor: there isn't much guarding against link
patterns being formed inside other standard Markdown links, e.g.
inside a [link def][like this].
Dev Notes: *Could* consider prefixing regexes with a negative
lookbehind assertion to attempt to guard against this.
"""
link_from_hash = {}
for regex, repl in self.link_patterns:
replacements = []
for match in regex.finditer(text):
if hasattr(repl, "__call__"):
href = repl(match)
else:
href = match.expand(repl)
replacements.append((match.span(), href))
for (start, end), href in reversed(replacements):
escaped_href = (
href.replace('"', '"') # b/c of attr quote
# To avoid markdown <em> and <strong>:
.replace('*', g_escape_table['*'])
.replace('_', g_escape_table['_']))
link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
hash = _hash_text(link)
link_from_hash[hash] = link
text = text[:start] + hash + text[end:]
for hash, link in link_from_hash.items():
text = text.replace(hash, link)
return text
def _unescape_special_chars(self, text):
# Swap back in all the special characters we've hidden.
for ch, hash in g_escape_table.items():
text = text.replace(hash, ch)
return text
def _outdent(self, text):
# Remove one level of line-leading tabs or spaces
return self._outdent_re.sub('', text)
class MarkdownWithExtras(Markdown):
"""A markdowner class that enables most extras:
- footnotes
- code-color (only has effect if 'pygments' Python module on path)
These are not included:
- pyshell (specific to Python-related documenting)
- code-friendly (because it *disables* part of the syntax)
- link-patterns (because you need to specify some actual
link-patterns anyway)
"""
extras = ["footnotes", "code-color"]
#---- internal support functions
class UnicodeWithAttrs(unicode):
"""A subclass of unicode used for the return value of conversion to
possibly attach some attributes. E.g. the "toc_html" attribute when
the "toc" extra is used.
"""
_toc = None
@property
def toc_html(self):
"""Return the HTML for the current TOC.
This expects the `_toc` attribute to have been set on this instance.
"""
if self._toc is None:
return None
def indent():
return ' ' * (len(h_stack) - 1)
lines = []
h_stack = [0] # stack of header-level numbers
for level, id, name in self._toc:
if level > h_stack[-1]:
lines.append("%s<ul>" % indent())
h_stack.append(level)
elif level == h_stack[-1]:
lines[-1] += "</li>"
else:
while level < h_stack[-1]:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul></li>" % indent())
lines.append(u'%s<li><a href="#%s">%s</a>' % (
indent(), id, name))
while len(h_stack) > 1:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul>" % indent())
return '\n'.join(lines) + '\n'
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
def _slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
From Django's "django/template/defaultfilters.py".
"""
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(_slugify_strip_re.sub('', value).strip().lower())
return _slugify_hyphenate_re.sub('-', value)
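# A minimal doctest-style sketch of _slugify():
#     >>> _slugify(u'Header One!')
#     u'header-one'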
# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
def _curry(*args, **kwargs):
function, args = args[0], args[1:]
def result(*rest, **kwrest):
combined = kwargs.copy()
combined.update(kwrest)
return function(*args + rest, **combined)
return result
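# A minimal sketch of _curry(), mirroring its use in _hash_html_blocks()
# above ('md' stands for an assumed Markdown instance):
#     hash_sub = _curry(md._hash_html_block_sub, raw=True)
#     # hash_sub(match) == md._hash_html_block_sub(match, raw=True)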
# Recipe: regex_from_encoded_pattern (1.0)
def _regex_from_encoded_pattern(s):
"""'foo' -> re.compile(re.escape('foo'))
'/foo/' -> re.compile('foo')
'/foo/i' -> re.compile('foo', re.I)
"""
if s.startswith('/') and s.rfind('/') != 0:
# Parse it: /PATTERN/FLAGS
idx = s.rfind('/')
pattern, flags_str = s[1:idx], s[idx+1:]
flag_from_char = {
"i": re.IGNORECASE,
"l": re.LOCALE,
"s": re.DOTALL,
"m": re.MULTILINE,
"u": re.UNICODE,
}
flags = 0
for char in flags_str:
try:
flags |= flag_from_char[char]
except KeyError:
raise ValueError("unsupported regex flag: '%s' in '%s' "
"(must be one of '%s')"
% (char, s, ''.join(flag_from_char.keys())))
return re.compile(s[1:idx], flags)
else: # not an encoded regex
return re.compile(re.escape(s))
# Recipe: dedent (0.1.2)
def _dedentlines(lines, tabsize=8, skip_first_line=False):
"""_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
"lines" is a list of lines to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
Same as dedent() except operates on a sequence of lines. Note: the
lines list is modified **in-place**.
"""
DEBUG = False
if DEBUG:
print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
% (tabsize, skip_first_line)
indents = []
margin = None
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
indent = 0
for ch in line:
if ch == ' ':
indent += 1
elif ch == '\t':
indent += tabsize - (indent % tabsize)
elif ch in '\r\n':
continue # skip all-whitespace lines
else:
break
else:
continue # skip all-whitespace lines
if DEBUG: print "dedent: indent=%d: %r" % (indent, line)
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if DEBUG: print "dedent: margin=%r" % margin
if margin is not None and margin > 0:
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
removed = 0
for j, ch in enumerate(line):
if ch == ' ':
removed += 1
elif ch == '\t':
removed += tabsize - (removed % tabsize)
elif ch in '\r\n':
if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line
lines[i] = lines[i][j:]
break
else:
raise ValueError("unexpected non-whitespace char %r in "
"line %r while removing %d-space margin"
% (ch, line, margin))
if DEBUG:
print "dedent: %r: %r -> removed %d/%d"\
% (line, ch, removed, margin)
if removed == margin:
lines[i] = lines[i][j+1:]
break
elif removed > margin:
lines[i] = ' '*(removed-margin) + lines[i][j+1:]
break
else:
if removed:
lines[i] = lines[i][removed:]
return lines
def _dedent(text, tabsize=8, skip_first_line=False):
"""_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
"text" is the text to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
textwrap.dedent(s), but don't expand tabs to spaces
"""
lines = text.splitlines(1)
_dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
return ''.join(lines)
class _memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
http://wiki.python.org/moin/PythonDecoratorLibrary
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
self.cache[args] = value = self.func(*args)
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
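# A minimal usage sketch for _memoized (the function _fib is hypothetical):
#
#     @_memoized
#     def _fib(n):
#         return n if n < 2 else _fib(n - 1) + _fib(n - 2)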
def _xml_oneliner_re_from_tab_width(tab_width):
"""Standalone XML processing instruction regex."""
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in $1
[ ]{0,%d}
(?:
<\?\w+\b\s+.*?\?> # XML processing instruction
|
<\w+:\w+\b\s+.*?/> # namespaced single tag
)
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
def _hr_tag_re_from_tab_width(tab_width):
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in \1
[ ]{0,%d}
<(hr) # start tag = \2
\b # word break
([^<>])*? #
/?> # the matching end tag
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
def _xml_encode_email_char_at_random(ch):
r = random()
# Roughly 10% raw, 45% hex, 45% dec.
# '@' *must* be encoded. I [John Gruber] insist.
# Issue 26: '_' must be encoded.
if r > 0.9 and ch not in "@_":
return ch
elif r < 0.45:
# The [1:] is to drop leading '0': 0x63 -> x63
return '&#%s;' % hex(ord(ch))[1:]
else:
return '&#%s;' % ord(ch)
#---- mainline
class _NoReflowFormatter(optparse.IndentedHelpFormatter):
"""An optparse formatter that does NOT reflow the description."""
def format_description(self, description):
return description or ""
def _test():
import doctest
doctest.testmod()
def main(argv=None):
if argv is None:
argv = sys.argv
if not logging.root.handlers:
logging.basicConfig()
usage = "usage: %prog [PATHS...]"
version = "%prog "+__version__
parser = optparse.OptionParser(prog="markdown2", usage=usage,
version=version, description=cmdln_desc,
formatter=_NoReflowFormatter())
parser.add_option("-v", "--verbose", dest="log_level",
action="store_const", const=logging.DEBUG,
help="more verbose output")
parser.add_option("--encoding",
help="specify encoding of text content")
parser.add_option("--html4tags", action="store_true", default=False,
help="use HTML 4 style for empty element tags")
parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
help="sanitize literal HTML: 'escape' escapes "
"HTML meta chars, 'replace' replaces with an "
"[HTML_REMOVED] note")
parser.add_option("-x", "--extras", action="append",
help="Turn on specific extra features (not part of "
"the core Markdown spec). See above.")
parser.add_option("--use-file-vars",
help="Look for and use Emacs-style 'markdown-extras' "
"file var to turn on extras. See "
"<http://code.google.com/p/python-markdown2/wiki/Extras>.")
parser.add_option("--link-patterns-file",
help="path to a link pattern file")
parser.add_option("--self-test", action="store_true",
help="run internal self-tests (some doctests)")
parser.add_option("--compare", action="store_true",
help="run against Markdown.pl as well (for testing)")
parser.set_defaults(log_level=logging.INFO, compare=False,
encoding="utf-8", safe_mode=None, use_file_vars=False)
opts, paths = parser.parse_args()
log.setLevel(opts.log_level)
if opts.self_test:
return _test()
if opts.extras:
extras = {}
for s in opts.extras:
splitter = re.compile("[,;: ]+")
for e in splitter.split(s):
if '=' in e:
ename, earg = e.split('=', 1)
try:
earg = int(earg)
except ValueError:
pass
else:
ename, earg = e, None
extras[ename] = earg
else:
extras = None
if opts.link_patterns_file:
link_patterns = []
f = open(opts.link_patterns_file)
try:
for i, line in enumerate(f.readlines()):
if not line.strip(): continue
if line.lstrip().startswith("#"): continue
try:
pat, href = line.rstrip().rsplit(None, 1)
except ValueError:
raise MarkdownError("%s:%d: invalid link pattern line: %r"
% (opts.link_patterns_file, i+1, line))
link_patterns.append(
(_regex_from_encoded_pattern(pat), href))
finally:
f.close()
else:
link_patterns = None
from os.path import join, dirname, abspath, exists
markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
"Markdown.pl")
for path in paths:
if opts.compare:
print "==== Markdown.pl ===="
perl_cmd = 'perl %s "%s"' % (markdown_pl, path)
o = os.popen(perl_cmd)
perl_html = o.read()
o.close()
sys.stdout.write(perl_html)
print "==== markdown2.py ===="
html = markdown_path(path, encoding=opts.encoding,
html4tags=opts.html4tags,
safe_mode=opts.safe_mode,
extras=extras, link_patterns=link_patterns,
use_file_vars=opts.use_file_vars)
sys.stdout.write(
html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
if extras and "toc" in extras:
log.debug("toc_html: " +
html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
if opts.compare:
test_dir = join(dirname(dirname(abspath(__file__))), "test")
if exists(join(test_dir, "test_markdown2.py")):
sys.path.insert(0, test_dir)
from test_markdown2 import norm_html_from_html
norm_html = norm_html_from_html(html)
norm_perl_html = norm_html_from_html(perl_html)
else:
norm_html = html
norm_perl_html = perl_html
print "==== match? %r ====" % (norm_perl_html == norm_html)
if __name__ == "__main__":
sys.exit( main(sys.argv) )
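# Illustrative CLI usage (not part of the file; file names are made up):
#   python markdown2.py --extras footnotes,toc doc.md > doc.html
#   python markdown2.py --compare test/some_test.text   # also runs Markdown.pl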
| agpl-3.0 | -4,520,632,514,260,235,000 | 38.949119 | 121 | 0.482796 | false |
jamesabel/osnap | launchers/launch.py | 1 | 6079 |
# This file exists under 2 names. The 'real' one is launch.py.
# launch.pyw is a hard link to launch.py
# (launch.pyw - a .pyw since we're launching without a console window)
import appdirs
import glob
import logging
import logging.config
import os
import platform
import sys
import subprocess
# Just for the launcher, not the user's app that OSNAP is launching
AUTHOR = 'abel'
APPLICATION = 'osnap_launcher'
PROGRAM = 'main.py'
def find_osnapy(path_leaf):
"""
go up directory levels until we find our python interpreter
    this is necessary because of the way various operating systems (e.g. Mac) launch from a subdirectory (e.g. Contents/MacOS)
"""
LOGGER = logging.getLogger('osnap_launcher')
path = path_leaf
while path != os.path.dirname(path):
potential_path = os.path.join(path, 'osnapy')
LOGGER.debug("potential_path : %s" % potential_path)
if os.path.exists(potential_path):
LOGGER.debug("Found osnapy at %s", potential_path)
return potential_path
# special directories to follow back 'up'
for d in ['MacOS', 'osnapp']:
potential_path = os.path.join(path, d, 'osnapy')
if os.path.exists(potential_path):
LOGGER.debug("Found osnapy at %s", potential_path)
return potential_path
path = os.path.dirname(path)
return None
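# Illustrative only (paths are made up): for a frozen Mac app laid out as
#   MyApp.app/Contents/MacOS/launch
#   MyApp.app/Contents/MacOS/osnapy/bin/python3
# find_osnapy('/Applications/MyApp.app/Contents/MacOS') returns
# '/Applications/MyApp.app/Contents/MacOS/osnapy'.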
def pick_osnapy(python_folder):
"Find the osnapy directory and chdir to it"
LOGGER = logging.getLogger('osnap_launcher')
potential_paths = []
if len(sys.argv) > 1:
# first, try the folder that contains our target
potential_paths.append(os.path.dirname(sys.argv[1]))
# next, try the folder that contains the launcher
potential_paths.append(os.path.dirname(sys.argv[0]))
# finally, try the folder we are starting from
potential_paths.append(os.getcwd())
LOGGER.debug('looking in %s' % potential_paths)
for potential_path in potential_paths:
osnapy_path = find_osnapy(potential_path)
if osnapy_path:
if os.path.exists(osnapy_path):
os.chdir(os.path.dirname(osnapy_path))
return
def launch():
VERSION = '0.0.6'
LOGGER = logging.getLogger('osnap_launcher')
# conventions
python_folder = 'osnapy'
if platform.system().lower()[0] == 'w':
# windows
python_binary = 'pythonw.exe'
python_path = os.path.join(python_folder, python_binary)
elif platform.system().lower()[0] == 'd':
# macOS/OSX reports 'Darwin'
python_binary = 'python3'
python_path = os.path.join(python_folder, 'bin', python_binary)
else:
raise NotImplementedError
LOGGER.info('launcher version : %s', VERSION)
LOGGER.info('sys.path : %s', sys.path)
LOGGER.info('sys.argv : %s', sys.argv)
LOGGER.info('original cwd : %s', os.getcwd())
pick_osnapy(python_folder)
if not os.path.exists(python_path):
raise Exception('{} does not exist - exiting'.format(python_path))
# set up environment variables (if needed)
if platform.system().lower()[0] == 'w':
env_vars = None
elif platform.system().lower()[0] == 'd':
site_packages_pattern = python_folder + os.sep + 'lib' + os.sep + 'python*' + os.sep + 'site-packages' + os.sep
site_packages_glob = glob.glob(site_packages_pattern)
if len(site_packages_glob) == 0:
raise Exception('"{}" could not be found - exiting'.format(site_packages_pattern))
elif len(site_packages_glob) > 1:
            LOGGER.warning('warning : "%s" yielded multiple results', site_packages_glob)
env_vars = {'PYTHONPATH': site_packages_glob[0]}
else:
raise NotImplementedError("The platform '{}' is not supported by OSNAP yet".format(platform.system()))
call_parameters = ' '.join([python_path, PROGRAM])
LOGGER.info('calling : %s with env=%s', call_parameters, env_vars)
return_code = subprocess.call(call_parameters, env=env_vars, shell=True)
LOGGER.info('return code : %s', return_code)
def main():
logfile = os.path.join(appdirs.user_log_dir(APPLICATION, AUTHOR), 'osnap_launcher.log')
logdir = os.path.dirname(logfile)
if not os.path.exists(logdir):
os.makedirs(logdir)
logging.config.dictConfig({
'version' : 1,
'formatters' : {
'detailed' : {
'format' : '[%(asctime)s] %(levelname)s pid:%(process)d %(name)s:%(lineno)d %(message)s',
                'datefmt'   : '%d/%b/%Y:%H:%M:%S %z',
},
'simple' : {
'format' : '[%(asctime)s] %(levelname)s %(name)s:%(lineno)d %(message)s',
                'datefmt'   : '%d/%b/%Y:%H:%M:%S %z',
},
},
'handlers' : {
'console' : {
'class' : 'logging.StreamHandler',
# 'level' needs to be WARNING or above - if it's DEBUG Windows will try to make a log file for GUI apps,
# which is either an access error or pops up as an annoying dialog box.
'level' : 'ERROR',
'formatter' : 'simple',
},
'file' : {
'class' : 'logging.FileHandler',
'filename' : logfile,
'formatter' : 'detailed',
'level' : 'DEBUG',
},
},
'loggers' : {
'' : {
'handlers' : ['file', 'console'],
'level' : 'DEBUG',
                'propagate' : True,
},
},
'root' : {
'level' : 'DEBUG',
'handlers' : ['file', 'console'],
},
})
logging.getLogger().info("Installed logging")
try:
launch()
return 0
except Exception as e:
logging.getLogger().exception("Unhandled exception in launcher: %s", e)
return 1
if __name__ == '__main__':
sys.exit(main())
| mit | 6,852,532,226,349,078,000 | 35.184524 | 120 | 0.566376 | false |
schnapptack/gskompetenzen | features/gsaudit/migrations/0001_initial.py | 1 | 24302 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'School'
db.create_table('gsaudit_school', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')()),
('modified', self.gf('django.db.models.fields.DateTimeField')()),
('jsondata', self.gf('jsonfield.JSONField')(blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('gsaudit', ['School'])
# Adding model 'Teacher'
db.create_table('gsaudit_teacher', (
('user_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True, primary_key=True)),
('gender', self.gf('django.db.models.fields.CharField')(max_length=1)),
('phone', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('school', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.School'])),
))
db.send_create_signal('gsaudit', ['Teacher'])
# Adding model 'Grade'
db.create_table('gsaudit_grade', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')()),
('modified', self.gf('django.db.models.fields.DateTimeField')()),
('jsondata', self.gf('jsonfield.JSONField')(blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('school', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.School'])),
))
db.send_create_signal('gsaudit', ['Grade'])
# Adding model 'Pupil'
db.create_table('gsaudit_pupil', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')()),
('modified', self.gf('django.db.models.fields.DateTimeField')()),
('jsondata', self.gf('jsonfield.JSONField')(blank=True)),
('first_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('last_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('gender', self.gf('django.db.models.fields.CharField')(max_length=1)),
('picture', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
('street', self.gf('django.db.models.fields.CharField')(max_length=255)),
('zipcode', self.gf('django.db.models.fields.CharField')(max_length=255)),
('city', self.gf('django.db.models.fields.CharField')(max_length=255)),
('phone_home_fix', self.gf('django.db.models.fields.CharField')(max_length=255)),
('phone_mother_business', self.gf('django.db.models.fields.CharField')(max_length=255)),
('phone_mother_mobile', self.gf('django.db.models.fields.CharField')(max_length=255)),
('phone_father_business', self.gf('django.db.models.fields.CharField')(max_length=255)),
('phone_father_mobile', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('gsaudit', ['Pupil'])
# Adding model 'Skill'
db.create_table('gsaudit_skill', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')()),
('modified', self.gf('django.db.models.fields.DateTimeField')()),
('jsondata', self.gf('jsonfield.JSONField')(blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('parent', self.gf('mptt.fields.TreeForeignKey')(blank=True, related_name='children', null=True, to=orm['gsaudit.Skill'])),
('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
))
db.send_create_signal('gsaudit', ['Skill'])
# Adding model 'Subject'
db.create_table('gsaudit_subject', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')()),
('modified', self.gf('django.db.models.fields.DateTimeField')()),
('jsondata', self.gf('jsonfield.JSONField')(blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('skill', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.Skill'])),
))
db.send_create_signal('gsaudit', ['Subject'])
# Adding model 'Audit'
db.create_table('gsaudit_audit', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')()),
('modified', self.gf('django.db.models.fields.DateTimeField')()),
('jsondata', self.gf('jsonfield.JSONField')(blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('grade', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.Grade'])),
('subject', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.Subject'])),
('date', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal('gsaudit', ['Audit'])
# Adding model 'GradeParticipant'
db.create_table('gsaudit_gradeparticipant', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')()),
('modified', self.gf('django.db.models.fields.DateTimeField')()),
('jsondata', self.gf('jsonfield.JSONField')(blank=True)),
('grade', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.Grade'])),
('pupil', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.Pupil'])),
))
db.send_create_signal('gsaudit', ['GradeParticipant'])
# Adding model 'PupilAuditSkill'
db.create_table('gsaudit_pupilauditskill', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')()),
('modified', self.gf('django.db.models.fields.DateTimeField')()),
('jsondata', self.gf('jsonfield.JSONField')(blank=True)),
('rating', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('note', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('pupil', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.Pupil'])),
('audit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.Audit'])),
('skill', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.Skill'])),
))
db.send_create_signal('gsaudit', ['PupilAuditSkill'])
# Adding model 'AuditSkill'
db.create_table('gsaudit_auditskill', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')()),
('modified', self.gf('django.db.models.fields.DateTimeField')()),
('jsondata', self.gf('jsonfield.JSONField')(blank=True)),
('weight', self.gf('django.db.models.fields.IntegerField')(default=1)),
('audit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.Audit'])),
('skill', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.Skill'])),
))
db.send_create_signal('gsaudit', ['AuditSkill'])
# Adding model 'TeachingAssignment'
db.create_table('gsaudit_teachingassignment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')()),
('modified', self.gf('django.db.models.fields.DateTimeField')()),
('jsondata', self.gf('jsonfield.JSONField')(blank=True)),
('teacher', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.Teacher'])),
('grade', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.Grade'])),
('subject', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.Subject'])),
))
db.send_create_signal('gsaudit', ['TeachingAssignment'])
# Adding model 'PupilTAInfo'
db.create_table('gsaudit_pupiltainfo', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')()),
('modified', self.gf('django.db.models.fields.DateTimeField')()),
('jsondata', self.gf('jsonfield.JSONField')(blank=True)),
('info', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('pupil', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.Pupil'])),
('teaching_assignment', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gsaudit.TeachingAssignment'])),
))
db.send_create_signal('gsaudit', ['PupilTAInfo'])
# Adding unique constraint on 'PupilTAInfo', fields ['pupil', 'teaching_assignment']
db.create_unique('gsaudit_pupiltainfo', ['pupil_id', 'teaching_assignment_id'])
def backwards(self, orm):
# Removing unique constraint on 'PupilTAInfo', fields ['pupil', 'teaching_assignment']
db.delete_unique('gsaudit_pupiltainfo', ['pupil_id', 'teaching_assignment_id'])
# Deleting model 'School'
db.delete_table('gsaudit_school')
# Deleting model 'Teacher'
db.delete_table('gsaudit_teacher')
# Deleting model 'Grade'
db.delete_table('gsaudit_grade')
# Deleting model 'Pupil'
db.delete_table('gsaudit_pupil')
# Deleting model 'Skill'
db.delete_table('gsaudit_skill')
# Deleting model 'Subject'
db.delete_table('gsaudit_subject')
# Deleting model 'Audit'
db.delete_table('gsaudit_audit')
# Deleting model 'GradeParticipant'
db.delete_table('gsaudit_gradeparticipant')
# Deleting model 'PupilAuditSkill'
db.delete_table('gsaudit_pupilauditskill')
# Deleting model 'AuditSkill'
db.delete_table('gsaudit_auditskill')
# Deleting model 'TeachingAssignment'
db.delete_table('gsaudit_teachingassignment')
# Deleting model 'PupilTAInfo'
db.delete_table('gsaudit_pupiltainfo')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'gsaudit.audit': {
'Meta': {'object_name': 'Audit'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'grade': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Grade']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Subject']"})
},
'gsaudit.auditskill': {
'Meta': {'object_name': 'AuditSkill'},
'audit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Audit']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Skill']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'gsaudit.grade': {
'Meta': {'object_name': 'Grade'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.School']"})
},
'gsaudit.gradeparticipant': {
'Meta': {'object_name': 'GradeParticipant'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'grade': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Grade']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'pupil': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Pupil']"})
},
'gsaudit.pupil': {
'Meta': {'ordering': "('first_name', 'last_name')", 'object_name': 'Pupil'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'phone_father_business': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phone_father_mobile': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phone_home_fix': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phone_mother_business': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phone_mother_mobile': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'gsaudit.pupilauditskill': {
'Meta': {'object_name': 'PupilAuditSkill'},
'audit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Audit']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pupil': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Pupil']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Skill']"})
},
'gsaudit.pupiltainfo': {
'Meta': {'unique_together': "(('pupil', 'teaching_assignment'),)", 'object_name': 'PupilTAInfo'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'pupil': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Pupil']"}),
'teaching_assignment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.TeachingAssignment']"})
},
'gsaudit.school': {
'Meta': {'object_name': 'School'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'gsaudit.skill': {
'Meta': {'object_name': 'Skill'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['gsaudit.Skill']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'gsaudit.subject': {
'Meta': {'object_name': 'Subject'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Skill']"})
},
'gsaudit.teacher': {
'Meta': {'object_name': 'Teacher', '_ormbases': ['auth.User']},
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.School']"}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'gsaudit.teachingassignment': {
'Meta': {'object_name': 'TeachingAssignment'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'grade': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Grade']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Subject']"}),
'teacher': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Teacher']"})
}
}
complete_apps = ['gsaudit']
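# Illustrative only (South-era Django commands, assumed from that toolchain):
#   python manage.py migrate gsaudit                    # apply this migration
#   python manage.py schemamigration gsaudit --auto     # generate the next one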
| agpl-3.0 | -7,491,058,684,858,263,000 | 63.291005 | 182 | 0.571393 | false |
cwells/franz | lib/prompt_repl.py | 1 | 2968 | import os
import time
from pprint import pprint
from pathlib import Path
from prompt_toolkit import prompt, AbortAction
from prompt_toolkit.contrib.completers import WordCompleter
from prompt_toolkit.history import FileHistory
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.styles import style_from_pygments
from prompt_toolkit.token import Token
from prompt_toolkit.layout.lexers import PygmentsLexer
from lark.common import UnexpectedToken
from lark.lexer import UnexpectedInput
from .pygments import FranzLexer, repl_styles
def prepare_style(style):
style.styles.update({
# menu
Token.Menu.Completions.Completion.Current: 'bg:#00aaaa #000000',
Token.Menu.Completions.Completion: 'bg:#008888 #ffffff',
Token.Menu.Completions.ProgressButton: 'bg:#003333',
Token.Menu.Completions.ProgressBar: 'bg:#00aaaa',
# toolbar
Token.Toolbar: '#ffffff bg:#333333',
# prompt
Token.Prompt: '#444444',
Token.Continuation: '#333333',
})
return style_from_pygments(style)
def get_prompt_tokens(cli):
return [ (Token.Prompt, '>>> ') ]
def get_continuation_tokens(cli, width):
return [(Token.Continuation, '.' * (width - 1))]
def repl(parser, interpreter, style_name='default'):
print("Franz v0.0\n")
history_file = os.path.join(str(Path.home()), '.franz-history')
history = FileHistory(history_file)
toolbar_value = 'Press [Alt+Enter] to evaluate an expression. [Ctrl+d] to exit. History saved in %s.' % history_file
style = prepare_style(repl_styles[style_name])
while True:
name_completer = WordCompleter(sorted(interpreter.context))
code = prompt(
multiline = True,
completer = name_completer,
history = history,
style = style,
mouse_support = False,
lexer = PygmentsLexer(FranzLexer),
auto_suggest = AutoSuggestFromHistory(),
on_abort = AbortAction.RETRY,
patch_stdout = True,
true_color = True,
get_bottom_toolbar_tokens = lambda cli: [(Token.Toolbar, toolbar_value)],
get_prompt_tokens = get_prompt_tokens,
get_continuation_tokens = get_continuation_tokens,
)
if not code.strip(): continue
if code == '\\var':
pprint(interpreter.context)
continue
try:
ast = parser.parse(code)
except (UnexpectedToken, UnexpectedInput) as e:
toolbar_value = str(e)
continue
try:
start_eval_time = time.time()
retval = interpreter.eval(ast)
except Exception as e:
toolbar_value = "Error: %s" % e.args
continue
else:
toolbar_value = "Time: {:0.4f}, Value: {}".format(time.time() - start_eval_time, str(retval))
| mit | 8,384,828,813,928,798,000 | 33.114943 | 120 | 0.622305 | false |
CSD-Public/stonix | src/stonix_resources/ServiceHelper.py | 1 | 28129 | ###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative works, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
"""
Created on Aug 9, 2012
@author: Dave Kennel
@change: 2015/10/15 Eric Ball Added method names to debug output
@change: 2015/10/15 Eric Ball disableservice now checks audit and isrunning
@change: 2016/06/10 Dave Kennel wrapped audit in try catch in case service is not
installed.
@change: 2016/11/03 Roy Nielsen upgrading the interface to allow for more flexibility.
@change: 2017/01/31 Breen Malmberg clarified the difference between auditservice
and isrunning methods in the documentation; clarified the nature of the
two parameters in each of those methods in the doc strings as well
@author: 2017-23-2017 Roy Nielsen modified/simplified to second generation
service helper with **kwargs concept
@change: 2018/02/27 Brandon Gonzales Fixed Traceback caused by using self.logger
@change: 2019/04/17 dwalker - added submethods getStartCommand, getStopCommand,
getEnableCommand, getDisableCommand for use by rule when recording
change events
@change: 2019/07/30 Brandon R. Gonzales - Add conditional for chkconfig systems
to make sure that the 'service' command is available
"""
import os
import re
from stonix_resources import SHchkconfig
from stonix_resources import SHrcupdate
from stonix_resources import SHupdaterc
from stonix_resources import SHsystemctl
from stonix_resources import SHsvcadm
from stonix_resources import SHrcconf
from stonix_resources import SHlaunchd
from stonix_resources import SHlaunchdTwo
from stonix_resources.logdispatcher import LogPriority
from stonix_resources.get_libc import getLibc
class ServiceHelper(object):
'''The ServiceHelper class serves as an abstraction layer between rules that
need to manipulate services and the actual implementation of changing
service status on various operating systems.
@Note: Interface methods abstracted to allow for different parameter
lists for different helpers. This moves the requirement for
    input validation to the concrete helpers.
@author: Dave Kennel
'''
def __init__(self, environ, logger):
"""
The ServiceHelper needs to receive the STONIX environment and
logdispatcher objects as parameters to init.
@param environ: environment object reference
@param logger: logdispatcher object reference
@author: ???
@change: Breen Malmberg - 1/24/2017 - doc string edit
@change: Breen Malmberg - 2/27/2018 - added libc object instantiation and
        call to libc function .sync() to fix an issue with mac os x caching
        related to disable and enable service functions
"""
self.environ = environ
self.logdispatcher = logger
self.isHybrid = False
self.isdualparameterservice = False
self.svchelper = None
self.secondary = None
self.service = ""
self.servicename = ""
try:
self.lc = getLibc()
except Exception as err:
self.logdispatcher.log(LogPriority.ERROR, str(err))
raise
systemctl_paths = ["/usr/bin/systemctl", "/bin/systemctl"]
# Red Hat, CentOS, SUSE
foundchkconfig = os.path.exists('/sbin/chkconfig')
foundservice = os.path.exists('/sbin/service') or \
os.path.exists('/usr/sbin/service')
if foundchkconfig and foundservice:
ischkconfig = True
else:
ischkconfig = False
# Gentoo
if os.path.exists('/sbin/rc-update'):
isrcupdate = True
else:
isrcupdate = False
# Ubuntu, Debian
if os.path.exists('/usr/sbin/update-rc.d'):
isupdaterc = True
else:
isupdaterc = False
# Fedora, RHEL 7
if any(os.path.exists(p) for p in systemctl_paths):
issystemctl = True
else:
issystemctl = False
# Solaris
if os.path.exists('/usr/sbin/svcadm'):
issvcadm = True
else:
issvcadm = False
# FreeBSD
if os.path.exists('/etc/rc.conf') and \
os.path.exists('/etc/rc.d/LOGIN'):
isrcconf = True
else:
isrcconf = False
# OS X
if os.path.exists('/sbin/launchd'):
islaunchd = True
self.isdualparameterservice = True
else:
islaunchd = False
truecount = 0
for svctype in [ischkconfig, isrcupdate, isupdaterc,
issystemctl, issvcadm, isrcconf, islaunchd]:
if svctype:
truecount += 1
if truecount == 0:
raise RuntimeError("Could not identify service management programs")
elif truecount == 1:
if ischkconfig:
self.svchelper = SHchkconfig.SHchkconfig(self.environ,
self.logdispatcher)
elif isrcupdate:
self.svchelper = SHrcupdate.SHrcupdate(self.environ,
self.logdispatcher)
elif isupdaterc:
self.svchelper = SHupdaterc.SHupdaterc(self.environ,
self.logdispatcher)
elif issystemctl:
self.svchelper = SHsystemctl.SHsystemctl(self.environ,
self.logdispatcher)
elif issvcadm:
self.svchelper = SHsvcadm.SHsvcadm(self.environ,
self.logdispatcher)
elif isrcconf:
self.svchelper = SHrcconf.SHrcconf(self.environ,
self.logdispatcher)
elif islaunchd:
if re.match("10.11", self.environ.getosver()):
self.svchelper = SHlaunchd.SHlaunchd(self.environ,
self.logdispatcher)
else:
self.svchelper = SHlaunchdTwo.SHlaunchdTwo(self.environ,
self.logdispatcher)
else:
raise RuntimeError("Could not identify service management programs")
elif truecount > 1:
self.isHybrid = True
count = 0
if issystemctl:
self.svchelper = SHsystemctl.SHsystemctl(self.environ,
self.logdispatcher)
count = 1
if ischkconfig:
if count == 0:
self.svchelper = SHchkconfig.SHchkconfig(self.environ,
self.logdispatcher)
count = 1
elif count == 1:
self.secondary = SHchkconfig.SHchkconfig(self.environ,
self.logdispatcher)
if isrcupdate:
if count == 0:
self.svchelper = SHrcupdate.SHrcupdate(self.environ,
self.logdispatcher)
count = 1
elif count == 1:
self.secondary = SHrcupdate.SHrcupdate(self.environ,
self.logdispatcher)
if isupdaterc:
if count == 0:
self.svchelper = SHupdaterc.SHupdaterc(self.environ,
self.logdispatcher)
count = 1
elif count == 1:
self.secondary = SHupdaterc.SHupdaterc(self.environ,
self.logdispatcher)
if issvcadm:
if count == 0:
self.svchelper = SHsvcadm.SHsvcadm(self.environ,
self.logdispatcher)
count = 1
elif count == 1:
self.secondary = SHsvcadm.SHsvcadm(self.environ,
self.logdispatcher)
if isrcconf:
if count == 0:
self.svchelper = SHrcconf.SHrcconf(self.environ,
self.logdispatcher)
count = 1
elif count == 1:
self.secondary = SHrcconf.SHrcconf(self.environ,
self.logdispatcher)
if islaunchd:
self.svchelper = SHlaunchd.SHlaunchd(self.environ,
self.logdispatcher)
count = 1
self.logdispatcher.log(LogPriority.DEBUG,
'ischkconfig: ' + str(ischkconfig))
self.logdispatcher.log(LogPriority.DEBUG,
'isrcupdate: ' + str(isrcupdate))
self.logdispatcher.log(LogPriority.DEBUG,
'isupdaterc: ' + str(isupdaterc))
self.logdispatcher.log(LogPriority.DEBUG,
'issystemctl: ' + str(issystemctl))
self.logdispatcher.log(LogPriority.DEBUG,
'issvcadm: ' + str(issvcadm))
self.logdispatcher.log(LogPriority.DEBUG,
'isrcconf: ' + str(isrcconf))
self.logdispatcher.log(LogPriority.DEBUG,
'ishybrid: ' + str(self.isHybrid))
self.logdispatcher.log(LogPriority.DEBUG,
'isdualparameterservice: ' +
str(self.isdualparameterservice))
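        # Illustrative of the detection above (not in the original source): on
        # a RHEL/CentOS 7 host both systemctl and chkconfig are present, so
        # truecount == 2, self.isHybrid is True, self.svchelper is set to
        # SHsystemctl and self.secondary to SHchkconfig.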
def getService(self):
'''
:returns: self.service
:rtype: string
@author: Roy Nielsen
'''
return self.service
def getServiceName(self):
'''
:returns: self.servicename
:rtype: string
@author: Roy Nielsen
'''
return self.servicename
def isServiceVarValid(self, service):
'''Input validator for the service variable
@author: Roy Nielsen
:param service:
:returns: serviceValid
:rtype: bool
'''
serviceValid = False
self.logdispatcher.log(LogPriority.DEBUG, "Validating service name")
try:
#####
# Generic factory input validation, only for "service", the
# rest of the parameters need to be validated by the concrete
# service helper instance.
if not isinstance(service, str):
raise TypeError("Service: " + str(service) +
" is not a string as expected.")
elif not service: # if service is an empty string
raise ValueError('service specified is blank. ' +
'No action will be taken!')
            elif service:  # service is a string of one or more characters
self.logdispatcher.log(LogPriority.DEBUG,
'Service name set to: ' + service)
serviceValid = True
except Exception as err:
self.logdispatcher.log(LogPriority.DEBUG, str(err))
raise
return serviceValid
def setService(self, service, **kwargs):
'''Update the name of the service being worked with.
:param service:
:param **kwargs:
:returns: setServiceSuccess
:rtype: bool
@author: Roy Nielsen
'''
self.logdispatcher.log(LogPriority.DEBUG, "Setting service name")
setServiceSuccess = True
servicenames = ["servicename", "serviceName"]
self.servicename = ""
if self.isServiceVarValid(service):
self.service = service
else:
setServiceSuccess = False
self.service = ""
for sn in servicenames:
if sn in kwargs:
try:
self.servicename = kwargs.get(sn)
except Exception as err:
setServiceSuccess = False
self.logdispatcher.log(LogPriority.DEBUG, str(err))
break
if not setServiceSuccess:
self.logdispatcher.log(LogPriority.DEBUG, "Failed to set service name")
return setServiceSuccess
def disableService(self, service, **kwargs):
'''Disables the service and terminates it if it is running.
:param service: string: Name of the service to be disabled
:param **kwargs:
:returns: Bool indicating success status
'''
disabled = True
systemctl_disabled = False
chkconfig_disabled = False
audit_success = True
if self.setService(service):
service = self.getService()
self.logdispatcher.log(LogPriority.DEBUG, "Disabling service: " + service)
if self.isHybrid:
systemctl_disabled = self.svchelper.disableService(service, **kwargs)
chkconfig_disabled = self.secondary.disableService(service, **kwargs)
disabled = bool(systemctl_disabled or chkconfig_disabled)
else:
disabled = self.svchelper.disableService(service, **kwargs)
else:
disabled = False
# sync OS cache to filesystem (force write)
# this was added to eliminate the delay on mac between
# issuing the service disable command and when the service
# actually gets disabled
try:
self.lc.sync()
except Exception:
raise
if self.isHybrid:
if bool(self.svchelper.auditService(service, **kwargs) or self.secondary.auditService(service, **kwargs)):
audit_success = False
else:
if self.svchelper.auditService(service, **kwargs):
audit_success = False
if not audit_success:
self.logdispatcher.log(LogPriority.DEBUG, "Post-disable audit indicates service not actually disabled after operation")
disabled = False
if disabled:
self.logdispatcher.log(LogPriority.DEBUG, "Successfully disabled service: " + service)
else:
self.logdispatcher.log(LogPriority.DEBUG, "Failed to disable service: " + service)
return disabled
def enableService(self, service, **kwargs):
        '''Enables a service and starts it if it is not running, as long as we are
        not in install mode.
        :param service: string: Name of the service to be enabled
:param **kwargs:
:returns: enabledSuccess
:rtype: bool
@author: Roy Nielsen
'''
enabledSuccess = True
systemctl_enabled = False
chkconfig_enabled = False
audit_success = False
if self.setService(service):
service = self.getService()
self.logdispatcher.log(LogPriority.DEBUG, "Enabling service: " + service)
if self.isHybrid:
systemctl_enabled = self.svchelper.enableService(service, **kwargs)
chkconfig_enabled = self.secondary.enableService(service, **kwargs)
enabledSuccess = bool(systemctl_enabled or chkconfig_enabled)
else:
enabledSuccess = self.svchelper.enableService(service, **kwargs)
else:
enabledSuccess = False
# sync OS cache to filesystem (force write)
# this was added to eliminate the delay on mac between
# issuing the service enable command and when the service
# actually gets enabled
try:
self.lc.sync()
except Exception:
raise
if self.isHybrid:
audit_success = bool(self.svchelper.auditService(service, **kwargs) or self.secondary.auditService(service, **kwargs))
else:
audit_success = self.svchelper.auditService(service, **kwargs)
if not audit_success:
self.logdispatcher.log(LogPriority.DEBUG, "Post-enable audit indicates service not actually enabled after operation")
enabledSuccess = False
if enabledSuccess:
self.logdispatcher.log(LogPriority.DEBUG, "Successfully enabled service: " + service)
else:
self.logdispatcher.log(LogPriority.DEBUG, "Failed to enable service: " + service)
return enabledSuccess
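    # Illustrative usage from a rule (not part of this module; service and
    # launchd names are made-up examples):
    #
    #     sh = ServiceHelper(self.environ, self.logger)
    #     if not sh.auditService('sshd.service'):
    #         sh.enableService('sshd.service')
    #     # on macOS a launchd label can be passed via the servicename kwarg:
    #     sh.enableService('/System/Library/LaunchDaemons/ssh.plist',
    #                      servicename='com.openssh.sshd')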
def auditService(self, service, **kwargs):
        '''Checks the status of a service and returns a bool indicating whether
        or not the service is configured to run.
        :param service: string: Name of the service to be audited
:param **kwargs:
:returns: enabled
:rtype: bool
@author: Roy Nielsen
'''
enabled = True
systemctl_audit = False
chkconfig_audit = False
if self.setService(service):
service = self.getService()
self.logdispatcher.log(LogPriority.DEBUG, "Checking configuration status of service: " + service)
if self.isHybrid:
systemctl_audit = self.svchelper.auditService(service, **kwargs)
chkconfig_audit = self.secondary.auditService(service, **kwargs)
enabled = bool(systemctl_audit or chkconfig_audit)
else:
enabled = self.svchelper.auditService(service, **kwargs)
else:
enabled = False
if enabled:
self.logdispatcher.log(LogPriority.DEBUG, "Service: " + service + " is ENABLED")
else:
self.logdispatcher.log(LogPriority.DEBUG, "Service: " + service + " is DISABLED")
return enabled
def isRunning(self, service, **kwargs):
        '''Check to see if a service is currently running. The enableService
        method uses this so that we do not try to start a service that is
        already running.
        :param service: string: Name of the service to be checked
:param **kwargs:
:returns: isRunning
:rtype: bool
@author: Roy Nielsen
'''
isrunning = True
systemctl_running = False
chkconfig_running = False
if self.setService(service):
service = self.getService()
self.logdispatcher.log(LogPriority.DEBUG, "Checking run status of service: " + service)
if self.isHybrid:
systemctl_running = self.svchelper.isRunning(service, **kwargs)
chkconfig_running = self.secondary.isRunning(service, **kwargs)
isrunning = bool(systemctl_running or chkconfig_running)
else:
isrunning = self.svchelper.isRunning(service, **kwargs)
else:
isrunning = False
if isrunning:
self.logdispatcher.log(LogPriority.DEBUG, "Service: " + service + " IS running")
else:
self.logdispatcher.log(LogPriority.DEBUG, "Service: " + service + " is NOT running")
return isrunning
def reloadService(self, service, **kwargs):
        '''Reload (HUP) a service so that it re-reads its config files. Called
by rules that are configuring a service to make the new configuration
active. This method ignores services that do not return true when
self.isrunning() is called. The assumption being that this method is
being called due to a change in a conf file, and a service that isn't
currently running will pick up the change when (if) it is started.
        :param service: string: Name of the service to be reloaded
:param **kwargs:
:returns: reloadSuccess
:rtype: bool
@author: Roy Nielsen
'''
servicenames = ["serviceName", "servicename"]
self.servicename = ""
reloadSuccess = True
systemctl_reload = False
chkconfig_reload = False
for sn in servicenames:
if sn in kwargs:
self.servicename = kwargs.get(sn)
break
if self.setService(service, servicename=self.servicename):
service = self.getService()
self.logdispatcher.log(LogPriority.DEBUG, "Reloading service: " + service)
if self.isHybrid:
systemctl_reload = self.svchelper.reloadService(service, **kwargs)
chkconfig_reload = self.secondary.reloadService(service, **kwargs)
reloadSuccess = bool(systemctl_reload or chkconfig_reload)
else:
reloadSuccess = self.svchelper.reloadService(service, **kwargs)
else:
reloadSuccess = False
if reloadSuccess:
try:
self.lc.sync()
except Exception:
raise
self.logdispatcher.log(LogPriority.DEBUG, "Successfully reloaded service: " + service)
else:
self.logdispatcher.log(LogPriority.DEBUG, "Failed to reload service: " + service)
return reloadSuccess
def listServices(self):
'''List the services installed on the system.
:returns: serviceList
:rtype: list
@author: Roy Nielsen
'''
self.logdispatcher.log(LogPriority.DEBUG, "Getting list of services")
errmsg = ""
try:
if self.isHybrid:
self.svchelper_services = self.svchelper.listServices()
self.secondary_services = self.secondary.listServices()
serviceList = self.svchelper_services + self.secondary_services
else:
self.svchelper_services = self.svchelper.listServices()
serviceList = self.svchelper_services
except Exception as err:
serviceList = []
errmsg = str(err)
if not serviceList:
self.logdispatcher.log(LogPriority.DEBUG, "Failed to get service list")
if errmsg:
self.logdispatcher.log(LogPriority.DEBUG, errmsg)
return serviceList
def startService(self, service, **kwargs):
'''start the given service
:param service: string; name of service
:param kwargs: return: started
:param **kwargs:
:returns: started
:rtype: bool
@author: Breen Malmberg
'''
self.logdispatcher.log(LogPriority.DEBUG, "Starting service: " + service)
started = True
primstart = False
running_success = False
try:
if self.isHybrid:
primstart = self.svchelper.startService(service, **kwargs)
secondstart = self.secondary.startService(service, **kwargs)
started = bool(primstart or secondstart)
else:
if not self.svchelper.startService(service, **kwargs):
started = False
# if one helper does not have the start method then rely on the other one
except AttributeError:
started = primstart
# any other exception, raise
except:
raise
if self.isHybrid:
running_success = bool(self.svchelper.isRunning(service, **kwargs) or self.secondary.isRunning(service, **kwargs))
else:
running_success = self.svchelper.isRunning(service, **kwargs)
if not running_success:
self.logdispatcher.log(LogPriority.DEBUG, "Post-start isrunning check indicates service not actually running after operation")
started = False
if started:
self.logdispatcher.log(LogPriority.DEBUG, "Successfully started service: " + service)
else:
self.logdispatcher.log(LogPriority.DEBUG, "Failed to start service: " + service)
return started
def stopService(self, service, **kwargs):
'''stop the given service
:param service: string; name of service
:param kwargs: return: stopped
:param **kwargs:
:returns: stopped
:rtype: bool
@author: Breen Malmberg
'''
self.logdispatcher.log(LogPriority.DEBUG, "Stopping service: " + service)
stopped = True
primstop = False
running_success = True
try:
if self.isHybrid:
primstop = self.svchelper.stopService(service, **kwargs)
secondstop = self.secondary.stopService(service, **kwargs)
stopped = bool(primstop or secondstop)
else:
if not self.svchelper.stopService(service, **kwargs):
stopped = False
# if one helper does not have the stop method then rely on the other one
except AttributeError:
stopped = primstop
# any other exception, raise
except:
raise
if self.isHybrid:
if bool(self.svchelper.isRunning(service, **kwargs) or self.secondary.isRunning(service, **kwargs)):
running_success = False
else:
if self.svchelper.isRunning(service, **kwargs):
running_success = False
if not running_success:
self.logdispatcher.log(LogPriority.DEBUG, "Post-stop isrunning check indicates service not actually stopped after operation")
stopped = False
if stopped:
self.logdispatcher.log(LogPriority.DEBUG, "Successfully stopped service: " + service)
else:
self.logdispatcher.log(LogPriority.DEBUG, "Failed to stop service: " + service)
return stopped
def getStartCommand(self, service):
'''retrieve the start command. Mostly used by event recording
:param service:
:returns: string - start command
@author: dwalker
'''
return self.svchelper.getStartCommand(service)
def getStopCommand(self, service):
'''retrieve the stop command. Mostly used by event recording
:param service:
:returns: string - stop command
@author: dwalker
'''
return self.svchelper.getStopCommand(service)
def getEnableCommand(self, service):
'''retrieve the enable command. Mostly used by event recording
:param service:
:returns: string - enable command
@author: dwalker
'''
return self.svchelper.getEnableCommand(service)
def getDisableCommand(self, service):
'''retrieve the start command. Mostly used by event recording
:param service:
:returns: string - disable command
@author: dwalker
'''
return self.svchelper.getDisableCommand(service) | gpl-2.0 | 3,988,485,208,477,613,000 | 35.203346 | 138 | 0.567528 | false |
5j9/wikitextparser | tests/test_cell.py | 1 | 2603 | from wikitextparser import parse
# noinspection PyProtectedMember
from wikitextparser._table import Cell, Table
def test_value():
c = Cell('\n| a ')
assert ' a ' == c.value
assert repr(c) == 'Cell(\'\\n| a \')'
assert c.attrs == {}
# Use _cached_attrs
assert c.attrs == {}
# Inline _header cell
c = Cell('|| 01 ', True)
assert c.value == ' 01 '
# Inline non-_header cell
c = Cell('|| 01 ', False)
assert c.value == ' 01 '
# Set a new value
c.value = '\na\na'
assert c.value == '\na\na'
def test_has_get():
c = Cell('\n! n="v" | 00', True)
assert c.has_attr('n')
assert c.get_attr('n') == 'v'
def test_set_overwrite():
c = Cell('\n! n=v | 00', True)
# Set a new value for an existing attribute
c.set_attr('n', 'w')
# Set a new attribute
c.set_attr('n2', 'v2')
assert c.string == '\n! n="w" n2="v2" | 00'
def test_newline_cell_no_attr_span_set():
c = Cell('\n! 00', True)
c.set_attr('n', 'v')
assert c.string == '\n! n="v" | 00'
c = Cell('\n! 00', True)
c.set_attr('n', '')
assert c.string == '\n! n | 00'
def test_inline_cell_no_attr_span_set():
c = Cell('!! 00', True)
c.set_attr('n', 'v')
assert c.string == '!! n="v" | 00'
c = Cell('!! 00', True)
c.set_attr('n', '')
assert c.string == '!! n | 00'
def test_space_or_quote_at_set_boundary():
c = Cell('!!n=v|', True)
c.set_attr('m', 'w')
assert c.string == '!!n=v m="w"|'
c = Cell('!! n=v |', True)
c.set_attr('m', 'w')
assert c.string == '!! n=v m="w" |'
def test_delete():
c = Cell('!!n=v|', True)
c.del_attr('n')
assert c.string == '!!|'
c = Cell('!!n=v1 m=w n="v2"|', True)
c.del_attr('n')
assert c.string == '!! m=w|'
# Test removing a non-existing attribute
c.del_attr('n')
def test_update_match_from_shadow():
t = Table('{|class=wikitable\n|{{text|s}}\n|}')
c = t.cells(0, 0)
assert c.value == '{{text|s}}'
t = c.templates[0]
t.arguments[0].value = 't'
assert c.value == '{{text|t}}'
def test_cached_attrs_expiry():
"""_cached_attrs should expire when _match_cache is updated."""
c = Cell('\n!v', True)
# Fill _match_cache and _attrs_match_cache
assert c.attrs == {}
# Invalidate both caches
c.insert(2, 'a|')
# Update _match_cache
assert c.value == 'v'
# _attrs_match_cache should not be valid
assert c.attrs == {'a': ''}
def test_cell_attrs_using_table_match():
c = parse('text\n{|\n!a=b| c\n|}').tables[0].cells(0, 0)
assert c.attrs == {'a': 'b'}
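# Illustrative only: these are plain pytest-style tests, runnable with e.g.
#   pytest tests/test_cell.py -q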
| gpl-3.0 | 3,365,345,734,659,423,700 | 24.752475 | 67 | 0.527105 | false |
Azure/azure-sdk-for-python | sdk/elastic/azure-mgmt-elastic/azure/mgmt/elastic/aio/operations/_monitors_operations.py | 1 | 28027 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MonitorsOperations:
"""MonitorsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.elastic.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> AsyncIterable["_models.ElasticMonitorResourceListResponse"]:
"""List all monitors under the specified subscription.
List all monitors under the specified subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ElasticMonitorResourceListResponse or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.elastic.models.ElasticMonitorResourceListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ElasticMonitorResourceListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ElasticMonitorResourceListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ResourceProviderDefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Elastic/monitors'} # type: ignore
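    # Illustrative call pattern (not part of the generated file), assuming an
    # authenticated management client that exposes this operation group as
    # `client.monitors`:
    #
    #     async for monitor in client.monitors.list():
    #         print(monitor.name)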
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.ElasticMonitorResourceListResponse"]:
"""List all monitors under the specified resource group.
List all monitors under the specified resource group.
:param resource_group_name: The name of the resource group to which the Elastic resource
belongs.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ElasticMonitorResourceListResponse or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.elastic.models.ElasticMonitorResourceListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ElasticMonitorResourceListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ElasticMonitorResourceListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ResourceProviderDefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Elastic/monitors'} # type: ignore
async def get(
self,
resource_group_name: str,
monitor_name: str,
**kwargs
) -> "_models.ElasticMonitorResource":
"""Get the properties of a specific monitor resource.
Get the properties of a specific monitor resource.
:param resource_group_name: The name of the resource group to which the Elastic resource
belongs.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ElasticMonitorResource, or the result of cls(response)
:rtype: ~azure.mgmt.elastic.models.ElasticMonitorResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ElasticMonitorResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ResourceProviderDefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ElasticMonitorResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Elastic/monitors/{monitorName}'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
monitor_name: str,
body: Optional["_models.ElasticMonitorResource"] = None,
**kwargs
) -> "_models.ElasticMonitorResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ElasticMonitorResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'ElasticMonitorResource')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ResourceProviderDefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ElasticMonitorResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ElasticMonitorResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Elastic/monitors/{monitorName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
monitor_name: str,
body: Optional["_models.ElasticMonitorResource"] = None,
**kwargs
) -> AsyncLROPoller["_models.ElasticMonitorResource"]:
"""Create a monitor resource.
Create a monitor resource.
:param resource_group_name: The name of the resource group to which the Elastic resource
belongs.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:param body: Elastic monitor resource model.
:type body: ~azure.mgmt.elastic.models.ElasticMonitorResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ElasticMonitorResource or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.elastic.models.ElasticMonitorResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ElasticMonitorResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
monitor_name=monitor_name,
body=body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ElasticMonitorResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Elastic/monitors/{monitorName}'} # type: ignore
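    # --- Hedged usage sketch (added; not part of the generated SDK code) ---
    # `begin_create` returns an AsyncLROPoller; awaiting `.result()` waits for
    # the long-running ARM operation to finish (names below are hypothetical):
    #
    #     poller = await client.monitors.begin_create(
    #         resource_group_name='my-rg',
    #         monitor_name='my-monitor',
    #         body=monitor_resource,
    #     )
    #     monitor = await poller.result()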
async def update(
self,
resource_group_name: str,
monitor_name: str,
body: Optional["_models.ElasticMonitorResourceUpdateParameters"] = None,
**kwargs
) -> "_models.ElasticMonitorResource":
"""Update a monitor resource.
Update a monitor resource.
:param resource_group_name: The name of the resource group to which the Elastic resource
belongs.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:param body: Elastic resource model update parameters.
:type body: ~azure.mgmt.elastic.models.ElasticMonitorResourceUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ElasticMonitorResource, or the result of cls(response)
:rtype: ~azure.mgmt.elastic.models.ElasticMonitorResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ElasticMonitorResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'ElasticMonitorResourceUpdateParameters')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ResourceProviderDefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ElasticMonitorResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Elastic/monitors/{monitorName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
monitor_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ResourceProviderDefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Elastic/monitors/{monitorName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
monitor_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Delete a monitor resource.
Delete a monitor resource.
:param resource_group_name: The name of the resource group to which the Elastic resource
belongs.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
monitor_name=monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Elastic/monitors/{monitorName}'} # type: ignore
| mit | 6,675,997,211,549,810,000 | 48.256591 | 185 | 0.647733 | false |
tek/pytek | tek/config/errors.py | 1 | 2321 | from tek.errors import TException
class ConfigError(TException):
pass
class NoSuchOptionError(ConfigError):
def __init__(self, key):
super(NoSuchOptionError, self).__init__('No such config option: %s' % key)
class DuplicateDefaultError(ConfigError):
def __init__(self, key):
super(DuplicateDefaultError, self).__init__('Default config option already set: %s' % key)
class MultipleSectionsWithKeyError(ConfigError):
def __init__(self, key):
super(MultipleSectionsWithKeyError, self).__init__('More than one section contain an option with the value %s' % key)
class DuplicateFileSectionError(ConfigError):
def __init__(self, section):
super(DuplicateFileSectionError, self).__init__(
'Config file section \'%s\' already added!' % section)
class DuplicateDefaultSectionError(ConfigError):
def __init__(self, section):
super(DuplicateDefaultSectionError, self).__init__(
'Config defaults section \'%s\' already added!' % section)
class NoSuchConfigError(ConfigError):
def __init__(self, name):
super(NoSuchConfigError, self).__init__('No Configurable registered under the name %s' % name)
class NoSuchSectionError(ConfigError):
def __init__(self, section):
super(NoSuchSectionError, self).__init__('No section named \'%s\' had been loaded!' % section)
class ConfigClientNotYetConnectedError(ConfigError):
""" This error is thrown if a ConfigClient instance tries to get a
    config value before the corresponding Configurable has been
    initialized and connected.
"""
def __init__(self, name, key):
error_string = 'Config Client \'%s\' wasn\'t connected when accessing config option \'%s\'!' % (name, key)
super(ConfigClientNotYetConnectedError, self).__init__(error_string)
class ConfigValueError(ConfigError):
def __init__(self, option, value):
message = '''Invalid value '{}' for config option '{}'!'''
super().__init__(message.format(value, option))
class ConfigTypeError(ConfigError):
def __init__(self, typ, value):
message = 'Invalid type \'{}\' for \'{}\' (expected \'{}\')!'
super().__init__(message.format(type(value), value, typ))
class ConfigLoadError(ConfigError):
pass
| gpl-3.0 | 4,992,464,223,598,709,000 | 28.379747 | 125 | 0.662215 | false |
NonWhite/IA_EP2 | code/utils.py | 1 | 1261 | # -*- coding: utf-8 -*-
import json
URL_HOME = 'http://bulbapedia.bulbagarden.net'
URL_LIST = '/wiki/List_of_Pokémon_by_Kanto_Pokédex_number'
HIDDEN_STYLE = [ 'display:none;' , 'display: none;' ]
SLEEP_TIME = 15 # seconds
MAX_CHUNK = 10
DATA_DIR = '../data/'
DESC_FILE = '%sdata.json' % DATA_DIR
RULES_FILE = '%sgenerated_rules.pl' % DATA_DIR
FIELD_FILES = [ "size.txt" , "weight.txt" , "has_evolution.txt" , "is_evolution.txt" , "is_starter.txt" ]
REPLACE_CHARS = {
u"\u00e9" : 'e' ,
u"\u2640" : ' female' ,
u"\u2642" : ' male' ,
"'" : '' ,
'.' : '' ,
" " : '_'
}
NORM_FIELDS = [ 'img_url' ]
def convertToJson( lst , filename = DESC_FILE , mode = 'a+' ) :
with open( filename , mode ) as jsonfile :
json.dump( lst , jsonfile , indent = 4 , sort_keys = True )
def importAsJson( filename = DESC_FILE ) :
f = open( filename )
data = json.load( f )
for row in data :
for k in row :
if k in NORM_FIELDS : continue
row[ k ] = normalizeString( row[ k ] )
return data
def normalizeString( st ) :
if isinstance( st , list ) :
st = [ normalizeString( cad ) for cad in st if cad != 'Unknown' ]
else :
st = st.lower().encode( 'utf-8' ).decode( 'utf-8' )
for k in REPLACE_CHARS : st = st.replace( k , REPLACE_CHARS[ k ] )
return st
| gpl-2.0 | -2,954,058,766,783,967,000 | 25.787234 | 105 | 0.602065 | false |
google-research/google-research | model_pruning/python/strip_pruning_vars.py | 1 | 3973 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Removes the auxiliary variables and ops added by the pruning library.
Usage:
bazel build tensorflow/contrib/model_pruning:strip_pruning_vars && \
bazel-bin/tensorflow/contrib/model_pruning/strip_pruning_vars \
--checkpoint_dir=/tmp/model_ckpts \
--output_node_names=softmax \
--output_dir=/tmp \
--filename=pruning_stripped.pb
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tensorflow.compat.v1 as tf
from model_pruning.python import strip_pruning_vars_lib
FLAGS = None
def strip_pruning_vars(checkpoint_dir, output_node_names, output_dir, filename):
"""Remove pruning-related auxiliary variables and ops from the graph.
Accepts training checkpoints and produces a GraphDef in which the pruning vars
and ops have been removed.
Args:
checkpoint_dir: Path to the checkpoints.
output_node_names: The name of the output nodes, comma separated.
output_dir: Directory where to write the graph.
filename: Output GraphDef file name.
Returns:
None
Raises:
ValueError: if output_nodes_names are not provided.
"""
if not output_node_names:
raise ValueError(
        'Need to specify at least 1 output node through output_node_names flag')
output_node_names = output_node_names.replace(' ', '').split(',')
initial_graph_def = strip_pruning_vars_lib.graph_def_from_checkpoint(
checkpoint_dir, output_node_names)
final_graph_def = strip_pruning_vars_lib.strip_pruning_vars_fn(
initial_graph_def, output_node_names)
tf.io.write_graph(final_graph_def, output_dir, filename, as_text=False)
tf.logging.info('\nFinal graph written to %s', os.path.join(
output_dir, filename))
def main(unused_args):
return strip_pruning_vars(FLAGS.checkpoint_dir, FLAGS.output_node_names,
FLAGS.output_dir, FLAGS.filename)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--checkpoint_dir', type=str, default='', help='Path to the checkpoints.')
parser.add_argument(
'--output_node_names',
type=str,
default='',
help='The name of the output nodes, comma separated.')
parser.add_argument(
'--output_dir',
type=str,
default='/tmp',
help='Directory where to write the graph.')
parser.add_argument(
'--filename',
type=str,
default='pruning_stripped.pb',
help='Output \'GraphDef\' file name.')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 | 6,870,038,317,081,666,000 | 33.25 | 80 | 0.700982 | false |
somic/paasta | paasta_tools/monitoring/check_capacity.py | 1 | 5804 | #!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import sys
from collections import defaultdict
from bravado.exception import HTTPError
from paasta_tools.api.client import get_paasta_api_client
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import paasta_print
def parse_capacity_check_options():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
'type', choices=['cpus', 'mem', 'disk'], type=str,
help='The resource to check.',
)
parser.add_argument(
'--warn', '-w', dest='warn', type=float, default=80,
help='Level to emit a warning status.',
)
parser.add_argument(
'--crit', '-c', dest='crit', type=float, default=90,
help='Level to emit a critical status.',
)
parser.add_argument(
'--overrides', dest='overrides', type=str,
help='json file of per-attribute overrides.\n'
'In the format [{groupings: {attribute: value, ...}, warn: {cpus: num, disk: num, mem: num}, '
'crit: {cpus: num, disk: num, mem: num}}, ...]',
)
parser.add_argument(
'--cluster', dest='cluster', type=str,
help='Cluster to check. Defaults to looking for the current cluster.',
)
parser.add_argument(
'--attributes', dest='attributes', type=str, default='pool',
help='Comma separated list of attributes to check.\n'
'Checks combinations of attributes',
)
options = parser.parse_args()
return options
def calc_percent_usage(resource_item, value_to_check):
values = resource_item[value_to_check]
if values['total'] == 0:
return 0
return 100 * (values['used'] / values['total'])
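# Hedged example (added): `resource_item` is expected to look like one entry of
# the paasta API resource payload, e.g. {'cpus': {'used': 30.0, 'total': 40.0}};
# calc_percent_usage(item, 'cpus') would then return 75.0.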
def error_message(failures, level, cluster, value_to_check):
result = "%s cluster %s %s usage:\n" % (level, cluster, value_to_check)
results = []
for f in failures:
attrs = ", ".join(["%s: %s" % (e['attr'], e['value']) for e in f['attrs']])
results.append(
" %s is at %.2f percent %s, maximum %.2f percent" % (
attrs, f['current'], value_to_check,
f['maximum'],
),
)
result += "\n".join(results)
return result
def get_check_from_overrides(overrides, default_check, groupings):
"""Get the overrides dict from overrides with the same groupings as groupings,
or return the default"""
checks = [o for o in overrides if o['groupings'] == groupings]
if len(checks) == 0:
return default_check
elif len(checks) == 1:
return checks[0]
else:
group_string = ', '.join(["%s: %s" % (k, v) for k, v in groupings.items()])
paasta_print("UNKNOWN Multiple overrides specified for %s" % group_string)
sys.exit(3)
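# Hedged example (added): one entry of the --overrides JSON file, matching the
# format documented in parse_capacity_check_options (attribute values are
# hypothetical):
#
#   [{"groupings": {"pool": "default"},
#     "warn": {"cpus": 70, "mem": 70, "disk": 70},
#     "crit": {"cpus": 85, "mem": 85, "disk": 85}}]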
def read_overrides(override_file):
if override_file:
with open(override_file, 'r') as f:
return json.loads(f.read())
else:
return {}
def run_capacity_check():
options = parse_capacity_check_options()
system_paasta_config = load_system_paasta_config()
cluster = options.cluster if options.cluster is not None else system_paasta_config.get_cluster()
value_to_check = options.type
client = get_paasta_api_client(cluster=cluster)
if client is None:
paasta_print('UNKNOWN Failed to load paasta api client')
sys.exit(3)
overrides = read_overrides(options.overrides)
attributes = options.attributes.split(',')
try:
resource_use = client.resources.resources(groupings=attributes).result()
except HTTPError as e:
paasta_print("UNKNOWN recieved exception from paasta api:\n\t%s" % e)
sys.exit(3)
default_check = {
'warn': {
'cpus': options.warn,
'mem': options.warn,
'disk': options.warn,
},
'crit': {
'cpus': options.crit,
'mem': options.crit,
'disk': options.crit,
},
}
failures = defaultdict(list)
for usage_value in resource_use:
check = get_check_from_overrides(overrides, default_check, usage_value['groupings'])
usage_percent = calc_percent_usage(usage_value, value_to_check)
for c in ['crit', 'warn']:
if usage_percent > check[c][value_to_check]:
failures[c].append({
'attrs': [{'attr': a, 'value': v} for a, v in usage_value['groupings'].items()],
'maximum': check[c][value_to_check], 'current': usage_percent,
})
break
return_value = [0]
if len(failures['crit']) > 0:
result = error_message(failures['crit'], 'CRITICAL', cluster, value_to_check)
paasta_print(result)
return_value.append(2)
if len(failures['warn']) > 0:
result = error_message(failures['warn'], 'WARNING', cluster, value_to_check)
paasta_print(result)
return_value.append(1)
if max(return_value) == 0:
paasta_print("OK cluster %s is below critical capacity in %s" % (cluster, value_to_check))
sys.exit(max(return_value))
if __name__ == "__main__":
run_capacity_check()
| apache-2.0 | -2,668,023,415,934,533,000 | 32.549133 | 102 | 0.614232 | false |
sunithamisra/dimac | dimac/old/imgaccess.py | 1 | 1674 | #!/usr/bin/python
# coding=UTF-8
#
# DIMAC (Disk Image Access for the Web)
# Copyright (C) 2014
# All rights reserved.
#
# This code is distributed under the terms of the GNU General Public
# License, Version 3. See the text file "COPYING" for further details
# about the terms of this license.
#
# This is the main disk image access package script
from dimac import app
from flask import render_template
import pytsk3
@app.route("/")
@app.route("/<retstr>")
# Sample hello world for testing
# def hello():
# return "Hello World!"
def tsktest(retstr=None):
# Step 1: get an IMG_INFO object
img = pytsk3.Img_Info("/home/bcadmin/Desktop/jo-work-usb-2009-12-11.E01")
## Step 2: get a Volume_Info object
volume = pytsk3.Volume_Info(img)
## Step 3: Iterate over all the partitions.
    retstr = 'PARTITIONS ON THIS DISK:' + '<br>'
    for part in volume:
        #print part.addr, part.desc, part.start, part.len
        retstr += str(part.addr) + ' ' + str(part.desc) + ' ' + str(part.start) + ' ' + str(part.len) + '<br>'
    retstr += '<br>' + 'Contents of the root directory:' + '<br>'
## Now, a hack to recognize the start location. Do NOT use this
## code in production. It's just a demo.
fs = pytsk3.FS_Info(img, offset = 63 * 512)
for directory_entry in fs.open_dir(path="/"):
directory_entry = directory_entry.info.name.name
try:
retstr += directory_entry.decode("utf8") + '<br>'
directory_entry.decode("utf8")
except UnicodeError:
pass
#return retstr
return render_template('index.html', retstr=retstr)
#if __name__ == "__main__":
# app.run()
| gpl-3.0 | -5,312,183,724,516,702,000 | 29.436364 | 111 | 0.633214 | false |
NewpTone/hotzenplotz | hotzenplotz/openstack/common/exception.py | 1 | 3305 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exceptions common to OpenStack projects
"""
import logging
from hotzenplotz.openstack.common.gettextutils import _
class Error(Exception):
def __init__(self, message=None):
super(Error, self).__init__(message)
class ApiError(Error):
def __init__(self, message='Unknown', code='Unknown'):
self.message = message
self.code = code
super(ApiError, self).__init__('%s: %s' % (code, message))
class NotFound(Error):
pass
class UnknownScheme(Error):
msg = "Unknown scheme '%s' found in URI"
def __init__(self, scheme):
msg = self.__class__.msg % scheme
super(UnknownScheme, self).__init__(msg)
class BadStoreUri(Error):
msg = "The Store URI %s was malformed. Reason: %s"
def __init__(self, uri, reason):
msg = self.__class__.msg % (uri, reason)
super(BadStoreUri, self).__init__(msg)
class Duplicate(Error):
pass
class NotAuthorized(Error):
pass
class NotEmpty(Error):
pass
class Invalid(Error):
pass
class BadInputError(Exception):
"""Error resulting from a client sending bad input to a server"""
pass
class MissingArgumentError(Error):
pass
class DatabaseMigrationError(Error):
pass
class ClientConnectionError(Exception):
"""Error resulting from a client connecting to a server"""
pass
def wrap_exception(f):
def _wrap(*args, **kw):
try:
return f(*args, **kw)
except Exception, e:
if not isinstance(e, Error):
#exc_type, exc_value, exc_traceback = sys.exc_info()
logging.exception(_('Uncaught exception'))
#logging.error(traceback.extract_stack(exc_traceback))
raise Error(str(e))
raise
_wrap.func_name = f.func_name
return _wrap
class OpenstackException(Exception):
"""
Base Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = "An unknown exception occurred"
def __init__(self, **kwargs):
try:
self._error_string = self.message % kwargs
except Exception:
# at least get the core message out if something happened
self._error_string = self.message
def __str__(self):
return self._error_string
class MalformedRequestBody(OpenstackException):
message = "Malformed message body: %(reason)s"
class InvalidContentType(OpenstackException):
message = "Invalid content type %(content_type)s"
| apache-2.0 | 748,899,593,439,119,600 | 23.124088 | 78 | 0.649622 | false |
CPekko/memorizer | main.py | 1 | 2026 | #!/usr/bin/env python3
from flask import Flask
from flask.ext.assets import Environment, Bundle
from logging.handlers import SMTPHandler
from models import db
from werkzeug.contrib.fixers import ProxyFix
from views.admin import admin
from views.api import api
from views.quiz import quiz
import logging
app = Flask(__name__)
app.config.from_pyfile('config.py')
app.wsgi_app = ProxyFix(app.wsgi_app)
db.init_app(app)
assets = Environment(app)
js = Bundle('js/ajax.js', 'js/alert.js', filters='jsmin', output='js/min.%(version)s.js')
admin_js = Bundle('js/admin.js', filters='jsmin', output='js/admin.min.%(version)s.js')
app_js = Bundle('js/app.js', filters='jsmin', output='js/app.min.%(version)s.js')
css = Bundle('css/font-awesome.min.css', 'css/styles.css', 'css/admin.css', filters='cssmin', output='css/min.%(version)s.css')
assets.register('js', js)
assets.register('admin_js', admin_js)
assets.register('app_js', app_js)
assets.register('css', css)
ADMINS = ['[email protected]']
if not app.debug:
mail_handler = SMTPHandler('127.0.0.1',
'[email protected]',
ADMINS, '[Flask] Memorizer ERROR')
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
app.register_blueprint(quiz)
app.register_blueprint(admin, url_prefix='/admin')
app.register_blueprint(api, url_prefix='/api')
@app.context_processor
def utility_processor():
def percentage(num, total):
if total > 0:
return round((num * 100) / total, 2)
return 0
def grade(num, total):
p = percentage(num, total)
if total == 0:
return '-'
if p < 41:
return 'F'
elif p < 53:
return 'E'
elif p < 65:
return 'D'
elif p < 77:
return 'C'
elif p < 89:
return 'B'
else:
return 'A'
return dict(percentage=percentage, grade=grade)
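# Hedged example (added): with 45 of 60 correct, percentage(45, 60) -> 75.0,
# which falls in the 65-76 band, so grade(45, 60) -> 'C'.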
if __name__ == '__main__':
app.run()
| gpl-3.0 | 7,748,160,861,605,990,000 | 28.794118 | 127 | 0.621422 | false |
atomman/nmrglue | nmrglue/analysis/analysisbase.py | 4 | 10487 | """
analysisbase provides general purpose analysis functions and classes used by
several nmrglue.analysis modules
"""
import numpy as np
pi = np.pi
# helper functions
def neighbors(pt, shape, structure):
"""
Generate a list of all neighbors to a point.
Parameters
----------
pt : tuple of ints
Index of the point to find neighbors of.
shape : tuple of ints
Shape of the region.
structure : ndarray of bools
Structure element that defines connections.
Returns
-------
pts : list of int tuples
List of tuples which represent indices for all points neighboring pt.
Edges are treated as stopping points.
"""
# set middle of structure to False
s = np.copy(structure) # copy structure
middle = [int(np.floor(i / 2.)) for i in s.shape] # find middle
s.flat[np.ravel_multi_index(middle, s.shape)] = False
offsets = np.argwhere(s) - middle
# loop over the offset adding all valid points
pts = []
for offset in offsets:
npt = pt - offset
if valid_pt(npt, shape):
pts.append(tuple(npt))
return pts
def valid_pt(pt, shape):
"""
    Determine if a point (indices) is valid for a given shape.
"""
for i, j in zip(pt, shape):
if i < 0: # index is not negative
return False
if i >= j: # index is less than j
return False
return True
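# Hedged example (added): a fully-True 3x3 connectivity structure finds the
# 8-connected neighbors of a point, clipped at the array edges.
#
#   s = np.ones((3, 3), dtype=bool)
#   neighbors((0, 0), (3, 3), s)  # -> [(1, 1), (1, 0), (0, 1)]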
dimension_names = ['A', 'Z', 'Y', 'X']
# utility functions
def find_limits(pts):
"""
Find the limits which outline the provided list of points
Parameters
----------
pts : list of int tuples
List of points [(z0, y0, x0), (z1, y1, x1), ...]
Returns
-------
    min : ndarray
        Array of minimum indices: array([zmin, ymin, xmin])
    max : ndarray
        Array of maximum indices: array([zmax, ymax, xmax])
See Also
--------
limits2slice : Create a list of slices from min, max limits
"""
arr_pts = np.array(pts)
return np.min(arr_pts, 0), np.max(arr_pts, 0)
def limits2slice(limits):
"""
Create a set of slice objects given an array of min, max limits.
Parameters
----------
limits: tuple, (ndarray, ndarray)
Two tuple consisting of array of the minimum and maximum indices.
Returns
-------
slices : list
List of slice objects which return points between limits
See Also
--------
find_limits : Find the minimum and maximum limits from a list of points.
slice2limits : Find a minimum and maximum limits for a list of slices.
"""
mins, maxs = limits
return tuple([slice(i, j + 1) for i, j in zip(mins, maxs)])
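# Hedged example (added): bounding box of a point list, then slices to crop it.
#
#   pts = [(3, 4), (5, 2), (4, 7)]
#   mins, maxs = find_limits(pts)    # array([3, 2]), array([5, 7])
#   limits2slice((mins, maxs))       # (slice(3, 6), slice(2, 8))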
def slice2limits(slices):
"""
Create a tuple of minimum, maximum limits from a set of slices.
Parameters
----------
slices : list
List of slice objects which return points between limits
Returns
-------
limits: tuple, (ndarray, ndarray)
Two tuple consisting of array of the minimum and maximum indices.
See Also
--------
limits2slice : Find a list of slices given minumum and maximum limits.
"""
mins = [s.start for s in slices]
maxs = [s.stop - 1 for s in slices]
return mins, maxs
def squish(r, axis):
"""
Squish array along an axis.
Determine the sum along all but one axis for an array.
Parameters
----------
r : ndarray
Array to squish.
axis : int
Axis of r to squish along.
Returns
-------
s : 1D ndarray
Array r squished into a single dimension.
"""
# put axis to be squished as the last axis
N = int(r.ndim)
r = r.swapaxes(axis, N - 1)
# sum along leading axis N-1 times
for i in range(N - 1):
r = r.sum(0)
return r
# Windowing classes
class ndwindow(object):
"""
An N-dimensional iterator to slice arrays into windows.
Given the shape of an array and a window size, an 'ndwindow' instance
    iterates over tuples of slices which slice the array into wsize
sub-arrays. At each iteration, the index of the center of the sub-array
is incremented by one along the last dimension. Array borders are ignored
so the resulting sub-array can be smaller than wsize. If wsize contains
    even values, the window is off-center, containing an additional point with
lower index.
Parameters
----------
size : tuple of ints
Size of array to generate tuples of slices from.
wsize : tuple of ints
Window/sub-array size. Size of the area to select from array. This is
the maximum size of the window.
Examples
--------
>>> a = np.arange(12).reshape(3,4)
>>> for s in ndwindow(a.shape,(3,3)):
... print(a[s])
[[0 1]
[4 5]]
[[0 1 2]
[4 5 6]]
[[1 2 3]
[5 6 7]]
[[2 3]
[6 7]]
[[0 1]
[4 5]
[8 9]]
[[ 0 1 2]
[ 4 5 6]
[ 8 9 10]]
[[ 1 2 3]
[ 5 6 7]
[ 9 10 11]]
[[ 2 3]
[ 6 7]
[10 11]]
[[4 5]
[8 9]]
[[ 4 5 6]
[ 8 9 10]]
[[ 5 6 7]
[ 9 10 11]]
[[ 6 7]
[10 11]]
See Also
--------
ndwindow_index : Iterator of a ndwindow and index of the window center
ndwindow_inside : Iterator over equal sized windows in the array.
"""
def __init__(self, shape, wsize):
""" Set up the ndwindow object """
if len(shape) != len(wsize):
raise ValueError("shape and wsize do match match")
self.ndindex = np.ndindex(shape)
wsize = np.array(wsize)
self.sub = np.ceil((wsize - 1.) / 2.)
self.add = wsize - 1. - self.sub
def __next__(self):
""" next iterator. """
return self.next()
def next(self):
""" x.next() -> the next value, or raise StopIteration """
center = self.ndindex.next()
start = [max(0, i - j) for i, j in zip(center, self.sub)]
stop = [i + j + 1 for i, j in zip(center, self.add)]
return tuple([slice(x, y) for x, y in zip(start, stop)])
def __iter__(self):
""" x.__iter__() <==> iter(x) """
return self
class ndwindow_index(object):
"""
    An N-dimensional iterator object which returns the index of the window
center and a :py:class:`ndwindow` slice array. See :py:class:`ndwindow`
for additional documentation.
This class is equivalent to:
    for index, slices in zip(np.ndindex(shape), ndwindow(shape, wsize)):
        return (index, slices)
See Also
--------
ndwindow: Iterator over only the window slices.
ndwindow_inside : Iterator over equal sized windows in the array.
"""
def __init__(self, shape, wsize):
""" Set up the object """
if len(shape) != len(wsize):
raise ValueError("shape and wsize do match match")
self.ndindex = np.ndindex(shape)
wsize = np.array(wsize)
self.sub = np.ceil((wsize - 1.) / 2.)
self.add = wsize - 1. - self.sub
def __next__(self):
""" next iterator. """
return self.next()
def next(self):
""" x.next() -> the next value, or raise StopIteration """
center = self.ndindex.next()
start = [max(0, i - j) for i, j in zip(center, self.sub)]
stop = [i + j + 1 for i, j in zip(center, self.add)]
return center, tuple([slice(x, y) for x, y in zip(start, stop)])
def __iter__(self):
""" x.__iter__() <==> iter(x) """
return self
class ndwindow_inside(object):
"""
    An N-dimensional iterator to slice arrays into uniform size windows.
Given the shape of an array and a window size, an 'ndwindow_inside'
instance iterators over tuples of slices which slice an the array into
uniform size wsize windows/sub-arrays. At each iteration, the index of
the top left of the sub-array is incremented by one along the last
    dimension until the resulting windows would extend past the array border.
All sub-arrays are equal sized (wsize).
Parameters
----------
size : tuple of ints
Size of array to generate tuples of slices from.
wsize : tuple of ints
Size of the area to select from array (widow size).
Examples
--------
>>> a = np.arange(9).reshape(3,3)
>>> for s in ndwindow_inside(a.shape,(2,2)):
... print(a[s])
[[0 1]
[3 4]]
[[1 2]
[4 5]]
[[3 4]
[6 7]]
[[4 5]
[7 8]]
See Also
--------
ndwindow : Iterator over non-uniform windows.
ndwindow_inside_index : Iterator of a ndwindow_inside and the index of the
window's top left point.
"""
def __init__(self, shape, wsize):
""" Set up the object """
if len(shape) != len(wsize):
raise ValueError("shape and wsize do match match")
self.ndindex = np.ndindex(
tuple(np.array(shape) - np.array(wsize) + 1))
self.wsize = wsize
def __next__(self):
""" next iterator. """
return self.next()
def next(self):
""" x.next() -> the next value, or raise StopIteration """
start = self.ndindex.next()
stop = np.array(start) + np.array(self.wsize)
return tuple([slice(x, y) for x, y in zip(start, stop)])
def __iter__(self):
""" x.__iter__() <==> iter(x) """
return self
class ndwindow_inside_index(object):
"""
    An N-dimensional iterator object which returns the index of the window
top-left and a :py:class:`ndwindow_inside` slice array.
Similar to :py:class:`ndwindow_index` but reports top left index of
window.
    See :py:class:`ndwindow_inside` and :py:class:`ndwindow_index` for additional
documentation.
"""
def __init__(self, shape, wsize):
" Set up the object """
if len(shape) != len(wsize):
raise ValueError("shape and wsize do match match")
self.ndindex = np.ndindex(
tuple(np.array(shape) - np.array(wsize) + 1))
self.wsize = wsize
def __next__(self):
""" next iterator. """
return self.next()
def next(self):
""" x.next() -> the next value, or raiseStopIteration """
start = self.ndindex.next()
stop = np.array(start) + np.array(self.wsize)
return (start, tuple([slice(x, y) for x, y in zip(start, stop)]))
def __iter__(self):
""" x.__iter__() <==> iter(x) """
return self
| bsd-3-clause | 5,625,475,794,048,658,000 | 26.098191 | 78 | 0.574425 | false |
fastinetserver/portage-idfetch | pym/_emerge/PackageUninstall.py | 1 | 1544 | # Copyright 1999-2009 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import codecs
import logging
import portage
from portage import os
from portage import _encodings
from portage import _unicode_encode
from _emerge.AsynchronousTask import AsynchronousTask
from _emerge.unmerge import unmerge
from _emerge.UninstallFailure import UninstallFailure
class PackageUninstall(AsynchronousTask):
__slots__ = ("world_atom", "ldpath_mtimes", "opts",
"pkg", "scheduler", "settings")
def _start(self):
try:
retval = unmerge(self.pkg.root_config, self.opts, "unmerge",
[self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
writemsg_level=self._writemsg_level)
except UninstallFailure as e:
self.returncode = e.status
else:
self.returncode = os.EX_OK
if retval == 1:
self.world_atom(self.pkg)
self.wait()
def _writemsg_level(self, msg, level=0, noiselevel=0):
log_path = self.settings.get("PORTAGE_LOG_FILE")
background = self.background
if log_path is None:
if not (background and level < logging.WARNING):
portage.util.writemsg_level(msg,
level=level, noiselevel=noiselevel)
else:
if not background:
portage.util.writemsg_level(msg,
level=level, noiselevel=noiselevel)
f = codecs.open(_unicode_encode(log_path,
encoding=_encodings['fs'], errors='strict'),
mode='a', encoding=_encodings['content'], errors='replace')
try:
f.write(msg)
finally:
f.close()
| gpl-2.0 | 9,214,868,639,829,030,000 | 26.571429 | 66 | 0.716969 | false |
alex/remoteobjects | tests/utils.py | 1 | 3271 | # Copyright (c) 2009 Six Apart Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Six Apart Ltd. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import httplib2
import logging
import os
import mox
import nose
import nose.tools
def todo(fn):
@nose.tools.make_decorator(fn)
def test_reverse(*args, **kwargs):
try:
fn(*args, **kwargs)
except:
pass
else:
raise AssertionError('test %s unexpectedly succeeded' % fn.__name__)
return test_reverse
def mock_http(req, resp_or_content):
mock = mox.MockObject(httplib2.Http)
if not isinstance(req, dict):
req = dict(uri=req)
def make_response(response, url):
default_response = {
'status': 200,
'etag': '7',
'content-type': 'application/json',
'content-location': url,
}
if isinstance(response, dict):
if 'content' in response:
content = response['content']
del response['content']
else:
content = ''
status = response.get('status', 200)
if 200 <= status < 300:
response_info = dict(default_response)
response_info.update(response)
else:
# Homg all bets are off!! Use specified headers only.
response_info = dict(response)
else:
response_info = dict(default_response)
content = response
return httplib2.Response(response_info), content
resp, content = make_response(resp_or_content, req['uri'])
mock.request(**req).AndReturn((resp, content))
mox.Replay(mock)
return mock
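# Hedged usage sketch (added): mock a GET returning a JSON body, then verify
# that the expected request was made.
#
#   http = mock_http('http://example.com/obj', '{"name": "obj"}')
#   response, content = http.request(uri='http://example.com/obj')
#   mox.Verify(http)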
def log():
import sys
logging.basicConfig(level=logging.DEBUG, stream=sys.stderr, format="%(asctime)s %(levelname)s %(message)s")
| bsd-3-clause | -8,400,358,860,056,375,000 | 34.172043 | 111 | 0.660654 | false |
tobecontinued/onedrive-e | onedrivee/workers/tasks/down_task.py | 1 | 2530 | import os
import traceback
from onedrivee.common.utils import OS_USER_ID, OS_USER_GID
from onedrivee.drives import errors
from onedrivee.common import hasher
from onedrivee.common.dateparser import datetime_to_timestamp
from onedrivee.workers.tasks.task_base import TaskBase
from onedrivee.store.items_db import ItemRecordStatuses
def get_tmp_filename(name):
return '.' + name + '.!od'
class DownloadFileTask(TaskBase):
def __init__(self, parent_task, rel_parent_path, item):
"""
:param TaskBase parent_task: Base task.
:param str rel_parent_path: Relative working path of this task.
:param onedrivee.api.items.OneDriveItem item: The item to download.
"""
super().__init__(parent_task)
self.rel_parent_path = rel_parent_path
self._item = item
self._item_name = item.name
def handle(self):
local_item_tmp_path = self.local_parent_path + get_tmp_filename(self.item_name)
try:
with open(local_item_tmp_path, 'wb') as f:
self.drive.download_file(file=f, size=self._item.size, item_id=self._item.id)
local_sha1 = hasher.hash_value(local_item_tmp_path)
item_sha1 = None
if self._item.file_props is not None and self._item.file_props.hashes is not None:
item_sha1 = self._item.file_props.hashes.sha1
if item_sha1 is None:
                self.logger.warn('Remote file %s has no sha1 property; keeping the file, but its integrity cannot be verified',
                                 self.local_path)
elif local_sha1 != item_sha1:
                self.logger.error('Hash mismatch for downloaded file %s: remote %s (%d bytes), local %s (%d bytes)',
                                  self.local_path, self._item.file_props.hashes.sha1, self._item.size,
                                  local_sha1, os.path.getsize(local_item_tmp_path))
return
os.rename(local_item_tmp_path, self.local_path)
t = datetime_to_timestamp(self._item.modified_time)
os.utime(self.local_path, (t, t))
os.chown(self.local_path, OS_USER_ID, OS_USER_GID)
self.items_store.update_item(self._item, ItemRecordStatuses.DOWNLOADED)
except (IOError, OSError) as e:
self.logger.error('An IO error occurred when downloading "%s":\n%s.', self.local_path, traceback.format_exc())
except errors.OneDriveError as e:
self.logger.error('An API error occurred when downloading "%s":\n%s.', self.local_path, traceback.format_exc())
| gpl-3.0 | 7,274,206,629,668,221,000 | 47.653846 | 125 | 0.636759 | false |
dchirikov/luna | luna/utils/ip.py | 1 | 5972 | '''
Written by Dmitry Chirikov <[email protected]>
This file is part of Luna, cluster provisioning tool
https://github.com/dchirikov/luna
This file is part of Luna.
Luna is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Luna is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Luna. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import socket
from binascii import hexlify, unhexlify
import logging
log = logging.getLogger(__name__)
af = {
4: socket.AF_INET,
6: socket.AF_INET6,
}
hex_format = {
4: '08x',
6: '032x'
}
def ntoa(num_ip, ver=4):
"""
    Convert the IP num_ip from the binary notation
into the IPv4 numbers-and-dots form
"""
try:
ip = socket.inet_ntop(
af[ver],
unhexlify(format(num_ip, hex_format[ver]))
)
return ip
except:
err_msg = ("Cannot convert '{}' from C"
" to IPv{} format".format(num_ip, ver))
log.error(err_msg)
raise RuntimeError, err_msg
def aton(ip, ver=4):
"""
Convert the IP ip from the IPv4 numbers-and-dots
notation into binary form (in network byte order)
"""
try:
absnum = int(hexlify(socket.inet_pton(af[ver], ip)), 16)
return long(absnum)
except:
err_msg = "Cannot convert IP '{}' to C format".format(ip)
log.error(err_msg)
raise RuntimeError, err_msg
def reltoa(num_net, rel_ip, ver):
"""
Convert a relative ip (a number relative to the base of the
network obtained using 'get_num_subnet') into an IPv4 address
"""
num_ip = int(num_net) + int(rel_ip)
return ntoa(num_ip, ver)
def atorel(ip, num_net, prefix, ver=4):
"""
Convert an IPv4 address into a number relative to the base of
the network obtained using 'get_num_subnet'
"""
num_ip = aton(ip, ver)
# Check if the ip address actually belongs to num_net/prefix
if not ip_in_net(ip, num_net, prefix, ver):
err_msg = ("Network '{}/{}' does not contain '{}'"
.format(ntoa(num_net, ver), prefix, ip))
log.error(err_msg)
raise RuntimeError, err_msg
relative_num = long(num_ip - num_net)
return relative_num
def get_num_subnet(ip, prefix, ver=4):
"""
Get the address of the subnet to which ip belongs in binary form
"""
maxbits = 32
if ver == 6:
maxbits = 128
try:
prefix = int(prefix)
except:
err_msg = "Prefix '{}' is invalid, must be 'int'".format(prefix)
log.error(err_msg)
raise RuntimeError, err_msg
if ver == 4 and prefix not in range(1, 31):
err_msg = "Prefix should be in the range [1..30]"
log.error(err_msg)
raise RuntimeError, err_msg
if ver == 6 and prefix not in range(1, 127):
err_msg = "Prefix should be in the range [1..126]"
log.error(err_msg)
raise RuntimeError, err_msg
if type(ip) is long or type(ip) is int:
num_ip = ip
else:
try:
num_ip = aton(ip, ver)
except socket.error:
err_msg = "'{}' is not a valid IP".format(ip)
log.error(err_msg)
raise RuntimeError, err_msg
    num_mask = (((1 << maxbits) - 1)
                ^ ((1 << (maxbits - prefix)) - 1))
num_subnet = long(num_ip & num_mask)
return num_subnet
def ip_in_net(ip, num_net, prefix, ver=4):
"""
Check if an address (either in binary or IPv4 form) belongs to
num_net/prefix
"""
if type(ip) is long or type(ip) is int:
num_ip = ip
else:
num_ip = aton(ip, ver)
num_subnet1 = get_num_subnet(num_net, prefix, ver)
num_subnet2 = get_num_subnet(num_ip, prefix, ver)
return num_subnet1 == num_subnet2
def guess_ns_hostname():
"""
Try to guess the hostname to use for the nameserver
it supports hosts of the format host-N, hostN for HA
configurations. Returns the current hostname otherwise
"""
ns_hostname = socket.gethostname().split('.')[0]
if ns_hostname[-1:].isdigit():
guessed_name = re.match('(.*)[0-9]+$', ns_hostname).group(1)
if guessed_name[-1] == '-':
guessed_name = guessed_name[:-1]
try:
guessed_ip = socket.gethostbyname(guessed_name)
except:
guessed_ip = None
if guessed_ip:
log.info(("Guessed that NS server should be '%s', "
"instead of '%s'. "
"Please update if this is not correct.") %
(guessed_name, ns_hostname))
return guessed_name
# Return the current host's hostname if the guessed name could not
# be resolved
return ns_hostname
def get_ip_version(ip):
for ver in [4, 6]:
try:
int(hexlify(socket.inet_pton(af[ver], ip)), 16)
return ver
except:
pass
return None
def ipv6_unwrap(ip):
"""
    Returns the IPv6 ip address in full form:
fe80:1:: => fe80:0001:0000:0000:0000:0000:0000:0000
2001:db8::ff00:42:8329 => 2001:0db8:0000:0000:0000:ff00:0042:8329
"""
ip = ntoa(aton(ip, 6), 6)
out = [''] * 8
    start, _, end = ip.partition('::')
    start_splited = start.split(':') if start else []
    end_splited = end.split(':') if end else []
out[:len(start_splited)] = start_splited
i = 1
for elem in reversed(end_splited):
out[-i] = elem
i += 1
for i in range(len(out)):
out[i] = '{:0>4}'.format(out[i])
return ":".join(out)
| gpl-3.0 | 5,930,492,766,799,348,000 | 24.305085 | 72 | 0.586236 | false |
leppa/home-assistant | tests/components/device_tracker/test_device_condition.py | 1 | 4345 | """The tests for Device tracker device conditions."""
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.device_tracker import DOMAIN
from homeassistant.const import STATE_HOME, STATE_NOT_HOME
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock serivce."""
return async_mock_service(hass, "test", "automation")
async def test_get_conditions(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a device_tracker."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": "is_not_home",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"condition": "device",
"domain": DOMAIN,
"type": "is_home",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert_lists_same(conditions, expected_conditions)
async def test_if_state(hass, calls):
"""Test for turn_on and turn_off conditions."""
hass.states.async_set("device_tracker.entity", STATE_HOME)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "device_tracker.entity",
"type": "is_home",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_home - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event2"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "device_tracker.entity",
"type": "is_not_home",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_not_home - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
]
},
)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "is_home - event - test_event1"
hass.states.async_set("device_tracker.entity", STATE_NOT_HOME)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "is_not_home - event - test_event2"
| apache-2.0 | -657,734,917,007,290,400 | 33.484127 | 107 | 0.509551 | false |
HaraldWeber/client | src/fa/__init__.py | 1 | 1570 | #-------------------------------------------------------------------------------
# Copyright (c) 2012 Gael Honorez.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Public License v3.0
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/gpl.html
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#-------------------------------------------------------------------------------
# Initialize logging system
import logging
logger = logging.getLogger(__name__)
GPGNET_HOST = "lobby.faforever.com"
GPGNET_PORT = 8000
DEFAULT_LIVE_REPLAY = True
DEFAULT_RECORD_REPLAY = True
DEFAULT_WRITE_GAME_LOG = False
# We only want one instance of Forged Alliance to run, so we use a singleton here (other modules may wish to connect to its signals so it needs persistence)
from process import instance as instance
from play import run
from replay import replay
import check
import maps
import replayserver
import relayserver
import proxies
import updater
import upnp
import faction
import binary
import featured
import game_version | gpl-3.0 | 607,499,954,161,489,500 | 32.425532 | 156 | 0.705732 | false |
olitheolix/ogre_v21_example | python/setup.py | 1 | 3770 | # MIT License
#
# Copyright (c) 2016 Oliver Nagy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import shutil
import subprocess
from Cython.Build import cythonize
from distutils.core import setup, Extension
def main():
"""
Compile the Cython wrapper for Ogre.
"""
# Note: the name of the library *MUST* match the name of the pyx file. You
# are also *NOT* allowed to rename the library file name once it was built!
# If you do, Python will throw an error when importing the module.
libname = 'azOgre'
    # Remember the current working directory; the sources live under ./src.
cwd = os.getcwd()
src_dir = os.path.join(cwd, 'src')
dir_conda = os.getenv('CONDA_ENV_PATH')
    assert dir_conda, 'CONDA_ENV_PATH must be set'
# Let setuptools' Extension function take care of the compiler options.
ext = Extension(
name=libname,
sources=[
src_dir + '/' + libname + '.pyx'
],
include_dirs=[
dir_conda + '/include',
dir_conda + '/include/OGRE',
dir_conda + '/include/OGRE/Hlms',
dir_conda + '/include/OGRE/Hlms/Pbs',
dir_conda + '/include/OGRE/Hlms/Unlit',
dir_conda + '/include/OGRE/Overlay',
],
library_dirs=[
dir_conda + '/lib',
],
runtime_library_dirs=[
dir_conda + '/lib',
],
libraries=[
'OgreHlmsPbs',
'OgreHlmsUnlit',
'OgreMain',
'OgreOverlay',
'boost_atomic',
'boost_chrono',
'boost_date_time',
'boost_system',
'boost_thread',
'pthread',
],
extra_compile_args=[
'-Wall', '-Wextra', '-std=c++11',
'-Wno-unused-parameter', '-Wno-unused-function',
'-Wno-unused-variable', '-fPIC',
],
language='c++',
)
# Cython options.
cython_opts = {
'include_path': [src_dir]
}
# Build the extension module.
setup(
name=libname,
version='0.1',
description='Python Wrapper for Ogre3D (v2.1)',
author='Oliver Nagy',
author_email='[email protected]',
url='https://github.com/olitheolix/azOgre',
ext_modules=cythonize(ext, **cython_opts),
)
# Remove the library and build directory if --clean was specified.
if 'clean' in sys.argv:
# Remove library.
tmp = 'rm -f {}.*.so'.format(libname)
print(tmp)
subprocess.call(tmp, shell=True)
# Remove Cython file.
tmp = 'rm -f {}/{}.cpp'.format(src_dir, libname)
print(tmp)
subprocess.call(tmp, shell=True)
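# Typical invocations (an assumption, not taken from the project docs):
#   python setup.py build_ext --inplace   # compile the azOgre extension
#   python setup.py clean                 # also removes the built .so/.cpp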
if __name__ == '__main__':
main()
| mit | -1,013,155,397,636,995,500 | 30.949153 | 81 | 0.607427 | false |
ppizarror/Ned-For-Spod | bin/external/pil/XVThumbImagePlugin.py | 1 | 1759 | #
# The Python Imaging Library.
# $Id$
#
# XV Thumbnail file handler by Charles E. "Gene" Cash
# ([email protected])
#
# see xvcolor.c and xvbrowse.c in the sources to John Bradley's XV,
# available from ftp://ftp.cis.upenn.edu/pub/xv/
#
# history:
# 98-08-15 cec created (b/w only)
# 98-12-09 cec added color palette
# 98-12-28 fl added to PIL (with only a few very minor modifications)
#
# To do:
#
__version__ = "0.1"
import Image, ImageFile, ImagePalette
import string
# standard color palette for thumbnails (RGB332)
PALETTE = ""
for r in range(8):
for g in range(8):
for b in range(4):
PALETTE = PALETTE + (chr((r*255)/7)+chr((g*255)/7)+chr((b*255)/3))
##
# Image plugin for XV thumbnail images.
class XVThumbImageFile(ImageFile.ImageFile):
format = "XVThumb"
format_description = "XV thumbnail image"
def _open(self):
# check magic
s = self.fp.read(6)
if s != "P7 332":
raise SyntaxError, "not an XV thumbnail file"
# Skip to beginning of next line
self.fp.readline()
# skip info comments
while 1:
s = self.fp.readline()
if not s:
raise SyntaxError, "Unexpected EOF reading XV thumbnail file"
if s[0] != '#':
break
# parse header line (already read)
s = string.split(s.strip())
self.mode = "P"
self.size = int(s[0]), int(s[1])
self.palette = ImagePalette.raw("RGB", PALETTE)
self.tile = [
("raw", (0, 0)+self.size,
self.fp.tell(), (self.mode, 0, 1)
)]
# --------------------------------------------------------------------
Image.register_open("XVThumb", XVThumbImageFile)
| gpl-2.0 | -8,625,809,419,998,483,000 | 23.09589 | 78 | 0.553724 | false |
dvklopfenstein/PrincetonAlgorithms | tests/test_DirectedDFS.py | 1 | 1827 | #!/usr/bin/env python
"""Determine single-source or multiple-source reachability in a digraph"""
#pylint: disable=invalid-name
import sys
from os.path import join
from AlgsSedgewickWayne.Digraph import Digraph
from AlgsSedgewickWayne.DirectedDFS import DirectedDFS
from AlgsSedgewickWayne.testcode.InputArgs import cli_get_fin
from tests.utils import DIR_TEST
def test_main(digraph, *sources):
"""Determine single-source or multiple-source reachability in a digraph
using depth first search.
Runs in O(E + V) time.
>>> test_main("tinyDG.txt", 1)
[1]
>>> test_main("tinyDG.txt", 2)
[0, 1, 2, 3, 4, 5]
>>> test_main("tinyDG.txt", 1, 2, 6)
[0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12]
"""
    if isinstance(digraph, str):
        digraph_fin = join(DIR_TEST, digraph)
        digraph_arr = cli_get_fin(digraph_fin)
    else:
        digraph_arr = digraph
    grph = Digraph(digraph_arr)
dfs = DirectedDFS(grph, sources) # multiple-source reachability
return [v for v in grph.keys if dfs.marked(v)]
def cli(prt=sys.stdout):
"""Command-line interface"""
if len(sys.argv) == 1:
import doctest
doctest.testmod()
else:
# read in digraph from command-line argument
digraph_array = cli_get_fin(sys.argv[1])
# read in sources from command-line arguments
sources = [int(s) for s in sys.argv[2:]]
# print out vertices reachable from sources
reachable = test_main(digraph_array, *sources)
prt.write("{}\n".format(' '.join(str(r) for r in reachable)))
#****************************************************************************
if __name__ == "__main__":
cli()
# Copyright 2002-2016, Robert Sedgewick and Kevin Wayne.
# Copyright 2015-2019, DV Klopfenstein, Python implementation.
| gpl-2.0 | 3,476,185,876,321,415,000 | 31.625 | 77 | 0.626163 | false |
gopaycommunity/gopay-python-api | gopay/http.py | 1 | 1078 | import requests
class Browser:
def __init__(self, logger, timeout):
self.logger = logger
self.timeout = timeout
def browse(self, request):
try:
r = requests.request(request.method, request.url, headers=request.headers, data=request.body, timeout=self.timeout)
response = Response(r.content, r.json(), r.status_code)
except ValueError as ve:
response = Response(r.content, None, r.status_code)
except Exception as e:
response = Response(e, {}, 500)
self.logger(request, response)
return response
class Request:
def __init__(self):
self.method = 'get'
self.url = ''
self.headers = {}
self.body = {}
class Response:
def __init__(self, raw_body, json, status_code):
self.raw_body = str(raw_body)
self.json = json
self.status_code = status_code
def has_succeed(self):
return self.status_code == 200
def __str__(self):
return self.raw_body
def null_logger(*args):
pass
| mit | 8,063,709,450,911,829,000 | 24.069767 | 127 | 0.58256 | false |
westurner/pyleset | pyleset/pyleset.py | 1 | 4859 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
"""
design_cleanup
"""
import glob
import os
import pathlib
import structlog
import sarge
log = structlog.get_logger()
def match_file_pattern(pattern):
#cmd = sarge.shell_format('ls {0}', pattern)
#files = sarge.get_stdout(cmd)
files = glob.glob(pattern)
return files
def moveto(path, pattern, write_changes=False):
"""
Move a pattern (glob) of files to a directory
Args:
path (str) -- directory path
pattern (str) -- filename glob pattern
Yields:
sarge.run outputs
"""
# files = !ls $pattern
log.debug('moveto()', path=path,
pattern=pattern, write_changes=write_changes)
files = match_file_pattern(pattern)
path_ = pathlib.Path(path)
path_.name in files and files.remove(path_.name)
log.info('patternmatch', files=files)
if not files:
return
# !mkdir $path
if not os.path.exists(path):
log.info('mkdir', path=path)
os.makedirs(path)
git_mv_opts = '-n'
if write_changes:
git_mv_opts = ''
for f in files:
cmd = sarge.shell_format(
"git mv %s {0} {1}" % git_mv_opts,
f, os.path.join(path, f))
log.info('cmd', cmd=cmd)
yield sarge.capture_both(cmd)
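# Usage sketch (assumed): preview the `git mv -n` commands for all SVG files,
# then re-run with write_changes=True to apply them:
#   for result in moveto('drawings', '*.svg'):
#       print(result.stdout.text)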
# py3: patch xrange
import sys
if sys.version_info.major > 2:
xrange = range
def numbered_design_task(dirpattern="drawing-%s",
nrange=(0, 22+1),
write_changes=True):
"""
Move a pattern (glob) of numbered files to a directory
Args:
dirpattern (str): "%s-string" to interpolate
nrange (tuple): *args for (x)range
Yields:
sarge.run outputs
"""
for n in range(*nrange):
path = dirpattern % n
pattern = path + ".*" # TODO: sarge check
for output in moveto(path, pattern, write_changes=write_changes):
yield output
def design_cleanup():
"""
mainfunc
"""
pass
import unittest
class Test_design_cleanup(unittest.TestCase):
def test_design_cleanup(self):
output = numbered_design_task()
self.assertTrue(output)
for x in output:
self.assertTrue(x)
print(x.returncode)
print(x.stdout.text)
print(x.stderr.text)
def main(*args):
import optparse
import logging
import sys
prs = optparse.OptionParser(usage="%prog: [args]")
prs.add_option('-M', '--move',
action='store_true')
prs.add_option('-N', '--numbered',
action='store_true')
prs.add_option('-w', '--actually-write',
dest='write_changes',
action='store_true')
prs.add_option('-v', '--verbose',
dest='verbose',
action='store_true',)
prs.add_option('-q', '--quiet',
dest='quiet',
action='store_true',)
prs.add_option('-t', '--test',
dest='run_tests',
action='store_true',)
args = args and list(args) or sys.argv[1:]
(opts, args) = prs.parse_args(args)
if not opts.quiet:
logging.basicConfig()
if opts.verbose:
logging.getLogger().setLevel(logging.DEBUG)
if opts.run_tests:
sys.argv = [sys.argv[0]] + args
import unittest
sys.exit(unittest.main())
if opts.move:
try:
            path, pattern = args
            output = moveto(path, pattern, write_changes=opts.write_changes)
for x in output:
log.info('cmd',
cmds=zip(x.commands, x.returncodes),)
if x.stdout.text:
print(x.stdout.text)
if x.stderr.text:
print(x.stderr.text)
if x.returncode:
return x.returncode
return 0
except TypeError: # TODO
return -1
if opts.numbered:
try:
x="""
if len(args) >= 2:
start, end = args[:1]
elif len(args) == 1:
start, end = 0, args[0]
else:
prs.error("Must specify <start[, end]>")
"""
output = numbered_design_task()
for x in output:
log.info('cmd',
cmds=zip(x.commands, x.returncodes),)
if x.stdout.text:
print(x.stdout.text)
if x.stderr.text:
print(x.stderr.text)
if x.returncode:
return x.returncode
return 0
except TypeError: # TODO
return -1
if __name__ == "__main__":
sys.exit(main())
| mit | 6,796,045,735,677,614,000 | 23.174129 | 76 | 0.51348 | false |
ssundarraj/music_genre_classifier | models/ingestor.py | 1 | 1159 | import csv
import numpy as np
from sklearn import preprocessing
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.decomposition import PCA
def absHelper(data):
for i in range(len(data)):
for j in range(len(data[0])):
data[i][j] = abs(data[i][j])
return data
def get_data(file_name):
with open(file_name, 'rb') as csvfile:
data = []
target = []
datareader = csv.reader(csvfile, delimiter=',',)
for row in datareader:
row = [abs(float(i)) for i in row]
data.append(row[:-1])
target.append(row[-1])
data = np.array(data)
data = preprocessing.scale(data)
data = absHelper(data)
#data = SelectKBest(chi2, k=19).fit_transform(data, target)
data_length = len(data)
split_factor = int(0.80 * data_length)
training_data = data[:split_factor]
test_data = data[split_factor + 1:]
training_target = target[:split_factor]
test_target = target[split_factor + 1:]
return (training_data, training_target, test_data, test_target)
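# Usage sketch (assumes a CSV whose last column is the class label):
#   training_data, training_target, test_data, test_target = \
#       get_data('features.csv')
#   print(len(training_data), len(test_data))  # roughly an 80/20 split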
| mit | -3,011,801,378,734,631,000 | 25.340909 | 67 | 0.638481 | false |
hasgeek/funnel | funnel/forms/auth_client.py | 1 | 6205 | from __future__ import annotations
from urllib.parse import urlparse
from baseframe import _, __
from coaster.utils import getbool
import baseframe.forms as forms
from ..models import (
AuthClient,
AuthClientCredential,
AuthClientTeamPermissions,
AuthClientUserPermissions,
valid_name,
)
__all__ = [
'AuthClientForm',
'AuthClientCredentialForm',
'AuthClientPermissionEditForm',
'TeamPermissionAssignForm',
'UserPermissionAssignForm',
]
@AuthClient.forms('main')
class AuthClientForm(forms.Form):
"""Register a new OAuth client application."""
title = forms.StringField(
__("Application title"),
validators=[forms.validators.DataRequired()],
filters=[forms.filters.strip()],
description=__("The name of your application"),
)
description = forms.TextAreaField(
__("Description"),
validators=[forms.validators.DataRequired()],
description=__("A description to help users recognize your application"),
)
client_owner = forms.RadioField(
__("Owner"),
validators=[forms.validators.DataRequired()],
description=__(
"User or organization that owns this application. Changing the owner"
" will revoke all currently assigned permissions for this app"
),
)
confidential = forms.RadioField(
__("Application type"),
coerce=getbool,
default=True,
choices=[
(
True,
__(
"Confidential (server-hosted app, capable of storing secret key securely)"
),
),
(
False,
__(
"Public (native or in-browser app, not capable of storing secret key securely)"
),
),
],
)
website = forms.URLField(
__("Application website"),
validators=[forms.validators.DataRequired(), forms.validators.URL()],
description=__("Website where users may access this application"),
)
redirect_uris = forms.TextListField(
__("Redirect URLs"),
validators=[
forms.validators.OptionalIf('confidential'),
forms.ForEach([forms.URL()]),
],
filters=[forms.strip_each()],
description=__(
"OAuth2 Redirect URL. If your app is available on multiple hostnames,"
" list each redirect URL on a separate line"
),
)
allow_any_login = forms.BooleanField(
__("Allow anyone to login"),
default=True,
description=__(
"If your application requires access to be restricted to specific users,"
" uncheck this, and only users who have been assigned a permission to the"
" app will be able to login"
),
)
def validate_client_owner(self, field):
if field.data == self.edit_user.buid:
self.user = self.edit_user
self.organization = None
else:
orgs = [
org
for org in self.edit_user.organizations_as_owner
if org.buid == field.data
]
if len(orgs) != 1:
raise forms.ValidationError(_("Invalid owner"))
self.user = None
self.organization = orgs[0]
def _urls_match(self, url1, url2):
p1 = urlparse(url1)
p2 = urlparse(url2)
return (
(p1.netloc == p2.netloc)
and (p1.scheme == p2.scheme)
and (p1.username == p2.username)
and (p1.password == p2.password)
)
    def validate_redirect_uris(self, field):
        # The field is a TextListField, so each URL in the list must match
        if not self.confidential.data:
            return
        for url in field.data:
            if not self._urls_match(self.website.data, url):
                raise forms.ValidationError(
                    _("The scheme, domain and port must match that of the website URL")
                )
@AuthClientCredential.forms('main')
class AuthClientCredentialForm(forms.Form):
"""Generate new client credentials."""
title = forms.StringField(
__("What’s this for?"),
validators=[forms.validators.DataRequired(), forms.validators.Length(max=250)],
filters=[forms.filters.strip()],
description=__(
"Add a description to help yourself remember why this was generated"
),
)
def permission_validator(form, field):
permlist = field.data.split()
for perm in permlist:
if not valid_name(perm):
raise forms.ValidationError(
_("Permission ‘{perm}’ is malformed").format(perm=perm)
)
permlist.sort()
field.data = ' '.join(permlist)
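# Behaviour note (editor's addition): the validator normalizes its input, so
# '  write read ' becomes 'read write', while a malformed name such as
# 'bad name!' fails valid_name() and raises forms.ValidationError.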
@AuthClient.forms('permissions_user')
@AuthClientUserPermissions.forms('assign')
class UserPermissionAssignForm(forms.Form):
"""Assign permissions to a user."""
user = forms.UserSelectField(
__("User"),
validators=[forms.validators.DataRequired()],
description=__("Lookup a user by their username or email address"),
)
perms = forms.StringField(
__("Permissions"),
validators=[forms.validators.DataRequired(), permission_validator],
)
@AuthClient.forms('permissions_team')
@AuthClientTeamPermissions.forms('assign')
class TeamPermissionAssignForm(forms.Form):
"""Assign permissions to a team."""
team_id = forms.RadioField(
__("Team"),
validators=[forms.validators.DataRequired()],
description=__("Select a team to assign permissions to"),
)
perms = forms.StringField(
__("Permissions"),
validators=[forms.validators.DataRequired(), permission_validator],
)
def validate_team_id(self, field):
teams = [team for team in self.organization.teams if team.buid == field.data]
if len(teams) != 1:
raise forms.ValidationError(_("Unknown team"))
self.team = teams[0]
@AuthClientUserPermissions.forms('edit')
@AuthClientTeamPermissions.forms('edit')
class AuthClientPermissionEditForm(forms.Form):
"""Edit a user or team's permissions."""
perms = forms.StringField(__("Permissions"), validators=[permission_validator])
| agpl-3.0 | -2,272,297,041,695,459,600 | 30.467005 | 99 | 0.596548 | false |
rkk09c/Flask_Boilerplate | app/views.py | 1 | 2279 | #FLASK
from flask import abort, render_template, Response, flash, redirect, session, url_for, g, request, send_from_directory
#FLASK EXTENSIONS
from flask.ext.login import login_user, logout_user, current_user, login_required
from flask.ext.sqlalchemy import get_debug_queries
from flask.ext.mail import Mail
#LOCAL
from models import User, ROLE_USER, ROLE_ADMIN
from forms import LoginForm, RegistrationForm
from email import user_notification
from config import DATABASE_QUERY_TIMEOUT
from app import app, db, lm, mail
#OTHER
from datetime import datetime
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
@app.route('/create_acct/' , methods=['GET','POST'])
def create_acct():
form = RegistrationForm(request.form)
if form.validate_on_submit():
print form
user = User()
form.populate_obj(user)
db.session.add(user)
db.session.commit()
login_user(user)
user_notification(user)
return redirect(url_for('index'))
return render_template('create_acct.html', title = "Create Account", form=form)
@app.route('/login/',methods=['GET','POST'])
def login():
form = LoginForm(request.form)
if form.validate_on_submit():
user = form.get_user()
login_user(user)
flash("Logged in successfully.")
return redirect(request.args.get("next") or url_for("index"))
return render_template('login.html', title = "Login", form=form)
@app.route('/')
@app.route('/index')
@login_required
def index():
user = g.user
return render_template ("index.html",
title = "Home",
user = user)
@app.before_request
def before_request():
g.user = current_user
@app.route('/logout')
def logout():
#double check if the
logout_user()
return redirect(url_for('index'))
@app.errorhandler(404)
def internal_error(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
# db.session.rollback()
return render_template('500.html'), 500
@app.after_request
def after_request(response):
for query in get_debug_queries():
if query.duration >= DATABASE_QUERY_TIMEOUT:
app.logger.warning("SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n" % (query.statement, query.parameters, query.duration, query.context))
return response
| mit | 8,549,711,656,679,542,000 | 27.135802 | 154 | 0.726634 | false |
ThePavolC/PivotGame | pivot.py | 1 | 8737 | from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty, BooleanProperty, OptionProperty
from kivy.vector import Vector
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.graphics import Color, Ellipse, Rectangle
from math import sin, cos, sqrt
from random import randint
class PivotGame(Widget):
"""Pivot Game"""
# Getting references to widgets from kv file
ball = ObjectProperty(None)
menu = ObjectProperty(None)
score_label = ObjectProperty(None)
score_label2 = ObjectProperty(None)
target = ObjectProperty(None)
# Game states
    state = OptionProperty("started", options=["started", "playing", "killed"])
# Score counter
score = NumericProperty(-1)
    # number of opponents shown in the window; works like a difficulty setting
number_of_opponents = [2,3,5,7,11,13,17,19,23,29,31,37,41,43]
# current level or difficulty
level = 0
# list of opponents, so I can move them or check for collision
opponents = []
def update(self, dt):
"""Run the game
It does:
- checks if ball is touching wall
- checks if ball is touching opponents
- show labels based on game state
- moves the ball
- moves the target
- counts score
- adds up level
"""
if self.ball.is_touching_border():
self.state = "killed"
self.ball.reset_position()
if self.state == "started":
self.menu.canvas.opacity = 1
self.ball.canvas.opacity = 0
self.score_label.canvas.opacity = 0
self.target.canvas.opacity = 0
elif self.state == "playing":
if self.ball.is_touching(self.target):
self.target.move()
self.score += 1
                # increase the game level, capped to the difficulty table
                if self.score % 5 == 0:
                    self.level = min(self.level + 1,
                                     len(self.number_of_opponents) - 1)
self.menu.canvas.opacity = 0
self.ball.canvas.opacity = 1
self.score_label.canvas.opacity = 0
self.target.canvas.opacity = 1
self.ball.move(dt)
self.set_score(self.score)
# moving all opponents and checking if collided with player
for o in self.opponents:
if self.ball.is_touching(o):
self.state = "killed"
o.move()
elif self.state == "killed":
self.menu.canvas.opacity = 1
self.ball.canvas.opacity = 0
self.set_score(self.score)
self.score_label.canvas.opacity = 1
self.target.canvas.opacity = 0
self.target.move()
# removing all opponents
for o in self.opponents:
self.remove_widget(o)
self.opponents = []
def update_opponent(self, dt):
"""Adds opponent to game"""
if self.state == "started":
pass
elif self.state == "playing":
if len(self.opponents) < self.number_of_opponents[self.level]:
p = (-50,randint(0,self.parent.height))
temp_op = PivotOpponent(pos=p, size=(50,50))
self.add_widget(temp_op,9999)
self.opponents.append(temp_op)
elif self.state == "killed":
pass
def set_score(self, num):
"""Set score on label in corner and label at the end"""
self.score_label.text = "Score " + str(num)
self.score_label2.text = "Score " + str(num)
class PivotApp(App):
"""Pivot App"""
def build(self):
"""Create Game
It create game, set scheduled running of update and listen to keyboard.
"""
self.game = PivotGame()
Clock.schedule_interval(self.game.update, 1.0/30.0)
Clock.schedule_interval(self.game.update_opponent, 5.0)
self._keyboard = Window.request_keyboard(self._keyboard_closed, self)
self._keyboard.bind(on_key_down = self._on_keyboard_down)
self._keyboard.bind(on_key_up = self._on_keyboard_up)
return self.game
def _keyboard_closed(self):
"""Not quiet sure..."""
self._keyboard.unbind(on_key_down = self._on_keyboard_down)
self._keyboard = None
def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
"""Start game whenn spacebar pressed"""
if keycode[1] == 'spacebar':
if self.game.state == "started":
self.game.state = "playing"
elif self.game.state == "killed":
self.game.score = 0
self.game.state = "playing"
elif self.game.state == "playing":
self.game.ball.turn()
def _on_keyboard_up(self, *args):
"""No action set on key up"""
pass
class PivotBall(Widget):
"""Player's ball widget"""
angle = NumericProperty(0)
r = NumericProperty(5.5)
was_turn = BooleanProperty(True)
border = NumericProperty(10)
# list of 10 locations is stored and then shades are put to those locations
number_of_storing_locations = 10
previous_locations = []
shades = []
def reset_position(self):
"""Reset ball to center and initial behaviour"""
self.x = self.parent.center_x - self.size[0]
self.y = self.parent.center_y
        self.was_turn = True
self.angle = 0
for shade in self.shades:
self.canvas.remove(shade)
self.shades = []
self.previous_locations = []
def is_touching_border(self):
"""Check if ball is touching border"""
if (self.x < self.border or
self.x + self.size[0] > self.parent.width - self.border):
return True
elif (self.y < self.border or
self.y + self.size[1] > self.parent.height - self.border):
return True
else:
return False
def is_touching(self, other_object):
"""Check if ball and target center are in touching distance"""
dist = sqrt((self.center[0] - other_object.center[0]) ** 2 +
(self.center[1] - other_object.center[1]) ** 2)
touch_dist_x = self.size[0] / 2 + other_object.size[0] / 2
touch_dist_y = self.size[1] / 2 + other_object.size[1] / 2
if (dist < touch_dist_x or dist < touch_dist_y):
return True
def move(self, dt):
"""Move ball in circle"""
if self.was_turn:
self.angle += 0.1
else:
self.angle -= 0.1
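        # Stepping by (sin(angle), cos(angle)) * r each frame traces a
        # circular path; flipping the sign of the angle step (see turn())
        # reverses the direction of rotation.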
self.x = self.x + sin(self.angle) * self.r
self.y = self.y + cos(self.angle) * self.r
self.add_shade()
def add_shade(self):
"""Adding semitransparent shades to previous ball's locations"""
if len(self.previous_locations) > self.number_of_storing_locations:
# this will prevent locations list to go over
self.previous_locations.pop()
for loc_idx, location in enumerate(self.previous_locations):
if len(self.shades) > self.number_of_storing_locations:
# this will remove old shades, those which are at the end
last_shade = self.shades.pop()
self.canvas.remove(last_shade)
with self.canvas:
e_size_x = self.size[0] / (loc_idx+1) + 20
e_size_y = self.size[1] / (loc_idx+1) + 20
Color(1,1,1,loc_idx * 0.1)
e = Ellipse(pos=location,size=(e_size_x, e_size_y))
self.shades.insert(0,e)
self.previous_locations.insert(0, (self.x, self.y))
def turn(self):
"""Make ball to circle in oposite direction"""
self.was_turn = not self.was_turn
class PivotTarget(Widget):
"""Target ball that player is chasing"""
def move(self):
"""Move target ball within the window"""
i = 10
self.x = randint(self.size[0] + i,
self.parent.width - self.size[0] - i)
        self.y = randint(self.size[1] + i,
                         self.parent.height - self.size[1] - i)
class PivotOpponent(Widget):
"""Opponents, boxes, which player should avoid"""
speed = NumericProperty(5)
def move(self):
"""Move opponent from side to side. And then change it's y axis."""
if (self.x - self.size[0] > self.parent.width or
self.x + self.size[0] < 0):
self.x -= self.speed
self.speed *= -1
self.y = randint(0,self.parent.height)
self.x += self.speed
if __name__ == '__main__':
PivotApp().run()
| gpl-2.0 | -7,291,038,411,737,012,000 | 33.128906 | 115 | 0.563008 | false |
WarriorIng64/GxSubOS | label.py | 1 | 2754 | # This file is part of GxSubOS.
# Copyright (C) 2014 Christopher Kyle Horton <[email protected]>
# GxSubOS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# GxSubOS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GxSubOS. If not, see <http://www.gnu.org/licenses/>.
import sys, pygame
from widget import Widget
import glass
label_font = pygame.font.Font("fonts/Roboto/Roboto-Light.ttf", 16)
LEFT, CENTER, RIGHT = range(3)
class Label(Widget):
"""A Widget subclass which represents a label within a window."""
def __init__(self, parent_widget=None, parent_window=None, label_text=None, halign=CENTER):
self.parent_widget = parent_widget
self.parent_window = parent_window
self.rect = None
self.surface = None
self.button_text = ""
self.text_surface = None
self.SetLabelText(label_text)
self.hovered = False
self.requested_width = 0
self.requested_height = 0
self.halign = halign
def SetLabelText(self, label_text):
"""Sets the text displayed on the label."""
self.label_text = label_text
    render_text = self.label_text if self.label_text else " "
self.text_surface = label_font.render(render_text, True, glass.accent_color)
self.RedrawParentWindow()
def GetLabelText(self):
"""Gets the text displayed on the label."""
return self.label_text
def SetHorizontalAlignment(self, halign):
"""Sets the horizontal text alignment to whatever is passed in.
Acceptable values should be one of the following:
label.LEFT
label.CENTER
label.RIGHT
"""
self.halign = halign
def Redraw(self):
"""Redraw this Label."""
padding = 4
    if self.rect is None:
        return
self.surface = glass.MakeTransparentSurface(self.rect.width, self.rect.height)
if self.text_surface is not None:
if self.halign == LEFT:
text_left_align = 0
elif self.halign == RIGHT:
text_left_align = self.surface.get_width() - self.text_surface.get_width()
else:
# Default to centered text
text_left_align = self.surface.get_width() / 2 - self.text_surface.get_width() / 2
text_top_align = self.surface.get_height() / 2 - self.text_surface.get_height() / 2
self.surface.blit(self.text_surface, (text_left_align, text_top_align))
| gpl-3.0 | -6,077,820,292,324,567,000 | 35.72 | 93 | 0.69281 | false |
Benozo/NBI | NBI/stpl.py | 1 | 17031 | '''
This is a port of SimpleTemplate as a standalone template engine.
SimpleTemplate is the very fast template engine used by Bottle.
Now SimpleTemplate can be used in other projects :-)
All credit to the Bottle project's creator (https://github.com/bottlepy/bottle).
'''
import functools
import os, re
from collections import MutableMapping as DictMixin
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
return unicode("" if s is None else s)
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def html_escape(string):
""" Escape HTML special characters ``&<>`` and quotes ``'"``. """
return string.replace('&', '&').replace('<', '<').replace('>', '>')\
.replace('"', '"').replace("'", ''')
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
update_wrapper(self, func)
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
class TemplateError(BottleException):
pass
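class HTTPError(BottleException):
    """ Minimal stand-in for bottle's HTTPError, added so that abort()
    above keeps working in this standalone port (assumption: callers only
    need the status and message). """
    def __init__(self, status=500, body='Unknown Error.'):
        self.status = status
        self.body = body
        super(HTTPError, self).__init__('%s: %s' % (status, body))
def depr(major, minor, cause, fix):
    """ Minimal stand-in for bottle's depr() helper (the original warns or
    raises depending on debug mode); it only builds the exception so the
    ``raise depr(...)`` call sites in this port keep working. """
    return DeprecationWarning(
        'Deprecated since %d.%d: %s %s' % (major, minor, cause, fix))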
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl', 'html', 'thtml', 'stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self,
source=None,
name=None,
lookup=None,
encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
raise depr(0, 12, "Empty template lookup path.", "Configure a template lookup path.")
if os.path.isabs(name):
raise depr(0, 12, "Use of absolute path for template name.",
"Refer to templates with names or paths relative to the lookup path.")
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class SimpleTemplate(BaseTemplate):
def prepare(self,
escape_func=html_escape,
noescape=False,
syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
raise depr(0, 11, 'Unsupported template encodings.', 'Use utf-8 for templates.')
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup, syntax=self.syntax)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({
'_stdout': _stdout,
'_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env),
'_rebase': None,
'_str': self._str,
'_escape': self._escape,
'get': env.get,
'setdefault': env.setdefault,
'defined': env.__contains__
})
exec(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}
stdout = []
for dictarg in args:
env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
class StplSyntaxError(TemplateError):
pass
class StplParser(object):
""" Parser for stpl templates. """
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# We use the verbose (?x) regex mode to make this more manageable
_re_tok = _re_inl = r'''(?mx)( # verbose and dot-matches-newline mode
[urbURB]*
(?: ''(?!')
|""(?!")
|'{6}
|"{6}
|'(?:[^\\']|\\.)+?'
|"(?:[^\\"]|\\.)+?"
|'{3}(?:[^\\]|\\.|\n)+?'{3}
|"{3}(?:[^\\]|\\.|\n)+?"{3}
)
)'''
_re_inl = _re_tok.replace(r'|\n', '') # We re-use this string pattern later
_re_tok += r'''
# 2: Comments (until end of line, but not the newline itself)
|(\#.*)
# 3: Open and close (4) grouping tokens
|([\[\{\(])
|([\]\}\)])
# 5,6: Keywords that start or continue a python block (only start of line)
|^([\ \t]*(?:if|for|while|with|try|def|class)\b)
|^([\ \t]*(?:elif|else|except|finally)\b)
# 7: Our special 'end' keyword (but only if it stands alone)
|((?:^|;)[\ \t]*end[\ \t]*(?=(?:%(block_close)s[\ \t]*)?\r?$|;|\#))
# 8: A customizable end-of-code-block template token (only end of line)
|(%(block_close)s[\ \t]*(?=\r?$))
# 9: And finally, a single newline. The 10th token is 'everything else'
|(\r?\n)
'''
# Match the start tokens of code areas in a template
_re_split = r'''(?m)^[ \t]*(\\?)((%(line_start)s)|(%(block_start)s))'''
# Match inline statements (may contain python strings)
_re_inl = r'''%%(inline_start)s((?:%s|[^'"\n]+?)*?)%%(inline_end)s''' % _re_inl
default_syntax = '{% %} % [{ }]'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
self.paren_depth = 0
def get_syntax(self):
""" Tokens as a space separated string (default: <% %> % {{ }}) """
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if syntax not in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p % pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source, pos=self.offset)
if m:
text = self.source[self.offset:m.start()]
self.text_buffer.append(text)
self.offset = m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(self.source[m.start():m.start(1)] +
m.group(2) + line + sep)
self.offset += len(line + sep)
continue
self.flush_text()
self.offset += self.read_code(self.source[self.offset:],
multiline=bool(m.group(4)))
else:
break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, pysource, multiline):
code_line, comment = '', ''
offset = 0
while True:
m = self.re_tok.search(pysource, pos=offset)
if not m:
code_line += pysource[offset:]
offset = len(pysource)
self.write_code(code_line.strip(), comment)
break
code_line += pysource[offset:m.start()]
offset = m.end()
_str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
if self.paren_depth > 0 and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _po: # open parenthesis
self.paren_depth += 1
code_line += _po
elif _pc: # close parenthesis
if self.paren_depth > 0:
# we could check for matching parentheses here, but it's
# easier to leave that to python - just check counts
self.paren_depth -= 1
code_line += _pc
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
return offset
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n' + ' ' * self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n') + 1
self.write_code(code)
@staticmethod
def process_inline(chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
code = ' ' * (self.indent + self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def tpl(*args, **kwargs):
"""
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
for dictarg in args[1:]:
kwargs.update(dictarg)
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
return TEMPLATES[tplid].render(kwargs)
######## Some Constants ###############
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
unicode = str
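if __name__ == '__main__':
    # Editor's smoke test (not part of the original port): '%' starts a code
    # line and [{ ... }] wraps an auto-escaped expression, per the
    # '{% %} % [{ }]' default syntax configured above.
    demo = "% for item in items:\n[{ item }]\n% end\n"
    print(tpl(demo, items=['a', '<b>', 'c']))  # '<b>' renders as &lt;b&gt;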
| mit | 6,394,658,072,408,750,000 | 37.515081 | 98 | 0.536375 | false |
baloo/shinken | shinken/daemons/skonfdaemon.py | 1 | 32418 | #!/usr/bin/env python
#Copyright (C) 2009-2012 :
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# Try to see if we are on an Android device or not
is_android = True
try:
import android
except ImportError:
is_android = False
import sys
import os
import time
import traceback
import threading
from Queue import Empty
import socket
from shinken.objects import Config
from shinken.external_command import ExternalCommandManager
from shinken.dispatcher import Dispatcher
from shinken.daemon import Daemon, Interface
from shinken.log import logger
from shinken.brok import Brok
from shinken.external_command import ExternalCommand
from shinken.util import safe_print
from shinken.skonfuiworker import SkonfUIWorker
from shinken.message import Message
# Now the bottle HTTP part :)
from shinken.webui.bottle import Bottle, run, static_file, view, route, request, response
# Debug
import shinken.webui.bottle as bottle
bottle.debug(True)
#Import bottle lib to make bottle happy
bottle_dir = os.path.abspath(os.path.dirname(bottle.__file__))
sys.path.insert(0, bottle_dir)
bottle.TEMPLATE_PATH.append(os.path.join(bottle_dir, 'views'))
bottle.TEMPLATE_PATH.append(bottle_dir)
try:
from pymongo.connection import Connection
except ImportError:
Connection = None
# Interface for the other Arbiter
# It connects, and together we decide who's the Master and who's the Slave, etc.
# Here is also a function to get a new conf from the master
class IForArbiter(Interface):
def have_conf(self, magic_hash):
# I've got a conf and a good one
if self.app.cur_conf and self.app.cur_conf.magic_hash == magic_hash:
return True
else: #I've no conf or a bad one
return False
# The master Arbiter is sending us a new conf. Ok, we take it
def put_conf(self, conf):
super(IForArbiter, self).put_conf(conf)
self.app.must_run = False
def get_config(self):
return self.app.conf
# The master arbiter asks me not to run!
def do_not_run(self):
# If i'm the master, then F**K YOU!
if self.app.is_master:
print "Some f***ing idiot asks me not to run. I'm a proud master, so I decide to run anyway"
# Else, I'm just a spare, so I listen to my master
else:
print "Someone asks me not to run"
self.app.last_master_speack = time.time()
self.app.must_run = False
    # Here is a function called by check_shinken to get a daemon's status
def get_satellite_status(self, daemon_type, daemon_name):
daemon_name_attr = daemon_type+"_name"
daemons = self.app.get_daemons(daemon_type)
if daemons:
for dae in daemons:
if hasattr(dae, daemon_name_attr) and getattr(dae, daemon_name_attr) == daemon_name:
if hasattr(dae, 'alive') and hasattr(dae, 'spare'):
return {'alive' : dae.alive, 'spare' : dae.spare}
return None
    # Here is a function called by check_shinken to get the daemons list
def get_satellite_list(self, daemon_type):
satellite_list = []
daemon_name_attr = daemon_type+"_name"
daemons = self.app.get_daemons(daemon_type)
if daemons:
for dae in daemons:
if hasattr(dae, daemon_name_attr):
satellite_list.append(getattr(dae, daemon_name_attr))
else:
#If one daemon has no name... ouch!
return None
return satellite_list
return None
    # Dummy call. We are a master, we manage what we want
def what_i_managed(self):
return []
def get_all_states(self):
res = {'arbiter' : self.app.conf.arbiterlinks,
'scheduler' : self.app.conf.schedulerlinks,
'poller' : self.app.conf.pollers,
'reactionner' : self.app.conf.reactionners,
'receiver' : self.app.conf.receivers,
'broker' : self.app.conf.brokers}
return res
# Main Skonf Class
class Skonf(Daemon):
def __init__(self, config_files, is_daemon, do_replace, verify_only, debug, debug_file):
super(Skonf, self).__init__('skonf', config_files[0], is_daemon, do_replace, debug, debug_file)
self.config_files = config_files
self.verify_only = verify_only
self.broks = {}
self.is_master = False
self.me = None
self.nb_broks_send = 0
# Now tab for external_commands
self.external_commands = []
self.fifo = None
        # Used to know if we must still be alive or not
self.must_run = True
self.interface = IForArbiter(self)
self.conf = Config()
self.workers = {} # dict of active workers
self.host_templates = None
    # Used for adding things like broks
def add(self, b):
if isinstance(b, Brok):
self.broks[b.id] = b
elif isinstance(b, ExternalCommand):
self.external_commands.append(b)
else:
logger.log('Warning : cannot manage object type %s (%s)' % (type(b), b))
def load_config_file(self):
print "Loading configuration"
# REF: doc/shinken-conf-dispatching.png (1)
buf = self.conf.read_config(self.config_files)
raw_objects = self.conf.read_config_buf(buf)
print "Opening local log file"
# First we need to get arbiters and modules first
# so we can ask them some objects too
self.conf.create_objects_for_type(raw_objects, 'arbiter')
self.conf.create_objects_for_type(raw_objects, 'module')
self.conf.early_arbiter_linking()
        # Search which Arbiterlink I am
for arb in self.conf.arbiterlinks:
if arb.is_me():
arb.need_conf = False
self.me = arb
self.is_master = not self.me.spare
if self.is_master:
logger.log("I am the master Arbiter : %s" % arb.get_name())
else:
logger.log("I am the spare Arbiter : %s" % arb.get_name())
# Set myself as alive ;)
self.me.alive = True
else: #not me
arb.need_conf = True
if not self.me:
sys.exit("Error: I cannot find my own Arbiter object, I bail out. \
To solve it, please change the host_name parameter in \
the object Arbiter in the file shinken-specific.cfg. \
With the value %s \
Thanks." % socket.gethostname())
logger.log("My own modules : " + ','.join([m.get_name() for m in self.me.modules]))
        # we request the instances without them being *started*
        # (for those that are concerned, i.e. the "external" modules):
        # we will *start* these instances after we have been daemonized (if requested)
self.modules_manager.set_modules(self.me.modules)
self.do_load_modules()
# Call modules that manage this read configuration pass
self.hook_point('read_configuration')
# Now we ask for configuration modules if they
# got items for us
for inst in self.modules_manager.instances:
if 'configuration' in inst.phases:
                try:
                    r = inst.get_objects()
                except Exception, exp:
                    print "The instance %s raised an exception %s. I bypass it" % (inst.get_name(), str(exp))
continue
types_creations = self.conf.types_creations
for k in types_creations:
(cls, clss, prop) = types_creations[k]
if prop in r:
for x in r[prop]:
# test if raw_objects[k] is already set - if not, add empty array
if not k in raw_objects:
raw_objects[k] = []
# now append the object
raw_objects[k].append(x)
print "Added %i objects to %s from module %s" % (len(r[prop]), k, inst.get_name())
### Resume standard operations ###
self.conf.create_objects(raw_objects)
# Maybe conf is already invalid
if not self.conf.conf_is_correct:
sys.exit("***> One or more problems was encountered while processing the config files...")
# Change Nagios2 names to Nagios3 ones
self.conf.old_properties_names_to_new()
# Manage all post-conf modules
self.hook_point('early_configuration')
# Create Template links
self.conf.linkify_templates()
# All inheritances
self.conf.apply_inheritance()
# Explode between types
self.conf.explode()
# Create Name reversed list for searching list
self.conf.create_reversed_list()
# Cleaning Twins objects
self.conf.remove_twins()
# Implicit inheritance for services
self.conf.apply_implicit_inheritance()
# Fill default values
self.conf.fill_default()
# Remove templates from config
# SAVE TEMPLATES
self.host_templates = self.conf.hosts.templates
# Then clean for other parts
self.conf.remove_templates()
# We removed templates, and so we must recompute the
# search lists
self.conf.create_reversed_list()
# Pythonize values
self.conf.pythonize()
# Linkify objects each others
self.conf.linkify()
# applying dependencies
self.conf.apply_dependencies()
# Hacking some global parameter inherited from Nagios to create
# on the fly some Broker modules like for status.dat parameters
# or nagios.log one if there are no already available
self.conf.hack_old_nagios_parameters()
        # Raise warnings about currently unmanaged parameters
if self.verify_only:
self.conf.warn_about_unmanaged_parameters()
        # Explode global conf parameters into Classes
self.conf.explode_global_conf()
        # set our own timezone and propagate it to other satellites
self.conf.propagate_timezone_option()
# Look for business rules, and create the dep tree
self.conf.create_business_rules()
# And link them
self.conf.create_business_rules_dependencies()
# Warn about useless parameters in Shinken
if self.verify_only:
self.conf.notice_about_useless_parameters()
# Manage all post-conf modules
self.hook_point('late_configuration')
# Correct conf?
self.conf.is_correct()
#If the conf is not correct, we must get out now
#if not self.conf.conf_is_correct:
# sys.exit("Configuration is incorrect, sorry, I bail out")
# REF: doc/shinken-conf-dispatching.png (2)
logger.log("Cutting the hosts and services into parts")
self.confs = self.conf.cut_into_parts()
# The conf can be incorrect here if the cut into parts see errors like
# a realm with hosts and not schedulers for it
if not self.conf.conf_is_correct:
self.conf.show_errors()
sys.exit("Configuration is incorrect, sorry, I bail out")
logger.log('Things look okay - No serious problems were detected during the pre-flight check')
        # Now clean objects of temporary/unnecessary attributes for live work:
self.conf.clean()
# Exit if we are just here for config checking
if self.verify_only:
sys.exit(0)
        # Some properties need to be "flattened" (put in strings)
        # before being sent, like realms for hosts for example
        # BEWARE: after the cutting part, because we stringify some properties
self.conf.prepare_for_sending()
# Ok, here we must check if we go on or not.
# TODO : check OK or not
self.use_local_log = self.conf.use_local_log
self.local_log = self.conf.local_log
self.pidfile = os.path.abspath(self.conf.lock_file)
self.idontcareaboutsecurity = self.conf.idontcareaboutsecurity
self.user = self.conf.shinken_user
self.group = self.conf.shinken_group
# If the user set a workdir, let use it. If not, use the
# pidfile directory
if self.conf.workdir == '':
self.workdir = os.path.abspath(os.path.dirname(self.pidfile))
else:
self.workdir = self.conf.workdir
#print "DBG curpath=", os.getcwd()
#print "DBG pidfile=", self.pidfile
#print "DBG workdir=", self.workdir
## We need to set self.host & self.port to be used by do_daemon_init_and_start
self.host = self.me.address
self.port = 8766#self.me.port
logger.log("Configuration Loaded")
print ""
def load_web_configuration(self):
self.plugins = []
self.http_port = 7766#int(getattr(modconf, 'port', '7767'))
self.http_host = '0.0.0.0'#getattr(modconf, 'host', '0.0.0.0')
self.auth_secret = 'CHANGE_ME'.encode('utf8', 'replace')#getattr(modconf, 'auth_secret').encode('utf8', 'replace')
self.http_backend = 'auto'#getattr(modconf, 'http_backend', 'auto')
self.login_text = None#getattr(modconf, 'login_text', None)
self.allow_html_output = False#to_bool(getattr(modconf, 'allow_html_output', '0'))
self.remote_user_enable = '0'#getattr(modconf, 'remote_user_enable', '0')
self.remote_user_variable = 'X_REMOTE_USER'#getattr(modconf, 'remote_user_variable', 'X_REMOTE_USER')
# Load the photo dir and make it a absolute path
self.photo_dir = 'photos'#getattr(modconf, 'photo_dir', 'photos')
self.photo_dir = os.path.abspath(self.photo_dir)
print "Webui : using the backend", self.http_backend
# We check if the photo directory exists. If not, try to create it
def check_photo_dir(self):
print "Checking photo path", self.photo_dir
if not os.path.exists(self.photo_dir):
print "Truing to create photo dir", self.photo_dir
try:
os.mkdir(self.photo_dir)
except Exception, exp:
print "Photo dir creation failed", exp
# Main loop function
def main(self):
try:
# Log will be broks
for line in self.get_header():
self.log.log(line)
self.load_config_file()
self.load_web_configuration()
self.do_daemon_init_and_start()
self.uri_arb = self.pyro_daemon.register(self.interface, "ForArbiter")
            # Under Android, we do not have the multiprocessing lib,
            # so we use the standard Queue and threads instead;
            # in multiprocessing mode we also use a Queue(), it's just
            # not the same kind
if is_android:
self.returns_queue = Queue()
else:
self.returns_queue = self.manager.Queue()
# create the input queue of all workers
try:
if is_android:
self.workers_queue = Queue()
else:
self.workers_queue = self.manager.Queue()
            # If we have no /dev/shm on Linux, we can get a problem here.
            # Must raise with a good message
except OSError, exp:
# We look for the "Function not implemented" under Linux
if exp.errno == 38 and os.name == 'posix':
logger.log("ERROR : get an exception (%s). If you are under Linux, please check that your /dev/shm directory exists." % (str(exp)))
raise
# For multiprocess things, we should not have
# sockettimeouts. will be set explicitly in Pyro calls
import socket
socket.setdefaulttimeout(None)
# ok we are now fully daemon (if requested)
# now we can start our "external" modules (if any) :
self.modules_manager.start_external_instances()
# Ok now we can load the retention data
self.hook_point('load_retention')
## And go for the main loop
self.do_mainloop()
except SystemExit, exp:
# With a 2.4 interpreter the sys.exit() in load_config_file
# ends up here and must be handled.
sys.exit(exp.code)
except Exception, exp:
logger.log("CRITICAL ERROR: I got an unrecoverable error. I have to exit")
logger.log("You can log a bug ticket at https://github.com/naparuba/shinken/issues/new to get help")
logger.log("Back trace of it: %s" % (traceback.format_exc()))
raise
def setup_new_conf(self):
""" Setup a new conf received from a Master arbiter. """
conf = self.new_conf
self.new_conf = None
self.cur_conf = conf
self.conf = conf
for arb in self.conf.arbiterlinks:
if (arb.address, arb.port) == (self.host, self.port):
self.me = arb
arb.is_me = lambda: True # we now definitively know who we are, just keep it.
else:
arb.is_me = lambda: False # and we know who we are not, just keep it.
def do_loop_turn(self):
        # If I am a spare, I wait for the master arbiter to send me the
        # true conf. When it arrives, I set it up and wait for the master to die.
if self.me.spare:
self.wait_for_initial_conf()
if not self.new_conf:
return
self.setup_new_conf()
print "I must wait now"
self.wait_for_master_death()
if self.must_run:
# Main loop
self.run()
# Get 'objects' from external modules
# It can be used for get external commands for example
def get_objects_from_from_queues(self):
for f in self.modules_manager.get_external_from_queues():
#print "Groking from module instance %s" % f
while True:
try:
o = f.get(block=False)
self.add(o)
except Empty:
break
# Maybe the queue got problem
# log it and quit it
except (IOError, EOFError), exp:
logger.log("Warning : an external module queue got a problem '%s'" % str(exp))
break
# We wait (block) for arbiter to send us something
def wait_for_master_death(self):
logger.log("Waiting for master death")
timeout = 1.0
        self.last_master_speak = time.time()
# Look for the master timeout
master_timeout = 300
for arb in self.conf.arbiterlinks:
if not arb.spare:
master_timeout = arb.check_interval * arb.max_check_attempts
logger.log("I'll wait master for %d seconds" % master_timeout)
while not self.interrupted:
elapsed, _, tcdiff = self.handleRequests(timeout)
# if there was a system Time Change (tcdiff) then we have to adapt last_master_speak:
if self.new_conf:
self.setup_new_conf()
if tcdiff:
                self.last_master_speak += tcdiff
if elapsed:
                self.last_master_speak = time.time()
timeout -= elapsed
if timeout > 0:
continue
timeout = 1.0
sys.stdout.write(".")
sys.stdout.flush()
# Now check if master is dead or not
now = time.time()
            if now - self.last_master_speak > master_timeout:
logger.log("Master is dead!!!")
self.must_run = True
break
# Take all external commands, make packs and send them to
# the schedulers
def push_external_commands_to_schedulers(self):
        # Now get all external commands and put them into
        # the right schedulers
for ext_cmd in self.external_commands:
self.external_command.resolve_command(ext_cmd)
# Now for all alive schedulers, send the commands
for sched in self.conf.schedulerlinks:
cmds = sched.external_commands
if len(cmds) > 0 and sched.alive:
safe_print("Sending %d commands" % len(cmds), 'to scheduler', sched.get_name())
sched.run_external_commands(cmds)
# clean them
sched.external_commands = []
# Main function
def run(self):
        # Before running, I must be sure who I am
        # The arbiters change, so we must find the new self.me
for arb in self.conf.arbiterlinks:
if arb.is_me():
self.me = arb
if self.conf.human_timestamp_log:
logger.set_human_format()
# Ok start to work :)
self.check_photo_dir()
self.request = request
self.response = response
self.load_plugins()
# Declare the whole app static files AFTER the plugin ones
self.declare_common_static()
# Start sub workers
for i in xrange(1, 3):
self.create_and_launch_worker()
self.init_db()
# Launch the data thread"
self.workersmanager_thread = threading.Thread(None, self.workersmanager, 'httpthread')
self.workersmanager_thread.start()
# TODO : look for alive and killing
print "Starting SkonfUI app"
srv = run(host=self.http_host, port=self.http_port, server=self.http_backend)
def workersmanager(self):
while True:
print "Workers manager thread"
time.sleep(1)
# Here we will load all plugins (pages) under the webui/plugins
# directory. Each one can have a page, views and htdocs dir that we must
# route correctly
def load_plugins(self):
from shinken.webui import plugins_skonf as plugins
plugin_dir = os.path.abspath(os.path.dirname(plugins.__file__))
print "Loading plugin directory : %s" % plugin_dir
# Load plugin directories
plugin_dirs = [ fname for fname in os.listdir(plugin_dir)
if os.path.isdir(os.path.join(plugin_dir, fname)) ]
print "Plugin dirs", plugin_dirs
sys.path.append(plugin_dir)
        # We try to import them, but we keep only the ones of
        # our type
for fdir in plugin_dirs:
print "Try to load", fdir
mod_path = 'shinken.webui.plugins_skonf.%s.%s' % (fdir, fdir)
print "MOD PATH", mod_path
try:
m = __import__(mod_path, fromlist=[mod_path])
m_dir = os.path.abspath(os.path.dirname(m.__file__))
sys.path.append(m_dir)
#print "Loaded module m", m
print m.__file__
pages = m.pages
print "Try to load pages", pages
for (f, entry) in pages.items():
routes = entry.get('routes', None)
v = entry.get('view', None)
static = entry.get('static', False)
# IMPORTANT : apply VIEW BEFORE route!
if v:
#print "Link function", f, "and view", v
f = view(v)(f)
# Maybe there is no route to link, so pass
if routes:
for r in routes:
method = entry.get('method', 'GET')
print "link function", f, "and route", r, "method", method
                            # Ok, we will just use the lock for all
                            # plugin pages, but not for static objects,
                            # so we set the lock at the function level.
lock_version = self.lockable_function(f)
f = route(r, callback=lock_version, method=method)
                    # If the plugin declares a static entry, register it
                    # and remember: really static! because there is no lock
                    # for them!
if static:
self.add_static(fdir, m_dir)
# And we add the views dir of this plugin in our TEMPLATE
# PATH
bottle.TEMPLATE_PATH.append(os.path.join(m_dir, 'views'))
# And finally register me so the pages can get data and other
# useful stuff
m.app = self
except Exception, exp:
logger.log("Warning in loading plugins : %s" % exp)
def add_static(self, fdir, m_dir):
static_route = '/static/'+fdir+'/:path#.+#'
#print "Declaring static route", static_route
def plugin_static(path):
print "Ask %s and give %s" % (path, os.path.join(m_dir, 'htdocs'))
return static_file(path, root=os.path.join(m_dir, 'htdocs'))
route(static_route, callback=plugin_static)
    # We want a lock-managed version of the plugin functions
def lockable_function(self, f):
#print "We create a lock verion of", f
def lock_version(**args):
#self.wait_for_no_writers()
t = time.time()
try:
return f(**args)
finally:
print "rendered in", time.time() - t
                # We can remove ourselves as a reader from now on. It's NOT an atomic
                # operation, so we REALLY do need a lock here (yes, I tried without and
                # I got a not so accurate value there....)
#self.global_lock.acquire()
#self.nb_readers -= 1
#self.global_lock.release()
#print "The lock version is", lock_version
return lock_version
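    # A minimal sketch of how the wrapper above is meant to be used
    # (page_func is an illustrative name, not an attribute of this class):
    #   wrapped = self.lockable_function(page_func)
    #   wrapped(arg=1)   # runs page_func(arg=1) and prints "rendered in <t>"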
def declare_common_static(self):
@route('/static/photos/:path#.+#')
def give_photo(path):
# If the file really exist, give it. If not, give a dummy image.
if os.path.exists(os.path.join(self.photo_dir, path+'.jpg')):
return static_file(path+'.jpg', root=self.photo_dir)
else:
return static_file('images/user.png', root=os.path.join(bottle_dir, 'htdocs'))
# Route static files css files
@route('/static/:path#.+#')
def server_static(path):
return static_file(path, root=os.path.join(bottle_dir, 'htdocs'))
# And add the favicon ico too
@route('/favicon.ico')
def give_favicon():
return static_file('favicon.ico', root=os.path.join(bottle_dir, 'htdocs', 'images'))
def old_run(self):
suppl_socks = None
# Now create the external commander. It's just here to dispatch
# the commands to schedulers
e = ExternalCommandManager(self.conf, 'dispatcher')
e.load_arbiter(self)
self.external_command = e
print "Run baby, run..."
timeout = 1.0
while self.must_run and not self.interrupted:
elapsed, ins, _ = self.handleRequests(timeout, suppl_socks)
# If FIFO, read external command
if ins:
now = time.time()
ext_cmds = self.external_command.get()
if ext_cmds:
for ext_cmd in ext_cmds:
self.external_commands.append(ext_cmd)
else:
self.fifo = self.external_command.open()
if self.fifo is not None:
suppl_socks = [ self.fifo ]
else:
suppl_socks = None
elapsed += time.time() - now
if elapsed or ins:
timeout -= elapsed
if timeout > 0: # only continue if we are not over timeout
continue
# Timeout
timeout = 1.0 # reset the timeout value
            # Try to see if one of my modules is dead, and
            # try to restart previously dead modules :)
self.check_and_del_zombie_modules()
# Call modules that manage a starting tick pass
self.hook_point('tick')
print "Tick"
            # If asked to dump my memory, I do it
if self.need_dump_memory:
self.dump_memory()
self.need_dump_memory = False
def get_daemons(self, daemon_type):
""" Returns the daemons list defined in our conf for the given type """
# We get the list of the daemons from their links
# 'schedulerlinks' for schedulers, 'arbiterlinks' for arbiters
# and 'pollers', 'brokers', 'reactionners' for the others
if (daemon_type == 'scheduler' or daemon_type == 'arbiter'):
daemon_links = daemon_type+'links'
else:
daemon_links = daemon_type+'s'
        # shouldn't the 'daemon_links' (whatever it is above) always be present ?
return getattr(self.conf, daemon_links, None)
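    # In short, the lookup above resolves as:
    #   get_daemons('scheduler') -> self.conf.schedulerlinks
    #   get_daemons('arbiter')   -> self.conf.arbiterlinks
    #   get_daemons('poller')    -> self.conf.pollers
    # (and likewise 'broker' -> brokers, 'reactionner' -> reactionners)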
# Helper functions for retention modules
# So we give our broks and external commands
def get_retention_data(self):
r = {}
r['broks'] = self.broks
r['external_commands'] = self.external_commands
return r
# Get back our data from a retention module
def restore_retention_data(self, data):
broks = data['broks']
external_commands = data['external_commands']
self.broks.update(broks)
self.external_commands.extend(external_commands)
def get_user_auth(self):
        # First we look for the user sid,
        # so we can bail out if it's a false one
user_name = self.request.get_cookie("user", secret=self.auth_secret)
# If we cannot check the cookie, bailout
if not user_name:
return None
#c = self.datamgr.get_contact(user_name)
return user_name
# Create and launch a new worker, and put it into self.workers
def create_and_launch_worker(self):
w = SkonfUIWorker(1, self.workers_queue, self.returns_queue, 1, mortal=False, max_plugins_output_length = 1, target=None)
w.module_name = 'skonfuiworker'
w.add_database_data('localhost')
# save this worker
self.workers[w.id] = w
logger.log("[%s] Allocating new %s Worker : %s" % (self.name, w.module_name, w.id))
# Ok, all is good. Start it!
w.start()
# TODO : fix hard coded server/database
def init_db(self):
if not Connection:
logger.log('ERROR : you need the pymongo lib for running skonfui. Please install it')
sys.exit(2)
con = Connection('localhost')
self.db = con.shinken
# TODO : code this!
def check_auth(self, login, password):
return True
    # We are asking a worker .. to work :)
def ask_new_scan(self, id):
msg = Message(id=0, type='ScanAsk', data={'scan_id' : id})
print "Creating a Message for ScanAsk", msg
self.workers_queue.put(msg)
| agpl-3.0 | 4,848,755,895,858,417,000 | 34.860619 | 149 | 0.571041 | false |
geobricks/geobricks_dbms | geobricks_dbms/core/dbms_postgresql.py | 1 | 4855 | import simplejson
import psycopg2
from types import DictType
class DBMSPostgreSQL:
# User's parameters
db_name = None
username = None
password = None
host = None
port = None
schema = None
# Computed variables.
connection = None
def __init__(self, db_name, username, password, host='localhost', port=5432, schema="public"):
# Store user parameters.
self.db_name = db_name
self.username = username
self.password = password
self.host = host
self.port = port
self.schema = schema
# Connect to the DB
self.connect()
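    # NOTE: the second __init__ below replaces the positional one above at
    # class-creation time, so only the dict-based constructor is reachable.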
def __init__(self, db_settings):
# Store user parameters.
self.db_name = db_settings["dbname"]
self.username = db_settings["username"]
self.password = db_settings["password"]
self.host = "localhost" if "host" not in db_settings else db_settings["host"]
self.port = 5432 if "port" not in db_settings else db_settings["port"]
self.schema = "public" if "schema" not in db_settings else db_settings["schema"]
# Connect to the DB
self.connect()
def connect(self):
try:
self.connection = psycopg2.connect(self.get_connection_string())
self.connection.autocommit = True
# set search_path to a specific schema
if self.schema is not "public" and self.schema is not None:
search_path = "SET search_path TO %s, public" % self.schema
self.connection.cursor().execute(search_path)
self.connection.commit()
except Exception, e:
raise Exception('Unable to connect to the DB. ' + str(e))
def query(self, sql, output_json=False):
if self.check_query(sql):
cur = self.connection.cursor()
cur.execute(sql)
rows = cur.fetchall()
if output_json:
return simplejson.dumps(rows)
return rows
else:
raise Exception("Query not allowed: " + sql)
def query_extented(self, select, table, where, output_json=False):
sql = "SELECT " + select + " FROM " + table
if where is not None:
sql += " WHERE " + where
if self.check_query(sql):
cur = self.connection.cursor()
cur.execute(sql)
rows = cur.fetchall()
if output_json:
return simplejson.dumps(rows)
return rows
else:
raise Exception("Query not allowed: " + sql)
def select_all(self, table_name, output_json=False):
cur = self.connection.cursor()
cur.execute('SELECT * FROM ' + table_name)
rows = cur.fetchall()
if output_json:
return simplejson.dumps(rows)
return rows
def select_by_id(self, table_name, item_id, output_json=False):
cur = self.connection.cursor()
cur.execute("SELECT * FROM " + table_name + " WHERE id = '" + item_id + "' ")
rows = cur.fetchall()
if output_json:
return simplejson.dumps(rows)
return rows
def select_by_field(self, table_name, field_name, field_value, output_json=False):
cur = self.connection.cursor()
cur.execute("SELECT * FROM " + table_name + " WHERE " + field_name + " = '" + field_value + "' ")
rows = cur.fetchall()
        if output_json:
return simplejson.dumps(rows)
return rows
def insert(self, table_name, item):
sql = ''
if type(item) is DictType:
sql += "INSERT INTO " + table_name + " ("
for key in item:
sql += key + ','
sql = sql[0:len(sql) - 1]
sql += ") VALUES ("
for key in item:
sql += "'" + item[key] + "',"
sql = sql[0:len(sql) - 1]
sql += ")"
else:
sql = item
cur = self.connection.cursor()
return cur.execute(sql)
def get_connection_string(self, add_pg=False):
db_connection_string = ""
if add_pg is True:
db_connection_string += "PG:"
db_connection_string += "schemas='public,%s' " % self.schema
db_connection_string += "host='%s' port=%s dbname='%s' user='%s' password='%s'" % (self.host, self.port, self.db_name, self.username, self.password)
return db_connection_string
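    # Example of the string built above (hypothetical credentials): with
    # db_name='gis', username='u', password='p', host='localhost', port=5432
    # and schema='data', get_connection_string(add_pg=True) returns:
    #   PG:schemas='public,data' host='localhost' port=5432 dbname='gis' user='u' password='p'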
# blacklist methods not allowed
def check_query(self, query):
q = query.lower()
if "insert" in q or "update" in q or "delete" in q:
return False
return True
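    # e.g. check_query("SELECT * FROM t")       -> True
    #      check_query("DELETE FROM t WHERE x") -> False
    # Note this is plain substring matching, so a query mentioning a column
    # such as "last_update" would be rejected as well.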
def close_connection(self):
if self.connection is not None:
self.connection.close()
def __del__(self):
self.close_connection()
def __exit__(self):
self.close_connection() | gpl-2.0 | -5,700,996,830,208,089,000 | 32.034014 | 156 | 0.554274 | false |
buguen/pylayers | pylayers/simul/exploit.py | 1 | 9905 | from pylayers.util import project
from pylayers.signal.bsignal import *
import pylayers.util.pyutil as pyu
import ConfigParser
import matplotlib.pylab as plt
import itertools
r"""
.. currentmodule:: pylayers.simul.exploit
This module provides tools to run and exploit simulnet ray-tracing simulations.
Class Exploit
=============
.. autosummary::
:toctree: generated/
Exploit.__init__
Exploit.load
Exploit.compute
Exploit.pltcir
Exploit.pltciri
"""
class Exploit(object):
""" class Exploit
Methods
-------
load
"""
def __init__(self,simnetfile='simulnet.ini'):
self.simnetfile=simnetfile
self.load()
def load(self):
"""
Load simulnet_data configuration file
"""
self.simcfg = ConfigParser.ConfigParser()
self.simcfg.read(pyu.getlong(self.simnetfile,pstruc['DIRNETSAVE']))
self.Lfilename = self.simcfg.get('layout','layoutname')
self.lAG = eval(self.simcfg.get('nodes','ag'))
self.lAP = eval(self.simcfg.get('nodes','ap'))
self.uptime = eval(self.simcfg.get('simulation','updatetime'))
# create a Simul object with the correct layout
self.S = Simul()
self.S.layout(self.Lfilename,'matDB.ini','slabDB.ini')
self.lap = len(self.lAP)
self.lag = len(self.lAG)
self.L = self.S.L
try:
self.L.dumpr()
            print 'Layout graphs are loaded from the project struc directory'
except:
#self.L.sl = sl
#self.L.loadGr(G1)
            print 'First time you use the Layout. Graphs are currently being built; it may take a few minutes.'
self.L.buildGt()
self.L.dumpw()
def compute(self):
"""
Compute Raytracing simulation with the given simulation files
"""
#
### STEP 1 : all mobile node with all agent
#
self.S.clean_project(verbose=True)
for apidx,ap in enumerate(self.lAP):
self.S.tx = RadioNode(typ='tx',name=ap)
self.S.tx.loadini(ap+'.ini',rep=pstruc['DIRNETSAVE'])
for agidx,ag in enumerate(self.lAG):
print '---------------------'
print ' Raytracing for : '
                print ' AP #', ap, ' / AG #', ag
print '---------------------'
print ' Computed :'
                print 'AP:', apidx + 1, '/', self.lap
                print 'AG:', agidx + 1, '/', self.lag
print '---------------------'
self.S.rx = RadioNode(typ='rx',name=ag)
self.S.rx.loadini(ag+'.ini',rep=pstruc['DIRNETSAVE'])
self.S.run(apidx+1,range(1,self.S.rx.N+1))
#### STEP 2 : all mobile/mobile
icag = itertools.combinations(self.lAG,2)
for cag in icag:
self.S.tx = RadioNode(typ='tx',name=cag[0])
self.S.tx.loadini(cag[0]+'.ini',rep=pstruc['DIRNETSAVE'])
self.S.rx = RadioNode(typ='tx',name=cag[1])
self.S.rx.loadini(cag[1]+'.ini',rep=pstruc['DIRNETSAVE'])
lidxpts = range(1,self.S.rx.N+1)
print '---------------------'
print ' Raytracing for : '
print ' AG #', cag[0] ,' / AG #',cag[1]
print '---------------------'
for n in lidxpts:
print ' trajectory point #',n,'/',self.S.rx.N+1
print '---------------------'
self.S.run(n,n)
def pltcir(self,itx,irx,pn,fig=[]):
""" plot channel impulse response for Tx,Rx and a specified position
Parameters
----------
itx : int
node number
irx : int
node number
pn : int
position index of the Rx
"""
if fig == []:
fig = plt.figure(2)
fig.clf()
cir = TUsignal()
spn = str(pn)
line = 'defaultcir-' +str(itx) +'-'+str(irx)+'-p'+spn.zfill(3)
try:
cir.readcir(line,str(itx))
except:
return False
print 'load : ',line,'.mat'
cir.show(fig)
return True
def pltciri(self,itx,irx):
"""
        plot channel impulse response interactively.
        Display all node positions of Tx and Rx and choose
        for which link the CIR is displayed.
Once self.pltciri(node1,node2) is called :
1) Press 't' on the displayed graph to chose the Tx
2) Press 'x' on the displayed graph to chose the Rx
3) Press Enter to display CIR betwen Tx-Rx
Parameters
----------
itx : int
node number
irx : int
node number
usage
        >>> E = Exploit()
        >>> E.pltciri(6,1)
"""
plt.ion()
fig1 = plt.figure(1)
ax=fig1.add_subplot(111)
self.ax2 = fig1.add_subplot(111)
self.L.showG(fig=fig1,graph='')
self.S.tx = RadioNode(typ='tx',name=itx)
self.S.tx.loadini(str(itx)+'.ini',rep=pstruc['DIRNETSAVE'])
self.S.rx = RadioNode(typ='rx',name=irx)
self.S.rx.loadini(str(irx)+'.ini',rep=pstruc['DIRNETSAVE'])
ax.plot(self.S.tx.position[0,:],self.S.tx.position[1,:],'ob')
ax.plot(self.S.rx.position[0,:],self.S.rx.position[1,:],'or')
plt.show()
print '1. Press \'t\' and click to select a Tx '
print '2. Press \'x\' and click to select a Rx '
print '3. Press Enter to see the associated CIR '
self.key=''
self.x1=''
self.x2=''
self.y1=''
self.y2=''
self.n1=''
self.n2=''
self.pos1=''
self.pos2=''
self.c1=[]
self.c2=[]
cid=fig1.canvas.mpl_connect('button_press_event', self.onclick)
cid=fig1.canvas.mpl_connect('key_press_event', self.on_key)
def onclick(self,event):
"""
Events on click
"""
if event.button == 1:
print self.key
if self.key =='t':
self.x1 = event.xdata
self.y1 = event.ydata
self.n1,self.pos1=self.srchpoint(self.x1,self.y1)
print 'select node1(Tx) # ',self.n1,
if self.key =='x':
self.x2 = event.xdata
self.y2 = event.ydata
self.n2,self.pos2=self.srchpoint(self.x2,self.y2)
print 'select node2(Rx) # ',self.n2, ', at position #', self.pos2+1
if self.key == 'enter':
pass
def on_key(self,event):
"""
Events on key stroke
"""
if event.key == 't':
self.key = 't'
if event.key == 'x':
self.key = 'x'
if event.key == 'enter':
inv=False
case = ''
if self.pos1 !='' and self.pos2 !='':
if self.n1 == self.n2:
print 'ERROR :tx and rx on the same node'
else :
if str(self.n1) in self.lAG and str(self.n2) in self.lAG:
upos = self.pos2[0]+1
case = '2agents'
elif str(self.n2) in self.lAP:
upos = self.pos1[0]+1
else :
upos = self.pos2[0]+1
if self.pltcir(self.n1,self.n2,upos):
pass
else:
self.pltcir(self.n2,self.n1,upos)
inv=True
print inv
### manage black cross for involved nodes
try:
self.c1.pop(0).remove()
self.c2.pop(0).remove()
except:
pass
if case == '2agents':
### manage 2 mobile nodes
self.c1=self.ax2.plot(self.S.tx.position[0,upos-1],self.S.tx.position[1,upos-1],'xk',ms=10.,mew=3.)
self.c2=self.ax2.plot(self.S.rx.position[0,upos-1],self.S.rx.position[1,upos-1],'xk',ms=10.,mew=3.)
else:
if self.S.tx.N == 1:
self.c1=self.ax2.plot(self.S.tx.position[0,0],self.S.tx.position[1,0],'xk',ms=10.,mew=3.)
self.c2=self.ax2.plot(self.S.rx.position[0,upos-1],self.S.rx.position[1,upos-1],'xk',ms=10.,mew=3.)
else :
self.c1=self.ax2.plot(self.S.tx.position[0,upos-1],self.S.tx.position[1,upos-1],'xk',ms=10.,mew=3.)
self.c2=self.ax2.plot(self.S.rx.position[0,0],self.S.rx.position[1,0],'xk',ms=10.,mew=3.)
# ### manage 2 mobile nodes to take position of the 2nd click
# if str(self.S.tx.name) in self.lAG and str(self.S.rx.name) in self.lAG :
# self.pos1 = self.pos2
#
def srchpoint(self,x,y):
"""
        Search for the point closest to (x, y) in self.tx.position and self.rx.position
Returns
-------
N : int
node ID
pos : int
index of the closest position
"""
t = self.S.tx.position[:2,:]
r = self.S.rx.position[:2,:]
p = np.array((x,y))
d1 = np.sqrt(np.sum((t.T-p)**2,axis=1))
d2 = np.sqrt(np.sum((r.T-p)**2,axis=1))
d1m=np.min(d1)
d2m=np.min(d2)
if d1m < d2m:
n = self.S.tx.name
pos = np.nonzero(d1==d1m)[0]
else :
n = self.S.rx.name
pos = np.nonzero(d2==d2m)[0]
return (n,pos)
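    # A self-contained sketch of the nearest-point computation above
    # (array values are illustrative only):
    #   t = np.array([[0., 5.], [0., 0.]])        # candidate (x; y) columns
    #   p = np.array((4., 1.))
    #   d = np.sqrt(np.sum((t.T - p)**2, axis=1)) # -> [4.123..., 1.414...]
    #   np.nonzero(d == d.min())[0]               # -> array([1]): column 1 wins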
#cid = fig.canvas.mpl_connect('button_press_event', onclick)
if (__name__ == "__main__"):
E=Exploit()
E.compute()
E.pltciri(1,2)
| lgpl-3.0 | 3,811,786,968,643,733,500 | 30.848875 | 127 | 0.475114 | false |
wjkoh/cs262a | utils/slowData.py | 1 | 1686 | #!/usr/bin/python
import csv
import calendar
import datetime
import glob
import os
import sys
dirName = sys.argv[1]
multiplier = float(sys.argv[2])
fnames = []
dates = []
rows = []
headers = [];
for index,fname in enumerate(glob.glob(dirName + "/*.csv")):
fnames.append(fname)
rows.append([])
dates.append([])
with open(fname, 'rb') as f:
reader = csv.DictReader(f)
headers.append(reader.fieldnames)
for row in reader:
try:
date = datetime.datetime.strptime(row['date'],
'%Y-%m-%d %H:%M:%S.%f')
except ValueError:
date = datetime.datetime.strptime(row['date'],
'%Y-%m-%d %H:%M:%S')
rows[index].append(row)
dates[index].append(date)
minDate = dates[0][0]
for datesInFile in dates:
minDate = min(min(datesInFile),minDate)
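# Each timestamp below is rescaled around minDate as
#   newDate = minDate + multiplier * 2.5 * (date - minDate)
# so with a hypothetical multiplier of 0.5, an event 10 s after minDate is
# replayed 0.5 * 2.5 * 10 = 12.5 s after it.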
# Clear the monkey directory
outdir = 'parsedData/monkey/'
for f in glob.glob(outdir + '/*'):
os.remove(f)
for file_i,fname in enumerate(fnames):
    currDates = dates[file_i]
    currRows = rows[file_i]
    for row_i,row in enumerate(currRows):
        date = currDates[row_i]
        td = (date-minDate)
        tdNew = datetime.timedelta(seconds = multiplier * td.total_seconds()*2.5)
        newDate = minDate + tdNew
        rows[file_i][row_i]['date'] = newDate
# Print out curr file
(path,fn) = os.path.split(fname)
outfn = outdir + fn;
print outfn
with open(outfn, 'wb') as f:
writer = csv.DictWriter(f, delimiter = ',', fieldnames = headers[file_i])
writer.writeheader()
for row in currRows:
writer.writerow(row)
| bsd-3-clause | 2,900,439,998,527,990,300 | 27.576271 | 82 | 0.581257 | false |
jerabaul29/python_huffman | test/test_example_encode_hamlet.py | 1 | 4351 | from __future__ import print_function
import pyhuffman.pyhuffman as pyhuffman
import os
"""
A test case, which can also be used as an example, of how to generate a tree
and encode and decode some text.
"""
path_to_here = os.path.dirname(os.path.realpath(__file__)) + '/'
# path_to_here = ''
# functions for generating the frequency list ----
def generate_frequency_list(dict_appearances):
"""Generate the list to be used for building the Huffman tree from the
dictionary of symbol appearances"""
# total number of symbol appearances
total_number_appearances = 0
for key in dict_appearances:
total_number_appearances += dict_appearances[key]
frequency_list = []
for key in dict_appearances:
new_entry = (float(dict_appearances[key]) / total_number_appearances, key)
frequency_list.append(new_entry)
return frequency_list
def generate_frequency_dict(data):
"""Generate a dictionary of all symbols (keys) with their number of appearances
(values) from data.
"""
dict_appearances = {}
for crrt_char in data:
if crrt_char in dict_appearances:
dict_appearances[crrt_char] += 1
else:
dict_appearances[crrt_char] = 1
return dict_appearances
def generate_frequency_data(input_file):
"""Generate the frequency data to be used for building the Huffman tree.
"""
dict_appearances = generate_frequency_dict(input_file)
list_frequencies = generate_frequency_list(dict_appearances)
return list_frequencies
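# A tiny worked example of the helpers above:
#   generate_frequency_data("aab") yields [(2/3., 'a'), (1/3., 'b')], i.e.
#   [(0.666..., 'a'), (0.333..., 'b')] (pair order follows dict iteration
#   order, so it is not guaranteed).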
def test_good_encoding_decoding():
# various pathes ----
# note: the raw text data for Hamlet comes from: https://www.gutenberg.org/cache/epub/1524/pg1524.txt
path_to_hamlet = path_to_here + 'hamlet_from_gutember_project.txt'
# where to save the list_frequencies to be able to re-build the same huffman tree
path_to_list_frequencies = path_to_here + 'data_huffman_tree.pkl'
# where to save the encoded output
binary_hamlet_file = path_to_here + 'hamlet_huffman.bin'
# generate list_frequencies ----
with open(path_to_hamlet) as crrt_file:
data_hamlet = crrt_file.read()
list_frequencies = generate_frequency_data(data_hamlet)
# build the tree and encode ----
huffman_tree = pyhuffman.HuffmanTree(frequency_data=list_frequencies, path_to_tree=path_to_list_frequencies)
huffman_tree.encode_as_bitarray(data_hamlet, path_to_save=binary_hamlet_file)
    # build a new tree to decode (just to show how to restore from saved data) ----
huffman_tree_restaured = pyhuffman.HuffmanTree(path_to_tree=path_to_list_frequencies)
decoded = ''.join(huffman_tree_restaured.decode_from_bitarray(path_to_decode=binary_hamlet_file))
assert len(decoded) == 173940
assert decoded[0: 10] == 'HAMLET, PR'
assert decoded[-10:] == 'hot off.]\n'
os.remove(path_to_list_frequencies)
os.remove(binary_hamlet_file)
def test_automatic_exhaustive_1():
# various pathes ----
# note: the raw text data for Hamlet comes from: https://www.gutenberg.org/cache/epub/1524/pg1524.txt
path_to_hamlet = path_to_here + 'hamlet_from_gutember_project.txt'
# where to save the list_frequencies to be able to re-build the same huffman tree
path_to_list_frequencies = path_to_here + 'data_huffman_tree.pkl'
# where to save the encoded output
binary_hamlet_file = path_to_here + 'hamlet_huffman.bin'
# generate list_frequencies ----
with open(path_to_hamlet) as crrt_file:
data_hamlet = crrt_file.read()
list_frequencies = generate_frequency_data(data_hamlet)
for stop in range(200, 230, 1):
reduced_data = data_hamlet[100: stop]
# build the tree and encode ----
huffman_tree = pyhuffman.HuffmanTree(frequency_data=list_frequencies, path_to_tree=path_to_list_frequencies)
huffman_tree.encode_as_bitarray(reduced_data, path_to_save=binary_hamlet_file)
        # build a new tree to decode (just to show how to restore from saved data) ----
huffman_tree_restaured = pyhuffman.HuffmanTree(path_to_tree=path_to_list_frequencies)
decoded = ''.join(huffman_tree_restaured.decode_from_bitarray(path_to_decode=binary_hamlet_file))
assert(decoded == reduced_data)
os.remove(path_to_list_frequencies)
os.remove(binary_hamlet_file)
| mit | 2,481,112,119,950,024,000 | 35.258333 | 116 | 0.692484 | false |
dominicrodger/tinyblog | tinyblog/admin.py | 1 | 1075 | from django.contrib import admin
from tinyblog.models import Post, EmailSubscriber
from tinymce.widgets import TinyMCE
class PostAdmin(admin.ModelAdmin):
list_display = ('title', 'slug', 'created', 'emailed', )
list_filter = ('emailed', )
date_hierarchy = 'created'
search_fields = ('title', 'text', )
prepopulated_fields = {'slug': ('title', )}
readonly_fields = ('emailed', )
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name in ('text_html', 'teaser_html', ):
return db_field.formfield(widget=TinyMCE(
attrs={'cols': 80, 'rows': 30},
))
return super(PostAdmin, self).formfield_for_dbfield(db_field, **kwargs)
admin.site.register(Post, PostAdmin)
class EmailSubscriberAdmin(admin.ModelAdmin):
list_filter = ('confirmed', 'unsubscribed', )
list_display = ('email', 'confirmed', 'subscribed', 'unsubscribed', )
readonly_fields = ('uuid_first', 'uuid_second', )
search_fields = ('email', )
admin.site.register(EmailSubscriber, EmailSubscriberAdmin)
| bsd-3-clause | -6,948,106,030,963,254,000 | 34.833333 | 79 | 0.652093 | false |
suutari-ai/shoop | shuup/admin/dashboard/charts.py | 3 | 4673 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import abc
import json
import six
from babel.numbers import format_decimal, format_percent
from shuup.utils.i18n import format_money, get_current_babel_locale
from shuup.utils.money import Money
from shuup.utils.serialization import ExtendedJSONEncoder
class ChartType(object):
""" Type of a chart """
BAR = "bar"
LINE = "line"
class ChartDataType(object):
""" Data type of datasets """
NUMBER = "number"
CURRENCY = "currency"
PERCENT = "percent"
class Chart(six.with_metaclass(abc.ABCMeta)):
supported_chart_types = [] # list[ChartType]
def __init__(self, title, data_type=ChartDataType.NUMBER, locale=None, currency=None, options=None):
"""
:param str title: the title of the chart
:param ChartDataType data_type: the data type of values
The chart will format the output labels according to this parameter
:param str locale: the locale to render values
If not set, the locale will be fetched from Babel
:param str currency: the ISO-4217 code for the currency
This is necessary when the data_type is CURRENCY
:param dict options: a dicionaty with options for Chartjs
"""
self.title = title
self.datasets = []
self.options = options
self.data_type = data_type
self.currency = currency
if locale:
self.locale = locale
else:
self.locale = get_current_babel_locale()
if data_type == ChartDataType.CURRENCY and not currency:
raise AttributeError("You should also set currency for this data type")
@abc.abstractmethod
def get_config(self):
"""
Get a JSONable dictionary of configuration data for this chart.
This is passed on as `CHART_CONFIGS` in the JS environment and eventually
processed by `dashboard-charts.js`.
:return: Dict of configuration
:rtype: dict
"""
return {} # Implement me in a subclass, please.
def get_config_json(self):
        return json.dumps(self.get_config(), cls=ExtendedJSONEncoder, separators=(',', ':'))
def add_data(self, name, data, chart_type):
"""
Add data to this chart
:param name: the name of the dataset
:type name: str
:param data: the list of data
:type data: list[int|float|Decimal]
:param chart_type: the chart type - tells how data should be rendered.
            This chart type must be available in the `supported_chart_types` attribute of this instance
:type chart_type: ChartType
"""
assert chart_type in self.supported_chart_types
formatted_data = []
# format value for each data point
if self.data_type == ChartDataType.CURRENCY:
for value in data:
formatted_data.append(format_money(Money(value, currency=self.currency).as_rounded()))
elif self.data_type == ChartDataType.PERCENT:
for value in data:
formatted_data.append(format_percent(value, locale=self.locale))
# self.data_type == ChartDataType.NUMBER
else:
for value in data:
formatted_data.append(format_decimal(value, locale=self.locale))
self.datasets.append({"type": chart_type, "label": name, "data": data, "formatted_data": formatted_data})
class BarChart(Chart):
supported_chart_types = [ChartType.BAR]
def __init__(self, title, labels, data_type=ChartDataType.NUMBER, **kwargs):
super(BarChart, self).__init__(title, data_type=data_type, **kwargs)
self.labels = labels
def get_config(self):
return {
"type": ChartType.BAR,
"data": {
"labels": self.labels,
"datasets": self.datasets
},
"options": self.options
}
class MixedChart(Chart):
"""
This chart supports both Bars and Lines
"""
supported_chart_types = [ChartType.BAR, ChartType.LINE]
def __init__(self, title, labels, data_type=ChartDataType.NUMBER, **kwargs):
super(MixedChart, self).__init__(title, data_type=data_type, **kwargs)
self.labels = labels
def get_config(self):
return {
"type": "mixed",
"labels": self.labels,
"data": self.datasets,
"options": self.options
}
| agpl-3.0 | -1,327,710,243,566,116,600 | 31.451389 | 113 | 0.621014 | false |
gecos-team/gecosws-config-assistant | gecosws_config_assistant/dao/NetworkInterfaceDAO.py | 1 | 4583 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# This file is part of Guadalinex
#
# This software is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this package; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__author__ = "Abraham Macias Paredes <[email protected]>"
__copyright__ = "Copyright (C) 2015, Junta de Andalucía" + \
"<[email protected]>"
__license__ = "GPL-2"
import logging
import fcntl
import array
import struct
import socket
import platform
import subprocess
import traceback
from gecosws_config_assistant.dto.NetworkInterface import NetworkInterface
SIOCGIFCONF = 0x8912
MAXBYTES = 8096
class NetworkInterfaceDAO(object):
'''
DAO class to manipulate NetworkInterface DTO objects.
'''
# Singleton pattern
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(NetworkInterfaceDAO, cls).__new__(
cls, *args, **kwargs)
return cls._instance
def __init__(self):
'''
Constructor
'''
self.logger = logging.getLogger('NetworkInterfaceDAO')
def loadAll(self):
''' Loading all '''
self.logger.debug('loadAll - BEGIN')
interfaces = []
arch = platform.architecture()[0]
        # ifreq buffer layout constants (architecture dependent):
        # var1 = byte length of the interface-name field,
        # var2 = stride (total size) of one ifreq record
        var1 = -1
        var2 = -1
if arch == '32bit':
var1 = 32
var2 = 32
elif arch == '64bit':
var1 = 16
var2 = 40
else:
raise OSError("Unknown architecture: %s" % arch)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array.array('B', '\0' * MAXBYTES)
outbytes = struct.unpack('iL', fcntl.ioctl(
sock.fileno(),
SIOCGIFCONF,
struct.pack('iL', MAXBYTES, names.buffer_info()[0])
))[0]
namestr = names.tostring()
ifaces = [
(namestr[i:i + var1].split('\0', 1)[0],
socket.inet_ntoa(namestr[i + 20:i + 24])
) for i
in xrange(0, outbytes, var2)
]
for iface in ifaces:
interface = NetworkInterface()
interface.set_name(iface[0].strip())
interface.set_ip_address(iface[1].strip())
interfaces.append(interface)
return interfaces
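    # A minimal usage sketch (names/addresses are illustrative, and it is
    # assumed the NetworkInterface DTO exposes matching getters):
    #   dao = NetworkInterfaceDAO()
    #   for itf in dao.loadAll():
    #       print itf.get_name(), itf.get_ip_address()  # e.g. eth0 192.168.1.10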
def get_hostname(self):
''' Getting hostname '''
name = None
try:
p = subprocess.Popen(
'hostname',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
for line in p.stdout.readlines():
name = line
p.wait()
if name is not None:
name = name.strip()
except Exception:
self.logger.warn('Error trying to get the hostname')
self.logger.warn(str(traceback.format_exc()))
return name
def set_hostname(self, name):
''' Setting up hostname '''
if name is None:
return False
original = self.get_hostname()
try:
p = subprocess.Popen(
'hostname {}'.format(name),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
for line in p.stdout.readlines():
name = line
p.wait()
except Exception:
self.logger.warn('Error trying to set the hostname')
self.logger.warn(str(traceback.format_exc()))
return False
# Change the name is /etc/hosts file
hosts = None
with open('/etc/hosts','r') as f:
hosts = f.read()
if hosts is not None:
hosts = hosts.replace(original, name)
with open('/etc/hosts','w') as f:
f.write(hosts)
return True
| gpl-2.0 | -5,583,918,951,602,620,000 | 26.769697 | 74 | 0.562418 | false |
pearu/f2py | fparser/Fortran2003.py | 1 | 252094 | #!/usr/bin/env python
"""Fortran 2003 Syntax Rules.
"""
from __future__ import absolute_import
from __future__ import print_function
#Author: Pearu Peterson <[email protected]>
#Created: Oct 2006
import re
import logging
from .splitline import string_replace_map
from . import pattern_tools as pattern
from .readfortran import FortranReaderBase
from six.moves import map
from six.moves import range
logger = logging.getLogger("fparser")
###############################################################################
############################## BASE CLASSES ###################################
###############################################################################
class NoMatchError(Exception):
pass
class ParseError(Exception):
pass
def show_result(func):
    # debugging decorator currently disabled: returning func unchanged
    # leaves the new_func wrapper below unreachable
    return func
def new_func(cls, string, **kws):
r = func(cls, string, **kws)
if r is not None and isinstance(r, StmtBase):
print('%s(%r) -> %r' % (cls.__name__, string, str(r)))
return r
return new_func
class Base(object):
""" Base class for Fortran 2003 syntax rules.
All Base classes have the following attributes:
    .string - original argument to construct a class instance, its type
      is either str or FortranReaderBase.
.item - Line instance (holds label) or None.
"""
subclasses = {}
@show_result
def __new__(cls, string, parent_cls = None):
"""
"""
if parent_cls is None:
parent_cls = [cls]
elif cls not in parent_cls:
parent_cls.append(cls)
#print '__new__:',cls.__name__,`string`
match = cls.__dict__.get('match', None)
result = None
if isinstance(string, FortranReaderBase) and match is not None and not issubclass(cls, BlockBase):
reader = string
item = reader.get_item()
if item is None: return
try:
obj = item.parse_line(cls, parent_cls)
#obj = cls(item.line, parent_cls = parent_cls)
except NoMatchError:
obj = None
if obj is None:
reader.put_item(item)
return
obj.item = item
return obj
if match is not None:
# IMPORTANT: if string is FortranReaderBase then cls must
# restore readers content when no match is found.
try:
result = cls.match(string)
except NoMatchError as msg:
if str(msg)=='%s: %r' % (cls.__name__, string): # avoid recursion 1.
raise
#print '__new__:result:',cls.__name__,`string,result`
if isinstance(result, tuple):
obj = object.__new__(cls)
obj.string = string
obj.item = None
if hasattr(cls, 'init'): obj.init(*result)
return obj
elif isinstance(result, Base):
return result
elif result is None:
for subcls in Base.subclasses.get(cls.__name__,[]):
if subcls in parent_cls: # avoid recursion 2.
continue
#print '%s:%s: %r' % (cls.__name__,subcls.__name__,string)
try:
obj = subcls(string, parent_cls = parent_cls)
except NoMatchError as msg:
obj = None
if obj is not None:
return obj
else:
raise AssertionError(repr(result))
errmsg = '%s: %r' % (cls.__name__, string)
#if isinstance(string, FortranReaderBase) and string.fifo_item:
# errmsg += ' while reaching %s' % (string.fifo_item[-1])
raise NoMatchError(errmsg)
## def restore_reader(self):
## self._item.reader.put_item(self._item)
## return
def init(self, *items):
self.items = items
return
def torepr(self):
return '%s(%s)' % (self.__class__.__name__, ', '.join(map(repr,self.items)))
def compare(self, other):
return cmp(self.items,other.items)
def __str__(self): return self.tostr()
def __repr__(self): return self.torepr()
def __cmp__(self, other):
if self is other: return 0
if not isinstance(other, self.__class__): return cmp(self.__class__, other.__class__)
return self.compare(other)
def tofortran(self, tab='', isfix=None):
return tab + str(self)
def restore_reader(self, reader):
reader.put_item(self.item)
class BlockBase(Base):
"""
::
<block-base> = [ <startcls> ]
[ <subcls> ]...
...
[ <subcls> ]...
[ <endcls> ]
Attributes
----------
content : tuple
"""
def match(startcls, subclasses, endcls, reader,
match_labels = False,
match_names = False, set_unspecified_end_name = False,
match_name_classes = (),
enable_do_label_construct_hook = False,
enable_if_construct_hook = False,
enable_where_construct_hook = False,
enable_select_type_construct_hook = False,
enable_case_construct_hook = False
):
assert isinstance(reader,FortranReaderBase),repr(reader)
content = []
if startcls is not None:
try:
obj = startcls(reader)
except NoMatchError:
obj = None
if obj is None:
return
content.append(obj)
if enable_do_label_construct_hook:
start_label = obj.get_start_label()
if match_names:
start_name = obj.get_start_name()
if endcls is not None:
classes = subclasses + [endcls]
else:
classes = subclasses[:]
if endcls is not None:
endcls_all = tuple([endcls]+endcls.subclasses[endcls.__name__])
i = 0
while classes:
if enable_do_label_construct_hook:
try:
obj = startcls(reader)
except NoMatchError:
obj = None
if obj is not None:
if start_label == obj.get_start_label():
content.append(obj)
continue
else:
obj.restore_reader(reader)
cls = classes[i]
try:
obj = cls(reader)
except NoMatchError:
obj = None
if obj is None:
j = i
for cls in classes[i+1:]:
j += 1
try:
obj = cls(reader)
except NoMatchError:
obj = None
if obj is not None:
break
if obj is not None:
i = j
if obj is not None:
content.append(obj)
if match_names and isinstance(obj,match_name_classes):
end_name = obj.get_end_name()
if end_name != start_name:
reader.warning('expected construct name "%s" but got "%s"' % (start_name, end_name))
if endcls is not None and isinstance(obj, endcls_all):
if match_labels:
start_label, end_label = content[0].get_start_label(), content[-1].get_end_label()
if start_label != end_label:
continue
if match_names:
start_name, end_name = content[0].get_start_name(), content[-1].get_end_name()
if set_unspecified_end_name and end_name is None and start_name is not None:
content[-1].set_name(start_name)
elif start_name != end_name:
reader.warning('expected construct name "%s" but got "%s"' % (start_name, end_name))
continue
break
if enable_if_construct_hook:
if isinstance(obj, Else_If_Stmt):
i = 0
if isinstance(obj, (Else_Stmt, End_If_Stmt)):
enable_if_construct_hook = False
if enable_where_construct_hook:
if isinstance(obj, Masked_Elsewhere_Stmt):
i = 0
if isinstance(obj, (Elsewhere_Stmt, End_Where_Stmt)):
enable_where_construct_hook = False
if enable_select_type_construct_hook:
if isinstance(obj, Type_Guard_Stmt):
i = 1
if isinstance(obj, End_Select_Type_Stmt):
enable_select_type_construct_hook = False
if enable_case_construct_hook:
if isinstance(obj, Case_Stmt):
i = 1
if isinstance(obj, End_Select_Stmt):
enable_case_construct_hook = False
continue
if endcls is not None:
if 1:
for obj in reversed(content):
obj.restore_reader(reader)
return
item = reader.get_item()
if item is not None:
if 0:
pass
elif content:
reader.info('closing <%s> not found while reaching %s' % (endcls.__name__.lower(), item), item=content[0].item)
else:
reader.info('closing <%s> not found while reaching %s' % (endcls.__name__.lower(), item))
# no match found, restoring consumed reader items
reader.put_item(item)
for obj in reversed(content):
obj.restore_reader(reader)
return
if content:# and hasattr(content[0],'name'):
reader.info('closing <%s> not found while reaching eof' % (endcls.__name__.lower()), item=content[0].item)
for obj in reversed(content):
obj.restore_reader(reader)
return
else:
                reader.error('unexpected end of file while looking for <%s>.'\
                    % (classes[-1].__name__.lower().replace('_','-')))
break
if not content: return
if startcls is not None and endcls is not None:
# check names of start and end statements:
start_stmt = content[0]
end_stmt = content[-1]
if isinstance(end_stmt, endcls_all) and hasattr(end_stmt, 'get_name') and hasattr(start_stmt, 'get_name'):
if end_stmt.get_name() is not None:
if start_stmt.get_name() != end_stmt.get_name():
end_stmt.item.reader.error('expected <%s-name> is %s but got %s. Ignoring.'\
% (end_stmt.get_type().lower(), start_stmt.get_name(), end_stmt.get_name()))
else:
end_stmt.set_name(start_stmt.get_name())
return content,
match = staticmethod(match)
def init(self, content):
self.content = content
return
def compare(self, other):
return cmp(self.content,other.content)
def tostr(self):
return self.tofortran()
def torepr(self):
return '%s(%s)' % (self.__class__.__name__,', '.join(map(repr, self.content)))
def tofortran(self, tab='', isfix=None):
l = []
start = self.content[0]
end = self.content[-1]
extra_tab = ''
if isinstance(end, EndStmtBase):
extra_tab = ' '
if start is not None:
l.append(start.tofortran(tab=tab,isfix=isfix))
for item in self.content[1:-1]:
l.append(item.tofortran(tab=tab+extra_tab,isfix=isfix))
if len(self.content)>1:
l.append(end.tofortran(tab=tab,isfix=isfix))
return '\n'.join(l)
## def restore_reader(self):
## content = self.content[:]
## content.reverse()
## for obj in content:
## obj.restore_reader()
## return
def restore_reader(self, reader):
for obj in reversed(self.content):
obj.restore_reader(reader)
class SequenceBase(Base):
"""
::
<sequence-base> = <obj>, <obj> [ , <obj> ]...
"""
def match(separator, subcls, string):
line, repmap = string_replace_map(string)
if isinstance(separator, str):
splitted = line.split(separator)
else:
splitted = separator[1].split(line)
separator = separator[0]
if len(splitted)<=1: return
lst = []
for p in splitted:
lst.append(subcls(repmap(p.strip())))
return separator, tuple(lst)
match = staticmethod(match)
def init(self, separator, items):
self.separator = separator
self.items = items
return
def tostr(self):
s = self.separator
if s==',': s = s + ' '
elif s==' ': pass
else: s = ' ' + s + ' '
return s.join(map(str, self.items))
def torepr(self): return '%s(%r, %r)' % (self.__class__.__name__, self.separator, self.items)
def compare(self, other):
return cmp((self.separator,self.items),(other.separator,self.items))
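    # Informally: matching "a, b, c" with separator ',' stores
    # separator == ',' and items == (subcls('a'), subcls('b'), subcls('c')),
    # and tostr() joins them back as "a, b, c".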
class UnaryOpBase(Base):
"""
::
<unary-op-base> = <unary-op> <rhs>
"""
def tostr(self):
return '%s %s' % tuple(self.items)
def match(op_pattern, rhs_cls, string, exclude_op_pattern = None):
m = op_pattern.match(string)
if not m: return
rhs = string[m.end():].lstrip()
if not rhs: return
op = string[:m.end()].rstrip().upper()
if exclude_op_pattern is not None:
if exclude_op_pattern.match(op):
return
return op, rhs_cls(rhs)
match = staticmethod(match)
class BinaryOpBase(Base):
"""
::
<binary-op-base> = <lhs> <op> <rhs>
<op> is searched from right by default.
"""
def match(lhs_cls, op_pattern, rhs_cls, string, right=True, exclude_op_pattern = None,
is_add = False):
line, repmap = string_replace_map(string)
if isinstance(op_pattern, str):
if right:
t = line.rsplit(op_pattern,1)
else:
t = line.split(op_pattern,1)
if len(t)!=2: return
lhs, rhs = t[0].rstrip(), t[1].lstrip()
op = op_pattern
else:
if right:
t = op_pattern.rsplit(line, is_add = is_add)
else:
t = op_pattern.lsplit(line)
if t is None or len(t)!=3: return
lhs, op, rhs = t
lhs = lhs.rstrip()
rhs = rhs.lstrip()
op = op.upper()
if not lhs: return
if not rhs: return
if exclude_op_pattern is not None:
if exclude_op_pattern.match(op):
return
lhs_obj = lhs_cls(repmap(lhs))
rhs_obj = rhs_cls(repmap(rhs))
return lhs_obj, op.replace(' ',''), rhs_obj
match = staticmethod(match)
def tostr(self):
return '%s %s %s' % tuple(self.items)
class SeparatorBase(Base):
"""
::
<separator-base> = [ <lhs> ] : [ <rhs> ]
"""
def match(lhs_cls, rhs_cls, string, require_lhs=False, require_rhs=False):
line, repmap = string_replace_map(string)
if ':' not in line: return
lhs,rhs = line.split(':',1)
lhs = lhs.rstrip()
rhs = rhs.lstrip()
lhs_obj, rhs_obj = None, None
if lhs:
if lhs_cls is None: return
lhs_obj = lhs_cls(repmap(lhs))
elif require_lhs:
return
if rhs:
if rhs_cls is None: return
rhs_obj = rhs_cls(repmap(rhs))
elif require_rhs:
return
return lhs_obj, rhs_obj
match = staticmethod(match)
def tostr(self):
s = ''
if self.items[0] is not None:
s += '%s :' % (self.items[0])
else:
s += ':'
if self.items[1] is not None:
s += ' %s' % (self.items[1])
return s
class KeywordValueBase(Base):
"""
::
<keyword-value-base> = [ <lhs> = ] <rhs>
"""
def match(lhs_cls, rhs_cls, string, require_lhs = True, upper_lhs = False):
if require_lhs and '=' not in string: return
if isinstance(lhs_cls, (list, tuple)):
for s in lhs_cls:
try:
obj = KeywordValueBase.match(s, rhs_cls, string, require_lhs=require_lhs, upper_lhs=upper_lhs)
except NoMatchError:
obj = None
if obj is not None: return obj
return obj
lhs,rhs = string.split('=',1)
lhs = lhs.rstrip()
rhs = rhs.lstrip()
if not rhs: return
if not lhs:
if require_lhs: return
return None, rhs_cls(rhs)
if isinstance(lhs_cls, str):
if upper_lhs:
lhs = lhs.upper()
if lhs_cls!=lhs: return
return lhs, rhs_cls(rhs)
return lhs_cls(lhs),rhs_cls(rhs)
match = staticmethod(match)
def tostr(self):
if self.items[0] is None: return str(self.items[1])
return '%s = %s' % tuple(self.items)
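    # e.g. with lhs_cls='UNIT' and a suitable rhs_cls, "UNIT = 5" matches as
    # ('UNIT', rhs_cls('5')) and prints back as "UNIT = 5"; an empty left
    # side such as "= 5" (with require_lhs=False) yields (None, rhs_cls('5')).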
class BracketBase(Base):
"""
::
<bracket-base> = <left-bracket-base> <something> <right-bracket>
"""
def match(brackets, cls, string, require_cls=True):
i = len(brackets)//2
left = brackets[:i]
right = brackets[-i:]
if string.startswith(left) and string.endswith(right):
line = string[i:-i].strip()
if not line:
if require_cls:
return
return left,None,right
return left,cls(line),right
return
match = staticmethod(match)
def tostr(self):
if self.items[1] is None:
return '%s%s' % (self.items[0], self.items[2])
return '%s%s%s' % tuple(self.items)
class NumberBase(Base):
"""
::
<number-base> = <number> [ _ <kind-param> ]
"""
def match(number_pattern, string):
m = number_pattern.match(string.replace(' ',''))
if m is None: return
d = m.groupdict()
return d['value'].upper(),d.get('kind_param')
match = staticmethod(match)
def tostr(self):
if self.items[1] is None: return str(self.items[0])
return '%s_%s' % tuple(self.items)
def compare(self, other):
return cmp(self.items[0], other.items[0])
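    # e.g. a real-literal pattern applied to "1.0E3_wp" captures value
    # '1.0E3' and kind parameter 'wp'; tostr() renders it back as
    # '1.0E3_wp' (only the value part is upper-cased by match()).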
class CallBase(Base):
"""
::
<call-base> = <lhs> ( [ <rhs> ] )
"""
def match(lhs_cls, rhs_cls, string, upper_lhs = False, require_rhs=False):
if not string.endswith(')'): return
line, repmap = string_replace_map(string)
i = line.rfind('(')
if i==-1: return
lhs = line[:i].rstrip()
if not lhs: return
j = line.rfind(')')
rhs = line[i+1:j].strip()
if line[j+1:].lstrip():
return
lhs = repmap(lhs)
if upper_lhs:
lhs = lhs.upper()
rhs = repmap(rhs)
if isinstance(lhs_cls, str):
if lhs_cls!=lhs: return
else:
lhs = lhs_cls(lhs)
if rhs:
if isinstance(rhs_cls, str):
if rhs_cls!=rhs: return
else:
rhs = rhs_cls(rhs)
return lhs, rhs
elif require_rhs:
return
return lhs, None
match = staticmethod(match)
def tostr(self):
if self.items[1] is None: return '%s()' % (self.items[0])
return '%s(%s)' % (self.items[0], self.items[1])
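# A minimal usage sketch for CallBase.match, which peels "<lhs> ( [ <rhs> ] )"
# apart at the outermost call parentheses:
#
#   CallBase.match(Name, Name, 'f(x)')                      ->  (Name('f'), Name('x'))
#   CallBase.match('NULL', None, 'Null()', upper_lhs=True)  ->  ('NULL', None)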
class CALLBase(CallBase):
"""
::
<CALL-base> = <LHS> ( [ <rhs> ] )
"""
def match(lhs_cls, rhs_cls, string, require_rhs = False):
return CallBase.match(lhs_cls, rhs_cls, string, upper_lhs=True, require_rhs = require_rhs)
match = staticmethod(match)
class StringBase(Base):
"""
::
<string-base> = <xyz>
Attributes
----------
string
"""
def match(pattern, string):
if isinstance(pattern, (list,tuple)):
for p in pattern:
obj = StringBase.match(p, string)
if obj is not None: return obj
return
if isinstance(pattern, str):
if len(pattern)==len(string) and pattern==string: return string,
return
if pattern.match(string): return string,
return
match = staticmethod(match)
def init(self, string):
self.string = string
return
def tostr(self): return str(self.string)
def torepr(self): return '%s(%r)' % (self.__class__.__name__, self.string)
def compare(self, other):
return cmp(self.string,other.string)
class STRINGBase(StringBase):
"""
::
<STRING-base> = <XYZ>
"""
match = staticmethod(StringBase.match)
def match(pattern, string):
if isinstance(pattern, (list,tuple)):
for p in pattern:
obj = STRINGBase.match(p, string)
if obj is not None: return obj
return
STRING = string.upper()
if isinstance(pattern, str):
if len(pattern)==len(string) and pattern==STRING: return STRING,
return
if pattern.match(STRING): return STRING,
return
match = staticmethod(match)
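# A minimal usage sketch for STRINGBase: the candidate is upper-cased before
# comparison, which keeps keyword matching case-insensitive:
#
#   STRINGBase.match(['IN', 'OUT', 'INOUT'], 'inout')  ->  ('INOUT',)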
class StmtBase(Base):
"""
::
[ [ <label> ] [ <construct-name> : ] ] <stmt>
Attributes
----------
item : readfortran.Line
"""
def tofortran(self, tab='', isfix=None):
label = None
name = None
if self.item is not None:
label = self.item.label
name = self.item.name
if isfix:
c = ' '
else:
c = ''
if label:
t = c + str(label)
if isfix:
while len(t)<6: t += ' '
else:
tab = tab[len(t):] or ' '
else:
t = ''
if name:
return t + tab + name+':' + str(self)
return t + tab + str(self)
def get_end_label(self):
return self.item.label
class EndStmtBase(StmtBase):
"""
::
<end-stmt-base> = END [ <stmt> [ <stmt-name>] ]
"""
@staticmethod
def match(stmt_type, stmt_name, string, require_stmt_type=False):
start = string[:3].upper()
if start != 'END': return
line = string[3:].lstrip()
start = line[:len(stmt_type)].upper()
if start:
if start.replace(' ','') != stmt_type.replace(' ',''): return
line = line[len(stmt_type):].lstrip()
else:
if require_stmt_type: return
line = ''
if line:
if stmt_name is None: return
return stmt_type, stmt_name(line)
return stmt_type, None
def init(self, stmt_type, stmt_name):
self.items = [stmt_type, stmt_name]
self.type, self.name = stmt_type, stmt_name
return
def get_name(self): return self.items[1]
def get_type(self): return self.items[0]
def set_name(self, name):
if self.items[1] is not None:
self.warning('item already has name %r, changing it to %r' % (self.items[1], name))
if isinstance(name, Name):
self.items[1] = name
else:
self.items[1] = Name(name)
def tostr(self):
if self.items[1] is not None:
return 'END %s %s' % tuple(self.items)
return 'END %s' % (self.items[0])
def torepr(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.type, self.name)
def get_end_name(self):
name = self.items[1]
if name is not None:
return name.string
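# A minimal usage sketch for EndStmtBase.match (Type_Name is the C424 class
# defined later in this module):
#
#   EndStmtBase.match('TYPE', Type_Name, 'END TYPE point')  ->  ('TYPE', Type_Name('point'))
#   EndStmtBase.match('TYPE', Type_Name, 'END')             ->  ('TYPE', None)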
def isalnum(c): return c.isalnum() or c=='_'
class WORDClsBase(Base):
"""
::
<WORD-cls> = <WORD> [ [ :: ] <cls> ]
"""
@staticmethod
def match(pattern, cls, string, check_colons=False, require_cls=False):
if isinstance(pattern, (tuple,list)):
for p in pattern:
try:
obj = WORDClsBase.match(p, cls, string, check_colons=check_colons, require_cls=require_cls)
except NoMatchError:
obj = None
if obj is not None: return obj
return
if isinstance(pattern, str):
if string[:len(pattern)].upper()!=pattern: return
line = string[len(pattern):]
if not line: return pattern, None
if isalnum(line[0]): return
line = line.lstrip()
if check_colons and line.startswith('::'):
line = line[2:].lstrip()
if not line:
if require_cls: return
return pattern, None
if cls is None: return
return pattern, cls(line)
m = pattern.match(string)
if m is None: return
line = string[len(m.group()):]
if pattern.value is not None:
pattern_value = pattern.value
else:
pattern_value = m.group().upper()
if not line: return pattern_value, None
if isalnum(line[0]): return
line = line.lstrip()
if check_colons and line.startswith('::'):
line = line[2:].lstrip()
if not line:
if require_cls: return
return pattern_value, None
if cls is None: return
return pattern_value, cls(line)
def tostr(self):
if self.items[1] is None: return str(self.items[0])
s = str(self.items[1])
if s and s[0] in '(*':
return '%s%s' % (self.items[0], s)
return '%s %s' % (self.items[0], s)
def tostr_a(self): # colons version of tostr
if self.items[1] is None: return str(self.items[0])
return '%s :: %s' % (self.items[0], self.items[1])
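# A minimal usage sketch for WORDClsBase.match: the leading keyword and an
# optional '::' are stripped before the remainder is handed to cls (the
# *_List classes are generated from use_names elsewhere in this module):
#
#   WORDClsBase.match('ENUMERATOR', Enumerator_List, 'ENUMERATOR :: red, blue',
#                     check_colons=True, require_cls=True)
#     ->  ('ENUMERATOR', Enumerator_List('red, blue'))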
class Type_Declaration_StmtBase(StmtBase):
"""
<type-declaration-stmt> = <declaration-type-spec> [ [ , <attr-spec> ]... :: ] <entity-decl-list>
"""
subclass_names = []
use_names = None # derived class must define this list
@staticmethod
def match(decl_type_spec_cls, attr_spec_list_cls, entity_decl_list_cls, string):
line, repmap = string_replace_map(string)
i = line.find('::')
if i!=-1:
j = line[:i].find(',')
if j!=-1:
i = j
else:
if line[:6].upper()=='DOUBLE':
m = re.search(r'\s[a-z_]',line[6:].lstrip(),re.I)
if m is None: return
i = m.start() + len(line)-len(line[6:].lstrip())
else:
m = re.search(r'\s[a-z_]',line,re.I)
if m is None: return
i = m.start()
type_spec = decl_type_spec_cls(repmap(line[:i].rstrip()))
if type_spec is None: return
line = line[i:].lstrip()
if line.startswith(','):
i = line.find('::')
if i==-1: return
attr_specs = attr_spec_list_cls(repmap(line[1:i].strip()))
if attr_specs is None: return
line = line[i:]
else:
attr_specs = None
if line.startswith('::'):
line = line[2:].lstrip()
entity_decls = entity_decl_list_cls(repmap(line))
if entity_decls is None: return
return type_spec, attr_specs, entity_decls
def tostr(self):
if self.items[1] is None:
return '%s :: %s' % (self.items[0], self.items[2])
else:
return '%s, %s :: %s' % self.items
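# A minimal sketch of what Type_Declaration_StmtBase.match produces; the exact
# node classes are supplied by the concrete subclasses:
#
#   'INTEGER, SAVE :: n = 0'  ->  (type-spec, attr-spec-list, entity-decl-list)
#   'REAL x'                  ->  (type-spec, None, entity-decl-list)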
###############################################################################
############################### SECTION 1 ####################################
###############################################################################
#R101: <xyz-list> = <xyz> [ , <xyz> ]...
#R102: <xyz-name> = <name>
#R103: <scalar-xyz> = <xyz>
###############################################################################
############################### SECTION 2 ####################################
###############################################################################
class Program(BlockBase): # R201
"""
:F03R:`201`::
<program> = <program-unit>
[ <program-unit> ] ...
"""
subclass_names = []
use_names = ['Program_Unit']
@staticmethod
def match(reader):
#return Program_Unit(reader)
try:
result = BlockBase.match(Program_Unit, [Program_Unit], None, reader)
except NoMatchError:
result = None
if result is not None:
return result
return BlockBase.match(Main_Program0, [], None, reader)
class Program_Unit(Base): # R202
"""
:F03R:`202`::
<program-unit> = <main-program>
| <external-subprogram>
| <module>
| <block-data>
"""
subclass_names = ['Main_Program', 'External_Subprogram', 'Module', 'Block_Data']
class External_Subprogram(Base): # R203
"""
:F03R:`203`::
<external-subprogram> = <function-subprogram>
| <subroutine-subprogram>
"""
subclass_names = ['Function_Subprogram', 'Subroutine_Subprogram']
class Specification_Part(BlockBase): # R204
"""
:F03R:`204`::
<specification-part> = [ <use-stmt> ]...
[ <import-stmt> ]...
[ <implicit-part> ]
[ <declaration-construct> ]...
"""
subclass_names = []
use_names = ['Use_Stmt', 'Import_Stmt', 'Implicit_Part', 'Declaration_Construct']
@staticmethod
def match(reader):
return BlockBase.match(None, [Use_Stmt, Import_Stmt, Implicit_Part, Declaration_Construct], None, reader)
class Implicit_Part(BlockBase): # R205
"""
:F03R:`205`::
<implicit-part> = [ <implicit-part-stmt> ]...
<implicit-stmt>
"""
subclass_names = []
use_names = ['Implicit_Part_Stmt', 'Implicit_Stmt']
@staticmethod
def match(reader):
return BlockBase.match(None, [Implicit_Part_Stmt], None, reader)
class Implicit_Part_Stmt(Base): # R206
"""
:F03R:`206`::
<implicit-part-stmt> = <implicit-stmt>
| <parameter-stmt>
| <format-stmt>
| <entry-stmt>
"""
subclass_names = ['Implicit_Stmt', 'Parameter_Stmt', 'Format_Stmt', 'Entry_Stmt']
class Declaration_Construct(Base): # R207
"""
:F03R:`207`::
<declaration-construct> = <derived-type-def>
| <entry-stmt>
| <enum-def>
| <format-stmt>
| <interface-block>
| <parameter-stmt>
| <procedure-declaration-stmt>
| <specification-stmt>
| <type-declaration-stmt>
| <stmt-function-stmt>
"""
subclass_names = ['Derived_Type_Def', 'Entry_Stmt', 'Enum_Def', 'Format_Stmt',
'Interface_Block', 'Parameter_Stmt', 'Procedure_Declaration_Stmt',
'Specification_Stmt', 'Type_Declaration_Stmt', 'Stmt_Function_Stmt']
class Execution_Part(BlockBase): # R208
"""
:F03R:`208`::
<execution-part> = <executable-construct>
| [ <execution-part-construct> ]...
<execution-part> shall not contain <end-function-stmt>, <end-program-stmt>, <end-subroutine-stmt>
"""
subclass_names = []
use_names = ['Executable_Construct_C201', 'Execution_Part_Construct_C201']
def match(string): return BlockBase.match(Executable_Construct_C201, [Execution_Part_Construct_C201], None, string)
match = staticmethod(match)
class Execution_Part_Construct(Base): # R209
"""
<execution-part-construct> = <executable-construct>
| <format-stmt>
| <entry-stmt>
| <data-stmt>
"""
subclass_names = ['Executable_Construct', 'Format_Stmt', 'Entry_Stmt', 'Data_Stmt']
class Execution_Part_Construct_C201(Base):
subclass_names = ['Executable_Construct_C201', 'Format_Stmt', 'Entry_Stmt', 'Data_Stmt']
class Internal_Subprogram_Part(BlockBase): # R210
"""
<internal-subprogram-part> = <contains-stmt>
<internal-subprogram>
[ <internal-subprogram> ]...
"""
subclass_names = []
use_names = ['Contains_Stmt', 'Internal_Subprogram']
@staticmethod
def match(reader):
return BlockBase.match(Contains_Stmt, [Internal_Subprogram], None, reader)
class Internal_Subprogram(Base): # R211
"""
<internal-subprogram> = <function-subprogram>
| <subroutine-subprogram>
"""
subclass_names = ['Function_Subprogram', 'Subroutine_Subprogram']
class Specification_Stmt(Base):# R212
"""
<specification-stmt> = <access-stmt>
| <allocatable-stmt>
| <asynchronous-stmt>
| <bind-stmt>
| <common-stmt>
| <data-stmt>
| <dimension-stmt>
| <equivalence-stmt>
| <external-stmt>
| <intent-stmt>
| <intrinsic-stmt>
| <namelist-stmt>
| <optional-stmt>
| <pointer-stmt>
| <protected-stmt>
| <save-stmt>
| <target-stmt>
| <volatile-stmt>
| <value-stmt>
"""
subclass_names = ['Access_Stmt', 'Allocatable_Stmt', 'Asynchronous_Stmt','Bind_Stmt',
'Common_Stmt', 'Data_Stmt', 'Dimension_Stmt', 'Equivalence_Stmt',
'External_Stmt', 'Intent_Stmt', 'Intrinsic_Stmt', 'Namelist_Stmt',
'Optional_Stmt','Pointer_Stmt','Protected_Stmt','Save_Stmt',
'Target_Stmt','Volatile_Stmt', 'Value_Stmt']
class Executable_Construct(Base):# R213
"""
<executable-construct> = <action-stmt>
| <associate-stmt>
| <case-construct>
| <do-construct>
| <forall-construct>
| <if-construct>
| <select-type-construct>
| <where-construct>
"""
subclass_names = ['Action_Stmt', 'Associate_Stmt', 'Case_Construct', 'Do_Construct',
'Forall_Construct', 'If_Construct', 'Select_Type_Construct', 'Where_Construct']
class Executable_Construct_C201(Base):
subclass_names = Executable_Construct.subclass_names[:]
subclass_names[subclass_names.index('Action_Stmt')] = 'Action_Stmt_C201'
class Action_Stmt(Base):# R214
"""
<action-stmt> = <allocate-stmt>
| <assignment-stmt>
| <backspace-stmt>
| <call-stmt>
| <close-stmt>
| <continue-stmt>
| <cycle-stmt>
| <deallocate-stmt>
| <endfile-stmt>
| <end-function-stmt>
| <end-program-stmt>
| <end-subroutine-stmt>
| <exit-stmt>
| <flush-stmt>
| <forall-stmt>
| <goto-stmt>
| <if-stmt>
| <inquire-stmt>
| <nullify-stmt>
| <open-stmt>
| <pointer-assignment-stmt>
| <print-stmt>
| <read-stmt>
| <return-stmt>
| <rewind-stmt>
| <stop-stmt>
| <wait-stmt>
| <where-stmt>
| <write-stmt>
| <arithmetic-if-stmt>
| <computed-goto-stmt>
"""
subclass_names = ['Allocate_Stmt', 'Assignment_Stmt', 'Backspace_Stmt', 'Call_Stmt',
'Close_Stmt', 'Continue_Stmt', 'Cycle_Stmt', 'Deallocate_Stmt',
'Endfile_Stmt', 'End_Function_Stmt', 'End_Subroutine_Stmt', 'Exit_Stmt',
'Flush_Stmt', 'Forall_Stmt', 'Goto_Stmt', 'If_Stmt', 'Inquire_Stmt',
'Nullify_Stmt', 'Open_Stmt', 'Pointer_Assignment_Stmt', 'Print_Stmt',
'Read_Stmt', 'Return_Stmt', 'Rewind_Stmt', 'Stop_Stmt', 'Wait_Stmt',
'Where_Stmt', 'Write_Stmt', 'Arithmetic_If_Stmt', 'Computed_Goto_Stmt']
class Action_Stmt_C201(Base):
"""
<action-stmt-c201> = <action-stmt>
C201 is applied.
"""
subclass_names = Action_Stmt.subclass_names[:]
subclass_names.remove('End_Function_Stmt')
subclass_names.remove('End_Subroutine_Stmt')
#subclass_names.remove('End_Program_Stmt')
class Action_Stmt_C802(Base):
"""
<action-stmt-c802> = <action-stmt>
C802 is applied.
"""
subclass_names = Action_Stmt.subclass_names[:]
subclass_names.remove('End_Function_Stmt')
subclass_names.remove('End_Subroutine_Stmt')
subclass_names.remove('If_Stmt')
class Action_Stmt_C824(Base):
"""
<action-stmt-c824> = <action-stmt>
C824 is applied.
"""
subclass_names = Action_Stmt.subclass_names[:]
subclass_names.remove('End_Function_Stmt')
subclass_names.remove('End_Subroutine_Stmt')
subclass_names.remove('Continue_Stmt')
subclass_names.remove('Goto_Stmt')
subclass_names.remove('Return_Stmt')
subclass_names.remove('Stop_Stmt')
subclass_names.remove('Exit_Stmt')
subclass_names.remove('Cycle_Stmt')
subclass_names.remove('Arithmetic_If_Stmt')
class Keyword(Base): # R215
"""
<keyword> = <name>
"""
subclass_names = ['Name']
###############################################################################
############################### SECTION 3 ####################################
###############################################################################
#R301: <character> = <alphanumeric-character> | <special-character>
#R302: <alphanumeric-character> = <letter> | <digit> | <underscore>
#R303: <underscore> = _
class Name(StringBase): # R304
"""
<name> = <letter> [ <alphanumeric_character> ]...
"""
subclass_names = []
@staticmethod
def match(string): return StringBase.match(pattern.abs_name, string.replace(' ',''))
class Constant(Base): # R305
"""
<constant> = <literal-constant>
| <named-constant>
"""
subclass_names = ['Literal_Constant','Named_Constant']
class Literal_Constant(Base): # R306
"""
<literal-constant> = <int-literal-constant>
| <real-literal-constant>
| <complex-literal-constant>
| <logical-literal-constant>
| <char-literal-constant>
| <boz-literal-constant>
"""
subclass_names = ['Int_Literal_Constant', 'Real_Literal_Constant','Complex_Literal_Constant',
'Logical_Literal_Constant','Char_Literal_Constant','Boz_Literal_Constant']
class Named_Constant(Base): # R307
"""
<named-constant> = <name>
"""
subclass_names = ['Name']
class Int_Constant(Base): # R308
"""
<int-constant> = <constant>
"""
subclass_names = ['Constant']
class Char_Constant(Base): # R309
"""
<char-constant> = <constant>
"""
subclass_names = ['Constant']
#R310: <intrinsic-operator> = <power-op> | <mult-op> | <add-op> | <concat-op> | <rel-op> | <not-op> | <and-op> | <or-op> | <equiv-op>
#R311: <defined-operator> = <defined-unary-op> | <defined-binary-op> | <extended-intrinsic-op>
#R312: <extended-intrinsic-op> = <intrinsic-op>
class Label(StringBase): # R313
"""
::
<label> = <digit> [ <digit> [ <digit> [ <digit> [ <digit> ] ] ] ]
Attributes
----------
string : str
"""
subclass_names = []
@staticmethod
def match(string):
return StringBase.match(pattern.abs_label, string)
def __int__(self):
return int(self.string)
###############################################################################
############################### SECTION 4 ####################################
###############################################################################
class Type_Spec(Base): # R401
"""
<type-spec> = <intrinsic-type-spec>
| <derived-type-spec>
"""
subclass_names = ['Intrinsic_Type_Spec', 'Derived_Type_Spec']
class Type_Param_Value(StringBase): # R402
"""
<type-param-value> = <scalar-int-expr>
| *
| :
"""
subclass_names = ['Scalar_Int_Expr']
use_names = []
def match(string): return StringBase.match(['*',':'], string)
match = staticmethod(match)
class Intrinsic_Type_Spec(WORDClsBase): # R403
"""
<intrinsic-type-spec> = INTEGER [ <kind-selector> ]
| REAL [ <kind-selector> ]
| DOUBLE COMPLEX
| COMPLEX [ <kind-selector> ]
| CHARACTER [ <char-selector> ]
| LOGICAL [ <kind-selector> ]
Extensions:
| DOUBLE PRECISION
| BYTE
"""
subclass_names = []
use_names = ['Kind_Selector','Char_Selector']
def match(string):
for w,cls in [('INTEGER',Kind_Selector),
('REAL',Kind_Selector),
('COMPLEX',Kind_Selector),
('LOGICAL',Kind_Selector),
('CHARACTER',Char_Selector),
(pattern.abs_double_complex_name, None),
(pattern.abs_double_precision_name, None),
('BYTE', None),
]:
try:
obj = WORDClsBase.match(w,cls,string)
except NoMatchError:
obj = None
if obj is not None: return obj
return
match = staticmethod(match)
class Kind_Selector(Base): # R404
"""
<kind-selector> = ( [ KIND = ] <scalar-int-initialization-expr> )
Extensions:
| * <char-length>
"""
subclass_names = []
use_names = ['Char_Length','Scalar_Int_Initialization_Expr']
def match(string):
if string[0]+string[-1] != '()':
if not string.startswith('*'): return
return '*',Char_Length(string[1:].lstrip())
line = string[1:-1].strip()
if line[:4].upper()=='KIND':
line = line[4:].lstrip()
if not line.startswith('='): return
line = line[1:].lstrip()
return '(',Scalar_Int_Initialization_Expr(line),')'
match = staticmethod(match)
def tostr(self):
if len(self.items)==2: return '%s%s' % tuple(self.items)
return '%sKIND = %s%s' % tuple(self.items)
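# A minimal usage sketch for Kind_Selector, covering both accepted forms:
#
#   Kind_Selector('(KIND=8)')  ->  items ('(', expr, ')'),        str() == '(KIND = 8)'
#   Kind_Selector('*4')        ->  items ('*', Char_Length('4')), str() == '*4'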
class Signed_Int_Literal_Constant(NumberBase): # R405
"""
<signed-int-literal-constant> = [ <sign> ] <int-literal-constant>
"""
subclass_names = ['Int_Literal_Constant'] # never used because sign is included in pattern
def match(string):
return NumberBase.match(pattern.abs_signed_int_literal_constant_named, string)
match = staticmethod(match)
class Int_Literal_Constant(NumberBase): # R406
"""
<int-literal-constant> = <digit-string> [ _ <kind-param> ]
"""
subclass_names = []
def match(string):
return NumberBase.match(pattern.abs_int_literal_constant_named, string)
match = staticmethod(match)
class Digit_String(NumberBase):
"""
<digit-string> = <digit> [ <digit> ]...
"""
subclass_names = []
def match(string):
return NumberBase.match(pattern.abs_digit_string_named, string)
match = staticmethod(match)
#R407: <kind-param> = <digit-string> | <scalar-int-constant-name>
#R408: <signed-digit-string> = [ <sign> ] <digit-string>
#R409: <digit-string> = <digit> [ <digit> ]...
#R410: <sign> = + | -
class Boz_Literal_Constant(Base): # R411
"""
<boz-literal-constant> = <binary-constant>
| <octal-constant>
| <hex-constant>
"""
subclass_names = ['Binary_Constant','Octal_Constant','Hex_Constant']
class Binary_Constant(STRINGBase): # R412
"""
<binary-constant> = B ' <digit> [ <digit> ]... '
| B \" <digit> [ <digit> ]... \"
"""
subclass_names = []
def match(string): return STRINGBase.match(pattern.abs_binary_constant, string)
match = staticmethod(match)
class Octal_Constant(STRINGBase): # R413
"""
<octal-constant> = O ' <digit> [ <digit> ]... '
| O \" <digit> [ <digit> ]... \"
"""
subclass_names = []
def match(string): return STRINGBase.match(pattern.abs_octal_constant, string)
match = staticmethod(match)
class Hex_Constant(STRINGBase): # R414
"""
<hex-constant> = Z ' <digit> [ <digit> ]... '
| Z \" <digit> [ <digit> ]... \"
"""
subclass_names = []
def match(string): return STRINGBase.match(pattern.abs_hex_constant, string)
match = staticmethod(match)
#R415: <hex-digit> = <digit> | A | B | C | D | E | F
class Signed_Real_Literal_Constant(NumberBase): # R416
"""
<signed-real-literal-constant> = [ <sign> ] <real-literal-constant>
"""
subclass_names = ['Real_Literal_Constant'] # never used
def match(string):
return NumberBase.match(pattern.abs_signed_real_literal_constant_named, string)
match = staticmethod(match)
class Real_Literal_Constant(NumberBase): # R417
"""
"""
subclass_names = []
def match(string):
return NumberBase.match(pattern.abs_real_literal_constant_named, string)
match = staticmethod(match)
#R418: <significand> = <digit-string> . [ <digit-string> ] | . <digit-string>
#R419: <exponent-letter> = E | D
#R420: <exponent> = <signed-digit-string>
class Complex_Literal_Constant(Base): # R421
"""
<complex-literal-constant> = ( <real-part>, <imag-part> )
"""
subclass_names = []
use_names = ['Real_Part','Imag_Part']
def match(string):
if not string or string[0]+string[-1]!='()': return
if not pattern.abs_complex_literal_constant.match(string):
return
r,i = string[1:-1].split(',')
return Real_Part(r.strip()), Imag_Part(i.strip())
match = staticmethod(match)
def tostr(self): return '(%s, %s)' % tuple(self.items)
class Real_Part(Base): # R422
"""
<real-part> = <signed-int-literal-constant>
| <signed-real-literal-constant>
| <named-constant>
"""
subclass_names = ['Signed_Int_Literal_Constant','Signed_Real_Literal_Constant','Named_Constant']
class Imag_Part(Base): # R423
"""
<imag-part> = <real-part>
"""
subclass_names = ['Signed_Int_Literal_Constant','Signed_Real_Literal_Constant','Named_Constant']
class Char_Selector(Base): # R424
"""
<char-selector> = <length-selector>
| ( LEN = <type-param-value> , KIND = <scalar-int-initialization-expr> )
| ( <type-param-value> , [ KIND = ] <scalar-int-initialization-expr> )
| ( KIND = <scalar-int-initialization-expr> [ , LEN = <type-param-value> ] )
"""
subclass_names = ['Length_Selector']
use_names = ['Type_Param_Value','Scalar_Int_Initialization_Expr']
def match(string):
if string[0]+string[-1] != '()': return
line, repmap = string_replace_map(string[1:-1].strip())
if line[:3].upper()=='LEN' and line[3:].lstrip().startswith('='):
line = line[3:].lstrip()
line = line[1:].lstrip()
i = line.find(',')
if i==-1: return
v = line[:i].rstrip()
line = line[i+1:].lstrip()
if line[:4].upper()!='KIND': return
line = line[4:].lstrip()
if not line.startswith('='): return
line = line[1:].lstrip()
v = repmap(v)
line = repmap(line)
return Type_Param_Value(v), Scalar_Int_Initialization_Expr(line)
elif line[:4].upper()=='KIND' and line[4:].lstrip().startswith('='):
line = line[4:].lstrip()
line = line[1:].lstrip()
i = line.find(',')
            if i==-1: return None,Scalar_Int_Initialization_Expr(repmap(line))
v = line[i+1:].lstrip()
line = line[:i].rstrip()
if v[:3].upper()!='LEN': return
v = v[3:].lstrip()
if not v.startswith('='): return
v = v[1:].lstrip()
            return Type_Param_Value(repmap(v)), Scalar_Int_Initialization_Expr(repmap(line))
else:
i = line.find(',')
if i==-1: return
v = line[:i].rstrip()
line = line[i+1:].lstrip()
if line[:4].upper()=='KIND' and line[4:].lstrip().startswith('='):
line = line[4:].lstrip()
line = line[1:].lstrip()
                return Type_Param_Value(repmap(v)), Scalar_Int_Initialization_Expr(repmap(line))
return
match = staticmethod(match)
def tostr(self):
if self.items[0] is None:
return '(KIND = %s)' % (self.items[1])
return '(LEN = %s, KIND = %s)' % (self.items[0],self.items[1])
class Length_Selector(Base): # R425
"""
    <length-selector> = ( [ LEN = ] <type-param-value> )
| * <char-length> [ , ]
"""
subclass_names = []
use_names = ['Type_Param_Value','Char_Length']
def match(string):
if string[0]+string[-1] == '()':
line = string[1:-1].strip()
if line[:3].upper()=='LEN' and line[3:].lstrip().startswith('='):
line = line[3:].lstrip()
line = line[1:].lstrip()
return '(',Type_Param_Value(line),')'
if not string.startswith('*'): return
line = string[1:].lstrip()
if string[-1]==',': line = line[:-1].rstrip()
return '*',Char_Length(line)
match = staticmethod(match)
def tostr(self):
if len(self.items)==2: return '%s%s' % tuple(self.items)
return '%sLEN = %s%s' % tuple(self.items)
class Char_Length(BracketBase): # R426
"""
<char-length> = ( <type-param-value> )
| <scalar-int-literal-constant>
"""
subclass_names = ['Scalar_Int_Literal_Constant']
use_names = ['Type_Param_Value']
def match(string): return BracketBase.match('()',Type_Param_Value, string)
match = staticmethod(match)
class Char_Literal_Constant(Base): # R427
"""
<char-literal-constant> = [ <kind-param> _ ] ' <rep-char> '
| [ <kind-param> _ ] \" <rep-char> \"
"""
subclass_names = []
rep = pattern.char_literal_constant
def match(string):
if string[-1] not in '"\'': return
if string[-1]=='"':
abs_a_n_char_literal_constant_named = pattern.abs_a_n_char_literal_constant_named2
else:
abs_a_n_char_literal_constant_named = pattern.abs_a_n_char_literal_constant_named1
line, repmap = string_replace_map(string)
m = abs_a_n_char_literal_constant_named.match(line)
if not m: return
kind_param = m.group('kind_param')
line = m.group('value')
line = repmap(line)
return line, kind_param
match = staticmethod(match)
def tostr(self):
if self.items[1] is None: return str(self.items[0])
return '%s_%s' % (self.items[1], self.items[0])
class Logical_Literal_Constant(NumberBase): # R428
"""
<logical-literal-constant> = .TRUE. [ _ <kind-param> ]
| .FALSE. [ _ <kind-param> ]
"""
subclass_names = []
def match(string):
return NumberBase.match(pattern.abs_logical_literal_constant_named, string)
match = staticmethod(match)
class Derived_Type_Def(BlockBase): # R429
"""
<derived-type-def> = <derived-type-stmt>
[ <type-param-def-stmt> ]...
[ <private-or-sequence> ]...
[ <component-part> ]
[ <type-bound-procedure-part> ]
<end-type-stmt>
"""
subclass_names = []
use_names = ['Derived_Type_Stmt', 'Type_Param_Def_Stmt', 'Private_Or_Sequence',
'Component_Part',
'Type_Bound_Procedure_Part', 'End_Type_Stmt']
@staticmethod
def match(reader):
return BlockBase.match(Derived_Type_Stmt, [Type_Param_Def_Stmt, Private_Or_Sequence,
Component_Part, Type_Bound_Procedure_Part], End_Type_Stmt, reader,
match_names = True, set_unspecified_end_name = True # C431
)
class Derived_Type_Stmt(StmtBase): # R430
"""
<derived-type-stmt> = TYPE [ [ , <type-attr-spec-list> ] :: ] <type-name> [ ( <type-param-name-list> ) ]
"""
subclass_names = []
use_names = ['Type_Attr_Spec_List', 'Type_Name', 'Type_Param_Name_List']
@staticmethod
def match(string):
if string[:4].upper()!='TYPE': return
line = string[4:].lstrip()
i = line.find('::')
attr_specs = None
if i!=-1:
if line.startswith(','):
l = line[1:i].strip()
if not l: return
attr_specs = Type_Attr_Spec_List(l)
line = line[i+2:].lstrip()
m = pattern.name.match(line)
if m is None: return
name = Type_Name(m.group())
line = line[m.end():].lstrip()
if not line: return attr_specs, name, None
if line[0]+line[-1]!='()': return
return attr_specs, name, Type_Param_Name_List(line[1:-1].strip())
def tostr(self):
s = 'TYPE'
if self.items[0] is not None:
s += ', %s :: %s' % (self.items[0], self.items[1])
else:
s += ' :: %s' % (self.items[1])
if self.items[2] is not None:
s += '(%s)' % (self.items[2])
return s
def get_start_name(self):
return self.items[1].string
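# A minimal usage sketch for Derived_Type_Stmt:
#
#   Derived_Type_Stmt('TYPE :: point')
#     ->  (None, Type_Name('point'), None),  str() == 'TYPE :: point'
#   Derived_Type_Stmt('TYPE, ABSTRACT :: shape')
#     ->  (Type_Attr_Spec_List('ABSTRACT'), Type_Name('shape'), None)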
class Type_Name(Name): # C424
"""
<type-name> = <name>
<type-name> shall not be DOUBLEPRECISION or the name of intrinsic type
"""
subclass_names = []
use_names = []
def match(string):
if pattern.abs_intrinsic_type_name.match(string): return
return Name.match(string)
match = staticmethod(match)
class Type_Attr_Spec(Base): # R431
"""
<type-attr-spec> = <access-spec>
| EXTENDS ( <parent-type-name> )
| ABSTRACT
| BIND (C)
"""
    subclass_names = ['Access_Spec', 'Language_Binding_Spec'][:-1]  # the [:-1] drops 'Language_Binding_Spec'; BIND(C) is matched explicitly below
use_names = ['Parent_Type_Name']
@staticmethod
def match(string):
if len(string)==8 and string.upper()=='ABSTRACT':
return 'ABSTRACT', None
if string[:4].upper()=='BIND':
line = string[4:].lstrip()
if not line or line[0]+line[-1]!='()': return
line = line[1:-1].strip()
if line.upper()=='C':
return 'BIND', 'C'
elif string[:7].upper()=='EXTENDS':
line = string[7:].lstrip()
if not line or line[0]+line[-1]!='()': return
return 'EXTENDS', Parent_Type_Name(line[1:-1].strip())
def tostr(self):
if self.items[1] is None:
return '%s' % (self.items[0])
return '%s(%s)' % (self.items)
class Private_Or_Sequence(Base): # R432
"""
<private-or-sequence> = <private-components-stmt>
| <sequence-stmt>
"""
subclass_names = ['Private_Components_Stmt', 'Sequence_Stmt']
class End_Type_Stmt(EndStmtBase): # R433
"""
<end-type-stmt> = END TYPE [ <type-name> ]
"""
subclass_names = []
use_names = ['Type_Name']
@staticmethod
def match(string):
return EndStmtBase.match('TYPE',Type_Name, string, require_stmt_type=True)
class Sequence_Stmt(STRINGBase): # R434
"""
<sequence-stmt> = SEQUENCE
"""
subclass_names = []
@staticmethod
def match(string):
return STRINGBase.match('SEQUENCE', string)
class Type_Param_Def_Stmt(StmtBase): # R435
"""
<type-param-def-stmt> = INTEGER [ <kind-selector> ] , <type-param-attr-spec> :: <type-param-decl-list>
"""
subclass_names = []
use_names = ['Kind_Selector', 'Type_Param_Attr_Spec', 'Type_Param_Decl_List']
def match(string):
if string[:7].upper()!='INTEGER': return
line, repmap = string_replace_map(string[7:].lstrip())
if not line: return
i = line.find(',')
if i==-1: return
kind_selector = repmap(line[:i].rstrip()) or None
line = repmap(line[i+1:].lstrip())
i = line.find('::')
if i==-1: return
l1 = line[:i].rstrip()
l2 = line[i+2:].lstrip()
if not l1 or not l2: return
if kind_selector: kind_selector = Kind_Selector(kind_selector)
return kind_selector, Type_Param_Attr_Spec(l1), Type_Param_Decl_List(l2)
match = staticmethod(match)
def tostr(self):
s = 'INTEGER'
if self.items[0] is not None:
s += '%s, %s :: %s' % tuple(self.items)
else:
s += ', %s :: %s' % tuple(self.items[1:])
return s
class Type_Param_Decl(BinaryOpBase): # R436
"""
<type-param-decl> = <type-param-name> [ = <scalar-int-initialization-expr> ]
"""
subclass_names = ['Type_Param_Name']
use_names = ['Scalar_Int_Initialization_Expr']
def match(string):
if '=' not in string: return
lhs,rhs = string.split('=',1)
lhs = lhs.rstrip()
rhs = rhs.lstrip()
if not lhs or not rhs: return
return Type_Param_Name(lhs),'=',Scalar_Int_Initialization_Expr(rhs)
match = staticmethod(match)
class Type_Param_Attr_Spec(STRINGBase): # R437
"""
<type-param-attr-spec> = KIND
| LEN
"""
subclass_names = []
def match(string): return STRINGBase.match(['KIND', 'LEN'], string)
match = staticmethod(match)
class Component_Part(BlockBase): # R438
"""
<component-part> = [ <component-def-stmt> ]...
"""
subclass_names = []
use_names = ['Component_Def_Stmt']
def match(reader):
content = []
while 1:
try:
obj = Component_Def_Stmt(reader)
except NoMatchError:
obj = None
if obj is None:
break
content.append(obj)
if content:
return content,
return
match = staticmethod(match)
def tofortran(self, tab='', isfix=None):
l = []
for item in self.content:
l.append(item.tofortran(tab=tab,isfix=isfix))
return '\n'.join(l)
class Component_Def_Stmt(Base): # R439
"""
<component-def-stmt> = <data-component-def-stmt>
| <proc-component-def-stmt>
"""
subclass_names = ['Data_Component_Def_Stmt', 'Proc_Component_Def_Stmt']
class Data_Component_Def_Stmt(Type_Declaration_StmtBase): # R440
"""
<data-component-def-stmt> = <declaration-type-spec> [ [ , <component-attr-spec-list> ] :: ] <component-decl-list>
"""
subclass_names = []
use_names = ['Declaration_Type_Spec', 'Component_Attr_Spec_List', 'Component_Decl_List']
@staticmethod
def match(string):
return Type_Declaration_StmtBase.match(Declaration_Type_Spec, Component_Attr_Spec_List, Component_Decl_List, string)
class Dimension_Component_Attr_Spec(CALLBase):
"""
<dimension-component-attr-spec> = DIMENSION ( <component-array-spec> )
"""
subclass_names = []
use_names = ['Component_Array_Spec']
def match(string): return CALLBase.match('DIMENSION', Component_Array_Spec, string)
match = staticmethod(match)
class Component_Attr_Spec(STRINGBase): # R441
"""
<component-attr-spec> = POINTER
| DIMENSION ( <component-array-spec> )
| ALLOCATABLE
| <access-spec>
"""
subclass_names = ['Access_Spec', 'Dimension_Component_Attr_Spec']
use_names = []
@staticmethod
def match(string):
return STRINGBase.match(['POINTER', 'ALLOCATABLE'], string)
class Component_Decl(Base): # R442
"""
<component-decl> = <component-name> [ ( <component-array-spec> ) ] [ * <char-length> ] [ <component-initialization> ]
"""
subclass_names = []
use_names = ['Component_Name', 'Component_Array_Spec', 'Char_Length', 'Component_Initialization']
def match(string):
m = pattern.name.match(string)
if m is None: return
name = Component_Name(m.group())
newline = string[m.end():].lstrip()
if not newline: return name, None, None, None
array_spec = None
char_length = None
init = None
if newline.startswith('('):
line, repmap = string_replace_map(newline)
i = line.find(')')
if i==-1: return
array_spec = Component_Array_Spec(repmap(line[1:i].strip()))
newline = repmap(line[i+1:].lstrip())
if newline.startswith('*'):
line, repmap = string_replace_map(newline)
i = line.find('=')
if i!=-1:
char_length = repmap(line[1:i].strip())
                newline = repmap(line[i:].lstrip())
else:
char_length = repmap(newline[1:].strip())
newline = ''
char_length = Char_Length(char_length)
if newline.startswith('='):
init = Component_Initialization(newline)
else:
assert newline=='',repr(newline)
return name, array_spec, char_length, init
match = staticmethod(match)
def tostr(self):
s = str(self.items[0])
if self.items[1] is not None:
s += '(' + str(self.items[1]) + ')'
if self.items[2] is not None:
s += '*' + str(self.items[2])
if self.items[3] is not None:
s += ' ' + str(self.items[3])
return s
class Component_Array_Spec(Base): # R443
"""
<component-array-spec> = <explicit-shape-spec-list>
| <deferred-shape-spec-list>
"""
subclass_names = ['Explicit_Shape_Spec_List', 'Deferred_Shape_Spec_List']
class Component_Initialization(Base): # R444
"""
<component-initialization> = = <initialization-expr>
| => <null-init>
"""
subclass_names = []
use_names = ['Initialization_Expr', 'Null_Init']
def match(string):
if string.startswith('=>'):
return '=>', Null_Init(string[2:].lstrip())
if string.startswith('='):
return '=', Initialization_Expr(string[1:].lstrip())
return
match = staticmethod(match)
def tostr(self): return '%s %s' % tuple(self.items)
class Proc_Component_Def_Stmt(StmtBase): # R445
"""
<proc-component-def-stmt> = PROCEDURE ( [ <proc-interface> ] ) , <proc-component-attr-spec-list> :: <proc-decl-list>
"""
subclass_names = []
use_names = ['Proc_Interface', 'Proc_Component_Attr_Spec_List', 'Proc_Decl_List']
@staticmethod
def match(string):
if string[:9].upper()!='PROCEDURE': return
line, repmap = string_replace_map(string[9:].lstrip())
if not line.startswith('('): return
i = line.find(')')
if i==-1: return
p = repmap(line[:i+1])[1:-1].strip() or None
if p:
p = Proc_Interface(p)
line = line[i+1:].lstrip()
if not line.startswith(','): return
line = line[1:].strip()
i = line.find('::')
if i==-1: return
return p, Proc_Component_Attr_Spec_List(repmap(line[:i].rstrip())), Proc_Decl_List(repmap(line[i+2:].lstrip()))
def tostr(self):
if self.items[0] is not None:
return 'PROCEDURE(%s), %s :: %s' % (self.items)
return 'PROCEDURE(), %s :: %s' % (self.items[1:])
class Proc_Component_PASS_Arg_Name(CALLBase):
"""
<proc-component-PASS-arg-name> = PASS ( <arg-name> )
"""
subclass_names = []
use_names = ['Arg_Name']
def match(string): return CALLBase.match('PASS', Arg_Name, string)
match = staticmethod(match)
class Proc_Component_Attr_Spec(STRINGBase): # R446
"""
<proc-component-attr-spec> = POINTER
| PASS [ ( <arg-name> ) ]
| NOPASS
| <access-spec>
"""
subclass_names = ['Access_Spec', 'Proc_Component_PASS_Arg_Name']
def match(string): return STRINGBase.match(['POINTER','PASS','NOPASS'], string)
match = staticmethod(match)
class Private_Components_Stmt(StmtBase): # R447
"""
<private-components-stmt> = PRIVATE
"""
subclass_names = []
    def match(string): return STRINGBase.match('PRIVATE', string)
match = staticmethod(match)
class Type_Bound_Procedure_Part(BlockBase): # R448
"""
<type-bound-procedure-part> = <contains-stmt>
[ <binding-private-stmt> ]
<proc-binding-stmt>
[ <proc-binding-stmt> ]...
"""
subclass_names = []
use_names = ['Contains_Stmt', 'Binding_Private_Stmt', 'Proc_Binding_Stmt']
@staticmethod
def match(reader):
return BlockBase.match(Contains_Stmt, [Binding_Private_Stmt, Proc_Binding_Stmt], None, reader)
class Binding_Private_Stmt(StmtBase, STRINGBase): # R449
"""
<binding-private-stmt> = PRIVATE
"""
subclass_names = []
    def match(string): return STRINGBase.match('PRIVATE', string)
match = staticmethod(match)
class Proc_Binding_Stmt(Base): # R450
"""
<proc-binding-stmt> = <specific-binding>
| <generic-binding>
| <final-binding>
"""
subclass_names = ['Specific_Binding', 'Generic_Binding', 'Final_Binding']
class Specific_Binding(StmtBase): # R451
"""
<specific-binding> = PROCEDURE [ ( <interface-name> ) ] [ [ , <binding-attr-list> ] :: ] <binding-name> [ => <procedure-name> ]
"""
subclass_names = []
use_names = ['Interface_Name', 'Binding_Attr_List', 'Binding_Name', 'Procedure_Name']
@staticmethod
def match(string):
if string[:9].upper()!='PROCEDURE': return
line = string[9:].lstrip()
iname = None
if line.startswith('('):
i = line.find(')')
if i==-1: return
            iname = Interface_Name(line[1:i].strip())
line = line[i+1:].lstrip()
l = None
i = line.find('::')
if i!=-1:
if line.startswith(','):
l = Binding_Attr_List(line[1:i].strip())
line = line[i+2:].lstrip()
i = line.find('=>')
pname = None
if i!=-1:
pname = Procedure_Name(line[i+2:].lstrip())
line = line[:i].rstrip()
return iname, l, Binding_Name(line), pname
def tostr(self):
r = 'PROCEDURE'
if self.items[0] is not None:
r += '(%s)' % (self.items[0])
if self.items[1] is not None:
r += ', %s ::' % (self.items[1])
r += ' %s' % (self.items[2])
if self.items[3] is not None:
r += ' => %s' % (self.items[3])
return r
class Generic_Binding(StmtBase): # R452
"""
<generic-binding> = GENERIC [ , <access-spec> ] :: <generic-spec> => <binding-name-list>
"""
subclass_names = []
use_names = ['Access_Spec', 'Generic_Spec', 'Binding_Name_List']
@staticmethod
def match(string):
if string[:7].upper()!='GENERIC': return
line = string[7:].lstrip()
i = line.find('::')
if i==-1: return
aspec = None
if line.startswith(','):
aspec = Access_Spec(line[1:i].strip())
line = line[i+2:].lstrip()
i = line.find('=>')
if i==-1: return
        return aspec, Generic_Spec(line[:i].rstrip()), Binding_Name_List(line[i+2:].lstrip())
def tostr(self):
if self.items[0] is None:
return 'GENERIC :: %s => %s' % (self.items[1:])
return 'GENERIC, %s :: %s => %s' % (self.items)
class Binding_PASS_Arg_Name(CALLBase):
"""
<binding-PASS-arg-name> = PASS ( <arg-name> )
"""
subclass_names = []
use_names = ['Arg_Name']
def match(string): return CALLBase.match('PASS', Arg_Name, string)
match = staticmethod(match)
class Binding_Attr(STRINGBase): # R453
"""
<binding-attr> = PASS [ ( <arg-name> ) ]
| NOPASS
| NON_OVERRIDABLE
| <access-spec>
"""
subclass_names = ['Access_Spec', 'Binding_PASS_Arg_Name']
def match(string): return STRINGBase.match(['PASS', 'NOPASS', 'NON_OVERRIDABLE'], string)
match = staticmethod(match)
class Final_Binding(StmtBase, WORDClsBase): # R454
"""
<final-binding> = FINAL [ :: ] <final-subroutine-name-list>
"""
subclass_names = []
use_names = ['Final_Subroutine_Name_List']
def match(string): return WORDClsBase.match('FINAL',Final_Subroutine_Name_List,string,check_colons=True, require_cls=True)
match = staticmethod(match)
tostr = WORDClsBase.tostr_a
class Derived_Type_Spec(CallBase): # R455
"""
<derived-type-spec> = <type-name> [ ( <type-param-spec-list> ) ]
"""
subclass_names = ['Type_Name']
use_names = ['Type_Param_Spec_List']
def match(string): return CallBase.match(Type_Name, Type_Param_Spec_List, string)
match = staticmethod(match)
class Type_Param_Spec(KeywordValueBase): # R456
"""
<type-param-spec> = [ <keyword> = ] <type-param-value>
"""
subclass_names = ['Type_Param_Value']
use_names = ['Keyword']
def match(string): return KeywordValueBase.match(Keyword, Type_Param_Value, string)
match = staticmethod(match)
class Structure_Constructor_2(KeywordValueBase): # R457.b
"""
<structure-constructor-2> = [ <keyword> = ] <component-data-source>
"""
subclass_names = ['Component_Data_Source']
use_names = ['Keyword']
def match(string): return KeywordValueBase.match(Keyword, Component_Data_Source, string)
match = staticmethod(match)
class Structure_Constructor(CallBase): # R457
"""
<structure-constructor> = <derived-type-spec> ( [ <component-spec-list> ] )
| <structure-constructor-2>
"""
subclass_names = ['Structure_Constructor_2']
use_names = ['Derived_Type_Spec', 'Component_Spec_List']
def match(string): return CallBase.match(Derived_Type_Spec, Component_Spec_List, string)
match = staticmethod(match)
class Component_Spec(KeywordValueBase): # R458
"""
<component-spec> = [ <keyword> = ] <component-data-source>
"""
subclass_names = ['Component_Data_Source']
use_names = ['Keyword']
def match(string): return KeywordValueBase.match(Keyword, Component_Data_Source, string)
match = staticmethod(match)
class Component_Data_Source(Base): # R459
"""
<component-data-source> = <expr>
| <data-target>
| <proc-target>
"""
subclass_names = ['Proc_Target', 'Data_Target', 'Expr']
class Enum_Def(BlockBase): # R460
"""
<enum-def> = <enum-def-stmt>
<enumerator-def-stmt>
[ <enumerator-def-stmt> ]...
<end-enum-stmt>
"""
subclass_names = []
use_names = ['Enum_Def_Stmt', 'Enumerator_Def_Stmt', 'End_Enum_Stmt']
@staticmethod
def match(reader):
return BlockBase.match(Enum_Def_Stmt, [Enumerator_Def_Stmt], End_Enum_Stmt, reader)
class Enum_Def_Stmt(StmtBase): # R461
"""
<enum-def-stmt> = ENUM, BIND(C)
"""
subclass_names = []
use_names = []
@staticmethod
def match(string):
if string.upper().replace(' ','')!='ENUM,BIND(C)': return
return 'ENUM, BIND(C)',
def tostr(self):
return '%s' % (self.items[0])
class Enumerator_Def_Stmt(StmtBase, WORDClsBase): # R462
"""
<enumerator-def-stmt> = ENUMERATOR [ :: ] <enumerator-list>
"""
subclass_names = []
use_names = ['Enumerator_List']
def match(string): return WORDClsBase.match('ENUMERATOR',Enumerator_List,string,check_colons=True, require_cls=True)
match = staticmethod(match)
tostr = WORDClsBase.tostr_a
class Enumerator(BinaryOpBase): # R463
"""
<enumerator> = <named-constant> [ = <scalar-int-initialization-expr> ]
"""
subclass_names = ['Named_Constant']
use_names = ['Scalar_Int_Initialization_Expr']
def match(string):
if '=' not in string: return
lhs,rhs = string.split('=',1)
return Named_Constant(lhs.rstrip()),'=',Scalar_Int_Initialization_Expr(rhs.lstrip())
match = staticmethod(match)
class End_Enum_Stmt(EndStmtBase): # R464
"""
<end-enum-stmt> = END ENUM
"""
subclass_names = []
def match(string): return EndStmtBase.match('ENUM',None, string, require_stmt_type=True)
match = staticmethod(match)
class Array_Constructor(BracketBase): # R465
"""
<array-constructor> = (/ <ac-spec> /)
| <left-square-bracket> <ac-spec> <right-square-bracket>
"""
subclass_names = []
use_names = ['Ac_Spec']
def match(string):
try:
obj = BracketBase.match('(//)', Ac_Spec, string)
except NoMatchError:
obj = None
if obj is None:
obj = BracketBase.match('[]', Ac_Spec, string)
return obj
match = staticmethod(match)
class Ac_Spec(Base): # R466
"""
<ac-spec> = <type-spec> ::
| [ <type-spec> :: ] <ac-value-list>
"""
subclass_names = ['Ac_Value_List']
use_names = ['Type_Spec']
def match(string):
if string.endswith('::'):
return Type_Spec(string[:-2].rstrip()),None
line, repmap = string_replace_map(string)
i = line.find('::')
if i==-1: return
ts = line[:i].rstrip()
line = line[i+2:].lstrip()
ts = repmap(ts)
line = repmap(line)
return Type_Spec(ts),Ac_Value_List(line)
match = staticmethod(match)
def tostr(self):
if self.items[0] is None:
return str(self.items[1])
if self.items[1] is None:
return str(self.items[0]) + ' ::'
return '%s :: %s' % self.items
# R467: <left-square-bracket> = [
# R468: <right-square-bracket> = ]
class Ac_Value(Base): # R469
"""
<ac-value> = <expr>
| <ac-implied-do>
"""
subclass_names = ['Ac_Implied_Do','Expr']
class Ac_Implied_Do(Base): # R470
"""
<ac-implied-do> = ( <ac-value-list> , <ac-implied-do-control> )
"""
subclass_names = []
use_names = ['Ac_Value_List','Ac_Implied_Do_Control']
def match(string):
if string[0]+string[-1] != '()': return
line, repmap = string_replace_map(string[1:-1].strip())
i = line.rfind('=')
if i==-1: return
j = line[:i].rfind(',')
assert j!=-1
s1 = repmap(line[:j].rstrip())
s2 = repmap(line[j+1:].lstrip())
return Ac_Value_List(s1),Ac_Implied_Do_Control(s2)
match = staticmethod(match)
def tostr(self): return '(%s, %s)' % tuple(self.items)
class Ac_Implied_Do_Control(Base): # R471
"""
<ac-implied-do-control> = <ac-do-variable> = <scalar-int-expr> , <scalar-int-expr> [ , <scalar-int-expr> ]
"""
subclass_names = []
use_names = ['Ac_Do_Variable','Scalar_Int_Expr']
def match(string):
i = string.find('=')
if i==-1: return
s1 = string[:i].rstrip()
line, repmap = string_replace_map(string[i+1:].lstrip())
t = line.split(',')
if not (2<=len(t)<=3): return
t = [Scalar_Int_Expr(s.strip()) for s in t]
return Ac_Do_Variable(s1), t
match = staticmethod(match)
def tostr(self): return '%s = %s' % (self.items[0], ', '.join(map(str,self.items[1])))
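# A minimal usage sketch for Ac_Implied_Do_Control:
#
#   Ac_Implied_Do_Control('i = 1, n')
#     ->  (Ac_Do_Variable('i'), [Scalar_Int_Expr('1'), Scalar_Int_Expr('n')])
#   str(...) == 'i = 1, n'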
class Ac_Do_Variable(Base): # R472
"""
<ac-do-variable> = <scalar-int-variable>
<ac-do-variable> shall be a named variable
"""
subclass_names = ['Scalar_Int_Variable']
###############################################################################
############################### SECTION 5 ####################################
###############################################################################
class Type_Declaration_Stmt(Type_Declaration_StmtBase): # R501
"""
<type-declaration-stmt> = <declaration-type-spec> [ [ , <attr-spec> ]... :: ] <entity-decl-list>
"""
subclass_names = []
use_names = ['Declaration_Type_Spec', 'Attr_Spec_List', 'Entity_Decl_List']
@staticmethod
def match(string):
return Type_Declaration_StmtBase.match(Declaration_Type_Spec, Attr_Spec_List, Entity_Decl_List, string)
@staticmethod
def match2(string):
line, repmap = string_replace_map(string)
i = line.find('::')
if i!=-1:
j = line[:i].find(',')
if j!=-1:
i = j
else:
if line[:6].upper()=='DOUBLE':
m = re.search(r'\s[a-z_]',line[6:].lstrip(),re.I)
if m is None: return
i = m.start() + len(line)-len(line[6:].lstrip())
else:
m = re.search(r'\s[a-z_]',line,re.I)
if m is None: return
i = m.start()
type_spec = Declaration_Type_Spec(repmap(line[:i].rstrip()))
if type_spec is None: return
line = line[i:].lstrip()
if line.startswith(','):
i = line.find('::')
if i==-1: return
attr_specs = Attr_Spec_List(repmap(line[1:i].strip()))
if attr_specs is None: return
line = line[i:]
else:
attr_specs = None
if line.startswith('::'):
line = line[2:].lstrip()
entity_decls = Entity_Decl_List(repmap(line))
if entity_decls is None: return
return type_spec, attr_specs, entity_decls
def tostr(self):
if self.items[1] is None:
return '%s :: %s' % (self.items[0], self.items[2])
else:
return '%s, %s :: %s' % self.items
class Declaration_Type_Spec(Base): # R502
"""
<declaration-type-spec> = <intrinsic-type-spec>
| TYPE ( <derived-type-spec> )
| CLASS ( <derived-type-spec> )
| CLASS ( * )
"""
subclass_names = ['Intrinsic_Type_Spec']
use_names = ['Derived_Type_Spec']
def match(string):
if string[-1] != ')': return
start = string[:4].upper()
if start == 'TYPE':
line = string[4:].lstrip()
if not line.startswith('('): return
return 'TYPE',Derived_Type_Spec(line[1:-1].strip())
start = string[:5].upper()
if start == 'CLASS':
line = string[5:].lstrip()
if not line.startswith('('): return
line = line[1:-1].strip()
if line=='*': return 'CLASS','*'
return 'CLASS', Derived_Type_Spec(line)
return
match = staticmethod(match)
def tostr(self): return '%s(%s)' % self.items
class Dimension_Attr_Spec(CALLBase): # R503.d
"""
<dimension-attr-spec> = DIMENSION ( <array-spec> )
"""
subclass_names = []
use_names = ['Array_Spec']
def match(string): return CALLBase.match('DIMENSION', Array_Spec, string)
match = staticmethod(match)
class Intent_Attr_Spec(CALLBase): # R503.f
"""
<intent-attr-spec> = INTENT ( <intent-spec> )
"""
subclass_names = []
use_names = ['Intent_Spec']
def match(string): return CALLBase.match('INTENT', Intent_Spec, string)
match = staticmethod(match)
class Attr_Spec(STRINGBase): # R503
"""
<attr-spec> = <access-spec>
| ALLOCATABLE
| ASYNCHRONOUS
| DIMENSION ( <array-spec> )
| EXTERNAL
| INTENT ( <intent-spec> )
| INTRINSIC
| <language-binding-spec>
| OPTIONAL
| PARAMETER
| POINTER
| PROTECTED
| SAVE
| TARGET
| VALUE
| VOLATILE
"""
subclass_names = ['Access_Spec', 'Language_Binding_Spec',
'Dimension_Attr_Spec', 'Intent_Attr_Spec']
use_names = []
def match(string): return STRINGBase.match(pattern.abs_attr_spec, string)
match = staticmethod(match)
class Entity_Decl(Base): # R504
"""
<entity-decl> = <object-name> [ ( <array-spec> ) ] [ * <char-length> ] [ <initialization> ]
| <function-name> [ * <char-length> ]
"""
subclass_names = []
use_names = ['Object_Name', 'Array_Spec', 'Char_Length', 'Initialization', 'Function_Name']
def match(string, target=False):
m = pattern.name.match(string)
if m is None: return
name = Name(m.group())
newline = string[m.end():].lstrip()
if not newline: return name, None, None, None
array_spec = None
char_length = None
init = None
if newline.startswith('('):
line, repmap = string_replace_map(newline)
i = line.find(')')
if i==-1: return
array_spec = Array_Spec(repmap(line[1:i].strip()))
newline = repmap(line[i+1:].lstrip())
if target:
if newline: return
return name, array_spec, None, None
if newline.startswith('*'):
line, repmap = string_replace_map(newline)
i = line.find('=')
if i!=-1:
char_length = repmap(line[1:i].strip())
                newline = repmap(line[i:].lstrip())
else:
char_length = repmap(newline[1:].strip())
newline = ''
char_length = Char_Length(char_length)
if newline.startswith('='):
init = Initialization(newline)
elif newline:
return
else:
assert newline=='',repr((newline, string))
return name, array_spec, char_length, init
match = staticmethod(match)
def tostr(self):
s = str(self.items[0])
if self.items[1] is not None:
s += '(' + str(self.items[1]) + ')'
if self.items[2] is not None:
s += '*' + str(self.items[2])
if self.items[3] is not None:
s += ' ' + str(self.items[3])
return s
class Object_Name(Base): # R505
"""
<object-name> = <name>
"""
subclass_names = ['Name']
class Initialization(Base): # R506
"""
<initialization> = = <initialization-expr>
| => <null-init>
"""
subclass_names = []
use_names = ['Initialization_Expr', 'Null_Init']
def match(string):
if string.startswith('=>'):
return '=>', Null_Init(string[2:].lstrip())
if string.startswith('='):
return '=', Initialization_Expr(string[1:].lstrip())
return
match = staticmethod(match)
def tostr(self): return '%s %s' % self.items
class Null_Init(STRINGBase): # R507
"""
<null-init> = <function-reference>
<function-reference> shall be a reference to the NULL intrinsic function with no arguments.
"""
subclass_names = ['Function_Reference']
def match(string): return STRINGBase.match('NULL', string)
match = staticmethod(match)
class Access_Spec(STRINGBase): # R508
"""
:F03R:`508`::
<access-spec> = PUBLIC
| PRIVATE
"""
subclass_names = []
def match(string): return STRINGBase.match(['PUBLIC','PRIVATE'], string)
match = staticmethod(match)
class Language_Binding_Spec(Base): # R509
"""
:F03R:`509`::
<language-binding-spec> = BIND ( C [ , NAME = <scalar-char-initialization-expr> ] )
"""
subclass_names = []
use_names = ['Scalar_Char_Initialization_Expr']
def match(string):
start = string[:4].upper()
if start != 'BIND': return
line = string[4:].lstrip()
if not line or line[0]+line[-1]!='()': return
line = line[1:-1].strip()
if not line: return
start = line[0].upper()
if start!='C': return
line = line[1:].lstrip()
if not line: return None,
if not line.startswith(','): return
line = line[1:].lstrip()
start = line[:4].upper()
if start!='NAME': return
line=line[4:].lstrip()
if not line.startswith('='): return
return Scalar_Char_Initialization_Expr(line[1:].lstrip()),
match = staticmethod(match)
def tostr(self):
if self.items[0] is None: return 'BIND(C)'
return 'BIND(C, NAME = %s)' % (self.items[0])
class Array_Spec(Base): # R510
"""
:F03R:`510`::
<array-spec> = <explicit-shape-spec-list>
| <assumed-shape-spec-list>
| <deferred-shape-spec-list>
| <assumed-size-spec>
"""
subclass_names = ['Assumed_Size_Spec', 'Explicit_Shape_Spec_List', 'Assumed_Shape_Spec_List',
'Deferred_Shape_Spec_List']
class Explicit_Shape_Spec(SeparatorBase): # R511
"""
<explicit-shape-spec> = [ <lower-bound> : ] <upper-bound>
"""
subclass_names = []
use_names = ['Lower_Bound', 'Upper_Bound']
def match(string):
line, repmap = string_replace_map(string)
if ':' not in line:
return None, Upper_Bound(string)
lower,upper = line.split(':',1)
lower = lower.rstrip()
upper = upper.lstrip()
if not upper: return
if not lower: return
return Lower_Bound(repmap(lower)), Upper_Bound(repmap(upper))
match = staticmethod(match)
def tostr(self):
if self.items[0] is None: return str(self.items[1])
return SeparatorBase.tostr(self)
class Lower_Bound(Base): # R512
"""
<lower-bound> = <specification-expr>
"""
subclass_names = ['Specification_Expr']
class Upper_Bound(Base): # R513
"""
<upper-bound> = <specification-expr>
"""
subclass_names = ['Specification_Expr']
class Assumed_Shape_Spec(SeparatorBase): # R514
"""
:F03R:`514`::
<assumed-shape-spec> = [ <lower-bound> ] :
"""
subclass_names = []
use_names = ['Lower_Bound']
def match(string): return SeparatorBase.match(Lower_Bound, None, string)
match = staticmethod(match)
class Deferred_Shape_Spec(SeparatorBase): # R515
"""
:F03R:`515`::
<deferred_shape_spec> = :
"""
subclass_names = []
def match(string):
if string==':': return None,None
return
match = staticmethod(match)
class Assumed_Size_Spec(Base): # R516
"""
:F03R:`516`::
<assumed-size-spec> = [ <explicit-shape-spec-list> , ] [ <lower-bound> : ] *
"""
subclass_names = []
use_names = ['Explicit_Shape_Spec_List', 'Lower_Bound']
def match(string):
if not string.endswith('*'): return
line = string[:-1].rstrip()
if not line: return None,None
if line.endswith(':'):
line, repmap = string_replace_map(line[:-1].rstrip())
i = line.rfind(',')
if i==-1:
return None, Lower_Bound(repmap(line))
return Explicit_Shape_Spec_List(repmap(line[:i].rstrip())), Lower_Bound(repmap(line[i+1:].lstrip()))
if not line.endswith(','): return
line = line[:-1].rstrip()
return Explicit_Shape_Spec_List(line), None
match = staticmethod(match)
def tostr(self):
s = ''
if self.items[0] is not None:
s += str(self.items[0]) + ', '
if self.items[1] is not None:
s += str(self.items[1]) + ' : '
s += '*'
return s
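# A minimal usage sketch for Assumed_Size_Spec:
#
#   Assumed_Size_Spec('*')         ->  (None, None),  str() == '*'
#   Assumed_Size_Spec('n, 0 : *')  ->  (Explicit_Shape_Spec_List('n'), Lower_Bound('0')),
#                                      str() == 'n, 0 : *'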
class Intent_Spec(STRINGBase): # R517
"""
<intent-spec> = IN
| OUT
| INOUT
"""
subclass_names = []
def match(string): return STRINGBase.match(pattern.abs_intent_spec, string)
match = staticmethod(match)
class Access_Stmt(StmtBase, WORDClsBase): # R518
"""
:F03R:`518`::
<access-stmt> = <access-spec> [ [ :: ] <access-id-list> ]
"""
subclass_names = []
use_names = ['Access_Spec', 'Access_Id_List']
def match(string): return WORDClsBase.match(['PUBLIC', 'PRIVATE'],Access_Id_List,string,check_colons=True, require_cls=False)
match = staticmethod(match)
tostr = WORDClsBase.tostr_a
class Access_Id(Base): # R519
"""
:F03R:`519`::
<access-id> = <use-name>
| <generic-spec>
"""
subclass_names = ['Use_Name', 'Generic_Spec']
class Object_Name_Deferred_Shape_Spec_List_Item(CallBase):
"""
<..> = <object-name> [ ( <deferred-shape-spec-list> ) ]
"""
subclass_names = ['Object_Name']
use_names = ['Deferred_Shape_Spec_List']
def match(string): return CallBase.match(Object_Name, Deferred_Shape_Spec_List, string, require_rhs=True)
match = staticmethod(match)
class Allocatable_Stmt(StmtBase, WORDClsBase): # R520
"""
:F03R:`520`::
    <allocatable-stmt> = ALLOCATABLE [ :: ] <object-name> [ ( <deferred-shape-spec-list> ) ] [ , <object-name> [ ( <deferred-shape-spec-list> ) ] ]...
"""
subclass_names = []
use_names = ['Object_Name_Deferred_Shape_Spec_List_Item_List']
def match(string):
return WORDClsBase.match('ALLOCATABLE', Object_Name_Deferred_Shape_Spec_List_Item_List, string,
check_colons=True, require_cls=True)
match = staticmethod(match)
class Asynchronous_Stmt(StmtBase, WORDClsBase): # R521
"""
:F03R:`521`::
<asynchronous-stmt> = ASYNCHRONOUS [ :: ] <object-name-list>
"""
subclass_names = []
use_names = ['Object_Name_List']
def match(string): return WORDClsBase.match('ASYNCHRONOUS',Object_Name_List,string,check_colons=True, require_cls=True)
match = staticmethod(match)
class Bind_Stmt(StmtBase): # R522
"""
:F03R:`522`::
<bind-stmt> = <language-binding-spec> [ :: ] <bind-entity-list>
"""
subclass_names = []
use_names = ['Language_Binding_Spec', 'Bind_Entity_List']
def match(string):
i = string.find('::')
if i==-1:
i = string.find(')')
if i==-1: return
            lhs, rhs = string[:i+1], string[i+1:]
else:
lhs, rhs = string.split('::',1)
lhs = lhs.rstrip()
rhs = rhs.lstrip()
if not lhs or not rhs: return
return Language_Binding_Spec(lhs), Bind_Entity_List(rhs)
match = staticmethod(match)
def tostr(self):
return '%s :: %s' % self.items
class Bind_Entity(BracketBase): # R523
"""
<bind-entity> = <entity-name>
| / <common-block-name> /
"""
subclass_names = ['Entity_Name']
use_names = ['Common_Block_Name']
def match(string): return BracketBase.match('//',Common_Block_Name, string)
match = staticmethod(match)
class Data_Stmt(StmtBase): # R524
"""
:F03R:524::
<data-stmt> = DATA <data-stmt-set> [ [ , ] <data-stmt-set> ]...
"""
subclass_names = []
use_names = ['Data_Stmt_Set']
@staticmethod
def match(string):
if string[:4].upper()!='DATA':
return
line, repmap = string_replace_map(string[4:].lstrip())
i = line.find('/')
if i==-1: return
i = line.find('/',i+1)
if i==-1: return
items = [Data_Stmt_Set(repmap(line[:i+1]))]
line = line[i+1:].lstrip()
while line:
if line.startswith(','):
line = line[1:].lstrip()
i = line.find('/')
if i==-1: return
i = line.find('/',i+1)
if i==-1: return
items.append(Data_Stmt_Set(repmap(line[:i+1])))
line = line[i+1:].lstrip()
return tuple(items)
def tostr(self):
return 'DATA ' + ', '.join(map(str, self.items))
class Data_Stmt_Set(Base): # R525
"""
:F03R:525::
<data-stmt-set> = <data-stmt-object-list> / <data-stmt-value-list> /
"""
subclass_names = []
use_names = ['Data_Stmt_Object_List', 'Data_Stmt_Value_List']
@staticmethod
def match(string):
if not string.endswith('/'):
return
line, repmap = string_replace_map(string)
i = line.find('/')
if i==-1:
return
data_stmt_object_list = Data_Stmt_Object_List(repmap(line[:i].rstrip()))
data_stmt_value_list = Data_Stmt_Value_List(repmap(line[i+1:-1].strip()))
return data_stmt_object_list, data_stmt_value_list
data_stmt_object_list = property(lambda self: self.items[0])
data_stmt_value_list = property(lambda self: self.items[1])
def tostr(self):
return '%s / %s /' % tuple(self.items)
class Data_Stmt_Object(Base): # R526
"""
:F03R:526::
<data-stmt-object> = <variable>
| <data-implied-do>
"""
subclass_names = ['Variable', 'Data_Implied_Do']
class Data_Implied_Do(Base): # R527
"""
:F03R:527::
    <data-implied-do> = ( <data-i-do-object-list> , <data-i-do-variable> = <scalar-int-expr> , <scalar-int-expr> [ , <scalar-int-expr> ] )
"""
subclass_names = []
use_names = ['Data_I_Do_Object_List', 'Data_I_Do_Variable', 'Scalar_Int_Expr']
@staticmethod
def match(string):
if not (string.startswith('(') and string.endswith(')')):
return
line, repmap = string_replace_map(string[1:-1].strip())
s = line.split('=',1)
if len(s) != 2:
return
lhs = s[0].rstrip()
rhs = s[1].lstrip()
s1 = lhs.rsplit(',',1)
if len(s1) != 2:
return
s2 = rhs.split(',')
if len(s2) not in [2,3]:
return
data_i_do_object_list = Data_I_Do_Object_List(repmap(s1[0].rstrip()))
data_i_do_variable = Data_I_Do_Variable(repmap(s1[1].lstrip()))
scalar_int_expr1 = Scalar_Int_Expr(repmap(s2[0].rstrip()))
scalar_int_expr2 = Scalar_Int_Expr(repmap(s2[1].strip()))
if len(s2)==3:
scalar_int_expr3 = Scalar_Int_Expr(repmap(s2[2].lstrip()))
else:
scalar_int_expr3 = None
return data_i_do_object_list, data_i_do_variable, scalar_int_expr1, scalar_int_expr2, scalar_int_expr3
data_i_do_object_list = property(lambda self: self.items[0])
data_i_do_variable = property(lambda self: self.items[1])
scalar_int_expr1 = property(lambda self: self.items[2])
scalar_int_expr2 = property(lambda self: self.items[3])
scalar_int_expr3 = property(lambda self: self.items[4])
def tostr(self):
l = '%s, %s = %s, %s' % tuple(self.items[:4])
if self.items[4] is not None:
l += ', %s' % (self.items[4])
return '('+l+')'
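# Illustrative sketch, assuming the usual Base.__new__ match dispatch:
#   Data_Implied_Do('(a(i), i = 1, 10)') yields items
#   (Data_I_Do_Object_List('a(i)'), Data_I_Do_Variable('i'), '1', '10', None)
#   and str() reproduces '(a(i), i = 1, 10)'.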
class Data_I_Do_Object(Base): # R528
"""
<data-i-do-object> = <array-element>
| <scalar-structure-component>
| <data-implied-do>
"""
subclass_names = ['Array_Element', 'Scalar_Structure_Component', 'Data_Implied_Do']
class Data_I_Do_Variable(Base): # R529
"""
<data-i-do-variable> = <scalar-int-variable>
"""
subclass_names = ['Scalar_Int_Variable']
class Data_Stmt_Value(Base): # R530
"""
<data-stmt-value> = [ <data-stmt-repeat> * ] <data-stmt-constant>
"""
subclass_names = ['Data_Stmt_Constant']
use_names = ['Data_Stmt_Repeat']
def match(string):
line, repmap = string_replace_map(string)
s = line.split('*',1)
if len(s)!=2: return
lhs = repmap(s[0].rstrip())
rhs = repmap(s[1].lstrip())
if not lhs or not rhs: return
return Data_Stmt_Repeat(lhs), Data_Stmt_Constant(rhs)
match = staticmethod(match)
def tostr(self):
return '%s * %s' % self.items
class Data_Stmt_Repeat(Base): # R531
"""
<data-stmt-repeat> = <scalar-int-constant>
| <scalar-int-constant-subobject>
"""
subclass_names = ['Scalar_Int_Constant', 'Scalar_Int_Constant_Subobject']
class Data_Stmt_Constant(Base): # R532
"""
<data-stmt-constant> = <scalar-constant>
| <scalar-constant-subobject>
| <signed-int-literal-constant>
| <signed-real-literal-constant>
| <null-init>
| <structure-constructor>
"""
subclass_names = ['Scalar_Constant', 'Scalar_Constant_Subobject',
'Signed_Int_Literal_Constant', 'Signed_Real_Literal_Constant',
'Null_Init', 'Structure_Constructor']
class Int_Constant_Subobject(Base): # R533
"""
<int-constant-subobject> = <constant-subobject>
"""
subclass_names = ['Constant_Subobject']
class Constant_Subobject(Base): # R534
"""
<constant-subobject> = <designator>
"""
subclass_names = ['Designator']
class Dimension_Stmt(StmtBase): # R535
"""
<dimension-stmt> = DIMENSION [ :: ] <array-name> ( <array-spec> ) [ , <array-name> ( <array-spec> ) ]...
"""
subclass_names = []
use_names = ['Array_Name', 'Array_Spec']
def match(string):
if string[:9].upper()!='DIMENSION': return
line, repmap = string_replace_map(string[9:].lstrip())
if line.startswith('::'): line = line[2:].lstrip()
decls = []
for s in line.split(','):
s = s.strip()
if not s.endswith(')'): return
i = s.find('(')
if i==-1: return
decls.append((Array_Name(repmap(s[:i].rstrip())), Array_Spec(repmap(s[i+1:-1].strip()))))
if not decls: return
return decls,
match = staticmethod(match)
def tostr(self):
return 'DIMENSION :: ' + ', '.join(['%s(%s)' % ns for ns in self.items[0]])
class Intent_Stmt(StmtBase): # R536
"""
<intent-stmt> = INTENT ( <intent-spec> ) [ :: ] <dummy-arg-name-list>
"""
subclass_names = []
use_names = ['Intent_Spec', 'Dummy_Arg_Name_List']
def match(string):
if string[:6].upper()!='INTENT': return
line = string[6:].lstrip()
if not line or not line.startswith('('): return
i = line.rfind(')')
if i==-1: return
spec = line[1:i].strip()
if not spec: return
line = line[i+1:].lstrip()
if line.startswith('::'):
line = line[2:].lstrip()
if not line: return
return Intent_Spec(spec), Dummy_Arg_Name_List(line)
match = staticmethod(match)
def tostr(self):
return 'INTENT(%s) :: %s' % self.items
class Optional_Stmt(StmtBase, WORDClsBase): # R537
"""
<optional-stmt> = OPTIONAL [ :: ] <dummy-arg-name-list>
"""
subclass_names = []
use_names = ['Dummy_Arg_Name_List']
def match(string): return WORDClsBase.match('OPTIONAL',Dummy_Arg_Name_List,string,check_colons=True, require_cls=True)
match = staticmethod(match)
tostr = WORDClsBase.tostr_a
class Parameter_Stmt(StmtBase, CALLBase): # R538
"""
<parameter-stmt> = PARAMETER ( <named-constant-def-list> )
"""
subclass_names = []
use_names = ['Named_Constant_Def_List']
def match(string): return CALLBase.match('PARAMETER', Named_Constant_Def_List, string, require_rhs=True)
match = staticmethod(match)
class Named_Constant_Def(KeywordValueBase): # R539
"""
<named-constant-def> = <named-constant> = <initialization-expr>
"""
subclass_names = []
use_names = ['Named_Constant', 'Initialization_Expr']
def match(string): return KeywordValueBase.match(Named_Constant, Initialization_Expr, string)
match = staticmethod(match)
class Pointer_Stmt(StmtBase, WORDClsBase): # R540
"""
<pointer-stmt> = POINTER [ :: ] <pointer-decl-list>
"""
subclass_names = []
use_names = ['Pointer_Decl_List']
def match(string): return WORDClsBase.match('POINTER',Pointer_Decl_List,string,check_colons=True, require_cls=True)
match = staticmethod(match)
tostr = WORDClsBase.tostr_a
class Pointer_Decl(CallBase): # R541
"""
<pointer-decl> = <object-name> [ ( <deferred-shape-spec-list> ) ]
| <proc-entity-name>
"""
subclass_names = ['Proc_Entity_Name', 'Object_Name']
use_names = ['Deferred_Shape_Spec_List']
def match(string): return CallBase.match(Object_Name, Deferred_Shape_Spec_List, string, require_rhs=True)
match = staticmethod(match)
class Protected_Stmt(StmtBase, WORDClsBase): # R542
"""
<protected-stmt> = PROTECTED [ :: ] <entity-name-list>
"""
subclass_names = []
use_names = ['Entity_Name_List']
def match(string): return WORDClsBase.match('PROTECTED',Entity_Name_List,string,check_colons=True, require_cls=True)
match = staticmethod(match)
tostr = WORDClsBase.tostr_a
class Save_Stmt(StmtBase, WORDClsBase): # R543
"""
<save-stmt> = SAVE [ [ :: ] <saved-entity-list> ]
"""
subclass_names = []
use_names = ['Saved_Entity_List']
def match(string): return WORDClsBase.match('SAVE',Saved_Entity_List,string,check_colons=True, require_cls=False)
match = staticmethod(match)
tostr = WORDClsBase.tostr_a
class Saved_Entity(BracketBase): # R544
"""
<saved-entity> = <object-name>
| <proc-pointer-name>
| / <common-block-name> /
"""
subclass_names = ['Object_Name', 'Proc_Pointer_Name']
use_names = ['Common_Block_Name']
def match(string): return BracketBase.match('//',Common_Block_Name, string)
match = staticmethod(match)
class Proc_Pointer_Name(Base): # R545
"""
<proc-pointer-name> = <name>
"""
subclass_names = ['Name']
class Target_Entity_Decl(Entity_Decl):
"""
<target-entity-decl> = <object-name> [ ( <array-spec> ) ]
"""
subclass_names = []
use_names = ['Object_Name', 'Array_Spec']
@staticmethod
def match(string):
return Entity_Decl.match(string, target=True)
class Target_Stmt(StmtBase): # R546
"""
<target-stmt> = TARGET [ :: ] <target-entity-decl-list>
"""
subclass_names = []
use_names = ['Target_Entity_Decl_List']
@staticmethod
def match(string):
if string[:6].upper()!='TARGET': return
line = string[6:].lstrip()
if line.startswith('::'):
line = line[2:].lstrip()
return Target_Entity_Decl_List(line),
def tostr(self):
return 'TARGET :: %s' % (self.items[0])
class Value_Stmt(StmtBase, WORDClsBase): # R547
"""
<value-stmt> = VALUE [ :: ] <dummy-arg-name-list>
"""
subclass_names = []
use_names = ['Dummy_Arg_Name_List']
@staticmethod
def match(string):
return WORDClsBase.match('VALUE',Dummy_Arg_Name_List,string,check_colons=True, require_cls=True)
tostr = WORDClsBase.tostr_a
class Volatile_Stmt(StmtBase, WORDClsBase): # R548
"""
<volatile-stmt> = VOLATILE [ :: ] <object-name-list>
"""
subclass_names = []
use_names = ['Object_Name_List']
@staticmethod
def match(string):
return WORDClsBase.match('VOLATILE',Object_Name_List,string,check_colons=True, require_cls=True)
tostr = WORDClsBase.tostr_a
class Implicit_Stmt(StmtBase): # R549
"""
::
<implicit-stmt> = IMPLICIT <implicit-spec-list>
| IMPLICIT NONE
Attributes
----------
items : ({'NONE', Implicit_Spec_List},)
"""
subclass_names = []
use_names = ['Implicit_Spec_List']
@staticmethod
def match(string):
if string[:8].upper()!='IMPLICIT':
return
line = string[8:].lstrip()
if len(line)==4 and line.upper()=='NONE':
return 'NONE',
return Implicit_Spec_List(line),
def tostr(self):
return 'IMPLICIT %s' % (self.items[0])
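# Illustrative sketch (assumes the Base.__new__ dispatch): 'IMPLICIT NONE'
# matches to items ('NONE',), while 'IMPLICIT REAL(A-H, O-Z)' matches to an
# Implicit_Spec_List; both round-trip through tostr().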
class Implicit_Spec(CallBase): # R550
"""
<implicit-spec> = <declaration-type-spec> ( <letter-spec-list> )
"""
subclass_names = []
use_names = ['Declaration_Type_Spec', 'Letter_Spec_List']
def match(string):
if not string.endswith(')'): return
i = string.rfind('(')
if i==-1: return
s1 = string[:i].rstrip()
s2 = string[i+1:-1].strip()
if not s1 or not s2: return
return Declaration_Type_Spec(s1), Letter_Spec_List(s2)
match = staticmethod(match)
class Letter_Spec(Base): # R551
"""
<letter-spec> = <letter> [ - <letter> ]
"""
subclass_names = []
def match(string):
if len(string)==1:
lhs = string.upper()
if 'A'<=lhs<='Z': return lhs, None
return
if '-' not in string: return
lhs,rhs = string.split('-',1)
lhs = lhs.strip().upper()
rhs = rhs.strip().upper()
if not len(lhs)==len(rhs)==1: return
if not ('A'<=lhs<=rhs<='Z'): return
return lhs,rhs
match = staticmethod(match)
def tostr(self):
if self.items[1] is None: return str(self.items[0])
return '%s - %s' % tuple(self.items)
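# Illustrative sketch: Letter_Spec('a-h') normalizes case and returns the
# items ('A', 'H'); str() renders it as 'A - H'.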
class Namelist_Stmt(StmtBase): # R552
"""
::
<namelist-stmt> = NAMELIST / <namelist-group-name> / <namelist-group-object-list> [ [ , ] / <namelist-group-name> / <namelist-group-object-list> ]...
Attributes
----------
items : (Namelist_Group_Name, Namelist_Group_Object_List)-tuple
"""
subclass_names = []
use_names = ['Namelist_Group_Name', 'Namelist_Group_Object_List']
@staticmethod
def match(string):
if string[:8].upper()!='NAMELIST':
return
line = string[8:].lstrip()
parts = line.split('/')
items = []
fst = parts.pop(0)
assert not fst,repr((fst, parts))
while len(parts)>=2:
name,lst = parts[:2]
del parts[:2]
name = name.strip()
lst = lst.strip()
if lst.endswith(','):
lst = lst[:-1].rstrip()
items.append((Namelist_Group_Name(name),Namelist_Group_Object_List(lst)))
assert not parts,repr(parts)
return tuple(items)
def tostr(self):
return 'NAMELIST ' + ', '.join('/%s/ %s' % (name_lst) for name_lst in self.items)
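# Illustrative sketch (assuming the Base.__new__ dispatch):
#   Namelist_Stmt('NAMELIST /grp/ a, b') yields one
#   (Namelist_Group_Name, Namelist_Group_Object_List) pair and prints back
#   as 'NAMELIST /grp/ a, b'.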
class Namelist_Group_Object(Base): # R553
"""
<namelist-group-object> = <variable-name>
"""
subclass_names = ['Variable_Name']
class Equivalence_Stmt(StmtBase, WORDClsBase): # R554
"""
<equivalence-stmt> = EQUIVALENCE <equivalence-set-list>
"""
subclass_names = []
use_names = ['Equivalence_Set_List']
def match(string): return WORDClsBase.match('EQUIVALENCE', Equivalence_Set_List, string)
match = staticmethod(match)
class Equivalence_Set(Base): # R555
"""
<equivalence-set> = ( <equivalence-object> , <equivalence-object-list> )
"""
subclass_names = []
use_names = ['Equivalence_Object', 'Equivalence_Object_List']
def match(string):
if not string or string[0]+string[-1]!='()': return
line = string[1:-1].strip()
if not line: return
l = Equivalence_Object_List(line)
obj = l.items[0]
l.items = l.items[1:]
if not l.items: return
return obj, l
match = staticmethod(match)
def tostr(self): return '(%s, %s)' % tuple(self.items)
class Equivalence_Object(Base): # R556
"""
<equivalence-object> = <variable-name>
| <array-element>
| <substring>
"""
subclass_names = ['Variable_Name', 'Array_Element', 'Substring']
class Common_Stmt(StmtBase): # R557
"""
<common-stmt> = COMMON [ / [ <common-block-name> ] / ] <common-block-object-list> [ [ , ] / [ <common-block-name> ] / <common-block-object-list> ]...
"""
subclass_names = []
use_names = ['Common_Block_Name', 'Common_Block_Object_List']
def match(string):
if string[:6].upper()!='COMMON': return
line = string[6:]
if not line or 'A'<=line[0].upper()<='Z' or line[0]=='_': return
line, repmap = string_replace_map(line.lstrip())
items = []
if line.startswith('/'):
i = line.find('/',1)
if i==-1: return
name = line[1:i].strip() or None
if name is not None: name = Common_Block_Name(name)
line = line[i+1:].lstrip()
i = line.find('/')
if i==-1:
lst = Common_Block_Object_List(repmap(line))
line = ''
else:
l = line[:i].rstrip()
if l.endswith(','): l = l[:-1].rstrip()
if not l: return
lst = Common_Block_Object_List(repmap(l))
line = line[i:].lstrip()
else:
name = None
i = line.find('/')
if i==-1:
lst = Common_Block_Object_List(repmap(line))
line = ''
else:
l = line[:i].rstrip()
if l.endswith(','): l = l[:-1].rstrip()
if not l: return
lst = Common_Block_Object_List(repmap(l))
line = line[i:].lstrip()
items.append((name, lst))
while line:
if line.startswith(','): line = line[1:].lstrip()
if not line.startswith('/'): return
            i = line.find('/',1)
            if i==-1: return
            name = line[1:i].strip() or None
if name is not None: name = Common_Block_Name(name)
line = line[i+1:].lstrip()
i = line.find('/')
if i==-1:
lst = Common_Block_Object_List(repmap(line))
line = ''
else:
l = line[:i].rstrip()
if l.endswith(','): l = l[:-1].rstrip()
if not l: return
lst = Common_Block_Object_List(repmap(l))
line = line[i:].lstrip()
items.append((name, lst))
return items,
match = staticmethod(match)
def tostr(self):
s = 'COMMON'
for (name, lst) in self.items[0]:
if name is not None:
s += ' /%s/ %s' % (name, lst)
else:
s += ' // %s' % (lst)
return s
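# Illustrative sketch: Common_Stmt('COMMON /blk/ a, b') matches to a single
# (Common_Block_Name('blk'), Common_Block_Object_List('a, b')) pair; an
# unnamed block such as 'COMMON x, y' matches with name None and prints back
# as 'COMMON // x, y'.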
class Common_Block_Object(CallBase): # R558
"""
<common-block-object> = <variable-name> [ ( <explicit-shape-spec-list> ) ]
| <proc-pointer-name>
"""
subclass_names = ['Proc_Pointer_Name','Variable_Name']
use_names = ['Variable_Name', 'Explicit_Shape_Spec_List']
def match(string): return CallBase.match(Variable_Name, Explicit_Shape_Spec_List, string, require_rhs=True)
match = staticmethod(match)
###############################################################################
############################### SECTION 6 ####################################
###############################################################################
class Variable(Base): # R601
"""
<variable> = <designator>
"""
subclass_names = ['Designator']
class Variable_Name(Base): # R602
"""
<variable-name> = <name>
"""
subclass_names = ['Name']
class Designator(Base): # R603
"""
<designator> = <object-name>
| <array-element>
| <array-section>
| <structure-component>
| <substring>
<substring-range> = [ <scalar-int-expr> ] : [ <scalar-int-expr> ]
<structure-component> = <data-ref>
"""
subclass_names = ['Object_Name','Array_Section','Array_Element','Structure_Component',
'Substring'
]
class Logical_Variable(Base): # R604
"""
<logical-variable> = <variable>
"""
subclass_names = ['Variable']
class Default_Logical_Variable(Base): # R605
"""
<default-logical-variable> = <variable>
"""
subclass_names = ['Variable']
class Char_Variable(Base): # R606
"""
<char-variable> = <variable>
"""
subclass_names = ['Variable']
class Default_Char_Variable(Base): # R607
"""
<default-char-variable> = <variable>
"""
subclass_names = ['Variable']
class Int_Variable(Base): # R608
"""
<int-variable> = <variable>
"""
subclass_names = ['Variable']
class Substring(CallBase): # R609
"""
<substring> = <parent-string> ( <substring-range> )
"""
subclass_names = []
use_names = ['Parent_String','Substring_Range']
def match(string): return CallBase.match(Parent_String, Substring_Range, string, require_rhs=True)
match = staticmethod(match)
class Parent_String(Base): # R610
"""
<parent-string> = <scalar-variable-name>
| <array-element>
| <scalar-structure-component>
| <scalar-constant>
"""
subclass_names = ['Scalar_Variable_Name', 'Array_Element', 'Scalar_Structure_Component', 'Scalar_Constant']
class Substring_Range(SeparatorBase): # R611
"""
<substring-range> = [ <scalar-int-expr> ] : [ <scalar-int-expr> ]
"""
subclass_names = []
use_names = ['Scalar_Int_Expr']
@staticmethod
def match(string):
return SeparatorBase.match(Scalar_Int_Expr, Scalar_Int_Expr, string)
class Data_Ref(SequenceBase): # R612
"""
<data-ref> = <part-ref> [ % <part-ref> ]...
"""
subclass_names = ['Part_Ref']
use_names = []
def match(string): return SequenceBase.match(r'%', Part_Ref, string)
match = staticmethod(match)
class Part_Ref(CallBase): # R613
"""
<part-ref> = <part-name> [ ( <section-subscript-list> ) ]
"""
subclass_names = ['Part_Name']
use_names = ['Section_Subscript_List']
def match(string):
return CallBase.match(Part_Name, Section_Subscript_List, string, require_rhs=True)
match = staticmethod(match)
class Structure_Component(Base): # R614
"""
<structure-component> = <data-ref>
"""
subclass_names = ['Data_Ref']
class Type_Param_Inquiry(BinaryOpBase): # R615
"""
<type-param-inquiry> = <designator> % <type-param-name>
"""
subclass_names = []
use_names = ['Designator','Type_Param_Name']
def match(string):
return BinaryOpBase.match(\
Designator, pattern.percent_op.named(), Type_Param_Name, string)
match = staticmethod(match)
class Array_Element(Base): # R616
"""
<array-element> = <data-ref>
"""
subclass_names = ['Data_Ref']
class Array_Section(CallBase): # R617
"""
<array-section> = <data-ref> [ ( <substring-range> ) ]
"""
subclass_names = ['Data_Ref']
use_names = ['Substring_Range']
def match(string):
return CallBase.match(Data_Ref, Substring_Range, string, require_rhs=True)
match = staticmethod(match)
class Subscript(Base): # R618
"""
<subscript> = <scalar-int-expr>
"""
subclass_names = ['Scalar_Int_Expr']
class Section_Subscript(Base): # R619
"""
<section-subscript> = <subscript>
| <subscript-triplet>
| <vector-subscript>
"""
subclass_names = ['Subscript_Triplet', 'Vector_Subscript', 'Subscript']
class Subscript_Triplet(Base): # R620
"""
<subscript-triplet> = [ <subscript> ] : [ <subscript> ] [ : <stride> ]
"""
subclass_names = []
use_names = ['Subscript','Stride']
def match(string):
line, repmap = string_replace_map(string)
t = line.split(':')
if len(t)<=1 or len(t)>3: return
lhs_obj,rhs_obj, stride_obj = None, None, None
if len(t)==2:
lhs,rhs = t[0].rstrip(),t[1].lstrip()
else:
lhs,rhs,stride = t[0].rstrip(),t[1].strip(),t[2].lstrip()
if stride:
stride_obj = Stride(repmap(stride))
if lhs:
lhs_obj = Subscript(repmap(lhs))
if rhs:
rhs_obj = Subscript(repmap(rhs))
return lhs_obj, rhs_obj, stride_obj
match = staticmethod(match)
def tostr(self):
s = ''
if self.items[0] is not None:
s += str(self.items[0]) + ' :'
else:
s += ':'
if self.items[1] is not None:
s += ' ' + str(self.items[1])
if self.items[2] is not None:
s += ' : ' + str(self.items[2])
return s
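# Illustrative sketch: Subscript_Triplet('1 : n : 2') yields items
# (Subscript('1'), Subscript('n'), Stride('2')) and prints as '1 : n : 2';
# any part may be None, e.g. ':' alone matches as (None, None, None).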
class Stride(Base): # R621
"""
<stride> = <scalar-int-expr>
"""
subclass_names = ['Scalar_Int_Expr']
class Vector_Subscript(Base): # R622
"""
<vector-subscript> = <int-expr>
"""
subclass_names = ['Int_Expr']
class Allocate_Stmt(StmtBase): # R623
"""
<allocate-stmt> = ALLOCATE ( [ <type-spec> :: ] <allocation-list> [ , <alloc-opt-list> ] )
"""
subclass_names = []
use_names = ['Type_Spec', 'Allocation_List', 'Alloc_Opt_List']
@staticmethod
def match(string):
if string[:8].upper() != 'ALLOCATE':
return
line = string[8:].lstrip()
if not line or line[0]!='(' or line[-1]!=')':
return
line, repmap = string_replace_map(line[1:-1].strip())
i = line.find('::')
spec = None
if i!=-1:
spec = Type_Spec(repmap(line[:i].rstrip()))
line = line[i+2:].lstrip()
i = line.find('=')
opts = None
if i!=-1:
j = line[:i].rfind(',')
assert j!=-1,repr((i,j,line))
opts = Alloc_Opt_List(repmap(line[j+1:].lstrip()))
line = line[:j].rstrip()
return spec, Allocation_List(repmap(line)), opts
def tostr(self):
spec, lst, opts = self.items
if spec is not None:
if opts is not None:
return 'ALLOCATE(%s::%s, %s)' % (spec, lst, opts)
else:
return 'ALLOCATE(%s::%s)' % (spec, lst)
elif opts is not None:
return 'ALLOCATE(%s, %s)' % (lst, opts)
else:
return 'ALLOCATE(%s)' % (lst)
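# Illustrative sketch (assuming the Base.__new__ dispatch and the helper
# classes below): 'ALLOCATE(x(3), STAT=ierr)' matches with spec None,
# Allocation_List('x(3)') and Alloc_Opt_List('STAT=ierr'), while
# 'ALLOCATE(REAL::y(n))' also captures the Type_Spec before '::'.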
class Alloc_Opt(KeywordValueBase):# R624
"""
<alloc-opt> = STAT = <stat-variable>
| ERRMSG = <errmsg-variable>
| SOURCE = <source-expr>
"""
subclass_names = []
use_names = ['Stat_Variable', 'Errmsg_Variable', 'Source_Expr']
def match(string):
for (k,v) in [('STAT', Stat_Variable),
('ERRMSG', Errmsg_Variable),
('SOURCE', Source_Expr)
]:
try:
obj = KeywordValueBase.match(k, v, string, upper_lhs = True)
except NoMatchError:
obj = None
if obj is not None: return obj
return
match = staticmethod(match)
class Stat_Variable(Base):# R625
"""
<stat-variable> = <scalar-int-variable>
"""
subclass_names = ['Scalar_Int_Variable']
class Errmsg_Variable(Base):# R626
"""
<errmsg-variable> = <scalar-default-char-variable>
"""
subclass_names = ['Scalar_Default_Char_Variable']
class Source_Expr(Base):# R627
"""
<source-expr> = <expr>
"""
subclass_names = ['Expr']
class Allocation(CallBase):# R628
"""
<allocation> = <allocate-object> [ ( <allocate-shape-spec-list> ) ]
| <variable-name>
"""
subclass_names = ['Variable_Name', 'Allocate_Object']
use_names = ['Allocate_Shape_Spec_List']
def match(string):
return CallBase.match(Allocate_Object, Allocate_Shape_Spec_List, string, require_rhs = True)
match = staticmethod(match)
class Allocate_Object(Base): # R629
"""
<allocate-object> = <variable-name>
| <structure-component>
"""
subclass_names = ['Variable_Name', 'Structure_Component']
class Allocate_Shape_Spec(SeparatorBase): # R630
"""
<allocate-shape-spec> = [ <lower-bound-expr> : ] <upper-bound-expr>
"""
subclass_names = []
use_names = ['Lower_Bound_Expr', 'Upper_Bound_Expr']
def match(string):
line, repmap = string_replace_map(string)
if ':' not in line: return None, Upper_Bound_Expr(string)
lower,upper = line.split(':',1)
lower = lower.rstrip()
upper = upper.lstrip()
if not upper: return
if not lower: return
return Lower_Bound_Expr(repmap(lower)), Upper_Bound_Expr(repmap(upper))
match = staticmethod(match)
def tostr(self):
if self.items[0] is None: return str(self.items[1])
return SeparatorBase.tostr(self)
class Lower_Bound_Expr(Base): # R631
"""
<lower-bound-expr> = <scalar-int-expr>
"""
subclass_names = ['Scalar_Int_Expr']
class Upper_Bound_Expr(Base): # R632
"""
<upper-bound-expr> = <scalar-int-expr>
"""
subclass_names = ['Scalar_Int_Expr']
class Nullify_Stmt(StmtBase, CALLBase): # R633
"""
<nullify-stmt> = NULLIFY ( <pointer-object-list> )
"""
subclass_names = []
use_names = ['Pointer_Object_List']
def match(string): return CALLBase.match('NULLIFY', Pointer_Object_List, string, require_rhs=True)
match = staticmethod(match)
class Pointer_Object(Base): # R634
"""
<pointer-object> = <variable-name>
| <structure-component>
| <proc-pointer-name>
"""
subclass_names = ['Variable_Name', 'Structure_Component', 'Proc_Pointer_Name']
class Deallocate_Stmt(StmtBase): # R635
"""
<deallocate-stmt> = DEALLOCATE ( <allocate-object-list> [ , <dealloc-opt-list> ] )
"""
subclass_names = []
use_names = ['Allocate_Object_List', 'Dealloc_Opt_List']
@staticmethod
def match(string):
if string[:10].upper()!='DEALLOCATE':
return
line = string[10:].lstrip()
if not line or line[0]!='(' or line[-1]!=')':
return
line, repmap = string_replace_map(line[1:-1].strip())
i = line.find('=')
opts = None
if i!=-1:
j = line[:i].rfind(',')
assert j!=-1,repr((i,j,line))
opts = Dealloc_Opt_List(repmap(line[j+1:].lstrip()))
line = line[:j].rstrip()
return Allocate_Object_List(repmap(line)), opts
def tostr(self):
if self.items[1] is not None:
return 'DEALLOCATE(%s, %s)' % (self.items)
return 'DEALLOCATE(%s)' % (self.items[0])
class Dealloc_Opt(KeywordValueBase): # R636
"""
<dealloc-opt> = STAT = <stat-variable>
| ERRMSG = <errmsg-variable>
"""
subclass_names = []
use_names = ['Stat_Variable', 'Errmsg_Variable']
def match(string):
for (k,v) in [('STAT', Stat_Variable),
('ERRMSG', Errmsg_Variable),
]:
try:
obj = KeywordValueBase.match(k, v, string, upper_lhs = True)
except NoMatchError:
obj = None
if obj is not None: return obj
return
match = staticmethod(match)
class Scalar_Char_Initialization_Expr(Base):
subclass_names = ['Char_Initialization_Expr']
###############################################################################
############################### SECTION 7 ####################################
###############################################################################
class Primary(Base): # R701
"""
<primary> = <constant>
| <designator>
| <array-constructor>
| <structure-constructor>
| <function-reference>
| <type-param-inquiry>
| <type-param-name>
| ( <expr> )
"""
subclass_names = ['Constant', 'Parenthesis', 'Designator','Array_Constructor',
'Structure_Constructor',
'Function_Reference', 'Type_Param_Inquiry', 'Type_Param_Name',
]
class Parenthesis(BracketBase): # R701.h
"""
<parenthesis> = ( <expr> )
"""
subclass_names = []
use_names = ['Expr']
def match(string): return BracketBase.match('()', Expr, string)
match = staticmethod(match)
class Level_1_Expr(UnaryOpBase): # R702
"""
<level-1-expr> = [ <defined-unary-op> ] <primary>
<defined-unary-op> = . <letter> [ <letter> ]... .
"""
subclass_names = ['Primary']
use_names = []
def match(string):
return UnaryOpBase.match(\
pattern.defined_unary_op.named(),Primary,string)
#exclude_op_pattern = pattern.non_defined_binary_op)
match = staticmethod(match)
class Defined_Unary_Op(STRINGBase): # R703
"""
<defined-unary-op> = . <letter> [ <letter> ]... .
"""
subclass_names = ['Defined_Op']
class Defined_Op(STRINGBase): # R703, 723
"""
<defined-op> = . <letter> [ <letter> ]... .
"""
subclass_names = []
def match(string):
if pattern.non_defined_binary_op.match(string):
            raise NoMatchError('%s: %r' % (Defined_Op.__name__, string))
return STRINGBase.match(pattern.abs_defined_op, string)
match = staticmethod(match)
class Mult_Operand(BinaryOpBase): # R704
"""
<mult-operand> = <level-1-expr> [ <power-op> <mult-operand> ]
<power-op> = **
"""
subclass_names = ['Level_1_Expr']
use_names = ['Mult_Operand']
def match(string):
return BinaryOpBase.match(\
Level_1_Expr,pattern.power_op.named(),Mult_Operand,string,right=False)
match = staticmethod(match)
class Add_Operand(BinaryOpBase): # R705
"""
<add-operand> = [ <add-operand> <mult-op> ] <mult-operand>
<mult-op> = *
| /
"""
subclass_names = ['Mult_Operand']
use_names = ['Add_Operand','Mult_Operand']
def match(string):
return BinaryOpBase.match(Add_Operand,pattern.mult_op.named(),Mult_Operand,string)
match = staticmethod(match)
class Level_2_Expr(BinaryOpBase): # R706
"""
<level-2-expr> = [ [ <level-2-expr> ] <add-op> ] <add-operand>
<level-2-expr> = [ <level-2-expr> <add-op> ] <add-operand>
| <level-2-unary-expr>
<add-op> = +
| -
"""
subclass_names = ['Level_2_Unary_Expr']
use_names = ['Level_2_Expr']
def match(string):
return BinaryOpBase.match(\
Level_2_Expr,pattern.add_op.named(),Add_Operand,string, is_add=True)
match = staticmethod(match)
class Level_2_Unary_Expr(UnaryOpBase): # R706.c
"""
<level-2-unary-expr> = [ <add-op> ] <add-operand>
"""
subclass_names = ['Add_Operand']
use_names = []
def match(string): return UnaryOpBase.match(pattern.add_op.named(),Add_Operand,string)
match = staticmethod(match)
#R707: <power-op> = **
#R708: <mult-op> = * | /
#R709: <add-op> = + | -
class Level_3_Expr(BinaryOpBase): # R710
"""
<level-3-expr> = [ <level-3-expr> <concat-op> ] <level-2-expr>
<concat-op> = //
"""
subclass_names = ['Level_2_Expr']
use_names =['Level_3_Expr']
def match(string):
return BinaryOpBase.match(\
Level_3_Expr,pattern.concat_op.named(),Level_2_Expr,string)
match = staticmethod(match)
#R711: <concat-op> = //
class Level_4_Expr(BinaryOpBase): # R712
"""
<level-4-expr> = [ <level-3-expr> <rel-op> ] <level-3-expr>
<rel-op> = .EQ. | .NE. | .LT. | .LE. | .GT. | .GE. | == | /= | < | <= | > | >=
"""
subclass_names = ['Level_3_Expr']
use_names = []
def match(string):
return BinaryOpBase.match(\
Level_3_Expr,pattern.rel_op.named(),Level_3_Expr,string)
match = staticmethod(match)
#R713: <rel-op> = .EQ. | .NE. | .LT. | .LE. | .GT. | .GE. | == | /= | < | <= | > | >=
class And_Operand(UnaryOpBase): # R714
"""
<and-operand> = [ <not-op> ] <level-4-expr>
<not-op> = .NOT.
"""
subclass_names = ['Level_4_Expr']
use_names = []
def match(string):
return UnaryOpBase.match(\
pattern.not_op.named(),Level_4_Expr,string)
match = staticmethod(match)
class Or_Operand(BinaryOpBase): # R715
"""
<or-operand> = [ <or-operand> <and-op> ] <and-operand>
<and-op> = .AND.
"""
subclass_names = ['And_Operand']
use_names = ['Or_Operand','And_Operand']
@staticmethod
def match(string):
return BinaryOpBase.match(\
Or_Operand,pattern.and_op.named(),And_Operand,string)
class Equiv_Operand(BinaryOpBase): # R716
"""
<equiv-operand> = [ <equiv-operand> <or-op> ] <or-operand>
<or-op> = .OR.
"""
subclass_names = ['Or_Operand']
use_names = ['Equiv_Operand']
def match(string):
return BinaryOpBase.match(\
Equiv_Operand,pattern.or_op.named(),Or_Operand,string)
match = staticmethod(match)
class Level_5_Expr(BinaryOpBase): # R717
"""
<level-5-expr> = [ <level-5-expr> <equiv-op> ] <equiv-operand>
<equiv-op> = .EQV.
| .NEQV.
"""
subclass_names = ['Equiv_Operand']
use_names = ['Level_5_Expr']
def match(string):
return BinaryOpBase.match(\
Level_5_Expr,pattern.equiv_op.named(),Equiv_Operand,string)
match = staticmethod(match)
#R718: <not-op> = .NOT.
#R719: <and-op> = .AND.
#R720: <or-op> = .OR.
#R721: <equiv-op> = .EQV. | .NEQV.
class Expr(BinaryOpBase): # R722
"""
<expr> = [ <expr> <defined-binary-op> ] <level-5-expr>
<defined-binary-op> = . <letter> [ <letter> ]... .
"""
subclass_names = ['Level_5_Expr']
use_names = ['Expr']
def match(string):
return BinaryOpBase.match(Expr, pattern.defined_binary_op.named(), Level_5_Expr,
string, exclude_op_pattern = pattern.non_defined_binary_op)
match = staticmethod(match)
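# Illustrative note: R702-R722 encode operator precedence by rule nesting, so
# matching proceeds from the loosest binding downwards. Assuming the
# Base.__new__ dispatch, Expr('a + b * c') resolves as a Level_2_Expr whose
# right operand is the Add_Operand 'b * c', i.e. '*' binds tighter than '+'.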
class Defined_Binary_Op(STRINGBase): # R723
    """
    <defined-binary-op> = . <letter> [ <letter> ]... .
    """
    subclass_names = ['Defined_Op']
class Logical_Expr(Base): # R724
"""
<logical-expr> = <expr>
"""
subclass_names = ['Expr']
class Char_Expr(Base): # R725
"""
<char-expr> = <expr>
"""
subclass_names = ['Expr']
class Default_Char_Expr(Base): # R726
"""
<default-char-expr> = <expr>
"""
subclass_names = ['Expr']
class Int_Expr(Base): # R727
"""
<int-expr> = <expr>
"""
subclass_names = ['Expr']
class Numeric_Expr(Base): # R728
"""
<numeric-expr> = <expr>
"""
subclass_names = ['Expr']
class Specification_Expr(Base): # R729
"""
<specification-expr> = <scalar-int-expr>
"""
subclass_names = ['Scalar_Int_Expr']
class Initialization_Expr(Base): # R730
"""
<initialization-expr> = <expr>
"""
subclass_names = ['Expr']
class Char_Initialization_Expr(Base): # R731
"""
<char-initialization-expr> = <char-expr>
"""
subclass_names = ['Char_Expr']
class Int_Initialization_Expr(Base): # R732
"""
<int-initialization-expr> = <int-expr>
"""
subclass_names = ['Int_Expr']
class Logical_Initialization_Expr(Base): # R733
"""
<logical-initialization-expr> = <logical-expr>
"""
subclass_names = ['Logical_Expr']
class Assignment_Stmt(StmtBase, BinaryOpBase): # R734
"""
<assignment-stmt> = <variable> = <expr>
"""
subclass_names = []
use_names = ['Variable', 'Expr']
@staticmethod
def match(string):
return BinaryOpBase.match(Variable, '=', Expr, string, right=False)
class Pointer_Assignment_Stmt(StmtBase): # R735
"""
<pointer-assignment-stmt> = <data-pointer-object> [ ( <bounds-spec-list> ) ] => <data-target>
| <data-pointer-object> ( <bounds-remapping-list> ) => <data-target>
| <proc-pointer-object> => <proc-target>
"""
subclass_names = []
use_names = ['Data_Pointer_Object', 'Bounds_Spec_List', 'Data_Target', 'Bounds_Remapping_List',
'Proc_Pointer_Object', 'Proc_Target']
@staticmethod
def match(string):
line, repmap = string_replace_map(string)
i = line.find('=>')
if i==-1: return
lhs = line[:i].rstrip()
rhs = repmap(line[i+2:].lstrip())
if lhs.endswith(')'):
i = lhs.rfind('(')
if i==-1: return
o = repmap(lhs[:i].rstrip())
l = repmap(lhs[i+1:-1].strip())
try:
return Data_Pointer_Object(o), Bounds_Spec_List(l), Data_Target(rhs)
except NoMatchError as msg:
return Data_Pointer_Object(o), Bounds_Remapping_List(l), Data_Target(rhs)
else:
lhs = repmap(lhs)
try:
return Data_Pointer_Object(lhs), None, Data_Target(rhs)
except NoMatchError as msg:
return Proc_Pointer_Object(lhs), None, Proc_Target(rhs)
def tostr(self):
if self.items[1] is None:
return '%s => %s' % (self.items[0], self.items[2])
return '%s(%s) => %s' % (self.items)
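# Illustrative sketch: Pointer_Assignment_Stmt('p => t') matches with items
# (Data_Pointer_Object('p'), None, Data_Target('t')) and prints as 'p => t';
# a bounds form such as 'p(1:) => t' fills the middle item instead.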
class Data_Pointer_Object(BinaryOpBase): # R736
"""
<data-pointer-object> = <variable-name>
| <variable> % <data-pointer-component-name>
"""
subclass_names = ['Variable_Name']
use_names = ['Variable', 'Data_Pointer_Component_Name']
@staticmethod
def match(string):
return BinaryOpBase.match(Variable, r'%', Data_Pointer_Component_Name, string)
class Bounds_Spec(SeparatorBase): # R737
"""
<bounds-spec> = <lower-bound-expr> :
"""
subclass_names = []
use_names = ['Lower_Bound_Expr']
def match(string): return SeparatorBase.match(Lower_Bound_Expr, None, string, require_lhs=True)
match = staticmethod(match)
class Bounds_Remapping(SeparatorBase): # R738
"""
<bounds-remapping> = <lower-bound-expr> : <upper-bound-expr>
"""
subclass_names = []
    use_names = ['Lower_Bound_Expr', 'Upper_Bound_Expr']
def match(string): return SeparatorBase.match(Lower_Bound_Expr, Upper_Bound_Expr, string, require_lhs=True, require_rhs=True)
match = staticmethod(match)
class Data_Target(Base): # R739
"""
<data-target> = <variable>
| <expr>
"""
subclass_names = ['Variable','Expr']
class Proc_Pointer_Object(Base): # R740
"""
<proc-pointer-object> = <proc-pointer-name>
| <proc-component-ref>
"""
subclass_names = ['Proc_Pointer_Name', 'Proc_Component_Ref']
class Proc_Component_Ref(BinaryOpBase): # R741
"""
<proc-component-ref> = <variable> % <procedure-component-name>
"""
subclass_names = []
use_names = ['Variable','Procedure_Component_Name']
def match(string):
return BinaryOpBase.match(Variable, r'%', Procedure_Component_Name, string)
match = staticmethod(match)
class Proc_Target(Base): # R742
"""
<proc-target> = <expr>
| <procedure-name>
| <proc-component-ref>
"""
subclass_names = ['Proc_Component_Ref', 'Procedure_Name', 'Expr']
class Where_Stmt(StmtBase): # R743
"""
<where-stmt> = WHERE ( <mask-expr> ) <where-assignment-stmt>
"""
subclass_names = []
use_names = ['Mask_Expr', 'Where_Assignment_Stmt']
def match(string):
if string[:5].upper()!='WHERE': return
line, repmap = string_replace_map(string[5:].lstrip())
if not line.startswith('('): return
i = line.find(')')
if i==-1: return
stmt = repmap(line[i+1:].lstrip())
if not stmt: return
expr = repmap(line[1:i].strip())
if not expr: return
return Mask_Expr(expr), Where_Assignment_Stmt(stmt)
match = staticmethod(match)
def tostr(self): return 'WHERE (%s) %s' % tuple(self.items)
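# Illustrative sketch: Where_Stmt('WHERE (a > 0) b = a') yields
# (Mask_Expr('a > 0'), Where_Assignment_Stmt('b = a')) and round-trips
# through tostr().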
class Where_Construct(BlockBase): # R744
"""
<where-construct> = <where-construct-stmt>
[ <where-body-construct> ]...
[ <masked-elsewhere-stmt>
[ <where-body-construct> ]...
]...
[ <elsewhere-stmt>
[ <where-body-construct> ]... ]
<end-where-stmt>
"""
subclass_names = []
use_names = ['Where_Construct_Stmt', 'Where_Body_Construct', 'Masked_Elsewhere_Stmt',
'Elsewhere_Stmt', 'End_Where_Stmt'
]
@staticmethod
def match(string):
return BlockBase.match(Where_Construct_Stmt, [Where_Body_Construct,
Masked_Elsewhere_Stmt,
Where_Body_Construct,
Elsewhere_Stmt,
Where_Body_Construct,
],
End_Where_Stmt, string,
match_names = True, # C730
match_name_classes = (Masked_Elsewhere_Stmt, Elsewhere_Stmt, End_Where_Stmt), # C730
enable_where_construct_hook = True)
def tofortran(self, tab='', isfix=None):
l = []
start = self.content[0]
end = self.content[-1]
l.append(start.tofortran(tab=tab,isfix=isfix))
for item in self.content[1:-1]:
if isinstance(item, (Masked_Elsewhere_Stmt, Elsewhere_Stmt)):
l.append(item.tofortran(tab=tab,isfix=isfix))
else:
l.append(item.tofortran(tab=tab+' ',isfix=isfix))
l.append(end.tofortran(tab=tab,isfix=isfix))
return '\n'.join(l)
class Where_Construct_Stmt(StmtBase): # R745
"""
<where-construct-stmt> = [ <where-construct-name> : ] WHERE ( <mask-expr> )
"""
subclass_names = []
use_names = ['Where_Construct_Name', 'Mask_Expr']
@staticmethod
def match(string):
if string[:5].upper()!='WHERE': return
line = string[5:].lstrip()
if not line: return
if line[0]+line[-1] != '()': return
line = line[1:-1].strip()
if not line: return
return Mask_Expr(line),
def tostr(self): return 'WHERE (%s)' % tuple(self.items)
def get_start_name(self):
return self.item.name
class Where_Body_Construct(Base): # R746
"""
::
<where-body-construct> = <where-assignment-stmt>
| <where-stmt>
| <where-construct>
"""
subclass_names = ['Where_Assignment_Stmt', 'Where_Stmt', 'Where_Construct']
class Where_Assignment_Stmt(Base): # R747
"""
::
<where-assignment-stmt> = <assignment-stmt>
"""
subclass_names = ['Assignment_Stmt']
class Mask_Expr(Base): # R748
"""
<mask-expr> = <logical-expr>
"""
subclass_names = ['Logical_Expr']
class Masked_Elsewhere_Stmt(StmtBase): # R749
"""
<masked-elsewhere-stmt> = ELSEWHERE ( <mask-expr> ) [ <where-construct-name> ]
"""
subclass_names = []
use_names = ['Mask_Expr', 'Where_Construct_Name']
@staticmethod
def match(string):
if string[:9].upper()!='ELSEWHERE': return
line = string[9:].lstrip()
if not line.startswith('('): return
i = line.rfind(')')
if i==-1: return
expr = line[1:i].strip()
if not expr: return
line = line[i+1:].rstrip()
if line:
return Mask_Expr(expr), Where_Construct_Name(line)
return Mask_Expr(expr), None
def tostr(self):
if self.items[1] is None: return 'ELSEWHERE(%s)' % (self.items[0])
return 'ELSEWHERE(%s) %s' % self.items
def get_end_name(self):
name = self.items[1]
if name is not None:
return name.string
class Elsewhere_Stmt(StmtBase, WORDClsBase): # R750
"""
<elsewhere-stmt> = ELSEWHERE [ <where-construct-name> ]
"""
subclass_names = []
use_names = ['Where_Construct_Name']
@staticmethod
def match(string):
return WORDClsBase.match('ELSEWHERE', Where_Construct_Name, string)
def get_end_name(self):
name = self.items[1]
if name is not None:
return name.string
class End_Where_Stmt(EndStmtBase): # R751
"""
<end-where-stmt> = END WHERE [ <where-construct-name> ]
"""
subclass_names = []
use_names = ['Where_Construct_Name']
@staticmethod
def match(string):
return EndStmtBase.match('WHERE',Where_Construct_Name, string, require_stmt_type=True)
class Forall_Construct(BlockBase): # R752
"""
<forall-construct> = <forall-construct-stmt>
[ <forall-body-construct> ]...
<end-forall-stmt>
"""
subclass_names = []
use_names = ['Forall_Construct_Stmt', 'Forall_Body_Construct', 'End_Forall_Stmt']
@staticmethod
def match(reader):
return BlockBase.match(Forall_Construct_Stmt, [Forall_Body_Construct], End_Forall_Stmt, reader,
match_names = True, # C732
)
class Forall_Construct_Stmt(StmtBase, WORDClsBase): # R753
"""
<forall-construct-stmt> = [ <forall-construct-name> : ] FORALL <forall-header>
"""
subclass_names = []
use_names = ['Forall_Construct_Name', 'Forall_Header']
@staticmethod
def match(string):
return WORDClsBase.match('FORALL', Forall_Header, string, require_cls = True)
def get_start_name(self):
return self.item.name
class Forall_Header(Base): # R754
"""
<forall-header> = ( <forall-triplet-spec-list> [ , <scalar-mask-expr> ] )
"""
subclass_names = []
use_names = ['Forall_Triplet_Spec_List', 'Scalar_Mask_Expr']
@staticmethod
def match(string):
if not string or string[0]+string[-1]!='()': return
line, repmap = string_replace_map(string[1:-1].strip())
        lst = line.rsplit(',', 1)
        if len(lst)!=2:
            return Forall_Triplet_Spec_List(repmap(line)), None
if ':' not in lst[1]:
return Forall_Triplet_Spec_List(repmap(lst[0].rstrip())), Scalar_Mask_Expr(repmap(lst[1].lstrip()))
return Forall_Triplet_Spec_List(repmap(line)), None
def tostr(self):
if self.items[1] is None:
return '(%s)' % (self.items[0])
return '(%s, %s)' % (self.items)
class Forall_Triplet_Spec(Base): # R755
"""
<forall-triplet-spec> = <index-name> = <subscript> : <subscript> [ : <stride> ]
"""
subclass_names = []
use_names = ['Index_Name', 'Subscript', 'Stride']
@staticmethod
def match(string):
line, repmap = string_replace_map(string)
i = line.find('=')
if i==-1: return
n = Index_Name(repmap(line[:i].rstrip()))
line = line[i+1:].lstrip()
s = [repmap(s.strip()) for s in line.split(':')]
if len(s)==2:
return n, Subscript(s[0]), Subscript(s[1]), None
if len(s)==3:
return n, Subscript(s[0]), Subscript(s[1]), Stride(s[2])
def tostr(self):
if self.items[3] is None:
return '%s = %s : %s' % (self.items[:3])
return '%s = %s : %s : %s' % (self.items)
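# Illustrative sketch: Forall_Triplet_Spec('i = 1 : n') yields
# (Index_Name('i'), Subscript('1'), Subscript('n'), None); adding ': 2'
# fills the Stride slot and prints as 'i = 1 : n : 2'.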
class Forall_Body_Construct(Base): # R756
"""
<forall-body-construct> = <forall-assignment-stmt>
| <where-stmt>
| <where-construct>
| <forall-construct>
| <forall-stmt>
"""
subclass_names = ['Forall_Assignment_Stmt', 'Where_Stmt', 'Where_Construct',
'Forall_Construct', 'Forall_Stmt']
class Forall_Assignment_Stmt(Base): # R757
"""
<forall-assignment-stmt> = <assignment-stmt>
| <pointer-assignment-stmt>
"""
subclass_names = ['Assignment_Stmt', 'Pointer_Assignment_Stmt']
class End_Forall_Stmt(EndStmtBase): # R758
"""
<end-forall-stmt> = END FORALL [ <forall-construct-name> ]
"""
subclass_names = []
use_names = ['Forall_Construct_Name']
@staticmethod
def match(string):
return EndStmtBase.match('FORALL',Forall_Construct_Name, string, require_stmt_type=True)
class Forall_Stmt(StmtBase): # R759
"""
<forall-stmt> = FORALL <forall-header> <forall-assignment-stmt>
"""
subclass_names = []
use_names = ['Forall_Header', 'Forall_Assignment_Stmt']
@staticmethod
def match(string):
if string[:6].upper()!='FORALL': return
line, repmap = string_replace_map(string[6:].lstrip())
        if not line.startswith('('): return
i = line.find(')')
if i==-1: return
header = repmap(line[1:i].strip())
if not header: return
line = repmap(line[i+1:].lstrip())
if not line: return
return Forall_Header(header), Forall_Assignment_Stmt(line)
def tostr(self): return 'FORALL %s %s' % self.items
###############################################################################
############################### SECTION 8 ####################################
###############################################################################
class Block(BlockBase): # R801
"""
<block> = [ <execution-part-construct> ]...
"""
subclass_names = []
use_names = ['Execution_Part_Construct']
@staticmethod
def match(string): return BlockBase.match(None, [Execution_Part_Construct], None, string)
class If_Construct(BlockBase): # R802
"""
<if-construct> = <if-then-stmt>
<block>
[ <else-if-stmt>
<block>
]...
[ <else-stmt>
<block>
]
<end-if-stmt>
"""
subclass_names = []
use_names = ['If_Then_Stmt', 'Block', 'Else_If_Stmt', 'Else_Stmt', 'End_If_Stmt']
@staticmethod
def match(string):
return BlockBase.match(If_Then_Stmt, [Execution_Part_Construct,
Else_If_Stmt,
Execution_Part_Construct,
Else_Stmt,
Execution_Part_Construct],
End_If_Stmt, string,
match_names = True, # C801
match_name_classes = (Else_If_Stmt, Else_Stmt, End_If_Stmt),
enable_if_construct_hook = True)
def tofortran(self, tab='', isfix=None):
l = []
start = self.content[0]
end = self.content[-1]
l.append(start.tofortran(tab=tab,isfix=isfix))
for item in self.content[1:-1]:
if isinstance(item, (Else_If_Stmt, Else_Stmt)):
l.append(item.tofortran(tab=tab,isfix=isfix))
else:
l.append(item.tofortran(tab=tab+' ',isfix=isfix))
l.append(end.tofortran(tab=tab,isfix=isfix))
return '\n'.join(l)
class If_Then_Stmt(StmtBase): # R803
"""
<if-then-stmt> = [ <if-construct-name> : ] IF ( <scalar-logical-expr> ) THEN
"""
subclass_names = []
use_names = ['If_Construct_Name', 'Scalar_Logical_Expr']
@staticmethod
def match(string):
if string[:2].upper()!='IF': return
if string[-4:].upper()!='THEN': return
line = string[2:-4].strip()
if not line: return
if line[0]+line[-1]!='()': return
return Scalar_Logical_Expr(line[1:-1].strip()),
def tostr(self):
return 'IF (%s) THEN' % self.items
def get_start_name(self):
return self.item.name
class Else_If_Stmt(StmtBase): # R804
"""
<else-if-stmt> = ELSE IF ( <scalar-logical-expr> ) THEN [ <if-construct-name> ]
"""
subclass_names = []
use_names = ['Scalar_Logical_Expr', 'If_Construct_Name']
@staticmethod
def match(string):
if string[:4].upper()!='ELSE': return
line = string[4:].lstrip()
if line[:2].upper()!='IF': return
line = line[2:].lstrip()
if not line.startswith('('): return
i = line.rfind(')')
if i==-1: return
expr = line[1:i].strip()
line = line[i+1:].lstrip()
if line[:4].upper()!='THEN': return
line = line[4:].lstrip()
if line: return Scalar_Logical_Expr(expr), If_Construct_Name(line)
return Scalar_Logical_Expr(expr), None
def tostr(self):
if self.items[1] is None:
return 'ELSE IF (%s) THEN' % (self.items[0])
return 'ELSE IF (%s) THEN %s' % self.items
def get_end_name(self):
name = self.items[1]
if name is not None:
return name.string
class Else_Stmt(StmtBase): # R805
"""
<else-stmt> = ELSE [ <if-construct-name> ]
"""
subclass_names = []
use_names = ['If_Construct_Name']
@staticmethod
def match(string):
if string[:4].upper()!='ELSE': return
line = string[4:].lstrip()
if line: return If_Construct_Name(line),
return None,
def tostr(self):
if self.items[0] is None:
return 'ELSE'
return 'ELSE %s' % self.items
def get_end_name(self):
name = self.items[0]
if name is not None:
return name.string
class End_If_Stmt(EndStmtBase): # R806
"""
<end-if-stmt> = END IF [ <if-construct-name> ]
"""
subclass_names = []
use_names = ['If_Construct_Name']
@staticmethod
def match(string):
return EndStmtBase.match('IF',If_Construct_Name, string, require_stmt_type=True)
class If_Stmt(StmtBase): # R807
"""
<if-stmt> = IF ( <scalar-logical-expr> ) <action-stmt>
"""
subclass_names = []
use_names = ['Scalar_Logical_Expr', 'Action_Stmt_C802']
@staticmethod
def match(string):
if string[:2].upper() != 'IF': return
line, repmap = string_replace_map(string)
line = line[2:].lstrip()
if not line.startswith('('): return
i = line.find(')')
if i==-1: return
expr = repmap(line[1:i].strip())
stmt = repmap(line[i+1:].lstrip())
return Scalar_Logical_Expr(expr), Action_Stmt_C802(stmt)
def tostr(self): return 'IF (%s) %s' % self.items
class Case_Construct(BlockBase): # R808
"""
<case-construct> = <select-case-stmt>
[ <case-stmt>
                           <block> == [<execution-part-construct>]...
                         ]...
<end-select-stmt>
"""
subclass_names = []
use_names = ['Select_Case_Stmt', 'Case_Stmt', 'End_Select_Stmt', 'Execution_Part_Construct']
@staticmethod
def match(reader):
return BlockBase.match(Select_Case_Stmt, [Case_Stmt,
Execution_Part_Construct,
Case_Stmt],
End_Select_Stmt, reader,
match_names = True, # C803
enable_case_construct_hook = True # C803
)
def tofortran(self, tab='', isfix=None):
l = []
start = self.content[0]
end = self.content[-1]
l.append(start.tofortran(tab=tab,isfix=isfix))
for item in self.content[1:-1]:
if isinstance(item, Case_Stmt):
l.append(item.tofortran(tab=tab,isfix=isfix))
else:
l.append(item.tofortran(tab=tab+' ',isfix=isfix))
l.append(end.tofortran(tab=tab,isfix=isfix))
return '\n'.join(l)
class Select_Case_Stmt(StmtBase, CALLBase): # R809
"""
<select-case-stmt> = [ <case-construct-name> : ] SELECT CASE ( <case-expr> )
"""
subclass_names = []
use_names = ['Case_Construct_Name', 'Case_Expr']
@staticmethod
def match(string):
if string[:6].upper()!='SELECT': return
line = string[6:].lstrip()
if line[:4].upper()!='CASE': return
line = line[4:].lstrip()
if not line or line[0]+line[-1] != '()': return
line = line[1:-1].strip()
return Case_Expr(line),
def tostr(self):
return 'SELECT CASE (%s)' % (self.items[0])
def get_start_name(self):
return self.item.name
class Case_Stmt(StmtBase): # R810
"""
<case-stmt> = CASE <case-selector> [ <case-construct-name> ]
"""
subclass_names = []
use_names = ['Case_Selector', 'Case_Construct_Name']
@staticmethod
def match(string):
if string[:4].upper()!='CASE': return
line, repmap = string_replace_map(string[4:].lstrip())
if line.startswith('('):
i = line.find(')')
if i==-1: return
n = line[i+1:].lstrip() or None
if n:
n = Case_Construct_Name(repmap(n))
return Case_Selector(repmap(line[:i+1].rstrip())), n
if line[:7].upper()=='DEFAULT':
            n = line[7:].lstrip() or None
            if n:
                n = Case_Construct_Name(repmap(n))
return Case_Selector(line[:7]), n
def tostr(self):
if self.items[1] is None:
return 'CASE %s' % (self.items[0])
return 'CASE %s %s' % (self.items)
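# Illustrative sketch: Case_Stmt('CASE DEFAULT') matches with items
# (Case_Selector('DEFAULT'), None), while 'CASE (1:2, 5) name' captures the
# value-range list and the trailing construct name.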
class End_Select_Stmt(EndStmtBase): # R811
"""
<end-select-stmt> = END SELECT [ <case-construct-name> ]
"""
subclass_names = []
use_names = ['Case_Construct_Name']
@staticmethod
def match(string):
return EndStmtBase.match('SELECT',Case_Construct_Name, string, require_stmt_type=True)
class Case_Expr(Base): # R812
"""
<case-expr> = <scalar-int-expr>
| <scalar-char-expr>
| <scalar-logical-expr>
"""
    subclass_names = ['Scalar_Int_Expr', 'Scalar_Char_Expr', 'Scalar_Logical_Expr']
class Case_Selector(Base): # R813
"""
<case-selector> = ( <case-value-range-list> )
| DEFAULT
"""
subclass_names = []
use_names = ['Case_Value_Range_List']
@staticmethod
def match(string):
if len(string)==7 and string.upper()=='DEFAULT':
return None,
if not (string.startswith('(') and string.endswith(')')):
return
return Case_Value_Range_List(string[1:-1].strip()),
def tostr(self):
if self.items[0] is None:
return 'DEFAULT'
return '(%s)' % (self.items[0])
class Case_Value_Range(SeparatorBase): # R814
"""
<case-value-range> = <case-value>
| <case-value> :
| : <case-value>
| <case-value> : <case-value>
"""
subclass_names = ['Case_Value']
@staticmethod
def match(string):
return SeparatorBase.match(Case_Value, Case_Value, string)
class Case_Value(Base): # R815
"""
<case-value> = <scalar-int-initialization-expr>
| <scalar-char-initialization-expr>
| <scalar-logical-initialization-expr>
"""
subclass_names = ['Scalar_Int_Initialization_Expr', 'Scalar_Char_Initialization_Expr', 'Scalar_Logical_Initialization_Expr']
class Associate_Construct(BlockBase): # R816
"""
<associate-construct> = <associate-stmt>
<block> == [ <execution-part-construct> ]...
<end-associate-stmt>
"""
subclass_names = []
use_names = ['Associate_Stmt', 'Execution_Part_Construct', 'End_Associate_Stmt']
@staticmethod
def match(reader):
return BlockBase.match(Associate_Stmt, [Execution_Part_Construct], End_Associate_Stmt, reader,
match_names = True, # C810
)
class Associate_Stmt(StmtBase, CALLBase): # R817
"""
<associate-stmt> = [ <associate-construct-name> : ] ASSOCIATE ( <association-list> )
"""
subclass_names = []
use_names = ['Associate_Construct_Name', 'Association_List']
@staticmethod
def match(string):
return CALLBase.match('ASSOCIATE', Association_List, string)
def get_start_name(self):
return self.item.name
class Association(BinaryOpBase): # R818
"""
<association> = <associate-name> => <selector>
"""
subclass_names = []
use_names = ['Associate_Name', 'Selector']
@staticmethod
def match(string):
return BinaryOpBase.match(Associate_Name, '=>', Selector, string)
class Selector(Base): # R819
"""
<selector> = <expr>
| <variable>
"""
subclass_names = ['Expr', 'Variable']
class End_Associate_Stmt(EndStmtBase): # R820
"""
<end-associate-stmt> = END ASSOCIATE [ <associate-construct-name> ]
"""
subclass_names = []
use_names = ['Associate_Construct_Name']
@staticmethod
def match(string):
return EndStmtBase.match('ASSOCIATE',Associate_Construct_Name, string, require_stmt_type=True)
class Select_Type_Construct(BlockBase): # R821
"""
<select-type-construct> = <select-type-stmt>
[ <type-guard-stmt>
                                <block> == [<execution-part-construct>]...
]...
<end-select-type-stmt>
"""
subclass_names = []
use_names = ['Select_Type_Stmt', 'Type_Guard_Stmt', 'Execution_Part_Construct', 'End_Select_Type_Stmt']
@staticmethod
def match(reader):
return BlockBase.match(Select_Type_Stmt, [Type_Guard_Stmt, Execution_Part_Construct, Type_Guard_Stmt], End_Select_Type_Stmt, reader,
match_names = True, # C819
enable_select_type_construct_hook = True)
class Select_Type_Stmt(StmtBase): # R822
"""
<select-type-stmt> = [ <select-construct-name> : ] SELECT TYPE ( [ <associate-name> => ] <selector> )
"""
subclass_names = []
use_names = ['Select_Construct_Name', 'Associate_Name', 'Selector']
@staticmethod
def match(string):
if string[:6].upper()!='SELECT': return
line = string[6:].lstrip()
if line[:4].upper()!='TYPE': return
line = line[4:].lstrip()
if not line or line[0]+line[-1] != '()': return
line = line[1:-1].strip()
i = line.find('=>')
if i!=-1:
return Associate_Name(line[:i].rstrip()), Selector(line[i+2:].lstrip())
return None, Selector(line)
def tostr(self):
if self.items[0] is None:
return 'SELECT TYPE(%s)' % (self.items[1])
return 'SELECT TYPE(%s=>%s)' % (self.items)
def get_start_name(self):
return self.item.name
class Type_Guard_Stmt(StmtBase): # R823
"""
::
<type-guard-stmt> = TYPE IS ( <type-spec> ) [ <select-construct-name> ]
| CLASS IS ( <type-spec> ) [ <select-construct-name> ]
| CLASS DEFAULT [ <select-construct-name> ]
Attributes
----------
items : ({'TYPE IS', 'CLASS IS', 'CLASS DEFAULT'}, Type_Spec, Select_Construct_Name)
"""
subclass_names = []
use_names = ['Type_Spec', 'Select_Construct_Name']
@staticmethod
def match(string):
if string[:4].upper()=='TYPE':
line = string[4:].lstrip()
if not line[:2].upper()=='IS': return
line = line[2:].lstrip()
kind = 'TYPE IS'
elif string[:5].upper()=='CLASS':
line = string[5:].lstrip()
if line[:2].upper()=='IS':
line = line[2:].lstrip()
kind = 'CLASS IS'
elif line[:7].upper()=='DEFAULT':
line = line[7:].lstrip()
if line:
if isalnum(line[0]): return
return 'CLASS DEFAULT', None, Select_Construct_Name(line)
return 'CLASS DEFAULT', None, None
else:
return
else:
return
if not line.startswith('('): return
i = line.rfind(')')
if i==-1: return
l = line[1:i].strip()
if not l: return
line = line[i+1:].lstrip()
if line:
return kind, Type_Spec(l), Select_Construct_Name(line)
return kind, Type_Spec(l), None
def tostr(self):
s = str(self.items[0])
if self.items[1] is not None:
s += ' (%s)' % (self.items[1])
if self.items[2] is not None:
s += ' %s' % (self.items[2])
return s
class End_Select_Type_Stmt(EndStmtBase): # R824
"""
<end-select-type-stmt> = END SELECT [ <select-construct-name> ]
"""
subclass_names = []
use_names = ['Select_Construct_Name']
@staticmethod
def match(string):
return EndStmtBase.match('SELECT',Select_Construct_Name, string, require_stmt_type=True)
class Do_Construct(Base): # R825
"""
<do-construct> = <block-do-construct>
| <nonblock-do-construct>
"""
subclass_names = ['Block_Do_Construct', 'Nonblock_Do_Construct']
class Block_Do_Construct(Base): # R826
"""
<block-do-construct> = <block-label-do-construct> | <block-nonlabel-do-construct>
"""
subclass_names = ['Block_Label_Do_Construct', 'Block_Nonlabel_Do_Construct']
class Block_Label_Do_Construct(BlockBase): # R826_1
"""
<block-label-do-construct> = <label-do-stmt>
[ <execution-part-construct> ]...
<end-do>
"""
subclass_names = []
use_names = ['Label_Do_Stmt', 'Execution_Part_Construct', 'End_Do']
@staticmethod
def match(reader):
return BlockBase.match(Label_Do_Stmt, [Execution_Part_Construct],
End_Do, reader,
match_labels=True, enable_do_label_construct_hook=True)
def tofortran(self, tab='', isfix=None):
l = []
start = self.content[0]
end = self.content[-1]
extra_tab = ' '
l.append(start.tofortran(tab=tab,isfix=isfix))
for item in self.content[1:-1]:
l.append(item.tofortran(tab=tab+extra_tab,isfix=isfix))
if len(self.content)>1:
l.append(end.tofortran(tab=tab,isfix=isfix))
return '\n'.join(l)
class Block_Nonlabel_Do_Construct(BlockBase): # R826_2
"""
<block-nonlabel-do-construct> = <nonlabel-do-stmt>
[ <execution-part-construct> ]...
<end-do-stmt>
"""
subclass_names = []
use_names = ['Nonlabel_Do_Stmt', 'Execution_Part_Construct', 'End_Do_Stmt']
@staticmethod
def match(reader):
return BlockBase.match(Nonlabel_Do_Stmt, [Execution_Part_Construct],
End_Do_Stmt, reader
)
class Do_Stmt(Base): # R827
"""
<do-stmt> = <label-do-stmt>
| <nonlabel-do-stmt>
"""
subclass_names = ['Label_Do_Stmt', 'Nonlabel_Do_Stmt']
class Label_Do_Stmt(StmtBase): # R828
"""
<label-do-stmt> = [ <do-construct-name> : ] DO <label> [ <loop-control> ]
"""
subclass_names = []
use_names = ['Do_Construct_Name', 'Label', 'Loop_Control']
@staticmethod
def match(string):
# do-construct-name is determined by reader
if string[:2].upper()!='DO': return
line = string[2:].lstrip()
m = pattern.label.match(line)
if m is None: return
label = m.group()
line = line[m.end():].lstrip()
if line:
return None, Label(label), Loop_Control(line)
return None, Label(label), None
def tostr(self):
name, label, loop_control = self.items
if name is None:
s = 'DO %s' % (label)
else:
            s = '%s: DO %s' % (name, label)
if loop_control is not None:
s += ' %s' % (loop_control)
return s
def get_start_name(self):
return self.item.name
def get_start_label(self):
return int(self.items[1])
do_construct_name = property(lambda self: self.items[0])
label = property(lambda self: self.items[1])
loop_control = property(lambda self: self.items[2])
class Nonlabel_Do_Stmt(StmtBase, WORDClsBase): # R829
"""
<nonlabel-do-stmt> = [ <do-construct-name> : ] DO [ <loop-control> ]
"""
subclass_names = []
use_names = ['Do_Construct_Name', 'Loop_Control']
def match(string): return WORDClsBase.match('DO', Loop_Control, string)
match = staticmethod(match)
class Loop_Control(Base): # R830
"""
<loop-control> = [ , ] <do-variable> = <scalar-int-expr> , <scalar-int-expr> [ , <scalar-int-expr> ]
| [ , ] WHILE ( <scalar-logical-expr> )
"""
subclass_names = []
use_names = ['Do_Variable', 'Scalar_Int_Expr', 'Scalar_Logical_Expr']
def match(string):
if string.startswith(','):
line, repmap = string_replace_map(string[1:].lstrip())
else:
line, repmap = string_replace_map(string)
if line[:5].upper()=='WHILE' and line[5:].lstrip().startswith('('):
l = line[5:].lstrip()
i = l.find(')')
if i!=-1 and i==len(l)-1:
return Scalar_Logical_Expr(repmap(l[1:i].strip())),
if line.count('=')!=1: return
var,rhs = line.split('=')
rhs = [s.strip() for s in rhs.lstrip().split(',')]
if not 2<=len(rhs)<=3: return
        return Variable(repmap(var.rstrip())), [Scalar_Int_Expr(repmap(r)) for r in rhs]
match = staticmethod(match)
def tostr(self):
if len(self.items)==1: return ', WHILE (%s)' % (self.items[0])
return ', %s = %s' % (self.items[0], ', '.join(map(str,self.items[1])))
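# Illustrative sketch: per match() above, a counted loop-control yields a
# (variable, [bounds]) pair, and tostr() always emits the optional leading
# comma, while the WHILE form keeps only the logical expression:
#
#     assert str(Loop_Control('i = 1, n, 2')) == ', i = 1, n, 2'
#     assert str(Loop_Control('while (x < 10)')) == ', WHILE (x < 10)'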
class Do_Variable(Base): # R831
"""
<do-variable> = <scalar-int-variable>
"""
subclass_names = ['Scalar_Int_Variable']
class Do_Block(BlockBase): # R832
"""
<do-block> = [ <execution-part-construct> ]...
"""
    subclass_names = []
use_names = ['Execution_Part_Construct']
@staticmethod
    def match(reader): return BlockBase.match(None, [Execution_Part_Construct], None, reader)
class End_Do(Base): # R833
"""
<end-do> = <end-do-stmt>
| <continue-stmt>
"""
subclass_names = ['End_Do_Stmt', 'Continue_Stmt']
class End_Do_Stmt(EndStmtBase): # R834
"""
<end-do-stmt> = END DO [ <do-construct-name> ]
"""
subclass_names = []
use_names = ['Do_Construct_Name']
@staticmethod
def match(string):
return EndStmtBase.match('DO',Do_Construct_Name, string, require_stmt_type=True)
class Nonblock_Do_Construct(Base): # R835
"""
    <nonblock-do-construct> = <action-term-do-construct>
                            | <outer-shared-do-construct>
"""
subclass_names = ['Action_Term_Do_Construct', 'Outer_Shared_Do_Construct']
class Action_Term_Do_Construct(BlockBase): # R836
"""
::
<action-term-do-construct> = <label-do-stmt>
<do-body>
<do-term-action-stmt>
::
<action-term-do-construct> = <label-do-stmt>
[ <execution-part-construct> ]...
<do-term-action-stmt>
"""
subclass_names = []
use_names = ['Label_Do_Stmt', 'Execution_Part_Construct', 'Do_Term_Action_Stmt']
@staticmethod
def match(reader):
return BlockBase.match(Label_Do_Stmt, [Execution_Part_Construct],
Do_Term_Action_Stmt, reader,
match_labels=True, enable_do_label_construct_hook=True)
def tofortran(self, tab='', isfix=None):
l = []
start = self.content[0]
end = self.content[-1]
extra_tab = ' '
l.append(start.tofortran(tab=tab,isfix=isfix))
for item in self.content[1:-1]:
l.append(item.tofortran(tab=tab+extra_tab,isfix=isfix))
if isinstance(item, Label_Do_Stmt):
extra_tab += ' '
if len(self.content)>1:
l.append(end.tofortran(tab=tab,isfix=isfix))
return '\n'.join(l)
class Do_Body(BlockBase): # R837
"""
<do-body> = [ <execution-part-construct> ]...
"""
subclass_names = []
use_names = ['Execution_Part_Construct']
    def match(reader): return BlockBase.match(None, [Execution_Part_Construct], None, reader)
match = staticmethod(match)
class Do_Term_Action_Stmt(StmtBase): # R838
"""
::
<do-term-action-stmt> = <action-stmt>
Notes
-----
C824: <do-term-action-stmt> shall not be <continue-stmt>, <goto-stmt>,
<return-stmt>, <stop-stmt>, <exit-stmt>, <cycle-stmt>,
<end-function-stmt>, <end-subroutine-stmt>, <end-program-stmt>,
<arithmetic-if-stmt>
"""
subclass_names = ['Action_Stmt_C824']
class Outer_Shared_Do_Construct(BlockBase): # R839
"""
<outer-shared-do-construct> = <label-do-stmt>
<do-body>
<shared-term-do-construct>
"""
subclass_names = []
use_names = ['Label_Do_Stmt', 'Do_Body', 'Shared_Term_Do_Construct']
def match(reader):
content = []
for cls in [Label_Do_Stmt, Do_Body, Shared_Term_Do_Construct]:
obj = cls(reader)
if obj is None: # todo: restore reader
return
content.append(obj)
return content,
match = staticmethod(match)
class Shared_Term_Do_Construct(Base): # R840
"""
<shared-term-do-construct> = <outer-shared-do-construct>
| <inner-shared-do-construct>
"""
subclass_names = ['Outer_Shared_Do_Construct', 'Inner_Shared_Do_Construct']
class Inner_Shared_Do_Construct(BlockBase): # R841
"""
<inner-shared-do-construct> = <label-do-stmt>
<do-body>
<do-term-shared-stmt>
"""
subclass_names = []
use_names = ['Label_Do_Stmt', 'Do_Body', 'Do_Term_Shared_Stmt']
def match(reader):
content = []
for cls in [Label_Do_Stmt, Do_Body, Do_Term_Shared_Stmt]:
obj = cls(reader)
if obj is None: # todo: restore reader
return
content.append(obj)
return content,
match = staticmethod(match)
class Do_Term_Shared_Stmt(StmtBase): # R842
"""
<do-term-shared-stmt> = <action-stmt>
C826: see C824 above.
"""
subclass_names = ['Action_Stmt']
class Cycle_Stmt(StmtBase, WORDClsBase): # R843
"""
<cycle-stmt> = CYCLE [ <do-construct-name> ]
"""
subclass_names = []
use_names = ['Do_Construct_Name']
def match(string): return WORDClsBase.match('CYCLE', Do_Construct_Name, string)
match = staticmethod(match)
class Exit_Stmt(StmtBase, WORDClsBase): # R844
"""
<exit-stmt> = EXIT [ <do-construct-name> ]
"""
subclass_names = []
use_names = ['Do_Construct_Name']
def match(string): return WORDClsBase.match('EXIT', Do_Construct_Name, string)
match = staticmethod(match)
class Goto_Stmt(StmtBase): # R845
"""
<goto-stmt> = GO TO <label>
"""
subclass_names = []
use_names = ['Label']
def match(string):
if string[:2].upper() != 'GO': return
line = string[2:].lstrip()
if line[:2].upper() != 'TO': return
return Label(line[2:].lstrip()),
match = staticmethod(match)
def tostr(self): return 'GO TO %s' % (self.items[0])
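# Illustrative sketch: the blank between GO and TO is optional on input and
# tostr() normalises it:
#
#     assert str(Goto_Stmt('goto 123')) == 'GO TO 123'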
class Computed_Goto_Stmt(StmtBase): # R846
"""
<computed-goto-stmt> = GO TO ( <label-list> ) [ , ] <scalar-int-expr>
"""
subclass_names = []
use_names = ['Label_List', 'Scalar_Int_Expr']
def match(string):
if string[:2].upper()!='GO': return
line = string[2:].lstrip()
if line[:2].upper()!='TO': return
line = line[2:].lstrip()
if not line.startswith('('): return
i = line.find(')')
if i==-1: return
lst = line[1:i].strip()
if not lst: return
line = line[i+1:].lstrip()
if line.startswith(','):
line = line[1:].lstrip()
if not line: return
return Label_List(lst), Scalar_Int_Expr(line)
match = staticmethod(match)
def tostr(self): return 'GO TO (%s), %s' % self.items
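# Illustrative sketch: the comma before the scalar-int-expr is optional on
# input but always present in the canonical form:
#
#     g = Computed_Goto_Stmt('GO TO (10, 20, 30) i')
#     assert str(g) == 'GO TO (10, 20, 30), i'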
class Arithmetic_If_Stmt(StmtBase): # R847
"""
<arithmetic-if-stmt> = IF ( <scalar-numeric-expr> ) <label> , <label> , <label>
"""
subclass_names = []
use_names = ['Scalar_Numeric_Expr', 'Label']
def match(string):
if string[:2].upper() != 'IF': return
line = string[2:].lstrip()
if not line.startswith('('): return
i = line.rfind(')')
if i==-1: return
labels = line[i+1:].lstrip().split(',')
if len(labels) != 3: return
labels = [Label(l.strip()) for l in labels]
return (Scalar_Numeric_Expr(line[1:i].strip()),) + tuple(labels)
match = staticmethod(match)
def tostr(self): return 'IF (%s) %s, %s, %s' % self.items
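# Illustrative sketch (obsolescent F77 feature, still parsed):
#
#     s = Arithmetic_If_Stmt('IF (x - 1.0) 10, 20, 30')
#     assert str(s) == 'IF (x - 1.0) 10, 20, 30'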
class Continue_Stmt(StmtBase, STRINGBase): # R848
"""
<continue-stmt> = CONTINUE
"""
subclass_names = []
def match(string): return STRINGBase.match('CONTINUE', string)
match = staticmethod(match)
def get_end_label(self):
return self.item.label
class Stop_Stmt(StmtBase, WORDClsBase): # R849
"""
<stop-stmt> = STOP [ <stop-code> ]
"""
subclass_names = []
use_names = ['Stop_Code']
def match(string): return WORDClsBase.match('STOP', Stop_Code, string)
match = staticmethod(match)
class Stop_Code(StringBase): # R850
"""
<stop-code> = <scalar-char-constant>
| <digit> [ <digit> [ <digit> [ <digit> [ <digit> ] ] ] ]
"""
subclass_names = ['Scalar_Char_Constant']
def match(string): return StringBase.match(pattern.abs_label, string)
match = staticmethod(match)
###############################################################################
############################### SECTION 9 ####################################
###############################################################################
class Io_Unit(StringBase): # R901
"""
<io-unit> = <file-unit-number>
| *
| <internal-file-variable>
"""
subclass_names = ['File_Unit_Number', 'Internal_File_Variable']
def match(string): return StringBase.match('*', string)
match = staticmethod(match)
class File_Unit_Number(Base): # R902
"""
<file-unit-number> = <scalar-int-expr>
"""
subclass_names = ['Scalar_Int_Expr']
class Internal_File_Variable(Base): # R903
"""
<internal-file-variable> = <char-variable>
C901: <char-variable> shall not be an array section with a vector subscript.
"""
subclass_names = ['Char_Variable']
class Open_Stmt(StmtBase, CALLBase): # R904
"""
<open-stmt> = OPEN ( <connect-spec-list> )
"""
subclass_names = []
use_names = ['Connect_Spec_List']
@staticmethod
def match(string):
return CALLBase.match('OPEN', Connect_Spec_List, string, require_rhs=True)
class Connect_Spec(KeywordValueBase): # R905
"""
<connect-spec> = [ UNIT = ] <file-unit-number>
| ACCESS = <scalar-default-char-expr>
| ACTION = <scalar-default-char-expr>
| ASYNCHRONOUS = <scalar-default-char-expr>
| BLANK = <scalar-default-char-expr>
| DECIMAL = <scalar-default-char-expr>
| DELIM = <scalar-default-char-expr>
| ENCODING = <scalar-default-char-expr>
| ERR = <label>
| FILE = <file-name-expr>
| FORM = <scalar-default-char-expr>
| IOMSG = <iomsg-variable>
| IOSTAT = <scalar-int-variable>
| PAD = <scalar-default-char-expr>
| POSITION = <scalar-default-char-expr>
| RECL = <scalar-int-expr>
| ROUND = <scalar-default-char-expr>
| SIGN = <scalar-default-char-expr>
| STATUS = <scalar-default-char-expr>
"""
subclass_names = []
use_names = ['File_Unit_Number', 'Scalar_Default_Char_Expr', 'Label', 'File_Name_Expr', 'Iomsg_Variable',
'Scalar_Int_Expr', 'Scalar_Int_Variable']
def match(string):
for (k,v) in [\
(['ACCESS','ACTION','ASYNCHRONOUS','BLANK','DECIMAL','DELIM','ENCODING',
'FORM','PAD','POSITION','ROUND','SIGN','STATUS'], Scalar_Default_Char_Expr),
('ERR', Label),
('FILE',File_Name_Expr),
('IOSTAT', Scalar_Int_Variable),
('IOMSG', Iomsg_Variable),
('RECL', Scalar_Int_Expr),
('UNIT', File_Unit_Number),
]:
try:
obj = KeywordValueBase.match(k, v, string, upper_lhs = True)
except NoMatchError:
obj = None
if obj is not None: return obj
        return 'UNIT', File_Unit_Number(string)
match = staticmethod(match)
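# Illustrative sketch: keyworded specs are matched against the table above,
# and a bare value falls through to the positional [ UNIT = ] form:
#
#     spec = Connect_Spec('10')
#     # spec.items should be ('UNIT', File_Unit_Number('10')); likewise
#     # Connect_Spec("STATUS = 'OLD'").items[0] == 'STATUS'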
class File_Name_Expr(Base): # R906
"""
<file-name-expr> = <scalar-default-char-expr>
"""
subclass_names = ['Scalar_Default_Char_Expr']
class Iomsg_Variable(Base): # R907
"""
<iomsg-variable> = <scalar-default-char-variable>
"""
subclass_names = ['Scalar_Default_Char_Variable']
class Close_Stmt(StmtBase, CALLBase): # R908
"""
<close-stmt> = CLOSE ( <close-spec-list> )
"""
subclass_names = []
use_names = ['Close_Spec_List']
def match(string):
return CALLBase.match('CLOSE', Close_Spec_List, string, require_rhs=True)
match = staticmethod(match)
class Close_Spec(KeywordValueBase): # R909
"""
<close-spec> = [ UNIT = ] <file-unit-number>
| IOSTAT = <scalar-int-variable>
| IOMSG = <iomsg-variable>
| ERR = <label>
| STATUS = <scalar-default-char-expr>
"""
subclass_names = []
use_names = ['File_Unit_Number', 'Scalar_Default_Char_Expr', 'Label', 'Iomsg_Variable',
'Scalar_Int_Variable']
def match(string):
for (k,v) in [\
('ERR', Label),
('IOSTAT', Scalar_Int_Variable),
('IOMSG', Iomsg_Variable),
('STATUS', Scalar_Default_Char_Expr),
('UNIT', File_Unit_Number),
]:
try:
obj = KeywordValueBase.match(k, v, string, upper_lhs = True)
except NoMatchError:
obj = None
if obj is not None: return obj
return 'UNIT', File_Unit_Number(string)
match = staticmethod(match)
class Read_Stmt(StmtBase): # R910
"""
:F03R:`910`::
<read-stmt> = READ ( <io-control-spec-list> ) [ <input-item-list> ]
| READ <format> [ , <input-item-list> ]
Attributes
----------
items : (Io_Control_Spec_List, Format, Input_Item_List)
"""
subclass_names = []
use_names = ['Io_Control_Spec_List', 'Input_Item_List', 'Format']
@staticmethod
def match(string):
if string[:4].upper()!='READ': return
line = string[4:].lstrip()
if line.startswith('('):
line, repmap = string_replace_map(line)
i = line.find(')')
if i==-1: return
l = line[1:i].strip()
if not l: return
if i==len(line)-1:
return Io_Control_Spec_List(repmap(l)),None,None
return Io_Control_Spec_List(repmap(l)), None, Input_Item_List(repmap(line[i+1:].lstrip()))
        if not line: return
        # Inspect the character immediately after 'READ' (before any
        # stripping) so that identifiers such as READX or READ2 are
        # rejected, while a label format as in 'READ 10, x' still matches.
        c = string[4:5].upper()
        if 'A'<=c<='Z' or c=='_' or '0'<=c<='9': return
        line, repmap = string_replace_map(line)
        i = line.find(',')
        if i==-1: return Format(repmap(line)),None,None
        l = repmap(line[i+1:].lstrip())
        if not l: return
        return None, Format(repmap(line[:i].rstrip())), Input_Item_List(l)
def tostr(self):
if self.items[0] is not None:
assert self.items[1] is None,repr(self.items)
if self.items[2] is None:
return 'READ(%s)' % (self.items[0])
return 'READ(%s) %s' % (self.items[0], self.items[2])
assert self.items[1] is not None, repr(self.items)
if self.items[2] is None:
return 'READ %s' % (self.items[1])
return 'READ %s, %s' % (self.items[1], self.items[2])
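# Illustrative sketch: both R910 forms round-trip, with control specs
# normalised to keyword form:
#
#     assert str(Read_Stmt('READ *, x')) == 'READ *, x'
#     # str(Read_Stmt('READ (5) x')) should read roughly 'READ(UNIT = 5) x'
#     # (exact spacing depends on KeywordValueBase.tostr).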
class Write_Stmt(StmtBase): # R911
"""
:F03R:`911`::
<write-stmt> = WRITE ( <io-control-spec-list> ) [ <output-item-list> ]
Parameters
----------
items : (Io_Control_Spec_List, Output_Item_List)
"""
subclass_names = []
use_names = ['Io_Control_Spec_List', 'Output_Item_List']
def match(string):
if string[:5].upper()!='WRITE': return
line = string[5:].lstrip()
if not line.startswith('('): return
line, repmap = string_replace_map(line)
i = line.find(')')
if i==-1: return
l = line[1:i].strip()
if not l: return
l = repmap(l)
if i==len(line)-1:
return Io_Control_Spec_List(l),None
return Io_Control_Spec_List(l), Output_Item_List(repmap(line[i+1:].lstrip()))
match = staticmethod(match)
def tostr(self):
if self.items[1] is None: return 'WRITE(%s)' % (self.items[0])
return 'WRITE(%s) %s' % tuple(self.items)
class Print_Stmt(StmtBase): # R912
"""
:F03R:`912`::
<print-stmt> = PRINT <format> [ , <output-item-list> ]
Parameters
----------
items : (Format, Output_Item_List)
"""
subclass_names = []
use_names = ['Format', 'Output_Item_List']
def match(string):
if string[:5].upper()!='PRINT': return
line = string[5:]
if not line: return
c = line[0].upper()
if 'A'<=c<='Z' or c=='_' or '0'<=c<='9': return
line, repmap = string_replace_map(line.lstrip())
i = line.find(',')
if i==-1: return Format(repmap(line)), None
l = repmap(line[i+1:].lstrip())
if not l: return
return Format(repmap(line[:i].rstrip())), Output_Item_List(l)
match = staticmethod(match)
def tostr(self):
if self.items[1] is None: return 'PRINT %s' % (self.items[0])
return 'PRINT %s, %s' % tuple(self.items)
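# Illustrative sketch: string_replace_map() protects quoted literals, so the
# comma split above cannot fire inside them:
#
#     assert str(Print_Stmt("PRINT *, 'a, b'")) == "PRINT *, 'a, b'"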
class Io_Control_Spec_List(SequenceBase): # R913-list
"""
<io-control-spec-list> is a list taking into account C910, C917, C918
"""
subclass_names = []
use_names = ['Io_Control_Spec']
def match(string):
line, repmap = string_replace_map(string)
splitted = line.split(',')
if not splitted: return
lst = []
for i in range(len(splitted)):
p = splitted[i].strip()
if i==0:
if '=' not in p: p = 'UNIT=%s' % (repmap(p))
else: p = repmap(p)
elif i==1:
if '=' not in p:
p = repmap(p)
try:
                        f = Format(p)
                        # todo: make sure that f is char-expr, if not, raise NoMatchError
                        p = 'FMT=%s' % (f)
except NoMatchError:
p = 'NML=%s' % (Namelist_Group_Name(p))
else:
p = repmap(p)
else:
p = repmap(p)
lst.append(Io_Control_Spec(p))
return ',', tuple(lst)
match = staticmethod(match)
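# Illustrative sketch: positional items get their keywords reinstated, so
# '6, *' is parsed as if written 'UNIT=6, FMT=*':
#
#     lst = Io_Control_Spec_List('6, *')
#     # str(lst) should read roughly 'UNIT = 6, FMT = *'; a second item that
#     # fails to match Format is retried as NML=<namelist-group-name>.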
class Io_Control_Spec(KeywordValueBase): # R913
"""
<io-control-spec> = [ UNIT = ] <io-unit>
| [ FMT = ] <format>
| [ NML = ] <namelist-group-name>
| ADVANCE = <scalar-default-char-expr>
| ASYNCHRONOUS = <scalar-char-initialization-expr>
| BLANK = <scalar-default-char-expr>
| DECIMAL = <scalar-default-char-expr>
| DELIM = <scalar-default-char-expr>
| END = <label>
| EOR = <label>
| ERR = <label>
| ID = <scalar-int-variable>
| IOMSG = <iomsg-variable>
| IOSTAT = <scalar-int-variable>
| PAD = <scalar-default-char-expr>
| POS = <scalar-int-expr>
| REC = <scalar-int-expr>
| ROUND = <scalar-default-char-expr>
| SIGN = <scalar-default-char-expr>
| SIZE = <scalar-int-variable>
"""
subclass_names = []
use_names = ['Io_Unit', 'Format', 'Namelist_Group_Name', 'Scalar_Default_Char_Expr',
'Scalar_Char_Initialization_Expr', 'Label', 'Scalar_Int_Variable',
'Iomsg_Variable', 'Scalar_Int_Expr']
def match(string):
for (k,v) in [\
(['ADVANCE', 'BLANK', 'DECIMAL', 'DELIM', 'PAD', 'ROUND', 'SIGN'], Scalar_Default_Char_Expr),
('ASYNCHRONOUS', Scalar_Char_Initialization_Expr),
(['END','EOR','ERR'], Label),
(['ID','IOSTAT','SIZE'], Scalar_Int_Variable),
('IOMSG', Iomsg_Variable),
(['POS', 'REC'], Scalar_Int_Expr),
('UNIT', Io_Unit),
('FMT', Format),
('NML', Namelist_Group_Name)
]:
try:
obj = KeywordValueBase.match(k, v, string, upper_lhs = True)
except NoMatchError:
obj = None
if obj is not None: return obj
return
match = staticmethod(match)
class Format(StringBase): # R914
"""
<format> = <default-char-expr>
| <label>
| *
"""
subclass_names = ['Label', 'Default_Char_Expr']
def match(string): return StringBase.match('*', string)
match = staticmethod(match)
class Input_Item(Base): # R915
"""
<input-item> = <variable>
| <io-implied-do>
"""
subclass_names = ['Variable', 'Io_Implied_Do']
class Output_Item(Base): # R916
"""
<output-item> = <expr>
| <io-implied-do>
"""
subclass_names = ['Expr', 'Io_Implied_Do']
class Io_Implied_Do(Base): # R917
"""
<io-implied-do> = ( <io-implied-do-object-list> , <io-implied-do-control> )
"""
subclass_names = []
use_names = ['Io_Implied_Do_Object_List', 'Io_Implied_Do_Control']
@staticmethod
def match(string):
if len(string)<=9 or string[0]!='(' or string[-1]!=')':
return
line, repmap = string_replace_map(string[1:-1].strip())
i = line.rfind('=')
if i==-1:
return
j = line[:i].rfind(',')
if j==-1:
return
return Io_Implied_Do_Object_List(repmap(line[:j].rstrip())), Io_Implied_Do_Control(repmap(line[j+1:].lstrip()))
def tostr(self):
return '(%s, %s)' % (self.items)
class Io_Implied_Do_Object(Base): # R918
"""
<io-implied-do-object> = <input-item>
| <output-item>
"""
subclass_names = ['Input_Item', 'Output_Item']
class Io_Implied_Do_Control(Base): # R919
"""
<io-implied-do-control> = <do-variable> = <scalar-int-expr> , <scalar-int-expr> [ , <scalar-int-expr> ]
"""
subclass_names = []
use_names = ['Do_Variable', 'Scalar_Int_Expr']
@staticmethod
def match(string):
line, repmap = string_replace_map(string)
if '=' not in line:
return
v, exprs = line.split('=',1)
v = Do_Variable(repmap(v.rstrip()))
exprs = exprs.lstrip().split(',')
if len(exprs) not in [2,3]: return
exprs = tuple([Scalar_Int_Expr(repmap(e.strip())) for e in exprs])
if len(exprs)==2:
return (v,)+exprs+(None,)
return (v,)+exprs
def tostr(self):
if self.items[3] is not None:
return '%s = %s, %s, %s' % (self.items)
return '%s = %s, %s' % (self.items[:-1])
class Dtv_Type_Spec(CALLBase): # R920
"""
<dtv-type-spec> = TYPE ( <derived-type-spec> )
| CLASS ( <derived-type-spec> )
"""
subclass_names = []
use_names = ['Derived_Type_Spec']
@staticmethod
def match(string):
return CALLBase.match(['TYPE', 'CLASS'], Derived_Type_Spec, string, require_rhs=True)
class Wait_Stmt(StmtBase, CALLBase): # R921
"""
<wait-stmt> = WAIT ( <wait-spec-list> )
"""
subclass_names = []
use_names = ['Wait_Spec_List']
@staticmethod
def match(string):
return CALLBase.match('WAIT', Wait_Spec_List, string, require_rhs=True)
class Wait_Spec(KeywordValueBase): # R922
"""
<wait-spec> = [ UNIT = ] <file-unit-number>
| END = <label>
| EOR = <label>
| ERR = <label>
| ID = <scalar-int-expr>
| IOMSG = <iomsg-variable>
| IOSTAT = <scalar-int-variable>
"""
subclass_names = []
use_names = ['File_Unit_Number', 'Label', 'Scalar_Int_Expr', 'Iomsg_Variable', 'Scalar_Int_Variable']
@staticmethod
def match(string):
for (k,v) in [\
(['END','EOR','ERR'], Label),
('IOSTAT', Scalar_Int_Variable),
('IOMSG', Iomsg_Variable),
('ID', Scalar_Int_Expr),
('UNIT', File_Unit_Number),
]:
try:
obj = KeywordValueBase.match(k, v, string, upper_lhs = True)
except NoMatchError:
obj = None
if obj is not None: return obj
return 'UNIT', File_Unit_Number(string)
class Backspace_Stmt(StmtBase): # R923
"""
:F03R:`923`::
<backspace-stmt> = BACKSPACE <file-unit-number>
| BACKSPACE ( <position-spec-list> )
Attributes
----------
items : (File_Unit_Number, Position_Spec_List)
"""
subclass_names = []
use_names = ['File_Unit_Number', 'Position_Spec_List']
@staticmethod
def match(string):
if string[:9].upper()!='BACKSPACE': return
line = string[9:].lstrip()
if line.startswith('('):
if not line.endswith(')'): return
return None, Position_Spec_List(line[1:-1].strip())
return File_Unit_Number(line), None
def tostr(self):
if self.items[0] is not None:
assert self.items[1] is None, repr(self.items)
return 'BACKSPACE %s' % (self.items[0])
return 'BACKSPACE(%s)' % (self.items[1])
class Endfile_Stmt(StmtBase): # R924
"""
:F03R:`924`::
<endfile-stmt> = ENDFILE <file-unit-number>
| ENDFILE ( <position-spec-list> )
Attributes
----------
items : (File_Unit_Number, Position_Spec_List)
"""
subclass_names = []
use_names = ['File_Unit_Number', 'Position_Spec_List']
@staticmethod
def match(string):
if string[:7].upper()!='ENDFILE': return
line = string[7:].lstrip()
if line.startswith('('):
if not line.endswith(')'): return
return None, Position_Spec_List(line[1:-1].strip())
return File_Unit_Number(line), None
def tostr(self):
if self.items[0] is not None:
assert self.items[1] is None, repr(self.items)
return 'ENDFILE %s' % (self.items[0])
return 'ENDFILE(%s)' % (self.items[1])
class Rewind_Stmt(StmtBase): # R925
"""
:F03R:`925`::
<rewind-stmt> = REWIND <file-unit-number>
| REWIND ( <position-spec-list> )
Attributes
----------
items : (File_Unit_Number, Position_Spec_List)
"""
subclass_names = []
use_names = ['File_Unit_Number', 'Position_Spec_List']
@staticmethod
def match(string):
if string[:6].upper()!='REWIND': return
line = string[6:].lstrip()
if line.startswith('('):
if not line.endswith(')'): return
return None, Position_Spec_List(line[1:-1].strip())
return File_Unit_Number(line), None
def tostr(self):
if self.items[0] is not None:
assert self.items[1] is None, repr(self.items)
return 'REWIND %s' % (self.items[0])
return 'REWIND(%s)' % (self.items[1])
class Position_Spec(KeywordValueBase): # R926
"""
<position-spec> = [ UNIT = ] <file-unit-number>
| IOMSG = <iomsg-variable>
| IOSTAT = <scalar-int-variable>
| ERR = <label>
"""
subclass_names = []
use_names = ['File_Unit_Number', 'Iomsg_Variable', 'Scalar_Int_Variable', 'Label']
def match(string):
for (k,v) in [\
('ERR', Label),
('IOSTAT', Scalar_Int_Variable),
('IOMSG', Iomsg_Variable),
('UNIT', File_Unit_Number),
]:
try:
obj = KeywordValueBase.match(k, v, string, upper_lhs = True)
except NoMatchError:
obj = None
if obj is not None: return obj
return 'UNIT', File_Unit_Number(string)
match = staticmethod(match)
class Flush_Stmt(StmtBase): # R927
"""
:F03R:`927`::
<flush-stmt> = FLUSH <file-unit-number>
| FLUSH ( <position-spec-list> )
Attributes
----------
items : (File_Unit_Number, Position_Spec_List)
"""
subclass_names = []
use_names = ['File_Unit_Number', 'Position_Spec_List']
@staticmethod
def match(string):
if string[:5].upper()!='FLUSH': return
line = string[5:].lstrip()
if line.startswith('('):
if not line.endswith(')'): return
return None, Position_Spec_List(line[1:-1].strip())
return File_Unit_Number(line), None
def tostr(self):
if self.items[0] is not None:
assert self.items[1] is None, repr(self.items)
return 'FLUSH %s' % (self.items[0])
return 'FLUSH(%s)' % (self.items[1])
class Flush_Spec(KeywordValueBase): # R928
"""
:F03R:`928`::
<flush-spec> = [ UNIT = ] <file-unit-number>
| IOMSG = <iomsg-variable>
| IOSTAT = <scalar-int-variable>
| ERR = <label>
Attributes
----------
items : ({'UNIT', 'IOMSG', 'IOSTAT', 'ERR'}, {File_Unit_Number, Iomsg_Variable, Scalar_Int_Variable, Label})
"""
subclass_names = []
use_names = ['File_Unit_Number', 'Iomsg_Variable', 'Scalar_Int_Variable', 'Label']
def match(string):
for (k,v) in [\
('ERR', Label),
('IOSTAT', Scalar_Int_Variable),
('IOMSG', Iomsg_Variable),
('UNIT', File_Unit_Number),
]:
try:
obj = KeywordValueBase.match(k, v, string, upper_lhs = True)
except NoMatchError:
obj = None
if obj is not None: return obj
return 'UNIT', File_Unit_Number(string)
match = staticmethod(match)
class Inquire_Stmt(StmtBase): # R929
"""
:F03R:`929`::
<inquire-stmt> = INQUIRE ( <inquire-spec-list> )
| INQUIRE ( IOLENGTH = <scalar-int-variable> ) <output-item-list>
Attributes
----------
items : (Inquire_Spec_List, Scalar_Int_Variable, Output_Item_List)
"""
subclass_names = []
use_names = ['Inquire_Spec_List', 'Scalar_Int_Variable', 'Output_Item_List']
@staticmethod
def match(string):
if string[:7].upper()!='INQUIRE': return
line = string[7:].lstrip()
if not line.startswith('('): return
if line.endswith(')'):
return Inquire_Spec_List(line[1:-1].strip()), None, None
line, repmap = string_replace_map(line)
i = line.find(')')
if i==-1: return
l = repmap(line[1:i])
if l[:8].upper()!='IOLENGTH': return
l = l[8:].lstrip()
if not l.startswith('='): return
l = l[1:].lstrip()
return None, Scalar_Int_Variable(l), Output_Item_List(repmap(line[i+1:].lstrip()))
def tostr(self):
if self.items[0] is None:
assert None not in self.items[1:],repr(self.items)
return 'INQUIRE(IOLENGTH=%s) %s' % (self.items[1:])
return 'INQUIRE(%s)' % (self.items[0])
class Inquire_Spec(KeywordValueBase): # R930
"""
:F03R:`930`::
<inquire-spec> = [ UNIT = ] <file-unit-number>
| FILE = <file-name-expr>
| ACCESS = <scalar-default-char-variable>
| ACTION = <scalar-default-char-variable>
| ASYNCHRONOUS = <scalar-default-char-variable>
| BLANK = <scalar-default-char-variable>
| DECIMAL = <scalar-default-char-variable>
| DELIM = <scalar-default-char-variable>
| DIRECT = <scalar-default-char-variable>
| ENCODING = <scalar-default-char-variable>
| ERR = <label>
| EXIST = <scalar-default-logical-variable>
| FORM = <scalar-default-char-variable>
| FORMATTED = <scalar-default-char-variable>
| ID = <scalar-int-expr>
| IOMSG = <iomsg-variable>
| IOSTAT = <scalar-int-variable>
| NAME = <scalar-default-char-variable>
| NAMED = <scalar-default-logical-variable>
| NEXTREC = <scalar-int-variable>
| NUMBER = <scalar-int-variable>
| OPENED = <scalar-default-logical-variable>
| PAD = <scalar-default-char-variable>
| PENDING = <scalar-default-logical-variable>
| POS = <scalar-int-variable>
| POSITION = <scalar-default-char-variable>
| READ = <scalar-default-char-variable>
| READWRITE = <scalar-default-char-variable>
| RECL = <scalar-int-variable>
| ROUND = <scalar-default-char-variable>
| SEQUENTIAL = <scalar-default-char-variable>
| SIGN = <scalar-default-char-variable>
| SIZE = <scalar-int-variable>
| STREAM = <scalar-default-char-variable>
| UNFORMATTED = <scalar-default-char-variable>
| WRITE = <scalar-default-char-variable>
Attributes
----------
items : (str, instance)
"""
subclass_names = []
use_names = ['File_Unit_Number', 'File_Name_Expr', 'Scalar_Default_Char_Variable',
'Scalar_Default_Logical_Variable', 'Scalar_Int_Variable', 'Scalar_Int_Expr',
'Label', 'Iomsg_Variable']
def match(string):
for (k,v) in [\
(['ACCESS','ACTION','ASYNCHRONOUS', 'BLANK', 'DECIMAL', 'DELIM',
'DIRECT','ENCODING','FORM','NAME','PAD', 'POSITION','READ','READWRITE',
'ROUND', 'SEQUENTIAL', 'SIGN','STREAM','UNFORMATTED','WRITE'],
Scalar_Default_Char_Variable),
('ERR', Label),
(['EXIST','NAMED','PENDING'], Scalar_Default_Logical_Variable),
('ID', Scalar_Int_Expr),
(['IOSTAT','NEXTREC','NUMBER','POS','RECL','SIZE'], Scalar_Int_Variable),
('IOMSG', Iomsg_Variable),
('FILE', File_Name_Expr),
('UNIT', File_Unit_Number),
]:
try:
obj = KeywordValueBase.match(k, v, string, upper_lhs = True)
except NoMatchError:
obj = None
if obj is not None: return obj
return 'UNIT', File_Unit_Number(string)
match = staticmethod(match)
###############################################################################
############################### SECTION 10 ####################################
###############################################################################
class Format_Stmt(StmtBase, WORDClsBase): # R1001
"""
<format-stmt> = FORMAT <format-specification>
"""
subclass_names = []
use_names = ['Format_Specification']
@staticmethod
def match(string):
return WORDClsBase.match('FORMAT', Format_Specification, string, require_cls=True)
class Format_Specification(BracketBase): # R1002
"""
<format-specification> = ( [ <format-item-list> ] )
"""
subclass_names = []
use_names = ['Format_Item_List']
@staticmethod
def match(string):
return BracketBase.match('()', Format_Item_List, string, require_cls=False)
class Format_Item_C1002(Base): # C1002
"""
::
<format-item-c1002> = <k>P [,] (F|D)<w>.<d> | (E|EN|ES|G)<w>.<d>[E<e>]
| [<r>]/ [,] <format-item>
| : [,] <format-item>
| <format-item> [,] / [[,] <format-item>]
| <format-item> [,] : [[,] <format-item>]
Attributes
----------
items : (Format_Item, Format_Item)
"""
subclass_names = []
use_names = ['K', 'W', 'D', 'E', 'Format_Item', 'R']
@staticmethod
def match(string):
if len(string)<=1: return
if string[0] in ':/':
return Control_Edit_Desc(string[0]), Format_Item(string[1:].lstrip())
if string[-1] in ':/':
return Format_Item(string[:-1].rstrip()), Control_Edit_Desc(string[-1])
line, repmap = string_replace_map(string)
i = 0
while i<len(line) and line[i].isdigit():
i += 1
if i:
p = line[i].upper()
if p in '/P':
return Control_Edit_Desc(repmap(line[:i+1])), Format_Item(repmap(line[i+1:].lstrip()))
for p in '/:':
if p in line:
l,r = line.split(p,1)
return Format_Item(repmap(l.rstrip())), Format_Item(p+repmap(r.lstrip()))
def tostr(self):
return '%s, %s' % (self.items)
class Format_Item(Base): # R1003
"""
<format-item> = [ <r> ] <data-edit-desc>
| <control-edit-desc>
| <char-string-edit-desc>
| [ <r> ] ( <format-item-list> )
| <format-item-c1002>
"""
subclass_names = ['Control_Edit_Desc', 'Char_String_Edit_Desc', 'Format_Item_C1002']
use_names = ['R', 'Format_Item_List','Data_Edit_Desc']
@staticmethod
def match(string):
i = 0
while i < len(string) and string[i].isdigit():
i += 1
rpart = None
if i:
rpart = R(string[:i])
string = string[i:].lstrip()
if not string:
return
if string[0]=='(' and string[-1]==')':
rest = Format_Item_List(string[1:-1].strip())
else:
rest = Data_Edit_Desc(string)
return rpart, rest
def tostr(self):
rpart, rest = self.items
if isinstance(rest, (Data_Edit_Desc, Data_Edit_Desc_C1002)):
if rpart is not None:
return '%s%s' % (rpart, rest)
return '%s' % (rest)
if rpart is not None:
return '%s(%s)' % (rpart, rest)
return '(%s)' % (rest)
class R(Base): # R1004
"""
::
<r> = <int-literal-constant>
Notes
-----
C1003, C1004: <r> shall be positive and without kind parameter specified.
"""
subclass_names = ['Digit_String']
class Data_Edit_Desc_C1002(Base):
"""
::
<data-edit-desc> = F <w> . <d>
| E <w> . <d> [ E <e> ]
| EN <w> . <d> [ E <e> ]
| ES <w> . <d> [ E <e>]
| G <w> . <d> [ E <e> ]
| D <w> . <d>
"""
subclass_names = []
use_names = ['W', 'D', 'E']
@staticmethod
def match(string):
c = string[0].upper()
if c in ['D']:
line = string[1:].lstrip()
if '.' in line:
i1,i2 = line.split('.',1)
i1 = i1.rstrip()
i2 = i2.lstrip()
                return c, W(i1), D(i2), None
return c,W(line), None, None
if c in ['E','F','G']:
line = string[1:].lstrip()
if line.count('.')==1:
i1,i2 = line.split('.',1)
i1 = i1.rstrip()
i2 = i2.lstrip()
return c, W(i1), D(i2), None
elif line.count('.')==2:
i1,i2,i3 = line.split('.',2)
i1 = i1.rstrip()
i2 = i2.lstrip()
i3 = i3.lstrip()
return c, W(i1), D(i2), E(i3)
else:
return
c = string[:2].upper()
if len(c)!=2: return
if c in ['EN','ES']:
line = string[2:].lstrip()
if line.count('.')==1:
i1,i2 = line.split('.',1)
i1 = i1.rstrip()
i2 = i2.lstrip()
return c, W(i1), D(i2), None
elif line.count('.')==2:
i1,i2,i3 = line.split('.',2)
i1 = i1.rstrip()
i2 = i2.lstrip()
i3 = i3.lstrip()
return c, W(i1), D(i2), E(i3)
else:
return
return
def tostr(self):
c = self.items[0]
if c in ['F', 'D']:
if self.items[2] is None:
return '%s%s' % (c, self.items[1])
return '%s%s.%s' % (c, self.items[1], self.items[2])
if c in ['E', 'EN', 'ES', 'G']:
if self.items[3] is None:
return '%s%s.%s' % (c, self.items[1], self.items[2])
return '%s%s.%sE%s' % (c, self.items[1], self.items[2], self.items[3])
        raise NotImplementedError(repr(c))
class Data_Edit_Desc(Base): # R1005
"""
::
<data-edit-desc> = I <w> [ . <m> ]
| B <w> [ . <m> ]
| O <w> [ . <m> ]
| Z <w> [ . <m> ]
| L <w>
| A [ <w> ]
| DT [ <char-literal-constant> ] [ ( <v-list> ) ]
| <data-edit-desc-c1002>
"""
subclass_names = ['Data_Edit_Desc_C1002']
use_names = ['W', 'M', 'Char_Literal_Constant', 'V_List']
@staticmethod
def match(string):
c = string[0].upper()
if c in ['I','B','O','Z']:
line = string[1:].lstrip()
if '.' in line:
i1,i2 = line.split('.',1)
i1 = i1.rstrip()
i2 = i2.lstrip()
                return c, W(i1), M(i2), None
return c,W(line), None, None
if c=='L':
line = string[1:].lstrip()
if not line: return
return c, W(line), None, None
if c=='A':
line = string[1:].lstrip()
if not line:
return c, None, None, None
return c, W(line), None, None
c = string[:2].upper()
if len(c)!=2: return
if c=='DT':
line = string[2:].lstrip()
if not line:
return c, None, None, None
lst = None
if line.endswith(')'):
i = line.rfind('(')
if i==-1: return
l = line[i+1:-1].strip()
if not l: return
lst = V_List(l)
line = line[:i].rstrip()
if not line:
return c, None, lst, None
return c, Char_Literal_Constant(line), lst, None
return
def tostr(self):
c = self.items[0]
if c in ['I', 'B', 'O', 'Z','A', 'L']:
if self.items[2] is None:
if self.items[1] is None:
return c
return '%s%s' % (c, self.items[1])
return '%s%s.%s' % (c, self.items[1], self.items[2])
if c=='DT':
if self.items[1] is None:
if self.items[2] is None:
return c
else:
return '%s(%s)' % (c, self.items[2])
else:
if self.items[2] is None:
return '%s%s' % (c, self.items[1])
else:
return '%s%s(%s)' % (c, self.items[1], self.items[2])
        raise NotImplementedError(repr(c))
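# Illustrative sketch: 'I4.2' is handled by this class directly, while forms
# like 'F10.3' fall through to Data_Edit_Desc_C1002 via subclass_names:
#
#     assert str(Data_Edit_Desc('I4.2')) == 'I4.2'
#     assert str(Data_Edit_Desc('F10.3')) == 'F10.3'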
class W(Base): # R1006
"""
::
<w> = <int-literal-constant> == <digit-string>
Notes
-----
    C1006, C1007: <w> is zero or positive and without kind parameters.
"""
subclass_names = ['Digit_String']
class M(Base): # R1007
"""
::
<m> = <int-literal-constant>
Notes
-----
    C1007: <m> is without kind parameters.
"""
subclass_names = ['Int_Literal_Constant']
class D(Base): # R1008
"""
::
<d> = <int-literal-constant>
Notes
-----
C1007: <d> is without kind parameters.
"""
subclass_names = ['Int_Literal_Constant']
class E(Base): # R1009
"""
::
<e> = <int-literal-constant>
Notes
-----
    C1005, C1007: <e> is positive and without kind parameters.
"""
subclass_names = ['Digit_String']
class V(Base): # R1010
"""
::
<v> = <signed-int-literal-constant>
Notes
-----
    C1007: <v> is without kind parameters.
"""
subclass_names = ['Signed_Int_Literal_Constant']
class Control_Edit_Desc(Base): # R1011
"""
::
<control-edit-desc> = <position-edit-desc>
| [ <r> ] /
| :
| <sign-edit-desc>
| <k> P
| <blank-interp-edit-desc>
| <round-edit-desc>
| <decimal-edit-desc>
| $
Note that `$` is not in Fortran 90 or newer standards.
Attributes
----------
items : ({R, K, None}, {'/', 'P', ':'})
"""
subclass_names = ['Position_Edit_Desc', 'Sign_Edit_Desc', 'Blank_Interp_Edit_Desc', 'Round_Edit_Desc',
'Decimal_Edit_Desc']
use_names = ['R', 'K']
@staticmethod
def match(string):
if len(string)==1 and string in '/:$':
if string=='$':
logger.debug('non-standard <control-edit-desc>: %r' % (string))
return None, string
if string[-1]=='/':
return R(string[:-1].rstrip()), '/'
if string[-1].upper()=='P':
return K(string[:-1].rstrip()), 'P'
def tostr(self):
if self.items[0] is not None:
return '%s%s' % (self.items)
return '%s' % (self.items[1])
class K(Base): # R1012
"""
::
<k> = <signed-int-literal-constant>
Notes
-----
C1009: <k> is without kind parameters.
"""
subclass_names = ['Signed_Int_Literal_Constant']
class Position_Edit_Desc(Base): # R1013
"""
<position-edit-desc> = T <n>
| TL <n>
| TR <n>
| <n> X
Attributes
----------
items : ({'T', 'TL', 'TR', N}, {N, 'X'})
"""
subclass_names = []
use_names = ['N']
@staticmethod
def match(string):
if len(string)<=1:
return
if string[0].upper()=='T':
if string[1].upper() in 'LR':
start = string[:2]
rest = string[2:].lstrip()
else:
                start = string[0]
                rest = string[1:].lstrip()
return start.upper(), N(rest)
if string[-1].upper()=='X':
return N(string[:-1].rstrip()), 'X'
def tostr(self):
return '%s%s' % (self.items)
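# Illustrative sketch:
#
#     assert str(Position_Edit_Desc('T10')) == 'T10'
#     assert str(Position_Edit_Desc('TL5')) == 'TL5'
#     assert str(Position_Edit_Desc('3X')) == '3X'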
class N(Base): # R1014
"""
::
<n> = <int-literal-constant> == <digit-string>
C1010, C1011: <n> is positive and without kind parameter.
"""
subclass_names = ['Digit_String']
class Sign_Edit_Desc(STRINGBase): # R1015
"""
<sign-edit-desc> = SS
| SP
| S
"""
subclass_names = []
def match(string): return STRINGBase.match(['SS','SP','S'], string)
match = staticmethod(match)
class Blank_Interp_Edit_Desc(STRINGBase): # R1016
"""
<blank-interp-edit-desc> = BN
| BZ
"""
subclass_names = []
def match(string): return STRINGBase.match(['BN','BZ',], string)
match = staticmethod(match)
class Round_Edit_Desc(STRINGBase): # R1017
"""
<round-edit-desc> = RU
| RD
| RZ
| RN
| RC
| RP
"""
subclass_names = []
def match(string): return STRINGBase.match(['RU','RD','RZ','RN','RC','RP'], string)
match = staticmethod(match)
class Decimal_Edit_Desc(STRINGBase): # R1018
"""
<decimal-edit-desc> = DC
| DP
"""
subclass_names = []
def match(string): return STRINGBase.match(['DC','DP'], string)
match = staticmethod(match)
class Char_String_Edit_Desc(Base): # R1019
"""
<char-string-edit-desc> = <char-literal-constant>
"""
subclass_names = ['Char_Literal_Constant']
###############################################################################
############################### SECTION 11 ####################################
###############################################################################
class Main_Program(BlockBase): # R1101
"""
<main-program> = <program-stmt>
[ <specification-part> ]
[ <execution-part> ]
[ <internal-subprogram-part> ]
<end-program-stmt>
"""
subclass_names = []
use_names = ['Program_Stmt', 'Specification_Part', 'Execution_Part', 'Internal_Subprogram_Part',
'End_Program_Stmt']
@staticmethod
def match(reader):
return BlockBase.match(Program_Stmt, [Specification_Part, Execution_Part, Internal_Subprogram_Part], End_Program_Stmt, reader)
class Main_Program0(BlockBase):
"""
<main-program> =
[ <specification-part> ]
[ <execution-part> ]
[ <internal-subprogram-part> ]
<end-program-stmt>
"""
subclass_names = []
use_names = ['Program_Stmt', 'Specification_Part', 'Execution_Part', 'Internal_Subprogram_Part',
'End_Program_Stmt']
@staticmethod
def match(reader):
return BlockBase.match(None, [Specification_Part, Execution_Part, Internal_Subprogram_Part], End_Program_Stmt, reader)
class Program_Stmt(StmtBase, WORDClsBase): # R1102
"""
<program-stmt> = PROGRAM <program-name>
"""
subclass_names = []
use_names = ['Program_Name']
def match(string): return WORDClsBase.match('PROGRAM',Program_Name, string, require_cls = True)
match = staticmethod(match)
def get_name(self): return self.items[1]
class End_Program_Stmt(EndStmtBase): # R1103
"""
<end-program-stmt> = END [ PROGRAM [ <program-name> ] ]
"""
subclass_names = []
use_names = ['Program_Name']
@staticmethod
def match(string): return EndStmtBase.match('PROGRAM',Program_Name, string)
class Module(BlockBase): # R1104
"""
<module> = <module-stmt>
[ <specification-part> ]
[ <module-subprogram-part> ]
<end-module-stmt>
"""
subclass_names = []
use_names = ['Module_Stmt', 'Specification_Part', 'Module_Subprogram_Part', 'End_Module_Stmt']
@staticmethod
def match(reader):
return BlockBase.match(Module_Stmt, [Specification_Part, Module_Subprogram_Part], End_Module_Stmt, reader)
class Module_Stmt(StmtBase, WORDClsBase): # R1105
"""
<module-stmt> = MODULE <module-name>
"""
subclass_names = []
use_names = ['Module_Name']
@staticmethod
def match(string):
return WORDClsBase.match('MODULE',Module_Name, string, require_cls = True)
def get_name(self): return self.items[1]
class End_Module_Stmt(EndStmtBase): # R1106
"""
<end-module-stmt> = END [ MODULE [ <module-name> ] ]
"""
subclass_names = []
use_names = ['Module_Name']
@staticmethod
def match(string):
return EndStmtBase.match('MODULE',Module_Name, string)
class Module_Subprogram_Part(BlockBase): # R1107
"""
<module-subprogram-part> = <contains-stmt>
<module-subprogram>
[ <module-subprogram> ]...
"""
subclass_names = []
use_names = ['Contains_Stmt', 'Module_Subprogram']
@staticmethod
def match(reader):
return BlockBase.match(Contains_Stmt, [Module_Subprogram], None, reader)
class Module_Subprogram(Base): # R1108
"""
<module-subprogram> = <function-subprogram>
| <subroutine-subprogram>
"""
subclass_names = ['Function_Subprogram', 'Subroutine_Subprogram']
class Use_Stmt(StmtBase): # R1109
"""
<use-stmt> = USE [ [ , <module-nature> ] :: ] <module-name> [ , <rename-list> ]
| USE [ [ , <module-nature> ] :: ] <module-name> , ONLY: [ <only-list> ]
"""
subclass_names = []
use_names = ['Module_Nature', 'Module_Name', 'Rename_List', 'Only_List']
def match(string):
if string[:3].upper() != 'USE': return
line = string[3:]
if not line: return
        if line[0].isalnum(): return
line = line.lstrip()
i = line.find('::')
nature = None
if i!=-1:
if line.startswith(','):
l = line[1:i].strip()
if not l: return
nature = Module_Nature(l)
line = line[i+2:].lstrip()
if not line: return
i = line.find(',')
if i==-1: return nature, Module_Name(line), '', None
name = line[:i].rstrip()
if not name: return
name = Module_Name(name)
line = line[i+1:].lstrip()
if not line: return
if line[:5].upper()=='ONLY:':
line = line[5:].lstrip()
if not line:
return nature, name, ', ONLY:', None
return nature, name, ', ONLY:', Only_List(line)
return nature, name, ',', Rename_List(line)
match = staticmethod(match)
def tostr(self):
s = 'USE'
if self.items[0] is not None:
s += ', %s' % (self.items[0])
s += ' :: %s%s' % (self.items[1], self.items[2])
if self.items[3] is not None:
s += ' %s' % (self.items[3])
return s
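# Illustrative sketch: tostr() always emits the '::' form, and keyword parts
# are upper-cased by their STRINGBase matchers (assumed case-insensitive, as
# elsewhere in this module):
#
#     u = Use_Stmt('use, intrinsic :: iso_c_binding, only: c_int')
#     assert str(u) == 'USE, INTRINSIC :: iso_c_binding, ONLY: c_int'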
class Module_Nature(STRINGBase): # R1110
"""
<module-nature> = INTRINSIC
| NON_INTRINSIC
"""
subclass_names = []
def match(string): return STRINGBase.match(['INTRINSIC','NON_INTRINSIC'], string)
match = staticmethod(match)
class Rename(Base): # R1111
"""
<rename> = <local-name> => <use-name>
| OPERATOR(<local-defined-operator>) => OPERATOR(<use-defined-operator>)
"""
subclass_names = []
use_names = ['Local_Name', 'Use_Name', 'Local_Defined_Operator', 'Use_Defined_Operator']
def match(string):
s = string.split('=>', 1)
if len(s) != 2: return
lhs, rhs = s[0].rstrip(), s[1].lstrip()
if not lhs or not rhs: return
if lhs[:8].upper()=='OPERATOR' and rhs[:8].upper()=='OPERATOR':
l = lhs[8:].lstrip()
r = rhs[8:].lstrip()
if l and r and l[0]+l[-1]=='()':
if r[0]+r[-1] != '()': return
l = l[1:-1].strip()
r = r[1:-1].strip()
if not l or not r: return
return 'OPERATOR', Local_Defined_Operator(l), Use_Defined_Operator(r)
return None, Local_Name(lhs), Use_Name(rhs)
match = staticmethod(match)
def tostr(self):
if not self.items[0]:
return '%s => %s' % self.items[1:]
return '%s(%s) => %s(%s)' % (self.items[0], self.items[1],self.items[0], self.items[2])
class Only(Base): # R1112
"""
<only> = <generic-spec>
| <only-use-name>
| <rename>
"""
subclass_names = ['Generic_Spec', 'Only_Use_Name', 'Rename']
class Only_Use_Name(Base): # R1113
"""
<only-use-name> = <name>
"""
subclass_names = ['Name']
class Local_Defined_Operator(Base): # R1114
"""
<local-defined-operator> = <defined-unary-op>
| <defined-binary-op>
"""
subclass_names = ['Defined_Unary_Op', 'Defined_Binary_Op']
class Use_Defined_Operator(Base): # R1115
"""
<use-defined-operator> = <defined-unary-op>
| <defined-binary-op>
"""
subclass_names = ['Defined_Unary_Op', 'Defined_Binary_Op']
class Block_Data(BlockBase): # R1116
"""
::
<block-data> = <block-data-stmt>
[ <specification-part> ]
<end-block-data-stmt>
"""
subclass_names = []
use_names = ['Block_Data_Stmt', 'Specification_Part', 'End_Block_Data_Stmt']
@staticmethod
def match(reader):
return BlockBase.match(Block_Data_Stmt, [Specification_Part], End_Block_Data_Stmt, reader)
class Block_Data_Stmt(StmtBase): # R1117
"""
::
<block-data-stmt> = BLOCK DATA [ <block-data-name> ]
"""
subclass_names = []
use_names = ['Block_Data_Name']
@staticmethod
def match(string):
if string[:5].upper()!='BLOCK': return
line = string[5:].lstrip()
if line[:4].upper()!='DATA': return
line = line[4:].lstrip()
if not line: return None,
return Block_Data_Name(line),
def tostr(self):
if self.items[0] is None: return 'BLOCK DATA'
return 'BLOCK DATA %s' % self.items
def get_name(self):
return self.items[0]
class End_Block_Data_Stmt(EndStmtBase): # R1118
"""
::
<end-block-data-stmt> = END [ BLOCK DATA [ <block-data-name> ] ]
"""
subclass_names = []
use_names = ['Block_Data_Name']
@staticmethod
def match(string):
return EndStmtBase.match('BLOCK DATA',Block_Data_Name, string)
###############################################################################
############################### SECTION 12 ####################################
###############################################################################
class Interface_Block(BlockBase): # R1201
"""
::
<interface-block> = <interface-stmt>
[ <interface-specification> ]...
<end-interface-stmt>
"""
subclass_names = []
use_names = ['Interface_Stmt', 'Interface_Specification', 'End_Interface_Stmt']
@staticmethod
def match(reader):
return BlockBase.match(Interface_Stmt, [Interface_Specification], End_Interface_Stmt, reader)
class Interface_Specification(Base): # R1202
"""
<interface-specification> = <interface-body>
| <procedure-stmt>
"""
subclass_names = ['Interface_Body', 'Procedure_Stmt']
class Interface_Stmt(StmtBase): # R1203
"""
::
<interface-stmt> = INTERFACE [ <generic-spec> ]
| ABSTRACT INTERFACE
Attributes
----------
items : ({Generic_Spec, 'ABSTRACT'},)
"""
subclass_names = []
use_names = ['Generic_Spec']
@staticmethod
def match(string):
if string[:9].upper()=='INTERFACE':
line = string[9:].strip()
if not line:
return None,
return Generic_Spec(line),
if string[:8].upper()=='ABSTRACT':
line = string[8:].strip()
if line.upper()=='INTERFACE':
return 'ABSTRACT',
def tostr(self):
if self.items[0]=='ABSTRACT':
return 'ABSTRACT INTERFACE'
if self.items[0] is None:
return 'INTERFACE'
return 'INTERFACE %s' % (self.items[0])
class End_Interface_Stmt(EndStmtBase): # R1204
"""
::
<end-interface-stmt> = END INTERFACE [ <generic-spec> ]
Attributes
----------
items : (Generic_Spec, )
"""
subclass_names = []
use_names = ['Generic_Spec']
def match(string): return EndStmtBase.match('INTERFACE',Generic_Spec, string, require_stmt_type=True)
match = staticmethod(match)
class Function_Body(BlockBase):
"""
::
<function-body> = <function-stmt>
[ <specification-part> ]
<end-function-stmt>
"""
subclass_names = []
use_names = ['Function_Stmt', 'Specification_Part', 'End_Function_Stmt']
@staticmethod
def match(reader):
return BlockBase.match(Function_Stmt, [Specification_Part], End_Function_Stmt, reader)
class Subroutine_Body(BlockBase):
"""
::
<subroutine-body> = <subroutine-stmt>
[ <specification-part> ]
<end-subroutine-stmt>
"""
subclass_names = []
use_names = ['Subroutine_Stmt', 'Specification_Part', 'End_Subroutine_Stmt']
@staticmethod
def match(reader):
return BlockBase.match(Subroutine_Stmt, [Specification_Part], End_Subroutine_Stmt, reader)
class Interface_Body(Base): # R1205
"""
::
<interface-body> = <function-body> | <subroutine-body>
See also
--------
Function_Body, Subroutine_Body
"""
subclass_names = ['Function_Body', 'Subroutine_Body']
use_names = []
class Procedure_Stmt(StmtBase): # R1206
"""
::
<procedure-stmt> = [ MODULE ] PROCEDURE <procedure-name-list>
Attributes
----------
items : (Procedure_Name_List, )
"""
subclass_names = []
use_names = ['Procedure_Name_List']
@staticmethod
def match(string):
if string[:6].upper()=='MODULE':
line = string[6:].lstrip()
else:
line = string
if line[:9].upper()!='PROCEDURE':
return
line = line[9:].lstrip()
return Procedure_Name_List(line),
def tostr(self):
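        # Note: match() above does not record whether the optional MODULE
        # keyword was present, so the canonical form always includes it.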
return 'MODULE PROCEDURE %s' % (self.items[0])
class Generic_Spec(Base): # R1207
"""
::
<generic-spec> = <generic-name>
| OPERATOR ( <defined-operator> )
| ASSIGNMENT ( = )
| <dtio-generic-spec>
Attributes
----------
items : ({'OPERATOR', 'ASSIGNMENT'}, {Defined_Operator, '='})
"""
subclass_names = ['Generic_Name', 'Dtio_Generic_Spec']
use_names = ['Defined_Operator']
@staticmethod
def match(string):
if string[:8].upper()=='OPERATOR':
line = string[8:].lstrip()
if not line or line[0]!='(' or line[-1]!=')': return
return 'OPERATOR', Defined_Operator(line[1:-1].strip())
if string[:10].upper()=='ASSIGNMENT':
line = string[10:].lstrip()
if not line or line[0]!='(' or line[-1]!=')': return
if line[1:-1].strip()=='=':
return 'ASSIGNMENT', '='
def tostr(self):
return '%s(%s)' % (self.items)
class Dtio_Generic_Spec(Base): # R1208
"""
::
<dtio-generic-spec> = READ ( FORMATTED )
| READ ( UNFORMATTED )
| WRITE ( FORMATTED )
| WRITE ( UNFORMATTED )
Attributes
----------
items : (str, )
"""
subclass_names = []
@staticmethod
def match(string):
for rw in ['READ', 'WRITE']:
if string[:len(rw)].upper()==rw:
line = string[len(rw):].lstrip()
if not line: return
if line[0]!='(' or line[-1]!=')': return
line = line[1:-1].strip().upper()
if line in ['FORMATTED', 'UNFORMATTED']:
return '%s(%s)' % (rw, line),
def tostr(self):
return '%s' % (self.items[0])
class Import_Stmt(StmtBase, WORDClsBase): # R1209
"""
<import-stmt> = IMPORT [ :: ] <import-name-list>
"""
subclass_names = []
use_names = ['Import_Name_List']
@staticmethod
def match(string):
return WORDClsBase.match('IMPORT',Import_Name_List,string,check_colons=True, require_cls=True)
tostr = WORDClsBase.tostr_a
class External_Stmt(StmtBase, WORDClsBase): # R1210
"""
<external-stmt> = EXTERNAL [ :: ] <external-name-list>
"""
subclass_names = []
use_names = ['External_Name_List']
def match(string): return WORDClsBase.match('EXTERNAL',External_Name_List,string,check_colons=True, require_cls=True)
match = staticmethod(match)
tostr = WORDClsBase.tostr_a
class Procedure_Declaration_Stmt(StmtBase): # R1211
"""
::
<procedure-declaration-stmt> = PROCEDURE ( [ <proc-interface> ] ) [ [ , <proc-attr-spec> ]... :: ] <proc-decl-list>
Attributes
----------
items : (Proc_Interface, Proc_Attr_Spec_List, Proc_Decl_List)
"""
subclass_names = []
use_names = ['Proc_Interface', 'Proc_Attr_Spec_List', 'Proc_Decl_List']
@staticmethod
def match(string):
if string[:9].upper()!='PROCEDURE':
return
line = string[9:].lstrip()
if not line.startswith('('): return
line, repmap = string_replace_map(line)
i = line.find(')')
if i==-1: return
l = line[1:i].strip()
proc_interface = Proc_Interface(repmap(l)) if l else None
line = line[i+1:].lstrip()
i = line.find('::')
proc_attr_spec_list = None
if i!=-1:
l = line[:i].rstrip()
if l and l[0]==',':
proc_attr_spec_list = Proc_Attr_Spec_List(repmap(l[1:].lstrip()))
line = line[i+2:].lstrip()
return proc_interface, proc_attr_spec_list, Proc_Decl_List(repmap(line))
def tostr(self):
r = 'PROCEDURE'
if self.items[0] is not None:
r += '(%s)' % (self.items[0])
else:
r += '()'
if self.items[1] is not None:
r += ', %s ::' % (self.items[1])
return '%s %s' % (r, self.items[2])
class Proc_Interface(Base): # R1212
"""
<proc-interface> = <interface-name>
| <declaration-type-spec>
"""
subclass_names = ['Interface_Name', 'Declaration_Type_Spec']
class Proc_Attr_Spec(Base): # R1213
"""
<proc-attr-spec> = <access-spec>
| <proc-language-binding-spec>
| INTENT ( <intent-spec> )
| OPTIONAL
| SAVE
Attributes
----------
items : ({'INTENT', 'OPTIONAL', 'SAVE'}, Intent_Spec)
"""
subclass_names = ['Access_Spec', 'Proc_Language_Binding_Spec']
use_names = ['Intent_Spec']
@staticmethod
def match(string):
if string[:6].upper()=='INTENT':
line = string[6:].lstrip()
if not line: return
if line[0]!='(' or line[-1]!=')': return
return 'INTENT', Intent_Spec(line[1:-1].strip())
if len(string)==8 and string.upper()=='OPTIONAL':
return 'OPTIONAL', None
if len(string)==4 and string.upper()=='SAVE':
return 'SAVE', None
def tostr(self):
if self.items[1] is None:
return '%s' % (self.items[0])
return '%s(%s)' % (self.items)
class Proc_Decl(BinaryOpBase): # R1214
"""
::
<proc-decl> = <procedure-entity-name> [ => <null-init> ]
Attributes
----------
items : (Procedure_Entity_Name, Null_Init)
"""
subclass_names = ['Procedure_Entity_Name']
use_names = ['Null_Init']
def match(string): return BinaryOpBase.match(Procedure_Entity_Name,'=>', Null_Init, string)
match = staticmethod(match)
class Interface_Name(Base): # R1215
"""
<interface-name> = <name>
"""
subclass_names = ['Name']
class Intrinsic_Stmt(StmtBase, WORDClsBase): # R1216
"""
<intrinsic-stmt> = INTRINSIC [ :: ] <intrinsic-procedure-name-list>
"""
subclass_names = []
use_names = ['Intrinsic_Procedure_Name_List']
def match(string):
return WORDClsBase.match('INTRINSIC',Intrinsic_Procedure_Name_List,string,check_colons=True, require_cls=True)
match = staticmethod(match)
tostr = WORDClsBase.tostr_a
class Function_Reference(CallBase): # R1217
"""
<function-reference> = <procedure-designator> ( [ <actual-arg-spec-list> ] )
"""
subclass_names = []
use_names = ['Procedure_Designator','Actual_Arg_Spec_List']
def match(string):
return CallBase.match(Procedure_Designator, Actual_Arg_Spec_List, string)
match = staticmethod(match)
class Call_Stmt(StmtBase): # R1218
"""
<call-stmt> = CALL <procedure-designator> [ ( [ <actual-arg-spec-list> ] ) ]
Attributes
----------
items : (Procedure_Designator, Actual_Arg_Spec_List)
"""
subclass_names = []
use_names = ['Procedure_Designator', 'Actual_Arg_Spec_List']
def match(string):
if string[:4].upper()!='CALL': return
line, repmap = string_replace_map(string[4:].lstrip())
if line.endswith(')'):
i = line.rfind('(')
if i==-1: return
args = repmap(line[i+1:-1].strip())
if args:
return Procedure_Designator(repmap(line[:i].rstrip())),Actual_Arg_Spec_List(args)
return Procedure_Designator(repmap(line[:i].rstrip())),None
return Procedure_Designator(string[4:].lstrip()),None
match = staticmethod(match)
def tostr(self):
if self.items[1] is None: return 'CALL %s' % (self.items[0])
return 'CALL %s(%s)' % self.items
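# Illustrative sketch:
#
#     assert str(Call_Stmt('call sub(a, b)')) == 'CALL sub(a, b)'
#     assert str(Call_Stmt('CALL sub')) == 'CALL sub'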
class Procedure_Designator(BinaryOpBase): # R1219
"""
<procedure-designator> = <procedure-name>
| <proc-component-ref>
| <data-ref> % <binding-name>
"""
subclass_names = ['Procedure_Name','Proc_Component_Ref']
use_names = ['Data_Ref','Binding_Name']
def match(string):
return BinaryOpBase.match(\
Data_Ref, pattern.percent_op.named(), Binding_Name, string)
match = staticmethod(match)
class Actual_Arg_Spec(KeywordValueBase): # R1220
"""
<actual-arg-spec> = [ <keyword> = ] <actual-arg>
"""
subclass_names = ['Actual_Arg']
use_names = ['Keyword']
@staticmethod
def match(string):
return KeywordValueBase.match(Keyword, Actual_Arg, string)
class Actual_Arg(Base): # R1221
"""
<actual-arg> = <expr>
| <variable>
| <procedure-name>
| <proc-component-ref>
| <alt-return-spec>
"""
subclass_names = ['Procedure_Name','Proc_Component_Ref','Alt_Return_Spec', 'Variable', 'Expr']
class Alt_Return_Spec(Base): # R1222
"""
<alt-return-spec> = * <label>
"""
subclass_names = []
use_names = ['Label']
def match(string):
if not string.startswith('*'): return
line = string[1:].lstrip()
if not line: return
return Label(line),
match = staticmethod(match)
def tostr(self): return '*%s' % (self.items[0])
class Function_Subprogram(BlockBase): # R1223
"""
<function-subprogram> = <function-stmt>
[ <specification-part> ]
[ <execution-part> ]
[ <internal-subprogram-part> ]
<end-function-stmt>
"""
subclass_names = []
use_names = ['Function_Stmt', 'Specification_Part', 'Execution_Part',
'Internal_Subprogram_Part', 'End_Function_Stmt']
@staticmethod
def match(reader):
return BlockBase.match(Function_Stmt, [Specification_Part, Execution_Part, Internal_Subprogram_Part], End_Function_Stmt, reader)
class Function_Stmt(StmtBase): # R1224
"""
<function-stmt> = [ <prefix> ] FUNCTION <function-name> ( [ <dummy-arg-name-list> ] ) [ <suffix> ]
"""
subclass_names = []
use_names = ['Prefix','Function_Name','Dummy_Arg_Name_List', 'Suffix']
@staticmethod
def match(string):
line, repmap = string_replace_map(string)
m = pattern.function.search(line)
if m is None:
return
prefix = line[:m.start()].rstrip() or None
if prefix is not None:
prefix = Prefix(repmap(prefix))
line = line[m.end():].lstrip()
m = pattern.name.match(line)
if m is None:
return
name = Function_Name(m.group())
line = line[m.end():].lstrip()
if not line.startswith('('):
return
i = line.find(')')
if i==-1:
return
dummy_args = line[1:i].strip() or None
if dummy_args is not None:
dummy_args = Dummy_Arg_List(repmap(dummy_args))
line = line[i+1:].lstrip()
suffix = None
if line:
suffix = Suffix(repmap(line))
return prefix, name, dummy_args, suffix
def tostr(self):
prefix, name, dummy_args, suffix = self.items
if prefix is not None:
s = '%s FUNCTION %s' % (prefix, name)
else:
s = 'FUNCTION %s' % (name)
if dummy_args is not None:
s += '(%s)' % (dummy_args)
else:
s += '()'
if suffix is not None:
s += ' %s' % (suffix)
return s
class Proc_Language_Binding_Spec(Base): #1225
"""
<proc-language-binding-spec> = <language-binding-spec>
"""
subclass_names = ['Language_Binding_Spec']
class Dummy_Arg_Name(Base): # R1226
"""
<dummy-arg-name> = <name>
"""
subclass_names = ['Name']
class Prefix(SequenceBase): # R1227
"""
<prefix> = <prefix-spec> [ <prefix-spec> ]..
"""
subclass_names = ['Prefix_Spec']
_separator = (' ',re.compile(r'\s+(?=[a-z_])',re.I))
def match(string): return SequenceBase.match(Prefix._separator, Prefix_Spec, string)
match = staticmethod(match)
class Prefix_Spec(STRINGBase): # R1228
"""
<prefix-spec> = <declaration-type-spec>
| RECURSIVE
| PURE
| ELEMENTAL
"""
subclass_names = ['Declaration_Type_Spec']
def match(string):
return STRINGBase.match(['RECURSIVE', 'PURE', 'ELEMENTAL'], string)
match = staticmethod(match)
class Suffix(Base): # R1229
"""
<suffix> = <proc-language-binding-spec> [ RESULT ( <result-name> ) ]
| RESULT ( <result-name> ) [ <proc-language-binding-spec> ]
"""
subclass_names = ['Proc_Language_Binding_Spec']
use_names = ['Result_Name']
def match(string):
if string[:6].upper()=='RESULT':
line = string[6:].lstrip()
if not line.startswith('('): return
i = line.find(')')
if i==-1: return
name = line[1:i].strip()
if not name: return
line = line[i+1:].lstrip()
if line: return Result_Name(name), Proc_Language_Binding_Spec(line)
return Result_Name(name), None
if not string.endswith(')'): return
i = string.rfind('(')
if i==-1: return
name = string[i+1:-1].strip()
if not name: return
line = string[:i].rstrip()
if line[-6:].upper()!='RESULT': return
line = line[:-6].rstrip()
if not line: return
return Result_Name(name), Proc_Language_Binding_Spec(line)
match = staticmethod(match)
def tostr(self):
if self.items[1] is None:
return 'RESULT(%s)' % (self.items[0])
return 'RESULT(%s) %s' % self.items
class End_Function_Stmt(EndStmtBase): # R1230
"""
<end-function-stmt> = END [ FUNCTION [ <function-name> ] ]
"""
subclass_names = []
use_names = ['Function_Name']
def match(string): return EndStmtBase.match('FUNCTION',Function_Name, string)
match = staticmethod(match)
class Subroutine_Subprogram(BlockBase): # R1231
"""
<subroutine-subprogram> = <subroutine-stmt>
[ <specification-part> ]
[ <execution-part> ]
[ <internal-subprogram-part> ]
<end-subroutine-stmt>
"""
subclass_names = []
use_names = ['Subroutine_Stmt', 'Specification_Part', 'Execution_Part',
'Internal_Subprogram_Part', 'End_Subroutine_Stmt']
def match(reader):
return BlockBase.match(Subroutine_Stmt, [Specification_Part, Execution_Part, Internal_Subprogram_Part], End_Subroutine_Stmt, reader)
match = staticmethod(match)
class Subroutine_Stmt(StmtBase): # R1232
"""
<subroutine-stmt> = [ <prefix> ] SUBROUTINE <subroutine-name> [ ( [ <dummy-arg-list> ] ) [ <proc-language-binding-spec> ] ]
"""
subclass_names = []
use_names = ['Prefix', 'Subroutine_Name', 'Dummy_Arg_List', 'Proc_Language_Binding_Spec']
def match(string):
line, repmap = string_replace_map(string)
m = pattern.subroutine.search(line)
if m is None: return
prefix = line[:m.start()].rstrip() or None
if prefix is not None:
prefix = Prefix(repmap(prefix))
line = line[m.end():].lstrip()
m = pattern.name.match(line)
if m is None: return
name = Subroutine_Name(m.group())
line = line[m.end():].lstrip()
dummy_args = None
if line.startswith('('):
i = line.find(')')
if i==-1: return
dummy_args = line[1:i].strip() or None
if dummy_args is not None:
dummy_args = Dummy_Arg_List(repmap(dummy_args))
line = line[i+1:].lstrip()
binding_spec = None
if line:
binding_spec = Proc_Language_Binding_Spec(repmap(line))
return prefix, name, dummy_args, binding_spec
match = staticmethod(match)
def get_name(self): return self.items[1]
def tostr(self):
if self.items[0] is not None:
s = '%s SUBROUTINE %s' % (self.items[0], self.items[1])
else:
s = 'SUBROUTINE %s' % (self.items[1])
if self.items[2] is not None:
s += '(%s)' % (self.items[2])
if self.items[3] is not None:
s += ' %s' % (self.items[3])
return s
class Dummy_Arg(StringBase): # R1233
"""
<dummy-arg> = <dummy-arg-name>
| *
"""
subclass_names = ['Dummy_Arg_Name']
@staticmethod
def match(string): return StringBase.match('*', string)
class End_Subroutine_Stmt(EndStmtBase): # R1234
"""
<end-subroutine-stmt> = END [ SUBROUTINE [ <subroutine-name> ] ]
"""
subclass_names = []
use_names = ['Subroutine_Name']
@staticmethod
def match(string): return EndStmtBase.match('SUBROUTINE', Subroutine_Name, string)
class Entry_Stmt(StmtBase): # R1235
"""
::
<entry-stmt> = ENTRY <entry-name> [ ( [ <dummy-arg-list> ] ) [ <suffix> ] ]
Attributes
----------
items : (Entry_Name, Dummy_Arg_List, Suffix)
"""
subclass_names = []
use_names = ['Entry_Name', 'Dummy_Arg_List', 'Suffix']
@staticmethod
def match(string):
if string[:5].upper()!='ENTRY': return
line = string[5:].lstrip()
i = line.find('(')
if i==-1:
return Entry_Name(line), None, None
name = Entry_Name(line[:i].rstrip())
line, repmap = string_replace_map(line[i:])
i = line.find(')')
if i==-1: return
args = line[1:i].strip()
args = Dummy_Arg_List(repmap(args)) if args else None
line = line[i+1:].lstrip()
if line:
return name, args, Suffix(repmap(line))
return name, args, None
def tostr(self):
name, args, suffix = self.items
if suffix is None:
if args is None:
return 'ENTRY %s()' % (name)
return 'ENTRY %s(%s)' % (name, args)
elif args is None:
return 'ENTRY %s() %s' % (name, suffix)
return 'ENTRY %s(%s) %s' % (name, args, suffix)
class Return_Stmt(StmtBase): # R1236
"""
<return-stmt> = RETURN [ <scalar-int-expr> ]
"""
subclass_names = []
use_names = ['Scalar_Int_Expr']
def match(string):
start = string[:6].upper()
if start!='RETURN': return
if len(string)==6: return None,
return Scalar_Int_Expr(string[6:].lstrip()),
match = staticmethod(match)
def tostr(self):
if self.items[0] is None: return 'RETURN'
return 'RETURN %s' % self.items
class Contains_Stmt(StmtBase, STRINGBase): # R1237
"""
<contains-stmt> = CONTAINS
"""
subclass_names = []
def match(string): return STRINGBase.match('CONTAINS',string)
match = staticmethod(match)
class Stmt_Function_Stmt(StmtBase): # R1238
"""
    <stmt-function-stmt> = <function-name> ( [ <dummy-arg-name-list> ] ) = <scalar-expr>
"""
subclass_names = []
use_names = ['Function_Name', 'Dummy_Arg_Name_List', 'Scalar_Expr']
def match(string):
i = string.find('=')
if i==-1: return
expr = string[i+1:].lstrip()
if not expr: return
line = string[:i].rstrip()
if not line or not line.endswith(')'): return
i = line.find('(')
if i==-1: return
name = line[:i].rstrip()
if not name: return
args = line[i+1:-1].strip()
if args:
return Function_Name(name), Dummy_Arg_Name_List(args), Scalar_Expr(expr)
return Function_Name(name), None, Scalar_Expr(expr)
match = staticmethod(match)
def tostr(self):
if self.items[1] is None:
return '%s () = %s' % (self.items[0], self.items[2])
return '%s (%s) = %s' % self.items
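###############################################################################
# Editor's note -- illustrative round-trips for some of the classes above,
# assuming the usual fparser convention that Cls(string) parses the text and
# str() regenerates it (results shown are expectations, not captured output):
#
#     str(Call_Stmt('CALL sub(a, b)'))   ->  'CALL sub(a, b)'
#     str(Return_Stmt('RETURN'))         ->  'RETURN'
#     str(Entry_Stmt('ENTRY point(x)'))  ->  'ENTRY point(x)'
###############################################################################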
###############################################################################
################ GENERATE Scalar_, _List, _Name CLASSES #######################
###############################################################################
ClassType = type(Base)
_names = dir()
for clsname in _names:
cls = eval(clsname)
if not (isinstance(cls, ClassType) and issubclass(cls, Base) and not cls.__name__.endswith('Base')): continue
names = getattr(cls, 'subclass_names', []) + getattr(cls, 'use_names', [])
for n in names:
if n in _names: continue
if n.endswith('_List'):
_names.append(n)
n = n[:-5]
#print 'Generating %s_List' % (n)
exec('''\
class %s_List(SequenceBase):
subclass_names = [\'%s\']
use_names = []
def match(string): return SequenceBase.match(r\',\', %s, string)
match = staticmethod(match)
''' % (n, n, n))
elif n.endswith('_Name'):
_names.append(n)
n = n[:-5]
#print 'Generating %s_Name' % (n)
exec('''\
class %s_Name(Base):
subclass_names = [\'Name\']
''' % (n))
elif n.startswith('Scalar_'):
_names.append(n)
n = n[7:]
#print 'Generating Scalar_%s' % (n)
exec('''\
class Scalar_%s(Base):
subclass_names = [\'%s\']
''' % (n,n))
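# Editor's note: the three exec branches above synthesize helper classes on
# demand. For instance, a reference to 'Dummy_Arg_List' expands to code
# equivalent to (illustrative):
#
#     class Dummy_Arg_List(SequenceBase):
#         subclass_names = ['Dummy_Arg']
#         use_names = []
#         def match(string): return SequenceBase.match(r',', Dummy_Arg, string)
#         match = staticmethod(match)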
__autodoc__ = []
Base_classes = {}
for clsname in dir():
cls = eval(clsname)
if isinstance(cls, ClassType) and issubclass(cls, Base) and not cls.__name__.endswith('Base'):
Base_classes[cls.__name__] = cls
if len(__autodoc__)<10:
__autodoc__.append(cls.__name__)
###############################################################################
##################### OPTIMIZE subclass_names tree ############################
###############################################################################
if 1: # Optimize subclass tree:
def _rpl_list(clsname):
if clsname not in Base_classes:
logger.debug('Not implemented: %s' % clsname)
# print 'Not implemented:',clsname
return [] # remove this code when all classes are implemented
cls = Base_classes[clsname]
if 'match' in cls.__dict__:
return [clsname]
l = []
for n in getattr(cls,'subclass_names',[]):
l1 = _rpl_list(n)
for n1 in l1:
if n1 not in l:
l.append(n1)
return l
for cls in list(Base_classes.values()):
if not hasattr(cls, 'subclass_names'): continue
opt_subclass_names = []
for n in cls.subclass_names:
for n1 in _rpl_list(n):
if n1 not in opt_subclass_names: opt_subclass_names.append(n1)
if not opt_subclass_names==cls.subclass_names:
#print cls.__name__,':',', '.join(cls.subclass_names),'->',', '.join(opt_subclass_names)
cls.subclass_names[:] = opt_subclass_names
#else:
# print cls.__name__,':',opt_subclass_names
# Initialize Base.subclasses dictionary:
for clsname, cls in list(Base_classes.items()):
subclass_names = getattr(cls, 'subclass_names', None)
if subclass_names is None:
logger.debug('%s class is missing subclass_names list' % (clsname))
# print '%s class is missing subclass_names list' % (clsname)
continue
try:
l = Base.subclasses[clsname]
except KeyError:
Base.subclasses[clsname] = l = []
for n in subclass_names:
if n in Base_classes:
l.append(Base_classes[n])
else:
logger.debug('%s not implemented needed by %s' % (n,clsname))
# print '%s not implemented needed by %s' % (n,clsname)
if 1:
for cls in list(Base_classes.values()):
subclasses = Base.subclasses.get(cls.__name__,[])
subclasses_names = [c.__name__ for c in subclasses]
subclass_names = getattr(cls,'subclass_names', [])
use_names = getattr(cls,'use_names',[])
for n in subclasses_names:
            break  # note: this bare break disables the consistency check below
if n not in subclass_names:
logger.debug('%s needs to be added to %s subclasses_name list' % (n,cls.__name__))
# print '%s needs to be added to %s subclasses_name list' % (n,cls.__name__)
for n in subclass_names:
            break  # note: this bare break disables the consistency check below
if n not in subclasses_names:
logger.debug('%s needs to be added to %s subclass_name list' % (n,cls.__name__))
# print '%s needs to be added to %s subclass_name list' % (n,cls.__name__)
for n in use_names + subclass_names:
if n not in Base_classes:
logger.debug('%s not defined used by %s' % (n, cls.__name__))
# print '%s not defined used by %s' % (n, cls.__name__)
#EOF
| bsd-3-clause | 3,990,001,464,344,939,000 | 32.947482 | 153 | 0.523289 | false |
waddedMeat/asteroids-ish | Asteroids/Ship.py | 1 | 3639 | __author__ = 'jmoran'
from Asteroids import Object, Bullet
import math
from Asteroids.Colors import *
class Ship(Object):
radius = 20
def __init__(self, window, game):
Object.__init__(self, window, game)
self.point = (self.wWidth / 2, self.wHeight / 2)
self.start_point = self.point
self.radians = tuple(math.radians(x) for x in (270, 30, 90, 150))
self.start_radians = self.radians
self.acceleration = 0
self.heading = None
self.frames = 0
self.crashed = False
self.crash_time = 0
self.crash_point = None
def turn_left(self):
self.radians = tuple(p - math.radians(15) for p in self.radians)
def turn_right(self):
self.radians = tuple(p + math.radians(15) for p in self.radians)
def accelerate(self):
if self.heading == self.radians[0]:
self.acceleration += 0.04
else:
self.heading = self.radians[0]
def decelerate(self):
self.acceleration -= 0.04
if self.acceleration < 0:
self.acceleration = 0
def draw(self):
if self.crashed:
self.draw_crash()
else:
self.gObj = self.game.draw.aalines(self.window, WHITE, True, self.get_points(), 1)
def move(self, point):
x, y = point
if x > self.wWidth:
x -= self.wWidth
elif x < 0:
x += self.wWidth
elif y > self.wHeight:
y -= self.wHeight
elif y < 0:
y += self.wHeight
self.point = (x, y)
def fire(self):
a = self.radians[0]
r = self.radius
x, y = self.point
x2, y2 = (x + (r * math.cos(a)), y + (r * math.sin(a)))
return Bullet(self.window, self.game, (x2, y2), (x - x2, y - y2))
def get_points(self):
x, y = self.point
r = self.radius
if self.acceleration > 0 and self.heading is not None:
a = self.heading
x2, y2 = (x + (r * math.cos(a)), y + (r * math.sin(a)))
self.move((x - ((x - x2) * self.acceleration), y - ((y - y2) * self.acceleration)))
x, y = self.point
p0 = (x + (r * math.cos(self.radians[0])), y + (r * math.sin(self.radians[0])))
p1 = (x + (r * math.cos(self.radians[1])), y + (r * math.sin(self.radians[1])))
p2 = (x + ((r / 4) * math.cos(self.radians[2])), y + ((r / 4) * math.sin(self.radians[2])))
p3 = (x + (r * math.cos(self.radians[3])), y + (r * math.sin(self.radians[3])))
return p0, p1, p2, p3
def reset(self):
self.point = self.start_point
self.radians = self.start_radians
self.acceleration = 0
self.heading = None
self.crashed = False
self.no_collide = False
def draw_crash(self):
x, y = self.crash_point
self.game.draw.aaline(self.window, WHITE, (x-5, y), (x-10, y), 1)
self.game.draw.aaline(self.window, WHITE, (x+5, y), (x+10, y), 1)
self.game.draw.aaline(self.window, WHITE, (x, y-5), (x, y-10), 1)
self.game.draw.aaline(self.window, WHITE, (x, y+5), (x, y+10), 1)
self.game.draw.aaline(self.window, WHITE, (x+5, y+5), (x+10, y+10), 1)
self.game.draw.aaline(self.window, WHITE, (x-5, y-5), (x-10, y-10), 1)
self.game.draw.aaline(self.window, WHITE, (x+5, y-5), (x+10, y-10), 1)
self.game.draw.aaline(self.window, WHITE, (x-5, y+5), (x-10, y+10), 1)
def do_collision(self, obj):
Object.do_collision(self, obj)
self.no_collide = True
self.crashed = True
self.crash_point = self.point
| mit | 8,277,368,852,374,912,000 | 33.009346 | 99 | 0.535037 | false |
BenBoZ/hexaco | Engine/Components/tst/Test_MoveComponent.py | 1 | 3513 | """
This file is part of HexACO.
HexACO is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
HexACO is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with HexACO. If not, see <http://www.gnu.org/licenses/>.
########################################################################
Move Component Test Class
########################################################################
Description
-----------
"""
import unittest
from ..MoveComponent import MoveComponent
from ..PositionComponent import PositionComponent
###################################################################
#
# Test Code
#
###################################################################
class TestMoveComponent(unittest.TestCase):
######################################################
@classmethod
def setUpClass(cls):
"This method is called once, when starting the tests"
cls.movComp = MoveComponent(None)
@classmethod
def tearDownClass(cls):
"This method is called after finishing all tests"
pass
#######################################################
def setUp(self):
"This method is called before each test case"
self.movComp.speed = 0.0
def tearDown(self):
"This method is called after each test case"
pass
#######################################################
def test_defaultSpeed(self):
self.assertEqual(self.movComp.speed, 0.0)
def test_xyz_speed_or0(self):
self.movComp.speed = 2.0
xyz_speed = self.movComp.get_xyz_speed(0)
self.assertEqual(xyz_speed, [2.0, -2.0, 0.0])
def test_xyz_speed_or1(self):
self.movComp.speed = 2.0
xyz_speed = self.movComp.get_xyz_speed(1)
self.assertEqual(xyz_speed, [2.0, 0.0, -2.0])
def test_xyz_speed_or2(self):
self.movComp.speed = 2.0
xyz_speed = self.movComp.get_xyz_speed(2)
self.assertEqual(xyz_speed, [0.0, 2.0, -2.0])
def test_xyz_speed_or3(self):
self.movComp.speed = 2.0
xyz_speed = self.movComp.get_xyz_speed(3)
self.assertEqual(xyz_speed, [-2.0, 2.0, 0.0])
def test_xyz_speed_or4(self):
self.movComp.speed = 2.0
xyz_speed = self.movComp.get_xyz_speed(4)
self.assertEqual(xyz_speed, [-2.0, 0.0, 2.0])
def test_xyz_speed_or5(self):
self.movComp.speed = 2.0
xyz_speed = self.movComp.get_xyz_speed(5)
self.assertEqual(xyz_speed, [0.0, -2.0, 2.0])
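    # Editor's note: the six orientation cases above trace the six hexagonal
    # neighbour directions expressed in cube coordinates; each expected
    # speed vector's components sum to zero, as hex-grid moves must.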
def test_update(self):
# Setup
self.movComp.speed = 5.0
pos = [-2.0, 0.0, 2.0]
pos_comp = PositionComponent(None)
pos_comp.set_position_xyz(pos)
deltas = self.movComp.get_xyz_speed(pos_comp.orientation)
expected = [pos[i] + deltas[i] for i in range(3)]
self.movComp.components['position'] = pos_comp
# Exercise
self.movComp.update()
# Verify
self.assertEqual(expected, pos_comp.xyz())
if __name__ == '__main__':
unittest.main(verbosity=2)
| gpl-3.0 | 8,509,366,894,244,802,000 | 25.613636 | 72 | 0.547111 | false |
JohnMaguire/Cardinal | plugins/ticker/plugin.py | 1 | 18949 | from collections import OrderedDict
import datetime
import logging
import re
import pytz
import requests
from twisted.internet import defer, error, reactor
from twisted.internet.threads import deferToThread
from cardinal import util
from cardinal.bot import user_info
from cardinal.decorators import command, help, regex
from cardinal.util import F
# IEX API Endpoint
IEX_QUOTE_API_URL = "https://cloud.iexapis.com/stable/stock/{symbol}/quote?token={token}" # noqa: E501
# Regex pattern that matches PyLink relay bots
RELAY_REGEX = r'^(?:<(.+?)>\s+)'
# For 'stock' command - checking stock price
STOCK_RELAY_REGEX = RELAY_REGEX + r'(\.stock.*?)$'
# For 'predict' command - predicting stock price
PREDICT_REGEX = r'^(.+?) (?:([-+])?(\d+(?:\.\d+)?)%|\$?(\d+(?:\.\d+)?))$' # noqa: E501
# For 'predict' command - predicting a stock price
PREDICT_RELAY_REGEX = RELAY_REGEX + r'(\.predict.*?)$'
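# Editor's note -- illustrative inputs for PREDICT_REGEX (matched after the
# '.predict ' prefix has been stripped off by the command handler):
#     'AAPL 5%'    -> symbol 'AAPL', relative prediction of +5%
#     'AAPL -5%'   -> symbol 'AAPL', relative prediction of -5%
#     'AAPL $150'  -> symbol 'AAPL', absolute prediction of 150.00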
def est_now():
tz = pytz.timezone('America/New_York')
now = datetime.datetime.now(tz)
return now
def market_is_open():
"""Not aware of holidays or anything like that..."""
now = est_now()
# Determine if the market is currently open
is_market_closed = (now.weekday() >= 5) or \
(now.hour < 9 or now.hour >= 17) or \
(now.hour == 9 and now.minute < 30) or \
(now.hour == 16 and now.minute > 0)
return not is_market_closed
def get_delta(new_value, old_value):
return float(new_value) / float(old_value) * 100 - 100
def colorize(percentage):
message = '{:.2f}%'.format(percentage)
if percentage > 0:
return F.C.light_green(message)
else:
return F.C.light_red(message)
class TickerPlugin:
def __init__(self, cardinal, config):
self.logger = logging.getLogger(__name__)
self.cardinal = cardinal
self.config = config or {}
self.config.setdefault('api_key', None)
self.config.setdefault('channels', [])
self.config.setdefault('stocks', [])
self.config.setdefault('relay_bots', [])
if not self.config["channels"]:
self.logger.warning("No channels for ticker defined in config --"
"ticker will be disabled")
if not self.config["stocks"]:
self.logger.warning("No stocks for ticker defined in config -- "
"ticker will be disabled")
if not self.config["api_key"]:
raise KeyError("Missing required api_key in ticker config")
if len(self.config["stocks"]) > 5:
raise ValueError("No more than 5 stocks may be present in ticker "
"config")
self.relay_bots = []
for relay_bot in self.config['relay_bots']:
user = user_info(
relay_bot['nick'],
relay_bot['user'],
relay_bot['vhost'])
self.relay_bots.append(user)
self.db = cardinal.get_db('ticker', default={
'predictions': {},
})
self.call_id = None
self.wait()
@property
def stocks(self):
return OrderedDict(self.config["stocks"])
def is_relay_bot(self, user):
"""Compares a user against the registered relay bots."""
for bot in self.relay_bots:
if (bot.nick is None or bot.nick == user.nick) and \
(bot.user is None or bot.user == user.user) and \
(bot.vhost is None or bot.vhost == user.vhost):
return True
return False
def wait(self):
"""Tell the reactor to call tick() at the next 15 minute interval"""
now = est_now()
minutes_to_sleep = 15 - now.minute % 15
seconds_to_sleep = minutes_to_sleep * 60
seconds_to_sleep = seconds_to_sleep - now.second
        self.call_id = reactor.callLater(seconds_to_sleep, self.tick)
def close(self, cardinal):
if self.call_id:
try:
self.call_id.cancel()
except error.AlreadyCancelled as e:
self.logger.debug(e)
@defer.inlineCallbacks
def tick(self):
"""Send a message with daily stock movements"""
# Start the timer for the next tick -- do this first, as the rest of
# this function may take time. While that's OK, and it shouldn't take
# anywhere close to 15 minutes, reloading the plugin during that time
# could result in close() cancelling the event, and then wait() getting
# called from the old (reloaded) instance.
self.wait()
# If it's after 4pm ET or before 9:30am ET on a weekday, or if it's
# a weekend (Saturday or Sunday), don't tick, just wait.
now = est_now()
# Determine if the market is currently open
is_market_open = not (
(now.weekday() >= 5) or
(now.hour < 9 or now.hour >= 17) or
(now.hour == 9 and now.minute < 30) or
(now.hour == 16 and now.minute > 0))
# Determine if this is the market opening or market closing
is_open = now.hour == 9 and now.minute == 30
is_close = now.hour == 16 and now.minute == 0
# Determine if we should do predictions after sending ticker
        should_do_predictions = is_market_open and (is_open or is_close)
# If there are no stocks to send in the ticker, or no channels to send
# them to, don't tick, just wait.
should_send_ticker = is_market_open and \
self.config["channels"] and self.config["stocks"]
if should_send_ticker:
yield self.send_ticker()
if should_do_predictions:
# Try to avoid hitting rate limiting (5 calls per minute) by giving
# a minute of buffer after the ticker.
yield util.sleep(60)
yield self.do_predictions()
@defer.inlineCallbacks
def send_ticker(self):
# Used a DeferredList so that we can make requests for all the symbols
# we care about simultaneously
deferreds = []
for symbol, name in self.stocks.items():
d = self.get_daily(symbol)
deferreds.append(d)
# convert result to a (symbol, delta) mapping for the list
            def errback(f, symbol=symbol):
                # bind the loop variable at definition time; the deferred
                # may fire only after the loop has moved on to another symbol
                self.logger.error("Failed to get stock {}: {}".format(
                    symbol, f))
                return f
def callback(res):
return (res['symbol'], res['change'])
d.addErrback(errback)
d.addCallback(callback)
dl = defer.DeferredList(deferreds)
# Loop the results, ignoring errored requests
dl_results = yield dl
results = {}
for success, result in dl_results:
if not success:
continue
symbol, change = result
results.update({symbol: change})
message = self.format_ticker(results)
for channel in self.config["channels"]:
self.cardinal.sendMsg(channel, message)
def format_ticker(self, results):
message_parts = []
for symbol, name in self.stocks.items():
if symbol in results:
message_parts.append(
self.format_symbol(symbol, results[symbol])
)
message = " | ".join(message_parts)
return message
def format_symbol(self, symbol, change):
name = self.stocks[symbol]
return "{name} ({symbol}): {change}".format(
symbol=F.bold(symbol),
name=name,
change=colorize(change),
)
@defer.inlineCallbacks
def do_predictions(self):
# Loop each prediction, grouped by symbols to avoid rate limits
with self.db() as db:
# TODO will this generator still work if it's iterated outside the
# context manager?
predicted_symbols = list(db['predictions'].keys())
for symbol in predicted_symbols:
try:
data = yield self.get_daily(symbol)
# this is not 100% accurate as to the value at open... it's
# just a value close to the open, iex cloud doesn't let us get
# at the true open without paying
actual = data['price']
except Exception:
self.logger.exception(
"Failed to fetch information for symbol {} -- skipping"
.format(symbol))
for channel in self.config["channels"]:
self.cardinal.sendMsg(
channel, "Error with predictions for symbol {}."
.format(symbol))
continue
# Loop each nick's prediction, and look for the closest prediction
# for the current symbol
            closest_prediction = None
            closest_delta = None
            closest_nick = None
            closest_base = None
with self.db() as db:
predictions = db['predictions'][symbol]
del db['predictions'][symbol]
for nick, prediction in list(predictions.items()):
# Check if this is the closest guess for the symbol so far
delta = abs(actual - prediction['prediction'])
                if closest_delta is None or delta < closest_delta:
                    closest_prediction = prediction['prediction']
                    closest_delta = delta
                    closest_nick = nick
                    closest_base = prediction['base']
self.send_prediction(
nick,
symbol,
prediction,
actual,
)
market_open_close = 'open' if market_is_open() else 'close'
for channel in self.config["channels"]:
self.cardinal.sendMsg(
channel,
"{} had the closest guess for {} out of {} "
"predictions with a prediction of {:.2f} ({}) "
"compared to the actual {} of {:.2f} ({}).".format(
closest_nick,
F.bold(symbol),
len(predictions),
closest_prediction,
                        colorize(get_delta(closest_prediction,
                                           closest_base)),
market_open_close,
actual,
                        colorize(get_delta(actual, closest_base)),
))
# Try to avoid hitting rate limiting (5 calls per minute) by
# only checking predictions of 4 symbols per minute
yield util.sleep(15)
def send_prediction(
self,
nick,
symbol,
prediction,
actual,
):
market_open_close = 'open' if market_is_open() else 'close'
for channel in self.config["channels"]:
self.cardinal.sendMsg(
channel,
"Prediction by {} for \x02{}\x02: {:.2f} ({}). "
"Actual value at {}: {:.2f} ({}). "
"Prediction set at {}.".format(
nick,
symbol,
prediction['prediction'],
colorize(get_delta(
prediction['prediction'], prediction['base'])),
market_open_close,
actual,
colorize(get_delta(
actual, prediction['base'])),
prediction['when']
))
@command('stock')
@help("Check the latest price of a stock")
@help("Syntax: .stock <stock symbol>")
@defer.inlineCallbacks
def stock(self, cardinal, user, channel, msg):
nick = user.nick # other values may not exist for relayed users
parts = msg.split(' ')
if len(parts) != 2:
cardinal.sendMsg(channel, "Syntax: .stock <stock symbol>")
return
symbol = parts[1]
try:
data = yield self.get_daily(symbol)
except Exception as exc:
self.logger.warning("Error trying to look up symbol {}: {}".format(
symbol, exc))
cardinal.sendMsg(
channel, "{}: I couldn't look that symbol up".format(nick))
return
cardinal.sendMsg(
channel,
"{} (\x02{}\x02) = {:.2f} USD - Daily Change: {}".format(
data['companyName'],
data['symbol'],
data['price'],
colorize(data['change'])))
@regex(STOCK_RELAY_REGEX)
@defer.inlineCallbacks
def stock_relayed(self, cardinal, user, channel, msg):
"""Hack to support relayed messages"""
match = re.match(STOCK_RELAY_REGEX, msg)
# this regex should only match when a relay bot is relaying a message
# for another user - make sure this is really a relay bot
if not self.is_relay_bot(user):
return
user = user_info(util.strip_formatting(match.group(1)),
user.user,
user.vhost,
)
yield self.stock(cardinal, user, channel, match.group(2))
@command('predict')
@help("Predict a stock price at the next market open/close")
@help("Syntax: .predict <stock> [-]<X>% | .predict <stock> $<X>")
@defer.inlineCallbacks
def predict(self, cardinal, user, channel, msg):
nick = user.nick
try:
msg = msg.split(' ', 1)[1]
except IndexError:
cardinal.sendMsg(channel,
"Syntax: .predict <stock> [-]<X>% |"
" .predict <stock> $<X>")
return
if not re.match(PREDICT_REGEX, msg):
cardinal.sendMsg(channel,
"Syntax: .predict <stock> [-]<X>% |"
" .predict <stock> $<X>")
return
try:
prediction = yield self.parse_prediction(nick, msg)
except Exception as exc:
self.logger.warning("Error trying to parse prediction: {}"
.format(exc))
cardinal.sendMsg(
channel,
"{}: Are you sure the symbol is correct?".format(user.nick))
return
nick, symbol, prediction, base = prediction
# If the user already had a prediction for the symbol, create a message
# with the old prediction's info
try:
with self.db() as db:
old_prediction = db['predictions'][symbol][nick]
except KeyError:
old_str = ''
else:
old_str = '(replaces old prediction of {:.2f} ({}) set at {})' \
.format(
old_prediction['prediction'],
colorize(get_delta(old_prediction['prediction'],
old_prediction['base'])),
old_prediction['when'],
)
# Save the prediction
self.save_prediction(symbol, nick, base, prediction)
cardinal.sendMsg(
channel,
"Prediction by {} for \x02{}\x02 at market {}: {:.2f} ({}) {}"
.format(nick,
symbol,
'close' if market_is_open() else 'open',
prediction,
colorize(get_delta(prediction, base)),
old_str))
@regex(PREDICT_RELAY_REGEX)
@defer.inlineCallbacks
def predict_relayed(self, cardinal, user, channel, msg):
"""Hack to support relayed messages"""
match = re.match(PREDICT_RELAY_REGEX, msg)
# this regex should only match when a relay bot is relaying a message
# for another user - make sure this is really a relay bot
if not self.is_relay_bot(user):
return
user = user_info(util.strip_formatting(match.group(1)),
user.user,
user.vhost,
)
yield self.predict(cardinal, user, channel, match.group(2))
@defer.inlineCallbacks
def parse_prediction(self, nick, message):
match = re.match(PREDICT_REGEX, message)
data = yield self.get_daily(match.group(1))
if market_is_open():
# get value at previous close
base = data['previous close']
else:
# get latest price
base = data['price']
symbol = data['symbol'] # consistent casing
negative_percentage = match.group(2) == '-'
percentage = float(match.group(3)) if match.group(3) else None
price = float(match.group(4)) if match.group(4) else None
if percentage is not None:
prediction = percentage * .01 * base
if negative_percentage:
prediction = base - prediction
else:
prediction = base + prediction
elif price is not None:
prediction = price
else:
# this shouldn't happen
raise Exception("No price or percentage: {}".format(message))
return (
nick,
symbol,
prediction,
base,
)
def save_prediction(self, symbol, nick, base, prediction):
with self.db() as db:
predictions = db['predictions'].get(symbol, {})
predictions[nick] = {
'when': est_now().strftime('%Y-%m-%d %H:%M:%S %Z'),
'base': base,
'prediction': prediction,
}
db['predictions'][symbol] = predictions
def get_prediction(self, symbol, nick):
with self.db() as db:
return db['predictions'][symbol][nick]
def get_daily(self, symbol):
return self.make_iex_request(symbol)
@defer.inlineCallbacks
def make_iex_request(self, symbol):
url = IEX_QUOTE_API_URL.format(
symbol=symbol,
token=self.config["api_key"],
)
r = yield deferToThread(requests.get, url)
data = r.json()
try:
price = float(data['latestPrice'])
previous_close = float(data['previousClose'])
change_percent = ((price - previous_close) / previous_close) * 100
return ({'symbol': data['symbol'],
'companyName': data['companyName'],
'exchange': data['primaryExchange'],
'price': price,
'previous close': previous_close,
'change': change_percent,
})
except KeyError as e:
self.logger.error("{}, with data: {}".format(e, data))
raise
entrypoint = TickerPlugin
| mit | 5,108,083,788,129,619,000 | 34.090741 | 103 | 0.526941 | false |
jjhelmus/CSU_RadarTools | csu_radartools/csu_kdp.py | 1 | 10457 | """
Timothy James Lang
[email protected]
Last Updated 04 September 2015 (Python 2.7/3.4)
Last Updated 26 July 2005 (IDL)
csu_kdp v1.4
Change Log
----------
v1.4 Major Changes (09/04/2015):
1. Added window keyword to enable stretching the FIR window (e.g.,
use a 21-pt filter over 5 km with 250-m gate spacing
2. Forcing FIR order to be even, _calc_kdp_ray will crash otherwise
v1.3 Major Changes (08/05/2015):
1. Made Python 3 compatible.
2. Fixed issue with non-integer array indices.
v1.2 Major Changes (07/10/2015):
1. Made sub-module pep8 compliant.
v1.1 Major Changes (04/27/2015):
1. Made algorithm work with a user-defined gate spacing (via gs keyword).
Untested on gate spacings that do not divide evenly into the 3-km window
used for filtering the PHIDP data, however. But common gate spacings
like 50, 100, 150, 200, 250, and 300 meters should all work fine.
2. Made the algorithm capable of receiving 2D array inputs (i.e., azimuth &
range) as well as 1D inputs (range only). If 2D, rng needs to be 2D as
well. However, thsd should remain a scalar, or 1D and only vary by range.
To Do
-----
1. Performance improvements
2. Make object-oriented
"""
from __future__ import division, print_function
import numpy as np
from numpy import linalg
from scipy.signal import firwin
from warnings import warn
# import time
VERSION = '1.4'
# Used by FIR coefficient function (get_fir)
FIR_GS = 150.0
FIR_WIN = 3.0
FIR_ORDER = 20
FIR_GAIN = 1.0
FIR_FREQ = 0.08
FIR_STD = 28.0
KM2M = 1000.0
def calc_kdp_bringi(dp=None, dz=None, rng=None, thsd=12, nfilter=1,
bad=-32768, gs=FIR_GS, window=FIR_WIN):
"""
Overview
--------
This is an old algorithm that uses an FIR filter to process differential
phase and extract specific differential phase. It works on polarimetric
radar data. It is based on code provided by V. N. Bringi and Yanting Wang
of CSU Electrical Engineering. It assumes differential phase has been
unfolded already. You can send this function either 1D or 2D arrays of
data. If 2D, it assumes the first index is azimuth so it will loop over
that, calculating KDP along individual rays.
Steps
-----
1. Standard deviation of differential phase is calculated and used to
QC the phase data. The stdev calculation uses up to 11 consecutive
gates regardless of gate spacing.
2. Differential phase is filtered using the FIR filter, which has been
tuned to the number of gates contained within the FIR window. This
algorithm only works for window / gate spacing = even number.
3. Specific differential phase is calculated by consulting reflectivity.
As reflectivity declines progressively more and more gates are needed
in the window used to fit a line to the filtered phase. Specific
differential phase is half the slope of that line.
Reference
---------
Timothy J. Lang, David A. Ahijevych, Stephen W. Nesbitt, Richard E.
Carbone, Steven A. Rutledge, and Robert Cifelli, 2007: Radar-Observed
Characteristics of Precipitating Systems during NAME 2004. J. Climate,
20, 1713–1733. doi: http://dx.doi.org/10.1175/JCLI4082.1
Arguments
---------
dp = Differential phase (deg, 1D or 2D array)
dz = Reflectivity (dBZ, 1D or 2D array)
rng = Range (km, 1D or 2D array -
use np.meshgrid() first tp make rng 2D if needed)
thsd = Threshold for standard deviation of differential phase, above which
the data are not considered when filtering or calculating specific
differential phase. The user can specify a 1D vector of spatially
varying thresholds instead (i.e., vary by range).
nfilter = Number of times to apply the FIR filter
bad = Value for bad/missing data
gs = Gate spacing of radar (meters)
window = Changes window over which FIR filter is applied (km). Also affects
the width of the adaptive KDP calculations.
Returns
-------
kd_lin = Specific differential phase (deg/km, 1D or 2D array)
dp_lin = Filtered differential phase (deg, 1D or 2D array)
sd_lin = Standard deviation of diff. phase (deg, 1D or 2D array)
"""
# Quick check on all vars. Used keywords so order doesn't matter.
if dp is None or dz is None or rng is None:
warn('Missing needed variables (dp, dz, and/or rng), failing ...')
return
if np.ndim(dp) != np.ndim(dz) or np.ndim(dp) != np.ndim(rng):
warn('Array sizes don\'t match, failing ...')
return
fir = get_fir(gs=gs, window=window)
# If array is 2D, then it assumes the first index refers to azimuth.
# Thus it loops over that.
if np.ndim(dp) == 2:
kd_lin = np.zeros_like(dp) + bad
dp_lin = np.zeros_like(dp) + bad
sd_lin = np.zeros_like(dp) + 100.0
for ray in np.arange(np.shape(dp)[0]):
kd_lin[ray], dp_lin[ray], sd_lin[ray] = \
_calc_kdp_ray(dp[ray], dz[ray], rng[ray], thsd=thsd,
nfilter=nfilter, bad=bad, fir=fir)
# Or
elif np.ndim(dp) == 1:
kd_lin, dp_lin, sd_lin = _calc_kdp_ray(dp, dz, rng, thsd=thsd, fir=fir,
nfilter=nfilter, bad=bad)
else:
warn('Need 2D or 1D array, failing ...')
return
return kd_lin, dp_lin, sd_lin
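
# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): shows the
# calling convention documented above for 2D (azimuth x range) input with a
# 250-m gate spacing and a 5-km FIR window. The fields are synthetic -- a
# uniform 45-dBZ ray and a 2 deg/km differential-phase ramp -- so away from
# the ray edges the retrieved KDP should sit near 1 deg/km (half the slope).
def _example_calc_kdp_bringi():
    rng2d = np.tile(np.arange(200, dtype=np.float64) * 0.25, (4, 1))  # km
    dz2d = np.zeros_like(rng2d) + 45.0  # uniform reflectivity (dBZ)
    dp2d = np.cumsum(np.zeros_like(rng2d) + 0.5, axis=1)  # 0.5 deg per gate
    return calc_kdp_bringi(dp=dp2d, dz=dz2d, rng=rng2d, gs=250.0, window=5.0)
# ---------------------------------------------------------------------------
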
def get_fir(gs=FIR_GS, window=FIR_WIN):
"""
gs = Gate Spacing (m)
window = Filter Window (km)
window divided by gs should be an even number!
"""
fir = {}
fir['order'] = np.int32(window * KM2M / gs)
if fir['order'] % 2 != 0:
        warn('window / gs must be an even number! Failing ...')
return
fir['gain'] = FIR_GAIN
# ratio = FIR_GS / gs
ratio = fir['order'] / FIR_ORDER
freq = FIR_FREQ / ratio
std = ratio * FIR_STD
fir['coef'] = firwin(fir['order'] + 1, freq, window=('gaussian', std))
# print('debug', fir)
return fir
def _calc_kdp_ray(dp, dz, rng, thsd=12, nfilter=1, bad=-32768, fir=None):
"""
Arguments
---------
dp = 1D ray of differential phase
dz = 1D ray of reflectivity
rng = 1D ray of range
thsd = Scalar or 1D ray of diff phase standard deviation thresholds
nfilter = Number of times to filter the data
bad = Bad/missing data value
fir = Dictionary containing FIR filter parameters
Returns
-------
kd_lin = Specific differential phase (deg/km, 1D array)
dp_lin = Filtered differential phase (deg, 1D array)
sd_lin = Standard deviation of diff. phase (deg, 1D array)
"""
# Define needed variables
kd_lin = np.zeros_like(rng) + bad
sd_lin = np.zeros_like(rng) + 100.0
# User can provide a spatially varying stddev(dp) threshold
if not hasattr(thsd, '__len__'):
thsd = np.zeros_like(rng) + thsd
length = len(rng)
lin = np.arange(length)
# Half window size for calculating stdev of phase (fixed @ 11 gates)
half_std_win = 5
half_fir_win = fir['order'] // 2 # Half window size for FIR filtering
y = np.zeros(length) + bad # Dummy variable to store filtered phase
z = 1.0 * dp # Dummy variable to store un/pre-processed phase
# print(time.time() - begin_time, 'seconds since start (DEF)')
#####################################################################
# Calculate standard deviation of phidp
mask = dp != bad
for i in lin[mask]:
index1 = np.int32(i - half_std_win)
index2 = np.int32(i + half_std_win)
if index1 >= 0 and index2 < length - 1:
yy = dp[index1:index2]
tmp_mask = mask[index1:index2]
if len(yy[tmp_mask]) > half_std_win:
sd_lin[i] = _quick_std(yy, tmp_mask)
# ------------- MAIN LOOP of Phidp Adaptive Filtering ------------------
# FIR FILTER SECTION
for mloop in np.arange(nfilter):
mask = np.logical_and(sd_lin <= thsd, z != bad)
for i in lin[mask]:
index1 = np.int32(i - half_fir_win)
index2 = np.int32(i + half_fir_win)
if index1 >= 0 and index2 < length - 1:
yy = z[index1:index2+1]
xx = rng[index1:index2+1]
tmp_mask = mask[index1:index2+1]
siz = len(yy[tmp_mask])
if siz > 0.8 * fir['order']:
if siz < fir['order'] + 1:
result = _leastsqrs(xx, yy, siz, tmp_mask)
yy[~tmp_mask] = result[0] * xx[~tmp_mask] + result[1]
y[i] = fir['gain'] * np.dot(fir['coef'], yy)
z = 1.0 * y # Enables re-filtering of processed phase
dp_lin = 1.0 * y
# print(time.time() - begin_time, 'seconds since start (FDP)')
# *****************END LOOP for Phidp Adaptive Filtering******************
# CALCULATE KDP
# Default value for nadp is half_fir_win, but varies based on Zh
nadp = np.int16(0 * dz + half_fir_win)
tmp_mask = dz < 35
nadp[tmp_mask] = 3 * half_fir_win
tmp_mask = np.logical_and(dz >= 35, dz < 45)
nadp[tmp_mask] = 2 * half_fir_win
mask = dp_lin != bad
for i in lin[mask]:
index1, index2 = _get_nadp_indices(nadp, i)
if index1 >= 0 and index2 <= length:
tmp_mask = mask[index1:index2]
xx = rng[index1:index2]
siz = len(xx[tmp_mask])
# Improved Kdp based on LSE fit to Adap filt Phidp
if siz >= 0.8 * nadp[i]:
yy = dp_lin[index1:index2]
kd_lin[i] = _fit_line_and_get_kdp(xx, yy, siz, tmp_mask)
# *******************END KDP CALCULATION****************************
# print(time.time() - begin_time, 'seconds since start (KDP/Done)')
return kd_lin, dp_lin, sd_lin
def _leastsqrs(xx, yy, siz, tmp_mask):
"""
Following is faster than np.polyfit
e.g., return np.polyfit(xx[tmp_mask], yy[tmp_mask], 1)
"""
A = np.array([xx[tmp_mask], np.ones(siz)])
return linalg.lstsq(A.T, yy[tmp_mask])[0]
def _get_nadp_indices(nadp, i):
half_nadp = nadp[i] / 2
return np.int32(i - half_nadp), np.int32(i + half_nadp + 1)
def _fit_line_and_get_kdp(xx, yy, siz, tmp_mask):
result = _leastsqrs(xx, yy, siz, tmp_mask)
return 0.5 * result[0]
def _quick_std(array, mask):
"""Following is faster than np.std()"""
a = array[mask]
m = a.mean()
c = a - m
return (np.dot(c, c) / a.size)**0.5
| gpl-2.0 | 330,783,151,936,781,100 | 36.743682 | 79 | 0.606121 | false |
jonjahren/unity8 | tests/autopilot/unity8/dash.py | 1 | 10349 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
#
# Unity Autopilot Test Suite
# Copyright (C) 2012, 2013, 2014, 2015 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import ubuntuuitoolkit
from autopilot import logging as autopilot_logging
from autopilot.introspection import dbus
import unity8
logger = logging.getLogger(__name__)
class DashApp():
"""Autopilot helper for the Dash app."""
def __init__(self, app_proxy):
self.app_proxy = app_proxy
self.main_view = self.app_proxy.select_single(
ubuntuuitoolkit.MainView)
self.dash = self.main_view.select_single(Dash)
class Dash(ubuntuuitoolkit.UbuntuUIToolkitCustomProxyObjectBase):
"""A helper that understands the Dash."""
def __init__(self, *args):
super().__init__(*args)
self.dash_content_list = self.wait_select_single(
'QQuickListView', objectName='dashContentList')
def get_applications_grid(self):
get_grid = self.get_scope('clickscope').wait_select_single(
'CardGrid', objectName='local')
return get_grid
def get_application_icon(self, text):
"""Returns a 'Tile' icon that has the text 'text' from the application
grid.
:param text: String containing the text of the icon to search for.
"""
app_grid = self.get_applications_grid()
resp_grid = app_grid.wait_select_single('ResponsiveGridView')
return resp_grid.select_single('Tile', text=text)
def get_scope(self, scope_name='clickscope'):
return self.dash_content_list.wait_select_single(
'QQuickLoader', scopeId=scope_name)
def get_scope_by_index(self, scope_index=0):
return self.dash_content_list.wait_select_single(
'QQuickLoader', objectName=("scopeLoader%i" % scope_index))
@autopilot_logging.log_action(logger.info)
def open_scope(self, scope_id):
"""Open a dash scope.
:parameter scope_id: The id of the scope.
:return: The scope.
"""
scope_loader = self._get_scope_loader(scope_id)
if scope_loader.isCurrent:
logger.info('The scope is already open.')
return self._get_scope_from_loader(scope_loader)
else:
return self._open_scope_scrolling(scope_loader)
def _get_scope_loader(self, scope_id):
try:
aux = self.dash_content_list.get_children_by_type('QQuickItem')[0]
for l in aux.get_children_by_type('QQuickLoader'):
if (l.scopeId == scope_id):
return l
raise unity8.UnityException(
'No scope found with id {0}'.format(scope_id))
except dbus.StateNotFoundError:
raise unity8.UnityException(
'No scope found with id {0}'.format(scope_id))
def _get_scope_from_loader(self, loader):
return loader.wait_select_single('GenericScopeView')
def _open_scope_scrolling(self, scope_loader):
scroll = self._get_scroll_direction(scope_loader)
while not scope_loader.isCurrent:
scroll()
self.dash_content_list.moving.wait_for(False)
scope_loader.isCurrent.wait_for(True)
scope = self._get_scope_from_loader(scope_loader)
return scope
def _get_scroll_direction(self, scope_loader):
current_scope_loader = self.dash_content_list.select_single(
'QQuickLoader', isCurrent=True)
if scope_loader.globalRect.x < current_scope_loader.globalRect.x:
return self._scroll_to_left_scope
elif scope_loader.globalRect.x > current_scope_loader.globalRect.x:
return self._scroll_to_right_scope
else:
raise unity8.UnityException('The scope is already open')
@autopilot_logging.log_action(logger.info)
def _scroll_to_left_scope(self):
original_index = self.dash_content_list.currentIndex
dash_content = self.select_single(objectName="dashContent")
x, y, width, height = dash_content.globalRect
# Make the drag range be a multiple of the drag "rate" value.
# Workarounds https://bugs.launchpad.net/mir/+bug/1399690
rate = 5
divisions = 5
jump = (width / divisions) // rate * rate
start_x = x + jump
stop_x = x + jump * (divisions - 1)
start_y = stop_y = y + 1
self.pointing_device.drag(start_x, start_y, stop_x, stop_y, rate)
self.dash_content_list.currentIndex.wait_for(original_index - 1)
@autopilot_logging.log_action(logger.info)
def _scroll_to_right_scope(self):
original_index = self.dash_content_list.currentIndex
dash_content = self.select_single(objectName="dashContent")
x, y, width, height = dash_content.globalRect
# Make the drag range be a multiple of the drag "rate" value.
# Workarounds https://bugs.launchpad.net/mir/+bug/1399690
rate = 5
divisions = 5
jump = (width / divisions) // rate * rate
start_x = x + jump * (divisions - 1)
stop_x = x + jump
start_y = stop_y = y + 1
self.pointing_device.drag(start_x, start_y, stop_x, stop_y, rate)
self.dash_content_list.currentIndex.wait_for(original_index + 1)
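
    # Editor's worked example of the jump arithmetic above (hypothetical
    # numbers): with width = 1000 px, rate = 5 and divisions = 5,
    # jump = (1000 / 5) // 5 * 5 = 200, so the swipe runs from x + 800 to
    # x + 200 -- a 600 px span that is an exact multiple of the per-event
    # drag rate, which is what the Mir bug workaround requires.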
def enter_search_query(self, query, keyboard):
current_header = self._get_current_page_header()
search_button = \
current_header.select_single(objectName="search_header_button")
self.pointing_device.move(
search_button.globalRect.x + search_button.width / 2,
search_button.globalRect.y + search_button.height / 2)
self.pointing_device.click()
headerContainer = current_header.select_single(
objectName="headerContainer")
headerContainer.contentY.wait_for(0)
keyboard.type(query)
self.select_single(
objectName="processingIndicator").visible.wait_for(False)
def get_search_text_field(self):
page_header = self._get_current_page_header()
return page_header.select_single(objectName='searchTextField')
def _get_current_page_header(self):
dashContentList = self.select_single(objectName="dashContentList")
all_headers = dashContentList.select_many("QQuickLoader")
for i in all_headers:
if i.isCurrent:
return i.select_single(objectName="scopePageHeader")
return None
class ListViewWithPageHeader(ubuntuuitoolkit.QQuickFlickable):
margin_to_swipe_from_bottom = ubuntuuitoolkit.units.gu(4)
class GenericScopeView(ubuntuuitoolkit.UbuntuUIToolkitCustomProxyObjectBase):
"""Autopilot helper for generic scopes."""
@autopilot_logging.log_action(logger.info)
def open_preview(self, category, app_name, press_duration=0.10):
"""Open the preview of an application.
:parameter category: The name of the category where the application is.
:parameter app_name: The name of the application.
:return: The opened preview.
"""
# FIXME some categories need a long press in order to see the preview.
# Some categories do not show previews, like recent apps.
# --elopio - 2014-1-14
self.click_scope_item(category, app_name, press_duration)
preview_list = self.wait_select_single(
'QQuickLoader', objectName='subPageLoader')
preview_list.subPageShown.wait_for(True)
preview_list.x.wait_for(0)
self.get_root_instance().select_single(
objectName='processingIndicator').visible.wait_for(False)
return preview_list.select_single(
Preview, objectName='preview{}'.format(
preview_list.initialIndex))
@autopilot_logging.log_action(logger.debug)
def click_scope_item(self, category, title, press_duration=0.10):
"""Click an item from the scope.
:parameter category: The name of the category where the item is.
:parameter title: The title of the item.
"""
category_element = self._get_category_element(category)
icon = category_element.wait_select_single(
'AbstractButton', title=title)
list_view = self.select_single(
ListViewWithPageHeader, objectName='categoryListView')
list_view.swipe_child_into_view(icon)
self.pointing_device.click_object(icon, press_duration=press_duration)
def _get_category_element(self, category):
try:
return self.wait_select_single(
'DashCategoryBase',
objectName='dashCategory{}'.format(category))
except dbus.StateNotFoundError:
raise unity8.UnityException(
'No category found with name {}'.format(category))
def get_applications(self, category):
"""Return the list of applications on a category.
:parameter category: The name of the category.
"""
category_element = self._get_category_element(category)
see_all = category_element.select_single(objectName='seeAll')
application_cards = category_element.select_many('AbstractButton')
application_cards = sorted(
(card for card in application_cards
if card.globalRect.y < see_all.globalRect.y),
key=lambda card: (card.globalRect.y, card.globalRect.x))
result = []
for card in application_cards:
if card.objectName not in ('cardToolCard', 'seeAll'):
result.append(card.title)
return result
class Preview(ubuntuuitoolkit.UbuntuUIToolkitCustomProxyObjectBase):
"""Autopilot custom proxy object for generic previews."""
| gpl-3.0 | -6,452,512,887,664,993,000 | 38.05283 | 79 | 0.648179 | false |
zitkino/zitkino.cz | zitkino/mongo.py | 1 | 5892 | # -*- coding: utf-8 -*-
"""Base MongoDB models (heavily inspired by Flask-MongoEngine)"""
import itertools
from collections import Mapping, OrderedDict
import mongoengine
from flask import abort
from mongoengine import ValidationError
from mongoengine.base.fields import BaseField
from mongoengine.queryset import MultipleObjectsReturned, DoesNotExist
### Base MongoEngine adapter
def _include_mongoengine(obj):
for module in mongoengine, mongoengine.fields:
for key in module.__all__:
if not hasattr(obj, key):
setattr(obj, key, getattr(module, key))
class MongoEngine(object):
def __init__(self, app=None):
_include_mongoengine(self)
self.Document = Document
self.SaveOverwriteMixin = SaveOverwriteMixin
self.TagsField = TagsField
if app is not None:
self.init_app(app)
def init_app(self, app):
conn_settings = {
'db': app.config.get('MONGODB_DB', None),
'username': app.config.get('MONGODB_USERNAME', None),
'password': app.config.get('MONGODB_PASSWORD', None),
'host': app.config.get('MONGODB_HOST', None),
'port': int(app.config.get('MONGODB_PORT') or 0) or None
}
conn_settings = dict([(k, v) for k, v in conn_settings.items() if v])
# lazy connection
mongoengine.register_connection(
mongoengine.DEFAULT_CONNECTION_NAME,
conn_settings.pop('db', None),
**conn_settings
)
### Custom QuerySet
class QuerySet(mongoengine.queryset.QuerySet):
"""A base queryset with handy extras."""
def get_or_404(self, *args, **kwargs):
try:
return self.get(*args, **kwargs)
except (MultipleObjectsReturned, DoesNotExist, ValidationError):
abort(404)
def first_or_404(self):
obj = self.first()
if obj is None:
abort(404)
return obj
### Custom model base class
class Document(mongoengine.Document):
meta = {'abstract': True,
'queryset_class': QuerySet}
class SaveOverwriteMixin(object):
meta = {'abstract': True}
def _has_field(self, attr):
"""Checks existence of given model attribute. Attribute can
be provided also in form of nested dot or double underscore notation.
"""
try:
self._get_field(attr)
except AttributeError:
return False
return True
def _get_field(self, attr):
"""Returns field object for given model attribute.
Attribute can be provided also in form of nested dot or
double underscore notation.
"""
obj = self.__class__
for part in attr.replace('__', '.').split('.'):
obj = getattr(getattr(obj, 'document_type', obj), part)
return obj
def _get_value(self, attr):
"""Returns value of given model attribute. Attribute can be provided
also in form of nested dot or double underscore notation.
"""
obj = self
for part in attr.replace('__', '.').split('.'):
obj = getattr(obj, part)
return obj
@property
def _unique_values(self):
"""Provides dictionary of unique attributes and their values. Nested
unique attributes are returned in double underscore notation.
"""
fields = {}
for key in self._data.keys():
if self._has_field(key):
field = self._get_field(key)
if field.unique:
fields[key] = self._get_value(key)
for key in (field.unique_with or []):
fields[key.replace('.', '__')] = self._get_value(key)
# else there were changes in model, continue
return fields
def save_overwrite(self, exclude=None, validate=True, clean=True):
"""Inserts or updates, depends on unique fields.
:param exclude: Iterable of field names to be excluded from
inserting/updating (so they'll never be saved
and their existing values are never overwritten).
"""
cls = self.__class__ # model class
if validate:
self.validate(clean=clean)
# get all unique fields
unique_values = self._unique_values
if not len(unique_values):
raise ValidationError('There are no unique constraints.')
# prepare data to set
exclude = frozenset(list(exclude or []) + ['id'])
data = {}
for key, value in self._data.items():
if not self._has_field(key):
continue
if key in exclude:
continue
if value is not None:
value = self._get_field(key).to_mongo(value)
data['set__' + key] = value
# select the object by its unique fields, perform upsert
cls.objects.filter(**unique_values).update_one(upsert=True, **data)
# set id (not very atomic...)
self.id = cls.objects.get(**unique_values).id
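
# Editor's illustrative sketch of SaveOverwriteMixin in use (the Film model
# below is hypothetical, not part of this module):
#
#     class Film(Document, SaveOverwriteMixin):
#         url = mongoengine.StringField(unique=True)
#         title = mongoengine.StringField()
#
#     Film(url='http://example.com/f1', title='F1').save_overwrite()
#     Film(url='http://example.com/f1', title='F1, fixed').save_overwrite()
#     # -> a single document: the second call updates 'title' in place and
#     #    self.id is filled in from the matching record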
### Custom fields
class TagsField(BaseField):
"""Dealing with the fact that MongoDB's dict keys may not
contain "." or "$" characters - storing tags serialized as a list.
"""
def validate(self, value):
if not isinstance(value, dict):
self.error('Only dictionaries may be used in a tag field')
def to_mongo(self, value):
return list(itertools.chain(*value.items()))
def to_python(self, value):
tags = OrderedDict()
if isinstance(value, list):
value = zip(* (2 * [iter(value)]))
elif isinstance(value, Mapping):
value = value.items()
else:
raise TypeError
        for k, v in sorted(value, key=lambda kv: kv[0]):
tags[k] = v
return tags
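
# Editor's note -- round-trip illustration for TagsField (MongoDB keys may
# not contain '.' or '$', hence the flat-list storage):
#
#     to_mongo({'genre': 'drama', 'year': 2013})
#         -> ['genre', 'drama', 'year', 2013]  (follows dict iteration order)
#     to_python(['genre', 'drama', 'year', 2013])
#         -> OrderedDict([('genre', 'drama'), ('year', 2013)])  (key-sorted)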
| mit | 2,080,371,165,480,691,700 | 30.010526 | 77 | 0.583503 | false |
luisera/gmpe-smtk | smtk/parsers/base_database_parser.py | 1 | 2223 | #!/usr/bin/env/python
"""
Abstract base class for a strong motion database reader
"""
import os
import abc
def get_float(xval):
"""
Returns a float value, or none
"""
if xval.strip():
return float(xval)
else:
return None
def get_int(xval):
"""
Returns an int value or none
"""
if xval.strip():
return int(xval)
else:
return None
class SMDatabaseReader(object):
"""
Abstract base class for strong motion database parser
"""
__metaclass__ = abc.ABCMeta
def __init__(self, db_id, db_name, filename):
"""
"""
self.id = db_id
self.name = db_name
self.filename = filename
self.database = None
@abc.abstractmethod
def parse(self):
"""
"""
class SMTimeSeriesReader(object):
"""
Abstract base class for a reader of a ground motion time series
"""
def __init__(self, input_files, folder_name=None, units="cm/s/s"):
"""
"""
__metaclass__ = abc.ABCMeta
self.input_files = []
for fname in input_files:
if folder_name:
filename = os.path.join(folder_name, fname)
if os.path.exists(filename):
self.input_files.append(filename)
else:
if os.path.exists(fname):
self.input_files.append(fname)
self.time_step = None
self.number_steps = None
self.units = units
@abc.abstractmethod
def parse_records(self):
"""
"""
class SMSpectraReader(object):
"""
Abstract Base Class for a reader of a ground motion spectra record
"""
def __init__(self, input_files, folder_name=None):
"""
"""
self.input_files = []
for fname in input_files:
if folder_name:
filename = os.path.join(folder_name, fname)
if os.path.exists(filename):
self.input_files.append(filename)
else:
if os.path.exists(fname):
self.input_files.append(fname)
@abc.abstractmethod
def parse_spectra(self):
"""
"""
| agpl-3.0 | 1,506,132,319,386,094,600 | 22.4 | 70 | 0.529465 | false |
teffalump/fhir | resources/observation.py | 1 | 1467 | from flask import request

from server.common import Resource
from server.resources import SearchRecords
from server.resources import ReadRecord
from server.resources import ValidateRecord
from server.resources import Routing


class OBS_Create(Resource):
    def post(self):
        '''Create interaction'''
        return 'Not implemented', 405


class OBS_Search(Resource):
    def get(self):
        '''Search interaction'''
        action = SearchRecords(endpoint='observation', request=request)
        return action.records


class OBS_Validate(Resource):
    def post(self, log_id=None):
        '''Validate interaction'''
        action = ValidateRecord(endpoint='observation', record=request.data)
        return action.valid


class OBS_Record(Resource):
    def get(self, log_id):
        '''Read interaction'''
        action = ReadRecord(endpoint='observation', log_id=log_id)
        return action.record

    def put(self, log_id):
        '''Update interaction'''
        return 'Not supported', 405

    def delete(self, log_id):
        '''Delete interaction'''
        return 'Not implemented', 405


class OBS_Version(Resource):
    def get(self, log_id, v_id=None):
        '''Vread interaction'''
        return 'Not supported', 405


routing = Routing('Observation')
routing['create'] = OBS_Create
routing['search'] = OBS_Search
routing['validate'] = OBS_Validate
routing['record'] = OBS_Record
routing['version'] = OBS_Version

__all__ = ['routing']
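# --- Added note (assumption, not in the original file). ---
# `Routing` is assumed to act as a name -> resource mapping that a server
# module later registers with Flask-RESTful, roughly:
#
#     api.add_resource(routing['search'], '/Observation')           # hypothetical URLs
#     api.add_resource(routing['record'], '/Observation/<log_id>')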
| gpl-3.0 | 9,101,011,486,581,953,000 | 27.211538 | 76 | 0.670757 | false |
Tinkerforge/brickv | src/brickv/plugin_system/plugins/analog_in/analog_in.py | 1 | 5248 | # -*- coding: utf-8 -*-
"""
Analog In Plugin
Copyright (C) 2011-2012 Olaf Lüke <[email protected]>
Copyright (C) 2014-2016 Matthias Bolte <[email protected]>

analog_in.py: Analog In Plugin Implementation

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""

from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QVBoxLayout, QLabel, QHBoxLayout, QComboBox, QSpinBox, QFrame

from brickv.plugin_system.plugin_base import PluginBase
from brickv.bindings.bricklet_analog_in import BrickletAnalogIn
from brickv.plot_widget import PlotWidget, CurveValueWrapper
from brickv.async_call import async_call
from brickv.callback_emulator import CallbackEmulator
from brickv.utils import format_voltage


class AnalogIn(PluginBase):
    def __init__(self, *args):
        super().__init__(BrickletAnalogIn, *args)

        self.ai = self.device

        # the firmware version of a EEPROM Bricklet can (under common circumstances)
        # not change during the lifetime of an EEPROM Bricklet plugin. therefore,
        # it's okay to make final decisions based on it here
        self.has_range = self.firmware_version >= (2, 0, 1)
        self.has_averaging = self.firmware_version >= (2, 0, 3)

        self.cbe_voltage = CallbackEmulator(self,
                                            self.ai.get_voltage,
                                            None,
                                            self.cb_voltage,
                                            self.increase_error_count)

        self.current_voltage = CurveValueWrapper()  # float, V

        plots = [('Voltage', Qt.red, self.current_voltage, format_voltage)]
        self.plot_widget = PlotWidget('Voltage [V]', plots, y_resolution=0.001)

        layout = QVBoxLayout(self)
        layout.addWidget(self.plot_widget)

        if self.has_range:
            self.combo_range = QComboBox()
            self.combo_range.addItem('Automatic', BrickletAnalogIn.RANGE_AUTOMATIC)

            if self.has_averaging:
                self.combo_range.addItem('0 - 3.30 V', BrickletAnalogIn.RANGE_UP_TO_3V)

            self.combo_range.addItem('0 - 6.05 V', BrickletAnalogIn.RANGE_UP_TO_6V)
            self.combo_range.addItem('0 - 10.32 V', BrickletAnalogIn.RANGE_UP_TO_10V)
            self.combo_range.addItem('0 - 36.30 V', BrickletAnalogIn.RANGE_UP_TO_36V)
            self.combo_range.addItem('0 - 45.00 V', BrickletAnalogIn.RANGE_UP_TO_45V)
            self.combo_range.currentIndexChanged.connect(self.range_changed)

            hlayout = QHBoxLayout()
            hlayout.addWidget(QLabel('Range:'))
            hlayout.addWidget(self.combo_range)
            hlayout.addStretch()

            if self.has_averaging:
                self.spin_average = QSpinBox()
                self.spin_average.setMinimum(0)
                self.spin_average.setMaximum(255)
                self.spin_average.setSingleStep(1)
                self.spin_average.setValue(50)
                self.spin_average.editingFinished.connect(self.spin_average_finished)

                hlayout.addWidget(QLabel('Average Length:'))
                hlayout.addWidget(self.spin_average)

            line = QFrame()
            line.setObjectName("line")
            line.setFrameShape(QFrame.HLine)
            line.setFrameShadow(QFrame.Sunken)

            layout.addWidget(line)
            layout.addLayout(hlayout)

    def get_range_async(self, range_):
        self.combo_range.setCurrentIndex(self.combo_range.findData(range_))

    def get_averaging_async(self, average):
        self.spin_average.setValue(average)

    def start(self):
        if self.has_range:
            async_call(self.ai.get_range, None, self.get_range_async, self.increase_error_count)
        if self.has_averaging:
            async_call(self.ai.get_averaging, None, self.get_averaging_async, self.increase_error_count)

        self.cbe_voltage.set_period(100)

        self.plot_widget.stop = False

    def stop(self):
        self.cbe_voltage.set_period(0)

        self.plot_widget.stop = True

    def destroy(self):
        pass

    @staticmethod
    def has_device_identifier(device_identifier):
        return device_identifier == BrickletAnalogIn.DEVICE_IDENTIFIER

    def cb_voltage(self, voltage):
        self.current_voltage.value = voltage / 1000.0

    def range_changed(self, index):
        if index >= 0 and self.has_range:
            range_ = self.combo_range.itemData(index)
            async_call(self.ai.set_range, range_, None, self.increase_error_count)

    def spin_average_finished(self):
        self.ai.set_averaging(self.spin_average.value())
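# --- Added note (assumption, not part of the original plugin). ---
# Data path: start() arms the CallbackEmulator to poll get_voltage roughly
# every 100 ms (set_period(100)); cb_voltage() converts the reported
# millivolts to volts and pushes the value into the CurveValueWrapper that
# the PlotWidget curve reads from.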
| gpl-2.0 | 8,598,593,545,445,375,000 | 37.580882 | 104 | 0.650276 | false |
hickerson/bbn | fable/fable_sources/libtbx/easy_profile.py | 1 | 1160 | from __future__ import division

class easy_profile(object):
  """ cProfile.Profile easy-to-use wrapper """

  def __init__(self, func, file_name, func_name, line, runs=1):
    """ Profiling of the callable func.

    file_name, func_name, line shall tell where func is defined.
    runs is the number of calls which will be performed
    """
    import cProfile
    self.prof = cProfile.Profile()
    self.func = func
    self.file_name, self.func_name, self.line = file_name, func_name, line
    self.runs = runs

  def time(self, *args, **kwds):
    """ Time spent per-call in self.func(*args, **kwds) """
    for i in xrange(self.runs):
      self.prof.runcall(self.func, *args, **kwds)
    self.prof.create_stats()
    for (file_name, line, func), data in self.prof.stats.iteritems():
      if self.file_name is not None:
        if not file_name.endswith(self.file_name): continue
      if self.func_name is not None:
        if func != self.func_name: continue
      if self.line is not None:
        if line != self.line: continue
      break
    else:
      return None
    calls = data[0]
    cumulative = data[3]
    t = cumulative/calls
    return t
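# --- Illustrative usage (added sketch; `square_sum` is made up). ---
#
#     def square_sum(n):
#       return sum(i * i for i in xrange(n))
#
#     p = easy_profile(square_sum, file_name=None, func_name='square_sum',
#                      line=None, runs=10)
#     print p.time(100000)   # mean seconds per call (Python 2 syntax)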
| mit | 1,924,395,093,234,190,600 | 33.117647 | 74 | 0.634483 | false |
rigdenlab/SIMBAD | simbad/db/tests/test_db.py | 1 | 4044 | """Test functions for simbad.db"""

__author__ = "Adam Simpkin"
__date__ = "19 Jan 2018"

import os
import unittest
import simbad
import simbad.db

try:
    ROOT_DIR = SHARE_DIR = os.environ['SIMBAD_ROOT']
    EXAMPLE_DIR = os.path.join(ROOT_DIR, "test_data")
except KeyError:
    from simbad.command_line import CCP4RootDirectory
    ROOT_DIR = str(CCP4RootDirectory())
    SHARE_DIR = os.path.join(ROOT_DIR, "share", "simbad")
    EXAMPLE_DIR = os.path.join(ROOT_DIR, "examples")


class Test(unittest.TestCase):
    """Unit test"""

    def test_from_dat(self):
        """Test case for simbad.db._from_dat"""
        input_dat = os.path.join(SHARE_DIR, "static", "contaminants", "CHICK", "LYSC_CHICK", "P6122", "2fbb.dat")
        with open(input_dat, "r") as f_in:
            output_str = simbad.db._from_dat(f_in)
        data = output_str.split("\n")[0]
        reference_data = "HEADER    HYDROLASE                               09-DEC-05   2FBB              "
        self.assertEqual(data, reference_data)

    def test_to_dat(self):
        """Test case for simbad.db._to_dat"""
        input_pdb = os.path.join(EXAMPLE_DIR, "toxd", "toxd.pdb")
        output_dat = os.path.join(os.getcwd(), "test.dat")
        with open(input_pdb, "r") as f_in, open(output_dat, "wb") as f_out:
            f_out.write(simbad.db._to_dat(f_in))
        self.assertTrue(simbad.db.is_valid_dat(output_dat))
        with open(input_pdb, "r") as f_in:
            input_str = f_in.read().split("\n")[0]
        output_str = simbad.db.read_dat(output_dat).split("\n")[0]
        self.assertEqual(input_str, output_str)
        os.unlink(output_dat)

    def test_str_to_dat(self):
        """Test case for simbad.db._str_to_dat"""
        input_str = "TEST"
        output_dat = os.path.join(os.getcwd(), "test.dat")
        with open(output_dat, "wb") as f_out:
            f_out.write(simbad.db._str_to_dat(input_str))
        self.assertTrue(simbad.db.is_valid_dat(output_dat))
        output_str = simbad.db.read_dat(output_dat)
        self.assertEqual(input_str, output_str)
        os.unlink(output_dat)

    def test_find_simbad_dat_files(self):
        """Test case for simbad.db.find_simbad_dat_files"""
        test_dat_db = os.path.join(SHARE_DIR, "static", "contaminants", "CHICK", "LYSC_CHICK", "P6122")
        data = os.path.basename(simbad.db.find_simbad_dat_files(test_dat_db)[0])
        reference_data = "2fbb.dat"
        self.assertEqual(data, reference_data)

    def test_convert_pdb_to_dat(self):
        """Test case for simbad.db.convert_pdb_to_dat"""
        input_pdb = os.path.join(EXAMPLE_DIR, "toxd", "toxd.pdb")
        output_dat = os.path.join(os.getcwd(), "test.dat")
        simbad.db.convert_pdb_to_dat(input_pdb, output_dat)
        self.assertTrue(os.path.isfile(output_dat))
        self.assertTrue(simbad.db.is_valid_dat(output_dat))
        os.unlink(output_dat)

    def test_convert_dat_to_pdb(self):
        """Test case for simbad.db.convert_dat_to_pdb"""
        input_dat = os.path.join(SHARE_DIR, "static", "contaminants", "CHICK", "LYSC_CHICK", "P6122", "2fbb.dat")
        output_pdb = os.path.join(os.getcwd(), "test.pdb")
        simbad.db.convert_dat_to_pdb(input_dat, output_pdb)
        self.assertTrue(os.path.isfile(output_pdb))
        os.unlink(output_pdb)

    def test_is_valid_dat(self):
        """Test case for simbad.db.is_valid_dat"""
        input_dat = os.path.join(SHARE_DIR, "static", "contaminants", "CHICK", "LYSC_CHICK", "P6122", "2fbb.dat")
        data = simbad.db.is_valid_dat(input_dat)
        self.assertTrue(data)

    def test_read_dat(self):
        """Test case for simbad.db.read_dat"""
        input_dat = os.path.join(SHARE_DIR, "static", "contaminants", "CHICK", "LYSC_CHICK", "P6122", "2fbb.dat")
        output_str = simbad.db.read_dat(input_dat)
        data = output_str.split("\n")[0]
        reference_data = "HEADER    HYDROLASE                               09-DEC-05   2FBB              "
        self.assertEqual(data, reference_data)
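# --- Added entry point (assumption; not in the original excerpt) so the ---
# --- suite can be run directly with `python test_db.py`.                ---
if __name__ == "__main__":
    unittest.main()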
| bsd-3-clause | -4,571,492,509,324,189,000 | 35.107143 | 113 | 0.59545 | false |
Diti24/python-ivi | ivi/agilent/hprtl.py | 1 | 13485 | """

Python Interchangeable Virtual Instrument Library

Copyright (c) 2012-2016 Alex Forencich

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

"""

import io
import struct

import numpy as np


def parse_hprtl(rtl_file):
    """Convert HP Raster Transfer Language (RTL) to numpy array"""
    color = 1
    width = 0
    byte_width = 0
    height = 0
    compression = 0
    current_row = 0
    plane_cnt = 1
    current_plane = 0
    resolution = 1

    plane_data = None

    in_raster = False

    red = 0
    green = 0
    blue = 0

    color_list = [
        (255, 255, 255),  # white
        (  0,   0,   0)   # black
    ]

    if type(rtl_file) == str:
        rtlf = open(rtl_file, 'rb')
    else:
        rtlf = rtl_file

    while True:
        s = rtlf.read(1)
        if len(s) == 0:
            break
        if s[0] != 0x1b:
            continue

        s = rtlf.read(1)
        if len(s) == 0:
            break
        if s[0] == ord('*'):
            # valid ESC* command
            # read [letter][numbers][letter]
            cmd = rtlf.read(2)
            while True:
                if (cmd[-1] < ord('0') or cmd[-1] > ord('9')) and cmd[-1] != ord('-'):
                    break
                s = rtlf.read(1)
                # ignore null bytes
                if s[0] != 0:
                    cmd += s

            ca = cmd[0]
            cb = cmd[-1]

            #print(cmd)

            if ca == ord('r') and (cb == ord('u') or cb == ord('U')):
                # color command *r#u or *r#U
                color = int(cmd[1:-1])
                if color == -4:
                    # KCMY
                    plane_cnt = 4
                    color_list = [
                        (255, 255, 255),  # white
                        (127, 127, 127),  # white
                        (  0, 255, 255),  # cyan
                        (  0, 127, 127),  # cyan
                        (255,   0, 255),  # magenta
                        (127,   0, 127),  # magenta
                        (  0,   0, 255),  # blue
                        (  0,   0, 127),  # blue
                        (255, 255,   0),  # yellow
                        (127, 127,   0),  # yellow
                        (  0, 255,   0),  # green
                        (  0, 127,   0),  # green
                        (255,   0,   0),  # red
                        (127,   0,   0),  # red
                        ( 63,  63,  63),  # black
                        (  0,   0,   0)   # black
                    ]
                elif color == -3:
                    # CMY
                    plane_cnt = 3
                    color_list = [
                        (255, 255, 255),  # white
                        (  0, 255, 255),  # cyan
                        (255,   0, 255),  # magenta
                        (  0,   0, 255),  # blue
                        (255, 255,   0),  # yellow
                        (  0, 255,   0),  # green
                        (255,   0,   0),  # red
                        (  0,   0,   0)   # black
                    ]
                elif color == 1:
                    # K
                    plane_cnt = 1
                    color_list = [
                        (255, 255, 255),  # white
                        (  0,   0,   0)   # black
                    ]
                elif color == 3:
                    # RGB
                    plane_cnt = 3
                    color_list = [
                        (  0,   0,   0),  # black
                        (255,   0,   0),  # red
                        (  0, 255,   0),  # green
                        (255, 255,   0),  # yellow
                        (  0,   0, 255),  # blue
                        (255,   0, 255),  # magenta
                        (  0, 255, 255),  # cyan
                        (255, 255, 255)   # white
                    ]
                elif color == 4:
                    # indexed RGB
                    plane_cnt = 4
                    color_list = [
                        (  0,   0,   0),  # black
                        (  0,   0,   0),  # black
                        (127,   0,   0),  # red
                        (255,   0,   0),  # red
                        (  0, 127,   0),  # green
                        (  0, 255,   0),  # green
                        (127, 127,   0),  # yellow
                        (255, 255,   0),  # yellow
                        (  0,   0, 127),  # blue
                        (  0,   0, 255),  # blue
                        (127,   0, 127),  # magenta
                        (255,   0, 255),  # magenta
                        (  0, 127, 127),  # cyan
                        (  0, 255, 255),  # cyan
                        (127, 127, 127),  # white
                        (255, 255, 255)   # white
                    ]
                else:
                    raise Exception("Invalid color")
            elif ca == ord('r') and (cb == ord('a') or cb == ord('A')):
                # start raster graphics
                # if we missed the stop of one section, stop on the start of the next
                if in_raster:
                    in_raster = False
                # only grab the first section
                if height == 0:
                    in_raster = True
            elif ca == ord('r') and (cb == ord('c') or cb == ord('C')):
                # end raster graphics
                in_raster = False
            elif ca == ord('r') and (cb == ord('b') or cb == ord('B')):
                # unknown
                pass
            elif ca == ord('r') and (cb == ord('s') or cb == ord('S')):
                # raster width
                width = int(cmd[1:-1])
                byte_width = int((width+7)/8)
            elif ca == ord('r') and (cb == ord('t') or cb == ord('T')):
                # raster height
                #height = int(cmd[1:-1])
                pass
            elif ca == ord('b') and (cb == ord('m') or cb == ord('M')):
                # set compression
                compression = int(cmd[1:-1])
            elif ca == ord('t') and (cb == ord('r') or cb == ord('R')):
                # set resolution
                resolution = int(cmd[1:-1])
            elif ca == ord('v') and (cb == ord('a') or cb == ord('A')):
                # set red component
                red = int(cmd[1:-1])
            elif ca == ord('v') and (cb == ord('b') or cb == ord('B')):
                # set green component
                green = int(cmd[1:-1])
            elif ca == ord('v') and (cb == ord('c') or cb == ord('C')):
                # set blue component
                blue = int(cmd[1:-1])
            elif ca == ord('v') and (cb == ord('i') or cb == ord('I')):
                # assign index
                ind = int(cmd[1:-1])
                color_list[ind] = (red, green, blue)
            elif ca == ord('b') and (cb == ord('v') or cb == ord('V') or cb == ord('w') or cb == ord('W')):
                # image row
                l = int(cmd[1:-1])

                # read row
                d = rtlf.read(l)

                # skip if we are not in a raster section
                if not in_raster:
                    continue

                # set width if not yet set
                # width must be set if compression enabled, otherwise
                # all lines will be the same length
                if width == 0:
                    width = l * 8
                if byte_width == 0:
                    byte_width = l

                # add row if on first plane
                if current_plane == 0:
                    if height == 0:
                        plane_data = np.zeros((10, byte_width, plane_cnt), dtype=np.uint8)
                    height += 1
                    if height >= plane_data.shape[0]:
                        # need to add more rows
                        plane_data = np.append(plane_data, np.zeros((10, byte_width, plane_cnt), dtype=np.uint8), 0)

                if compression == 0 or compression == 1:
                    x = 0
                    for b in d:
                        plane_data[height-1][x][current_plane] = b
                        x += 1
                elif compression == 2:
                    k = 0
                    x = 0
                    while True:
                        if len(d) <= k:
                            break
                        h = d[k]
                        k += 1
                        if h == 128:
                            continue
                        if h < 128:
                            for j in range(h+1):
                                b = d[k]
                                k += 1
                                plane_data[height-1][x][current_plane] = b
                                x += 1
                        if h > 128:
                            b = d[k]
                            k += 1
                            for j in range(257-h):
                                plane_data[height-1][x][current_plane] = b
                                x += 1
                else:
                    raise Exception("Invalid compression")

                # go to next plane, if more than one plane
                if plane_cnt > 0:
                    current_plane += 1
                    if current_plane == plane_cnt or cb == ord('w') or cb == ord('W'):
                        current_plane = 0

    # convert to bits
    plane_data = np.unpackbits(plane_data, axis=1)

    # strip off extra rows
    plane_data = plane_data[0:height, 0:width, :]

    # convert plane data to RGB
    plane_data = np.right_shift(np.packbits(plane_data, axis=2), 8-plane_cnt)

    rgb_data = np.zeros((height, width, 3), dtype=np.uint8)

    for y in range(height):
        for x in range(width):
            rgb_data[y][x] = color_list[plane_data[y][x][0]]

    plane_data = rgb_data

    return plane_data


def generate_bmp(img_data):
    """Generate a BMP format image from a numpy array"""
    bmp = io.BytesIO()

    width = img_data.shape[1]
    height = img_data.shape[0]

    if img_data.shape[2] == 1:
        # monochrome
        bpp = 1
        color_table_entries = 2
    else:
        # rgb
        bpp = 24
        color_table_entries = 0

    row_size = int((bpp*width + 31)/32)*4
    image_size = row_size * height
    header_size = 14+40
    color_table_size = color_table_entries*4
    image_offset = header_size+color_table_size
    file_size = image_offset+image_size

    # bitmap header
    bmp.write(b'BM')
    bmp.write(struct.pack('<L', file_size))     # file size
    bmp.write(struct.pack('<H', 0))             # reserved
    bmp.write(struct.pack('<H', 0))             # reserved
    bmp.write(struct.pack('<L', image_offset))  # offset to bitmap data

    # bitmapinfoheader
    bmp.write(struct.pack('<L', 40))                   # size of header
    bmp.write(struct.pack('<l', width))                # image width
    bmp.write(struct.pack('<l', height))               # image height
    bmp.write(struct.pack('<H', 1))                    # number of color planes
    bmp.write(struct.pack('<H', bpp))                  # bits per pixel
    bmp.write(struct.pack('<L', 0))                    # compression method
    bmp.write(struct.pack('<L', image_size))           # image size
    bmp.write(struct.pack('<L', 1))                    # horizontal resolution
    bmp.write(struct.pack('<L', 1))                    # vertical resolution
    bmp.write(struct.pack('<L', color_table_entries)) # number of colors in palette (0 = 2^n)
    bmp.write(struct.pack('<L', 0))                    # number of important colors in palette (0 = all)

    if img_data.shape[2] == 1:
        # monochrome

        # color table
        bmp.write(struct.pack('<BBBx', 255, 255, 255))  # color 0 red, green, blue
        bmp.write(struct.pack('<BBBx', 0, 0, 0))        # color 1 red, green, blue

        # image data
        plane_data = np.packbits(img_data, axis=1)

        for y in range(plane_data.shape[0]-1, -1, -1):
            for x in range(plane_data.shape[1]):
                bmp.write(struct.pack('<B', plane_data[y][x][0]))
            if plane_data.shape[1] % 4 > 0:
                for x in range(4 - (plane_data.shape[1] % 4)):
                    bmp.write(b'\0')
    else:
        # rgb

        # color table
        # no color table for RGB

        # image data
        for y in range(img_data.shape[0]-1, -1, -1):
            for x in range(img_data.shape[1]):
                bmp.write(struct.pack('<BBB', img_data[y][x][2], img_data[y][x][1], img_data[y][x][0]))
            if (img_data.shape[1]*3) % 4 > 0:
                for x in range(4 - ((img_data.shape[1]*3) % 4)):
                    bmp.write(b'\0')

    return bmp.getvalue()
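# --- Illustrative end-to-end usage (added sketch; file names are hypothetical). ---
#
#     img = parse_hprtl('screenshot.rtl')   # -> HxWx3 uint8 RGB array
#     with open('screenshot.bmp', 'wb') as f:
#         f.write(generate_bmp(img))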
| mit | 4,937,023,965,023,341,000 | 34.486842 | 116 | 0.415647 | false |