| repo_name (string, length 5-92) | path (string, length 4-232) | copies (string, 19 classes) | size (string, length 4-7) | content (string, length 721-1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
endlessm/chromium-browser | third_party/catapult/telemetry/telemetry/timeline/thread.py | 1 | 9786 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import telemetry.timeline.async_slice as async_slice_module
import telemetry.timeline.event_container as event_container
import telemetry.timeline.flow_event as flow_event_module
import telemetry.timeline.sample as sample_module
import telemetry.timeline.slice as slice_module
class Thread(event_container.TimelineEventContainer):
"""A Thread stores all the trace events collected for a particular
thread. We organize the synchronous slices on a thread by "subrows," where
subrow 0 has all the root slices, subrow 1 those nested 1 deep, and so on.
The asynchronous slices are stored in an AsyncSliceGroup object.
"""
def __init__(self, process, tid):
super(Thread, self).__init__('thread %s' % tid, parent=process)
self.tid = tid
self._async_slices = []
self._flow_events = []
self._samples = []
self._toplevel_slices = []
self._all_slices = []
# State only valid during import.
self._open_slices = []
self._newly_added_slices = []
@property
def toplevel_slices(self):
return self._toplevel_slices
@property
def all_slices(self):
return self._all_slices
@property
def samples(self):
return self._samples
@property
def async_slices(self):
return self._async_slices
@property
def open_slice_count(self):
return len(self._open_slices)
def IterChildContainers(self):
return
yield # pylint: disable=unreachable
def IterEventsInThisContainer(self, event_type_predicate, event_predicate):
if event_type_predicate(slice_module.Slice):
for s in self._newly_added_slices:
if event_predicate(s):
yield s
for s in self._all_slices:
if event_predicate(s):
yield s
if event_type_predicate(async_slice_module.AsyncSlice):
for async_slice in self._async_slices:
if event_predicate(async_slice):
yield async_slice
for sub_slice in async_slice.IterEventsInThisContainerRecrusively():
if event_predicate(sub_slice):
yield sub_slice
if event_type_predicate(flow_event_module.FlowEvent):
for flow_event in self._flow_events:
if event_predicate(flow_event):
yield flow_event
if event_type_predicate(sample_module.Sample):
for sample in self._samples:
if event_predicate(sample):
yield sample
def AddSample(self, category, name, timestamp, args=None):
if len(self._samples) and timestamp < self._samples[-1].start:
raise ValueError(
'Samples must be added in increasing timestamp order')
sample = sample_module.Sample(
self, category, name, timestamp, args=args)
self._samples.append(sample)
def AddAsyncSlice(self, async_slice):
self._async_slices.append(async_slice)
def AddFlowEvent(self, flow_event):
self._flow_events.append(flow_event)
def BeginSlice(self, category, name, timestamp, thread_timestamp=None,
args=None):
"""Opens a new slice for the thread.
Calls to BeginSlice and EndSlice must be made with
monotonically non-decreasing timestamps.
* category: Category to which the slice belongs.
* name: Name of the slice to add.
* timestamp: The timestamp of the slice, in milliseconds.
* thread_timestamp: Thread specific clock (scheduled) timestamp of the
slice, in milliseconds.
* args: Arguments associated with the slice.
Returns the newly opened slice.
"""
if len(self._open_slices) > 0 and timestamp < self._open_slices[-1].start:
raise ValueError(
'Slices must be added in increasing timestamp order')
new_slice = slice_module.Slice(self, category, name, timestamp,
thread_timestamp=thread_timestamp,
args=args)
self._open_slices.append(new_slice)
new_slice.did_not_finish = True
self.PushSlice(new_slice)
return new_slice
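# A minimal usage sketch (editor's addition; `thread` is a hypothetical Thread
# instance and timestamps are in milliseconds):
#   s = thread.BeginSlice('toplevel', 'DoWork', 10.0)
#   thread.EndSlice(15.0)       # closes s; s.duration == 5.0
#   thread.FinalizeImport()     # organizes the new slices into subrows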
def EndSlice(self, end_timestamp, end_thread_timestamp=None):
""" Ends the last begun slice in this group and pushes it onto the slice
array.
* end_timestamp: Timestamp when the slice ended in milliseconds
* end_thread_timestamp: Timestamp when the scheduled time of the slice ended
in milliseconds
Returns the completed slice.
"""
if not len(self._open_slices):
raise ValueError(
'EndSlice called without an open slice')
curr_slice = self._open_slices.pop()
if end_timestamp < curr_slice.start:
raise ValueError(
'Slice %s end time is before its start.' % curr_slice.name)
curr_slice.duration = end_timestamp - curr_slice.start
# On Windows, it is possible to have a value for |end_thread_timestamp|
# but not for |curr_slice.thread_start|, because it takes some time to
# initialize the thread time timer.
if curr_slice.thread_start is not None and end_thread_timestamp is not None:
curr_slice.thread_duration = (end_thread_timestamp -
curr_slice.thread_start)
curr_slice.did_not_finish = False
return curr_slice
def PushCompleteSlice(self, category, name, timestamp, duration,
thread_timestamp, thread_duration, args=None):
new_slice = slice_module.Slice(self, category, name, timestamp,
thread_timestamp=thread_timestamp,
args=args)
if duration is None:
new_slice.did_not_finish = True
else:
new_slice.duration = duration
new_slice.thread_duration = thread_duration
self.PushSlice(new_slice)
return new_slice
def PushMarkSlice(
self, category, name, timestamp, thread_timestamp, args=None):
new_slice = slice_module.Slice(self, category, name, timestamp,
thread_timestamp=thread_timestamp,
args=args)
self.PushSlice(new_slice)
return new_slice
def PushSlice(self, new_slice):
self._newly_added_slices.append(new_slice)
return new_slice
def AutoCloseOpenSlices(self, max_timestamp, max_thread_timestamp):
for s in self._newly_added_slices:
if s.did_not_finish:
s.duration = max_timestamp - s.start
assert s.duration >= 0
if s.thread_start is not None:
s.thread_duration = max_thread_timestamp - s.thread_start
assert s.thread_duration >= 0
self._open_slices = []
def IsTimestampValidForBeginOrEnd(self, timestamp):
if not len(self._open_slices):
return True
return timestamp >= self._open_slices[-1].start
def FinalizeImport(self):
self._BuildSliceSubRows()
def _BuildSliceSubRows(self):
"""This function works by walking through slices by start time.
The basic idea here is to insert each slice as deep into the subrow
list as it can go such that every subslice is fully contained by its
parent slice.
Visually, if we start with this:
0: [ a ]
1: [ b ]
2: [c][d]
To place this slice:
[e]
We first check row 2's last item, [d]. [e] won't fit into [d] (they don't
even intersect). So we go to row 1. That gives us [b], and [e] won't fit
into that either. So, we go to row 0 and its last slice, [a]. That can
completely contain [e], so that means we should add [e] as a subslice
of [a]. That puts it on row 1, yielding:
0: [ a ]
1: [ b ][e]
2: [c][d]
If we then get this slice:
[f]
We do the same deepest-to-shallowest walk of the subrows trying to fit
it. This time, it doesn't fit in any open slice. So, we simply append
it to row 0 (a root slice):
0: [ a ] [f]
1: [ b ][e]
"""
def CompareSlices(s1, s2):
if s1.start == s2.start:
# Break ties by having the slice with the greatest
# end timestamp come first.
return cmp(s2.end, s1.end)
return cmp(s1.start, s2.start)
assert len(self._toplevel_slices) == 0
assert len(self._all_slices) == 0
if not len(self._newly_added_slices):
return
self._all_slices.extend(self._newly_added_slices)
sorted_slices = sorted(self._newly_added_slices, cmp=CompareSlices)
root_slice = sorted_slices[0]
self._toplevel_slices.append(root_slice)
for s in sorted_slices[1:]:
if not self._AddSliceIfBounds(root_slice, s):
root_slice = s
self._toplevel_slices.append(root_slice)
self._newly_added_slices = []
def _AddSliceIfBounds(self, root, child):
"""Adds a child slice to a root slice its proper row.
Return False if the child slice is not in the bounds
of the root slice.
Because we know that the start time of child is >= the start time
of all other slices seen so far, we can just check the last slice
of each row for bounding.
"""
# The source trace data is in microseconds but we store it as milliseconds
# in floating-point. Since we can't represent micros as millis perfectly,
# two end=start+duration combos that should be the same will be slightly
# different. Round back to micros to ensure equality below.
child_end_micros = round(child.end * 1000)
root_end_micros = round(root.end * 1000)
if child.start >= root.start and child_end_micros <= root_end_micros:
if len(root.sub_slices) > 0:
if self._AddSliceIfBounds(root.sub_slices[-1], child):
return True
child.parent_slice = root
root.AddSubSlice(child)
return True
return False
| bsd-3-clause | -2,363,025,247,585,198,600 | 35.514925 | 80 | 0.647456 | false |
saguas/jasper_erpnext_report | jasper_erpnext_report/utils/jasper_iter_hooks.py | 1 | 1437 | from __future__ import unicode_literals
__author__ = 'luissaguas'
import frappe
from frappe import _
"""
HOOKS:
jasper_after_sendmail(data, url, file_name, file_path); jasper_before_sendmail(data, file_name, output, url, **kargs);
jasper_after_get_report(file_name, file_output, url, filepath); jasper_before_get_report(data);
jasper_after_list_for_doctype(doctype, docnames, report, lista); jasper_before_list_for_doctype(doctype, docnames, report);
jasper_after_list_for_all(lista); jasper_before_list_for_all();
jasper_scriptlet(JavaScriptlet, ids=None, data=None, cols=None, doctype=None, docname=None);
"""
class JasperHooks:
def __init__(self, hook_name, docname=None, fallback=None):
self.hook_name = hook_name
self.current = 0
self.methods = frappe.get_hooks().get(self.hook_name) or (fallback if fallback is not None else [])
if isinstance(self.methods, dict):
if docname in self.methods.keys():
self.methods = self.methods[docname]
else:
self.methods = fallback if fallback is not None else []
self.methods_len = len(self.methods)
def __iter__(self):
return self
def next(self):
if self.current >= self.methods_len:
raise StopIteration
else:
return self.get_next_jasper_hook_method()
def get_next_jasper_hook_method(self):
if self.methods_len > 0:
curr_method = frappe.get_attr(self.methods[self.current])
self.current += 1
return curr_method
return None
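# A minimal usage sketch (editor's addition; the hook name comes from the
# docstring above, the argument value is hypothetical):
#   for method in JasperHooks('jasper_before_get_report'):
#       if method:
#           method(data)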
| mit | 6,848,915,279,601,715,000 | 30.933333 | 125 | 0.712596 | false |
MySmile/sfchat | apps/csp/tests/test_view.py | 1 | 2904 | import unittest
import json
from django.test.client import Client
class CSPReportTest(unittest.TestCase):
def setUp(self):
self.client = Client()
self.csp_report_valid = {"csp-report":
{"blocked-uri":"self",
"document-uri":"https://sfchat.mysmile.com.ua/",
"original-policy":"default-src https://sfchat.mysmile.com.ua data: https://www.google-analytics.com; script-src https://sfchat.mysmile.com.ua https://www.google-analytics.com; object-src 'none'; style-src https://sfchat.mysmile.com.ua 'unsafe-inline'; img-src https://sfchat.mysmile.com.ua data: https://www.google-analytics.com; media-src 'none'; frame-src 'none'; font-src 'none'; connect-src https://sfchat.mysmile.com.ua https://www.google-analytics.com; report-uri https://sfchat.mysmile.com.ua/csp-report/",
"referrer":"https://sfchat.mysmile.com.ua/chat/5587085c55e430296d487d11",
"violated-directive":"script-src https://sfchat.mysmile.com.ua https://www.google-analytics.com"
}
}
self.csp_report_invalid = {"csp":{}}
def test_view(self):
response = self.client.post('/csp-report/',
json.dumps(self.csp_report_valid),
content_type='application/csp-report',
HTTP_USER_AGENT='Mozilla/5.0',
follow=True,
secure=True)
self.assertEqual(response.status_code, 204)
def test_view_header_failed(self):
lambda_validate_header = lambda: self.client.post('/csp-report/',
json.dumps(self.csp_report_valid),
content_type='application/json',
HTTP_USER_AGENT='Mozilla/5.0',
follow=True,
secure=True)
self.assertRaises(AttributeError, lambda_validate_header)
def test_view_json_failed(self):
lambda_validate_json = lambda: self.client.post('/csp-report/',
json.dumps(self.csp_report_invalid),
content_type='application/csp-report',
HTTP_USER_AGENT='Mozilla/5.0',
follow=True,
secure=True)
self.assertRaises(AttributeError, lambda_validate_json)
| bsd-3-clause | 8,138,226,308,682,014,000 | 62.130435 | 551 | 0.46832 | false |
tbtraltaa/medianshape | medianshape/simplicial/surfgen.py | 1 | 10038 | # encoding: utf-8
'''
2D surface embedded in 3D
-------------------------
'''
from __future__ import absolute_import, print_function
import importlib
import os
import numpy as np
from medianshape.simplicial import pointgen3d, mesh, utils
from medianshape.simplicial.meshgen import meshgen2d
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from medianshape.viz import plot2d, plot3d
from distmesh.plotting import axes_simpplot3d
from medianshape.simplicial.utils import boundary_points
def func(x, y, sign=1):
r'''
:math:`\sin\pi x \cos \pi y`.
'''
return np.sin(np.pi*x)*np.cos(np.pi*y)
def sample_surf(scale, step=0.2):
'''
Returns a tuple X, Y, Z of a surface for an experiment.
'''
x = y = np.arange(-4.0, 4.0, step)
X, Y = np.meshgrid(x, y)
from matplotlib.mlab import bivariate_normal
'''
Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
#Z3 = bivariate_normal(X, Y, 1, 1, -2, -2)
Z = Z2 - Z1
'''
# Ups
ZU1 = bivariate_normal(X,Y, 1.5, 1, 0,-2)
ZU2 = bivariate_normal(X, Y, 1.5, 1.5, 4, 1)
ZU3 = bivariate_normal(X, Y, 1, 1, -4, 1)
#ZU4 = bivariate_normal(X, Y, 1.5, 1.5, -4, -4)
#ZU5 = bivariate_normal(X, Y, 1, 1, 4, -4)
ZU4 = bivariate_normal(X, Y, 4, 0.5, 0, -4)
# Downs
ZD1 = bivariate_normal(X, Y, 1.5, 1, 0, 1)
ZD2 = bivariate_normal(X, Y, 1.5, 1.5, -4, -2)
ZD3 = bivariate_normal(X, Y, 1, 1, 4, -2)
ZD4 = bivariate_normal(X, Y, 4, 1, 0, 4)
Z1 = ZU1 + ZU2 + ZU3 - ZD1 - ZD2 - ZD3 - ZD4
Zmax1 = np.abs(np.amax(Z1))
Z1 = Z1/Zmax1 * scale[2]
# Visualization
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.plot_surface(X, Y, Z1, rstride=1, cstride=1, cmap=cm.winter,
linewidth=0, antialiased=False)
plt.show()
# Ups
ZU1 = bivariate_normal(X,Y, 2, 1, 1,1)
ZU2 = bivariate_normal(X, Y, 3, 1, -2, 4)
ZU3 = bivariate_normal(X, Y, 1.5, 1.5, -2, -2)
#ZU4 = bivariate_normal(X, Y, 1.5, 1.5, -4, -4)
#ZU5 = bivariate_normal(X, Y, 1, 1, 4, -4)
ZU4 = bivariate_normal(X, Y, 2, 2, 3, -4)
# Downs
ZD1 = bivariate_normal(X, Y, 1, 2, 4, 2)
ZD2 = bivariate_normal(X, Y, 1.5, 1.5, -2, 2)
ZD3 = bivariate_normal(X, Y, 1.5, 1.5, 1, -2)
ZD4 = bivariate_normal(X, Y, 4, 1, 0, -4)
Z2 = ZU1 + ZU2 + ZU3 - ZD1 - ZD2 - ZD3 - ZD4
Zmax2 = np.abs(np.amax(Z2))
Z2 = Z2/Zmax2 * scale[2]
X = X * scale[0]/4.0
Y = Y * scale[1]/4.0
# Visualization
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.plot_surface(X, Y, Z2, rstride=1, cstride=1, cmap=cm.winter,
linewidth=0, antialiased=False)
plt.show()
return X, Y, Z1, Z2
def interpolate_surf(points, values, ipoints, method="nearest"):
'''
Used to interpolate a sample surface to a surface in a mesh.
'''
from scipy.interpolate import griddata
return griddata(points, values, ipoints, method=method)
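# A minimal sketch of how this is used (editor's addition; the arrays here are
# made-up sample data):
#   xy = np.random.rand(100, 2)                      # sampled (x, y) locations
#   z = np.sin(xy[:, 0]) * np.cos(xy[:, 1])          # sampled heights
#   zi = interpolate_surf(xy, z, np.array([[0.5, 0.5]]))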
def surfgen_shared_boundary(bbox=[-10,-10,-10, 10,10,10], l=3):
'''
Generates two surfaces in 3D with shared boundary for an experiment.
Writes the two surface as .poly file for tetgen.
'''
# Generating point grids for two surfaces
xmin = bbox[0]
xmax = bbox[3]
ymin = bbox[1]
ymax = bbox[4]
zmin = bbox[2]
zmax = bbox[5]
Xmin, Ymin, Zmin, Xmax, Ymax, Zmax = np.array(bbox)*0.8
X, Y, Z1, Z2 = sample_surf([Xmax, Ymax, zmax*0.3], step=0.8)
Z1 = Z1 + zmax*0.4
Z2 = Z2 - zmax*0.4
#Symmertic surfs
#Z2 = -Z1 - zmax*0.4
'''
# Plotting the two surfaces
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.scatter(X, Y, Z1.reshape(-1,1), color='b')
surf = ax.scatter(X, Y, Z2.reshape(-1,1), color='r')
plt.show()
'''
mesh = meshgen2d([Xmin, Ymin, Xmax, Ymax], l, include_corners=True)
sample_points = np.hstack((X.reshape(-1,1), Y.reshape(-1,1)))
# Interpolating the surface mesh into two different surfaces
# similar to the the sample surfaces generated before
Z1 = interpolate_surf(sample_points, Z1.reshape(-1,1), mesh.points)
Z2 = interpolate_surf(sample_points, Z2.reshape(-1,1), mesh.points)
# Integrating two surfaces
points1 = np.hstack((mesh.points, Z1))
print(points1.shape)
points2 = np.hstack((mesh.points, Z2))
print(points2.shape)
corners = utils.boundary_points(bbox)
midcorners = utils.mid_corners(bbox)
offset1 = len(corners) +len(midcorners) + 1
offset2 = len(corners) + len(midcorners) + len(points1) + 1
points = np.concatenate((corners, midcorners, points1, points2), axis=0)
print(points.shape)
triangles1 = mesh.simplices + offset1
triangles2 = mesh.simplices + offset2
# Adding the indices of the points as the last column of the coordinate list
Xmin_s1 = np.argwhere(points1[:,0]==Xmin)
Xmin_s1_points = np.hstack((points1[Xmin_s1.reshape(-1,)], Xmin_s1))
# Sorting the indices such that the points are in increasing order of their y-component
Xmin_s1 = (Xmin_s1_points[:,3][np.argsort(Xmin_s1_points[:,1])] + offset1).astype(int)
Xmin_s2 = np.argwhere(points2[:,0]==Xmin)
Xmin_s2_points = np.hstack((points2[Xmin_s2.reshape(-1,)], Xmin_s2))
Xmin_s2 = (Xmin_s2_points[:,3][np.argsort(Xmin_s2_points[:,1])] + offset2).astype(int)
Xmax_s1 = np.argwhere(points1[:,0]==Xmax)
Xmax_s1_points = np.hstack((points1[Xmax_s1.reshape(-1,)], Xmax_s1))
Xmax_s1 = (Xmax_s1_points[:,3][np.argsort(Xmax_s1_points[:,1])] + offset1).astype(int)
Xmax_s2 = np.argwhere(points2[:,0]==Xmax)
Xmax_s2_points = np.hstack((points2[Xmax_s2.reshape(-1,)], Xmax_s2))
Xmax_s2 = (Xmax_s2_points[:,3][np.argsort(Xmax_s2_points[:,1])] + offset2).astype(int)
Ymin_s1 = np.argwhere(points1[:,1]==Ymin)
Ymin_s1_points = np.hstack((points1[Ymin_s1.reshape(-1,)], Ymin_s1))
Ymin_s1 = (Ymin_s1_points[:,3][np.argsort(Ymin_s1_points[:,0])] + offset1).astype(int)
Ymin_s2 = np.argwhere(points2[:,1]==Ymin)
Ymin_s2_points = np.hstack((points2[Ymin_s2.reshape(-1,)], Ymin_s2))
Ymin_s2 = (Ymin_s2_points[:,3][np.argsort(Ymin_s2_points[:,0])] + offset2).astype(int)
Ymax_s1 = np.argwhere(points1[:,1]==Ymax)
Ymax_s1_points = np.hstack((points1[Ymax_s1.reshape(-1,)], Ymax_s1))
Ymax_s1 = (Ymax_s1_points[:,3][np.argsort(Ymax_s1_points[:,0])] + offset1).astype(int)
Ymax_s2 = np.argwhere(points2[:,1]==Ymax)
Ymax_s2_points = np.hstack((points2[Ymax_s2.reshape(-1,)], Ymax_s2))
Ymax_s2 = (Ymax_s2_points[:,3][np.argsort(Ymax_s2_points[:,0])] + offset2).astype(int)
for i in range(len(Xmin_s1)-1):
triangles1 = np.vstack((triangles1, [9, Xmin_s1[i], Xmin_s1[i+1]]))
triangles1 = np.vstack((triangles1, [9, Xmin_s1[-1], 12]))
for i in range(len(Xmin_s2)-1):
triangles2 = np.vstack((triangles2, [9, Xmin_s2[i], Xmin_s2[i+1]]))
triangles2 = np.vstack((triangles2, [9, Xmin_s2[-1], 12]))
for i in range(len(Xmax_s1)-1):
triangles1 = np.vstack((triangles1, [10, Xmax_s1[i], Xmax_s1[i+1]]))
triangles1 = np.vstack((triangles1, [10, Xmax_s1[-1], 11]))
for i in range(len(Xmax_s2)-1):
triangles2 = np.vstack((triangles2, [10, Xmax_s2[i], Xmax_s2[i+1]]))
triangles2 = np.vstack((triangles2, [10, Xmax_s2[-1], 11]))
for i in range(len(Ymin_s1)-1):
triangles1 = np.vstack((triangles1, [9, Ymin_s1[i], Ymin_s1[i+1]]))
triangles1 = np.vstack((triangles1, [9, Ymin_s1[-1], 10]))
for i in range(len(Ymin_s2)-1):
triangles2 = np.vstack((triangles2, [9, Ymin_s2[i], Ymin_s2[i+1]]))
triangles2 = np.vstack((triangles2, [9, Ymin_s2[-1], 10]))
for i in range(len(Ymax_s1)-1):
triangles1 = np.vstack((triangles1, [12, Ymax_s1[i], Ymax_s1[i+1]]))
triangles1 = np.vstack((triangles1, [12, Ymax_s1[-1], 11]))
for i in range(len(Ymax_s2)-1):
triangles2 = np.vstack((triangles2, [12, Ymax_s2[i], Ymax_s2[i+1]]))
triangles2 = np.vstack((triangles2, [12, Ymax_s2[-1], 11]))
triangles = np.vstack((triangles1, triangles2))
# Preparing PLC and save it to .poly file for tetgen
with open( os.environ['HOME'] +'/mediansurf.poly', 'w') as f:
f.write("#Part 1 - the node list\n")
f.write("#%d nodes in 3d, no attributes, no boundary marker\n"%points.shape[0])
f.write('%d %d %d %d\n'%(points.shape[0], 3, 0,0))
for i, p in enumerate(points):
f.write("%d %f %f %f\n"%(i+1, p[0], p[1], p[2]))
# Each of the 4 sides has 3 polygons
# Top and bottom
# Each triangle of the two surfaces is a facet
fn = 6 + len(triangles)
f.write("#Part 2 - the facet list.\n")
f.write("#%d facets with boundary markers\n"%fn)
f.write('%d %d\n'%(fn, 1))
f.write("#Boundary facet list.\n")
f.write("%d %d %d\n"%(1, 0, 1))
f.write("4 1 2 3 4\n")
f.write("%d %d %d\n"%(1, 0, 1))
f.write("4 5 6 7 8\n")
#xmin side
f.write("2 0 1\n")
f.write("4 1 4 8 5\n")
f.write("2 9 12\n")
#ymin side
f.write("2 0 1\n")
f.write("4 1 2 6 5\n")
f.write("2 9 10\n")
#xmax side
f.write("2 0 1\n")
f.write("4 2 3 7 6\n")
f.write("2 10 11\n")
#ymax side
f.write("2 0 1\n")
f.write("4 3 4 8 7\n")
f.write("2 11 12\n")
f.write("#Facet list of surface1.\n")
for t in triangles1:
f.write("%d %d %d\n"%(1, 0, -1))
f.write("%d %d %d %d\n"%(3, t[0], t[1], t[2]))
f.write("#Facet list of surface2.\n")
for t in triangles2:
f.write("%d %d %d\n"%(1, 0, -2))
f.write("%d %d %d %d\n"%(3, t[0], t[1], t[2]))
f.write("#Part 3 - the hole list.\n")
f.write('%d\n'%0)
f.write("#Part 4 - the region list.\n")
f.write('%d\n'%0)
if __name__ == "__main__":
surfgen_shared_boundary()
| gpl-3.0 | -5,031,703,206,285,976,000 | 37.1673 | 90 | 0.5789 | false |
alex/kombu-fernet-serializers | setup.py | 1 | 1648 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import kombu_fernet
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
packages = [
'kombu_fernet',
'kombu_fernet.serializers',
]
requires = [
'anyjson>=0.3.3',
'kombu>=3.0.16',
]
with open('README.rst') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='kombu-fernet-serializers',
version=kombu_fernet.__version__,
description='Symmetrically encrypted serializers for Kombu',
long_description=readme,
author='David Gouldin',
author_email='[email protected]',
url='https://github.com/heroku/kombu-fernet-serializers',
packages=packages,
package_data={'': ['LICENSE']},
include_package_data=True,
install_requires=requires,
license=license,
zip_safe=False,
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
),
entry_points={
'kombu.serializers': [
'fernet_json = kombu_fernet.serializers.json:register_args',
'fernet_yaml = kombu_fernet.serializers.yaml:register_args',
'fernet_pickle = kombu_fernet.serializers.pickle:register_args',
'fernet_msgpack = kombu_fernet.serializers.msgpack:register_args',
]
}
)
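# A minimal usage sketch (editor's addition; the environment variable and
# Celery setting names are assumptions based on this package's documented
# conventions, not verified here):
#   export KOMBU_FERNET_KEY="..."   # a base64-encoded Fernet key
#   # then, e.g. in a Celery app:  app.conf.task_serializer = 'fernet_json'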
| mit | 423,783,469,457,479,940 | 25.580645 | 78 | 0.633495 | false |
egnyte/python-egnyte | egnyte/events.py | 1 | 4981 | from __future__ import unicode_literals
import time
from egnyte import base, exc, resources
class Event(base.Resource):
"""
Event data.
Attributes:
* id - event id
* timestamp - date of event in iso8061 format
* action_source - event source: WebUI, SyncEngine, Mobile or PublicAPI
* actor - id of user that generate event
* type - event type. For now we will support: file_system or note
* action - event action. For now we will support: create, delete, move, copy, or rename
* object_detail - url to pub api that provide detail info about object from event like https://domain.egnyte.com/pubapi/v1/fs/Shared
* data - additional data specific for event type and action
Possible fields for 'data' field:
for 'type'='file' and action create or delete
'target_id' - entry id of create/deleted file
'target_path' - path to created/deleted file
for 'type'='file' and action move/copy/rename
'source_path' - source path to moved/copied/renamed file
'target_path' - target path to moved/copied/renamed file
'source_id' - source entry id of moved/copied/renamed file (for move/rename there is only one entry id, so there may be a single field, or the same value in both source_id and target_id)
'target_id' - target entry id of moved/copied/renamed file
for 'type'='folder' and action create or delete
'target_path' - path to created/deleted folder
'folder_id' - folder id of created/deleted folder
for 'type'='folder' and action move/copy/rename
'source_path' - source path to moved/copied/renamed folder
'target_path' - target path to moved/copied/renamed folder
'source_id' - source folder id of moved/copied/renamed folder
'target_id' - target folder id of moved/copied/renamed folder
for 'type'='note' and any available action (create, delete)
'note_id' - id of added/deleted note
"""
_url_template = "pubapi/v1/events/%(id)s"
def user(self):
"""Get a user object based on event attributes"""
return resources.User(self._client, id=self.actor)
class Events(base.Resource):
"""
Events.
Attributes:
* latest_event_id - id of latest event
* oldest_event_id - id of oldest available event
* timestamp - iso8601 timestamp of latest event
"""
_url_template = "pubapi/v1/events/cursor"
_url_template_list = "pubapi/v1/events"
_lazy_attributes = {'latest_event_id', 'oldest_event_id', 'timestamp'}
poll_delay = 1.0
start_id = None
suppress = None
folder = None
types = None
def filter(self, start_id=None, suppress=None, folder=None, types=None):
"""
Returns a filtered view of the events.
Parameters:
* start_id - return all events that occurred after this id from the previous request (the events shouldn't overlap between calls). Defaults to latest_event_id.
* folder (optional) - return events that occurred only in this folder and all of its content (subfolders, files and notes).
* suppress (optional) - filter out events from requesting client or filter out events from requesting client done by requesting user. Allowed values: app, user or none (defaults to no filter)
* types (optional) - return only events of given types.
"""
if types is not None:
types = '|'.join(types)
d = self.__dict__.copy()
d.update(base.filter_none_values(dict(start_id=start_id, suppress=suppress, folder=folder, type=types)))
return self.__class__(**d)
def list(self, start_id, count=None):
"""
Get detailed data about up to 'count' events starting with 'start_id'.
"""
if start_id is None:
start_id = self.start_id
params = base.filter_none_values(dict(id=start_id, suppress=self.suppress, type=self.types, count=count, folder=self.folder))
url = self._client.get_url(self._url_template_list)
json = exc.no_content_ok.check_json_response(self._client.GET(url, params=params))
if json is None:
return ()
else:
return base.ResultList((Event(self._client, **d) for d in json.get('events', ())), json['latest_id'], start_id)
def poll(self, count=None):
"""
List events starting with latest_event_id, if any found, update start_id and return them.
"""
if self.start_id is None:
self.start_id = self.latest_event_id
results = self.list(self.start_id, count)
if results:
last = results[-1]
self.start_id = last.id
self.timestamp = last.timestamp
return results
def __iter__(self):
"""Never ending generator of events. Will block if necessary"""
while True:
results = self.poll()
for x in results:
yield x
if not results:
time.sleep(self.poll_delay)
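# A minimal usage sketch (editor's addition; assumes an authenticated client
# from this package whose `events` attribute returns an Events resource):
#   events = client.events.filter(folder='/Shared', types=['file_system'])
#   for event in events:        # blocks, polling for new events
#       print(event.action, event.object_detail)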
| mit | -2,821,686,280,432,498,700 | 37.022901 | 199 | 0.636418 | false |
tensorport/mnist | mnist.py | 1 | 11743 | # MIT License, see LICENSE
# Copyright (c) 2018 Clusterone Inc.
# Author: Adrian Yi, [email protected]
from __future__ import print_function
import json
import os
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import tensorflow as tf
from clusterone import get_data_path, get_logs_path
from utils import train_dataset, test_dataset
def parse_args():
"""Parse arguments"""
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter,
description='''Train a convolution neural network with MNIST dataset.
For distributed mode, the script will use few environment variables as defaults:
JOB_NAME, TASK_INDEX, PS_HOSTS, and WORKER_HOSTS. These environment variables will be
available on distributed Tensorflow jobs on Clusterone platform by default.
If running this locally, you will need to set these environment variables
or pass them in as arguments (i.e. python mnist.py --job_name worker --task_index 0
--worker_hosts "localhost:2222,localhost:2223" --ps_hosts "localhost:2224").
If these are not set, the script will run in non-distributed (single instance) mode.''')
# Configuration for distributed task
parser.add_argument('--job_name', type=str, default=os.environ.get('JOB_NAME', None), choices=['worker', 'ps'],
help='Task type for the node in the distributed cluster. Worker-0 will be set as master.')
parser.add_argument('--task_index', type=int, default=os.environ.get('TASK_INDEX', 0),
help='Worker task index, should be >= 0. task_index=0 is the chief worker.')
parser.add_argument('--ps_hosts', type=str, default=os.environ.get('PS_HOSTS', ''),
help='Comma-separated list of hostname:port pairs.')
parser.add_argument('--worker_hosts', type=str, default=os.environ.get('WORKER_HOSTS', ''),
help='Comma-separated list of hostname:port pairs.')
# Experiment related parameters
parser.add_argument('--local_data_root', type=str, default=os.path.abspath('./data/'),
help='Path to dataset. This path will be /data on Clusterone.')
parser.add_argument('--local_log_root', type=str, default=os.path.abspath('./logs/'),
help='Path to store logs and checkpoints. This path will be /logs on Clusterone.')
parser.add_argument('--data_subpath', type=str, default='',
help='Which sub-directory the data will sit inside local_data_root (locally) ' +
'or /data/ (on Clusterone).')
# CNN model params
parser.add_argument('--kernel_size', type=int, default=3,
help='Size of the CNN kernels to use.')
parser.add_argument('--hidden_units', type=str, default='32,64',
help='Comma-separated list of integers. Number of hidden units to use in CNN model.')
parser.add_argument('--learning_rate', type=float, default=0.001,
help='Initial learning rate used in Adam optimizer.')
parser.add_argument('--learning_decay', type=float, default=0.0001,
help='Exponential decay rate of the learning rate per step.')
parser.add_argument('--dropout', type=float, default=0.5,
help='Dropout rate used after each convolutional layer.')
parser.add_argument('--batch_size', type=int, default=512,
help='Batch size to use during training and evaluation.')
# Training params
parser.add_argument('--verbosity', type=str, default='INFO', choices=['CRITICAL', 'ERROR', 'WARN', 'INFO', 'DEBUG'],
help='TF logging level. To see intermediate results printed, set this to INFO or DEBUG.')
parser.add_argument('--fashion', action='store_true',
help='Download and use fashion MNIST data instead of the default handwritten digit MNIST.')
parser.add_argument('--parallel_batches', type=int, default=2,
help='Number of parallel batches to prepare in data pipeline.')
parser.add_argument('--max_ckpts', type=int, default=2,
help='Maximum number of checkpoints to keep.')
parser.add_argument('--ckpt_steps', type=int, default=100,
help='How frequently to save a model checkpoint.')
parser.add_argument('--save_summary_steps', type=int, default=10,
help='How frequently to save TensorBoard summaries.')
parser.add_argument('--log_step_count_steps', type=int, default=10,
help='How frequently to log loss & global steps/s.')
parser.add_argument('--eval_secs', type=int, default=60,
help='How frequently to run evaluation step.')
# Parse args
opts = parser.parse_args()
opts.data_dir = get_data_path(dataset_name='*/*',
local_root=opts.local_data_root,
local_repo='',
path=opts.data_subpath)
opts.log_dir = get_logs_path(root=opts.local_log_root)
opts.hidden_units = [int(n) for n in opts.hidden_units.split(',')]
if opts.worker_hosts:
opts.worker_hosts = opts.worker_hosts.split(',')
else:
opts.worker_hosts = []
if opts.ps_hosts:
opts.ps_hosts = opts.ps_hosts.split(',')
else:
opts.ps_hosts = []
return opts
def make_tf_config(opts):
"""Returns TF_CONFIG that can be used to set the environment variable necessary for distributed training"""
if all([opts.job_name is None, not opts.ps_hosts, not opts.worker_hosts]):
return {}
elif any([opts.job_name is None, not opts.ps_hosts, not opts.worker_hosts]):
tf.logging.warn('Distributed setting is incomplete. You must pass job_name, ps_hosts, and worker_hosts.')
if opts.job_name is None:
tf.logging.warn('Expected job_name of worker or ps. Received {}.'.format(opts.job_name))
if not opts.ps_hosts:
tf.logging.warn('Expected ps_hosts, list of hostname:port pairs. Got {}. '.format(opts.ps_hosts) +
'Example: --ps_hosts "localhost:2224" or --ps_hosts "localhost:2224,localhost:2225')
if not opts.worker_hosts:
tf.logging.warn('Expected worker_hosts, list of hostname:port pairs. Got {}. '.format(opts.worker_hosts) +
'Example: --worker_hosts "localhost:2222,localhost:2223"')
tf.logging.warn('Ignoring distributed arguments. Running single mode.')
return {}
tf_config = {
'task': {
'type': opts.job_name,
'index': opts.task_index
},
'cluster': {
'master': [opts.worker_hosts[0]],
'worker': opts.worker_hosts,
'ps': opts.ps_hosts
},
'environment': 'cloud'
}
# Nodes may need to refer to themselves as localhost
local_ip = 'localhost:' + tf_config['cluster'][opts.job_name][opts.task_index].split(':')[1]
tf_config['cluster'][opts.job_name][opts.task_index] = local_ip
if opts.job_name == 'worker' and opts.task_index == 0:
tf_config['task']['type'] = 'master'
tf_config['cluster']['master'][0] = local_ip
return tf_config
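# Illustrative example (editor's addition) of the TF_CONFIG produced for
# `--job_name worker --task_index 0 --worker_hosts localhost:2222 --ps_hosts localhost:2224`:
#   {'task': {'type': 'master', 'index': 0},
#    'cluster': {'master': ['localhost:2222'],
#                'worker': ['localhost:2222'],
#                'ps': ['localhost:2224']},
#    'environment': 'cloud'}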
def get_input_fn(opts, is_train=True):
"""Returns input_fn. is_train=True shuffles and repeats data indefinitely"""
def input_fn():
with tf.device('/cpu:0'):
if is_train:
dataset = train_dataset(opts.data_dir, fashion=opts.fashion)
dataset = dataset.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=5 * opts.batch_size, count=None))
else:
dataset = test_dataset(opts.data_dir, fashion=opts.fashion)
dataset = dataset.batch(batch_size=opts.batch_size)
iterator = dataset.make_one_shot_iterator()
return iterator.get_next()
return input_fn
def cnn_net(input_tensor, opts):
"""Return logits output from CNN net"""
temp = tf.reshape(input_tensor, shape=(-1, 28, 28, 1), name='input_image')
for i, n_units in enumerate(opts.hidden_units):
temp = tf.layers.conv2d(temp, filters=n_units, kernel_size=opts.kernel_size, strides=(2, 2),
activation=tf.nn.relu, name='cnn' + str(i))
temp = tf.layers.dropout(temp, rate=opts.dropout)
# Global average pooling over the spatial dimensions of the NHWC tensor.
temp = tf.reduce_mean(temp, axis=(1, 2), keepdims=False, name='average')
return tf.layers.dense(temp, 10)
def get_model_fn(opts):
"""Return model fn to be used for Estimator class"""
def model_fn(features, labels, mode):
"""Returns EstimatorSpec for different mode (train/eval/predict)"""
logits = cnn_net(features, opts)
pred = tf.cast(tf.argmax(logits, axis=1), tf.int64)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode, predictions={'logits': logits, 'pred': pred})
cent = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy')
loss = tf.reduce_mean(cent, name='loss')
metrics = {'accuracy': tf.metrics.accuracy(labels=labels, predictions=pred, name='accuracy')}
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
optimizer = tf.train.AdamOptimizer(learning_rate=opts.learning_rate)
train_op = optimizer.minimize(loss, global_step=tf.train.get_or_create_global_step())
if mode == tf.estimator.ModeKeys.TRAIN:
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
return model_fn
def main(opts):
"""Main"""
# Create an estimator
config = tf.estimator.RunConfig(
model_dir=opts.log_dir,
save_summary_steps=opts.save_summary_steps,
save_checkpoints_steps=opts.ckpt_steps,
keep_checkpoint_max=opts.max_ckpts,
log_step_count_steps=opts.log_step_count_steps)
estimator = tf.estimator.Estimator(
model_fn=get_model_fn(opts),
config=config)
# Create input fns: training uses the train split; evaluation uses the test split.
train_input_fn = get_input_fn(opts, is_train=True)
eval_input_fn = get_input_fn(opts, is_train=False)
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
max_steps=1e6)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,
steps=None,
start_delay_secs=0,
throttle_secs=opts.eval_secs)
# Train and evaluate!
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
if __name__ == "__main__":
args = parse_args()
tf.logging.set_verbosity(args.verbosity)
tf.logging.debug('=' * 20 + ' Environment Variables ' + '=' * 20)
for k, v in os.environ.items():
tf.logging.debug('{}: {}'.format(k, v))
tf.logging.debug('=' * 20 + ' Arguments ' + '=' * 20)
for k, v in sorted(args.__dict__.items()):
if v is not None:
tf.logging.debug('{}: {}'.format(k, v))
TF_CONFIG = make_tf_config(args)
tf.logging.debug('=' * 20 + ' TF_CONFIG ' + '=' * 20)
tf.logging.debug(TF_CONFIG)
os.environ['TF_CONFIG'] = json.dumps(TF_CONFIG)
tf.logging.info('=' * 20 + ' Train starting ' + '=' * 20)
main(args)
| mit | -7,291,830,831,649,347,000 | 47.127049 | 120 | 0.606489 | false |
globaltoken/globaltoken | test/functional/rpc_rawtransaction.py | 1 | 18199 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- createrawtransaction
- signrawtransactionwithwallet
- sendrawtransaction
- decoderawtransaction
- getrawtransaction
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class multidict(dict):
"""Dictionary that allows duplicate keys.
Constructed with a list of (key, value) tuples. When dumped by the json module,
will output invalid json with repeated keys, eg:
>>> json.dumps(multidict([(1,2),(1,2)]))
'{"1": 2, "1": 2}'
Used to test calls to rpc methods with repeated keys in the json object."""
def __init__(self, x):
dict.__init__(self, x)
self.x = x
def items(self):
return self.x
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"], ["-addresstype=legacy"]]
def setup_network(self, split=False):
super().setup_network()
connect_nodes_bi(self.nodes,0,2)
def run_test(self):
#prepare some coins for multiple *rawtransaction commands
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
# Test getrawtransaction on genesis block coinbase returns an error
block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot'])
# Test `createrawtransaction` required parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
# Test `createrawtransaction` invalid extra parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
# Test `createrawtransaction` invalid `inputs`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{}], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
# Test `createrawtransaction` invalid `outputs`
address = self.nodes[0].getnewaddress()
assert_raises_rpc_error(-3, "Expected type object", self.nodes[0].createrawtransaction, [], 'foo')
assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
assert_raises_rpc_error(-5, "Invalid Bitcoin address", self.nodes[0].createrawtransaction, [], {'foo': 0})
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
# Test `createrawtransaction` invalid `locktime`
assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
# Test `createrawtransaction` invalid `replaceable`
assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
#########################################
# sendrawtransaction with missing input #
#########################################
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx)
# This will raise an exception since there are missing inputs
assert_raises_rpc_error(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])
#####################################
# getrawtransaction with block hash #
#####################################
# make a tx by sending then generate 2 blocks; block1 has the tx in it
tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
block1, block2 = self.nodes[2].generate(2)
self.sync_all()
# We should be able to get the raw transaction by providing the correct block
gottx = self.nodes[0].getrawtransaction(tx, True, block1)
assert_equal(gottx['txid'], tx)
assert_equal(gottx['in_active_chain'], True)
# We should not have the 'in_active_chain' flag when we don't provide a block
gottx = self.nodes[0].getrawtransaction(tx, True)
assert_equal(gottx['txid'], tx)
assert 'in_active_chain' not in gottx
# We should not get the tx if we provide an unrelated block
assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
# An invalid block hash should raise the correct errors
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, True)
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, "foobar")
assert_raises_rpc_error(-8, "parameter 3 must be of length 64", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
# Undo the blocks and check in_active_chain
self.nodes[0].invalidateblock(block1)
gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
assert_equal(gottx['in_active_chain'], False)
self.nodes[0].reconsiderblock(block1)
assert_equal(self.nodes[0].getbestblockhash(), block2)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
# Tests for createmultisig and addmultisigaddress
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # createmultisig can only take public keys
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here.
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 BTC to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
addr3Obj = self.nodes[2].getaddressinfo(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#THIS IS AN INCOMPLETE FEATURE
#NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# 2of2 test for combining transactions
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx2['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
self.log.info(rawTxPartialSigned1)
assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
self.log.info(rawTxPartialSigned2)
assert_equal(rawTxPartialSigned2['complete'], False) #node2 only has one key, can't comp. sign the tx
rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
self.log.info(rawTxComb)
self.nodes[2].sendrawtransaction(rawTxComb)
rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# decoderawtransaction tests
# witness transaction
encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
# non-witness transaction
encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
# getrawtransaction tests
# 1. valid parameters - only supply txid
txHash = rawTx["hash"]
assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
# 5. valid parameters - supply txid and True for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "Flase")
# 7. invalid parameters - supply txid and empty array
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
# 9. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
# 10. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
if __name__ == '__main__':
RawTransactionsTest().main()
| mit | 7,541,542,807,572,354,000 | 54.654434 | 233 | 0.658882 | false |
L337hium/dhquery | dhquery.py | 1 | 7156 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fileencoding=utf-8
# vim:tabstop=2
from random import Random
from optparse import OptionParser
from pydhcplib.dhcp_packet import DhcpPacket
from pydhcplib.dhcp_network import DhcpClient
from pydhcplib.type_hw_addr import hwmac
from pydhcplib.type_ipv4 import ipv4
import socket
import sys
r = Random()
r.seed()
dhcpTypes = {
1: 'DISCOVER',
2: 'OFFER',
3: 'REQUEST',
4: 'DECLINE',
5: 'ACK',
6: 'NACK',
7: 'RELEASE',
8: 'INFORM',
}
nagiosRC = {
0: 'OK',
1: 'Warning',
2: 'Critical',
3: 'Unknown',
}
def nagiosExit(rc,message):
print "%s: %s"%(nagiosRC.get(rc,'???'),message)
sys.exit(rc)
class SilentClient(DhcpClient):
def HandleDhcpAck(self,p):
return
def HandleDhcpNack(self,p):
return
def HandleDhcpOffer(self,p):
return
def HandleDhcpUnknown(self,p):
return
def HandleDhcpDiscover(self,p):
return
def genxid():
decxid = r.randint(0,0xffffffff)
xid = []
for i in xrange(4):
xid.insert(0, decxid & 0xff)
decxid = decxid >> 8
return xid
def genmac():
i = []
for z in xrange(6):
i.append(r.randint(0,255))
return ':'.join(map(lambda x: "%02x"%x,i))
def receivePacket(serverip, serverport, timeout, req):
"""Sends and receives packet from DHCP server"""
client = SilentClient(client_listen_port=67, server_listen_port=serverport)
client.dhcp_socket.settimeout(timeout)
if serverip == '0.0.0.0': req.SetOption('flags',[128, 0])
req_type = req.GetOption('dhcp_message_type')[0]
client.SendDhcpPacketTo(serverip,req)
# Don't wait answer for RELEASE message
if req_type == 7: return None
res = client.GetNextDhcpPacket()
# Try next packet if this packet is the same as packet we've sent.
if res.GetOption('dhcp_message_type')[0] == req_type: res = client.GetNextDhcpPacket()
return res
def preparePacket(xid=None,giaddr='0.0.0.0',chaddr='00:00:00:00:00:00',ciaddr='0.0.0.0',msgtype='discover',required_opts=[]):
req = DhcpPacket()
req.SetOption('op',[1])
req.SetOption('htype',[1])
req.SetOption('hlen',[6])
req.SetOption('hops',[0])
if not xid: xid = genxid()
req.SetOption('xid',xid)
req.SetOption('giaddr',ipv4(giaddr).list())
req.SetOption('chaddr',hwmac(chaddr).list() + [0] * 10)
req.SetOption('ciaddr',ipv4(ciaddr).list())
if msgtype == 'request':
mt = 3
elif msgtype == 'release':
mt = 7
else:
mt = 1
req.SetOption('dhcp_message_type',[mt])
# req.SetOption('parameter_request_list',1)
return req
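# A minimal usage sketch (editor's addition, mirroring what main() does below):
#   req = preparePacket(chaddr=genmac(), msgtype='discover')
#   res = receivePacket(serverip='0.0.0.0', serverport=67, timeout=4, req=req)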
def main():
parser = OptionParser()
parser.add_option("-s","--server", dest="server", default='0.0.0.0', help="DHCP server IP (default %default)")
parser.add_option("-p","--port", type="int", dest="port", default=67, help="DHCP server port (default (%default)")
parser.add_option("-m","--mac","--chaddr", dest="chaddr", help="chaddr: Client's MAC address, default random")
parser.add_option("-c","--ciaddr", dest="ciaddr", default='0.0.0.0', help="ciaddr: Client's desired IP address")
parser.add_option("-g","--giaddr", dest="giaddr", default='0.0.0.0', help="giaddr: Gateway IP address (if any)")
parser.add_option("-t","--type", dest="msgtype", type="choice", choices=["discover","request","release"],
default="discover", help="DHCP message type: discover, request, release (default %default)")
parser.add_option("-w","--timeout", dest="timeout", type="int", default=4, help="UDP timeout (default %default)")
parser.add_option("-r","--require", action="append", type="int", default=[1,3,6,51], dest="required_opts", help="Require options by its number")
parser.add_option("-y","--cycle", action="store_true", dest="docycle", help="Do full cycle: DISCOVERY, REQUEST, RELEASE")
parser.add_option("-n","--cycles", dest="cycles", type="int", default="1", help="Do number of cycles (default %default)")
parser.add_option("-v","--verbose", action="store_true", dest="verbose", help="Verbose operation")
parser.add_option("-q","--quiet", action="store_false", dest="verbose", help="Quiet operation")
parser.add_option("--nagios", action="store_true", dest="nagios", help="Nagios mode of operation")
(opts, args) = parser.parse_args()
if not opts.chaddr:
chaddr = genmac()
else:
chaddr = opts.chaddr
if opts.nagios: opts.verbose = False
verbose = opts.verbose
if opts.docycle:
request_dhcp_message_type = "discover"
else:
request_dhcp_message_type = opts.msgtype
request_ciaddr = opts.ciaddr
serverip = opts.server
cycleno = 1
xid = genxid()
while True:
if opts.cycles > 1 and opts.verbose is not False and (not opts.docycle or request_dhcp_message_type == "discover"):
print "="*100
print "| Cycle %s"%cycleno
print "="*100
req = preparePacket(xid=xid, giaddr=opts.giaddr, chaddr=chaddr, ciaddr=request_ciaddr, msgtype=request_dhcp_message_type, required_opts=opts.required_opts)
if verbose != False:
print "Sending %s [%s] packet to %s"%(request_dhcp_message_type.upper(),chaddr, opts.server)
if verbose == True:
print "-"*100
req.PrintHeaders()
req.PrintOptions()
print "="*100
print "\n"
try:
res = receivePacket(serverip=serverip, serverport=opts.port, timeout=opts.timeout, req=req)
except socket.timeout:
res = None
if opts.nagios: nagiosExit(2,"%s request timed out."%request_dhcp_message_type.upper())
if verbose != False: print "Timed out."
if res:
dhcp_message_type = res.GetOption('dhcp_message_type')[0]
server_identifier = ipv4(res.GetOption('server_identifier'))
chaddr = hwmac(res.GetOption('chaddr')[:6])
yiaddr = ipv4(res.GetOption('yiaddr'))
if opts.nagios and dhcp_message_type not in (2, 5):
nagiosExit(2,"Got %s response for our %s request"%(dhcpTypes.get(dhcp_message_type,'UNKNOWN'),dhcpTypes.get(request_dhcp_message_type,'UNKNOWN')))
if verbose != False:
print "Received %s packet from %s; [%s] was bound to %s"%(dhcpTypes.get(dhcp_message_type,'UNKNOWN'), server_identifier, chaddr, yiaddr )
if verbose == True:
print "-"*100
res.PrintHeaders()
res.PrintOptions()
print "="*100
print "\n"
if opts.docycle:
if dhcp_message_type == 2:
request_dhcp_message_type = 'request'
request_ciaddr = yiaddr.str()
serverip = server_identifier.str()
continue
if dhcp_message_type == 5:
request_dhcp_message_type = 'release'
request_ciaddr = yiaddr.str()
serverip = server_identifier.str()
continue
cycleno += 1
if cycleno > opts.cycles:
if opts.nagios:
if res:
nagiosExit(0,"%s finished successfully: %s. yiaddr: %s, chaddr: %s"%(
request_dhcp_message_type.upper(),
dhcpTypes.get(dhcp_message_type,'UNKNOWN'),
yiaddr,
chaddr,
))
elif opts.docycle:
nagiosExit(0,"Cycle has been finished successfully. Got %s for %s"%(yiaddr,chaddr))
else:
nagiosExit(0,"%s has been finished without the answer"%(request_dhcp_message_type.upper()))
break
if opts.docycle:
request_dhcp_message_type = "discover"
request_ciaddr = opts.ciaddr
serverip = opts.server
xid = genxid()
if not opts.chaddr:
chaddr = genmac()
else:
chaddr = opts.chaddr
if __name__ == '__main__':
main()
| gpl-2.0 | -6,692,521,198,314,802,000 | 29.712446 | 157 | 0.670486 | false |
thica/ORCA-Remote | src/ORCA/utils/Platform/generic/generic_CheckPermissions.py | 1 | 1299 | # -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# from ORCA.utils.Platform import OS_GetUserDataPath
from ORCA.utils.Platform import OS_GetUserDownloadsDataPath
import ORCA.Globals as Globals
def CheckPermissions() -> bool:
""" We assume to have all permissions, as long we do not have OS specific code"""
if Globals.oPathUserDownload is not None:
return Globals.oPathUserDownload.IsWriteable()
else:
return OS_GetUserDownloadsDataPath().IsWriteable()
| gpl-3.0 | -3,520,251,617,699,083,300 | 37.363636 | 85 | 0.714396 | false |
AHJenin/acm-type-problems | Timus/AC/1197-Lonesome_Knight.py | 1 | 1209 | #!/usr/bin/env python3
#
# FILE: 1197-Lonesome_Knight.py
#
# @author: Arafat Hasan Jenin <opendoor.arafat[at]gmail[dot]com>
#
# LINK:
#
# DATE CREATED: 15-06-18 15:40:41 (+06)
# LAST MODIFIED: 15-06-18 17:29:41 (+06)
#
# VERDICT: Accepted
#
# DEVELOPMENT HISTORY:
# Date Version Description
# --------------------------------------------------------------------
# 15-06-18 1.0 Deleted code is debugged code.
#
# _/ _/_/_/_/ _/ _/ _/_/_/ _/ _/
# _/ _/ _/_/ _/ _/ _/_/ _/
# _/ _/_/_/ _/ _/ _/ _/ _/ _/ _/
# _/ _/ _/ _/ _/_/ _/ _/ _/_/
# _/_/ _/_/_/_/ _/ _/ _/_/_/ _/ _/
#
##############################################################################
dx = [2, 1, -1, -2, -2, -1, 1, 2]
dy = [1, 2, 2, 1, -1, -2, -2, -1] # Knight Direction
N = int(input())
while (N > 0):
N -= 1
pos = input()
x = ord(pos[0]) - ord('a')
y = int(pos[1]) - 1
ans = 0
for i in range(0, 8):
new_x = x + dx[i]
new_y = y + dy[i]
if new_x >= 0 and new_x < 8 and new_y >= 0 and new_y < 8:
ans += 1
print(ans)
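# Illustrative check (assumption -- sample cases, not from the judge): a corner
# square such as "a1" has 2 legal knight moves, an edge square such as "a4"
# has 4, and a central square such as "e4" has all 8.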
| mit | 8,361,550,323,378,487,000 | 27.116279 | 78 | 0.322581 | false |
nikofil/invenio-search-ui | invenio_search_ui/views.py | 1 | 2169 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""UI for Invenio-Search."""
from __future__ import absolute_import, print_function
from flask import Blueprint, current_app, json, render_template
blueprint = Blueprint(
'invenio_search_ui',
__name__,
template_folder='templates',
static_folder='static',
)
@blueprint.route("/search")
def search():
"""Search page ui."""
return render_template(current_app.config['SEARCH_UI_SEARCH_TEMPLATE'])
def sorted_options(sort_options):
"""Sort sort options for display.
:param sort_options: A dictionary containing the field name as key and
asc/desc as value.
:returns: A dictionary with sorting options for Invenio-Search-JS.
"""
return [
{
'title': v['title'],
'value': ('-{0}'.format(k)
if v.get('default_order', 'asc') == 'desc' else k),
}
for k, v in
sorted(sort_options.items(), key=lambda x: x[1].get('order', 0))
]
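# Illustrative sketch (hypothetical configuration): fields are ordered by
# 'order' and prefixed with '-' when their default order is descending.
#   sorted_options({'title': {'title': 'Title', 'order': 1},
#                   'date': {'title': 'Date', 'order': 2,
#                            'default_order': 'desc'}})
#   == [{'title': 'Title', 'value': 'title'},
#       {'title': 'Date', 'value': '-date'}]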
@blueprint.app_template_filter('format_sortoptions')
def format_sortoptions(sort_options):
"""Create sort options JSON dump for Invenio-Search-JS."""
return json.dumps({
'options': sorted_options(sort_options)
})
| gpl-2.0 | 7,984,396,494,800,520,000 | 30.897059 | 76 | 0.68142 | false |
dmilith/SublimeText3-dmilith | Packages/pyte/all/pyte/screens.py | 1 | 46773 | # -*- coding: utf-8 -*-
"""
pyte.screens
~~~~~~~~~~~~
This module provides classes for terminal screens, currently
it contains three screens with different features:
* :class:`~pyte.screens.Screen` -- base screen implementation,
which handles all the core escape sequences, recognized by
:class:`~pyte.streams.Stream`.
* If you need a screen to keep track of the changed lines
(which you probably do need) -- use
:class:`~pyte.screens.DiffScreen`.
* If you also want a screen to collect history and allow
pagination -- :class:`pyte.screen.HistoryScreen` is here
for ya ;)
.. note:: It would be nice to split those features into mixin
classes, rather than subclasses, but it's not obvious
how to do so -- feel free to submit a pull request.
:copyright: (c) 2011-2012 by Selectel.
:copyright: (c) 2012-2017 by pyte authors and contributors,
see AUTHORS for details.
:license: LGPL, see LICENSE for more details.
"""
from __future__ import absolute_import, unicode_literals, division
import copy
import json
import math
import os
import sys
import unicodedata
import warnings
from collections import deque, namedtuple, defaultdict
from wcwidth import wcwidth
# There is no standard 2.X backport for ``lru_cache``.
if sys.version_info >= (3, 2):
from functools import lru_cache
wcwidth = lru_cache(maxsize=4096)(wcwidth)
from . import (
charsets as cs,
control as ctrl,
graphics as g,
modes as mo
)
from .compat import map, range, str
from .streams import Stream
#: A container for screen's scroll margins.
Margins = namedtuple("Margins", "top bottom")
#: A container for savepoint, created on :data:`~pyte.escape.DECSC`.
Savepoint = namedtuple("Savepoint", [
"cursor",
"g0_charset",
"g1_charset",
"charset",
"origin",
"wrap"
])
class Char(namedtuple("Char", [
"data",
"fg",
"bg",
"bold",
"italics",
"underscore",
"strikethrough",
"reverse",
])):
"""A single styled on-screen character.
:param str data: unicode character. Invariant: ``len(data) == 1``.
:param str fg: foreground colour. Defaults to ``"default"``.
:param str bg: background colour. Defaults to ``"default"``.
:param bool bold: flag for rendering the character using bold font.
Defaults to ``False``.
:param bool italics: flag for rendering the character using italic font.
Defaults to ``False``.
:param bool underscore: flag for rendering the character underlined.
Defaults to ``False``.
:param bool strikethrough: flag for rendering the character with a
strike-through line. Defaults to ``False``.
:param bool reverse: flag for swapping foreground and background colours
during rendering. Defaults to ``False``.
"""
__slots__ = ()
def __new__(cls, data, fg="default", bg="default", bold=False,
italics=False, underscore=False,
strikethrough=False, reverse=False):
return super(Char, cls).__new__(cls, data, fg, bg, bold, italics,
underscore, strikethrough, reverse)
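# Illustrative sketch (not part of the original module): Char is an immutable
# namedtuple, so styled variants are derived with ``_replace`` -- the same
# pattern the Screen methods below use for cursor attributes.
#   plain = Char("x")
#   bold_red = plain._replace(fg="red", bold=True)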
class Cursor(object):
"""Screen cursor.
:param int x: 0-based horizontal cursor position.
:param int y: 0-based vertical cursor position.
:param pyte.screens.Char attrs: cursor attributes (see
:meth:`~pyte.screens.Screen.select_graphic_rendition`
for details).
"""
__slots__ = ("x", "y", "attrs", "hidden")
def __init__(self, x, y, attrs=Char(" ")):
self.x = x
self.y = y
self.attrs = attrs
self.hidden = False
class StaticDefaultDict(dict):
"""A :func:`dict` with a static default value.
Unlike :func:`collections.defaultdict` this implementation does not
implicitly update the mapping when queried with a missing key.
>>> d = StaticDefaultDict(42)
>>> d["foo"]
42
>>> d
{}
"""
def __init__(self, default):
self.default = default
def __missing__(self, key):
return self.default
class Screen(object):
"""
A screen is an in-memory matrix of characters that represents the
screen display of the terminal. It can be instantiated on its own
and given explicit commands, or it can be attached to a stream and
will respond to events.
.. attribute:: buffer
A sparse ``lines x columns`` :class:`~pyte.screens.Char` matrix.
.. attribute:: dirty
A set of line numbers, which should be re-drawn. The user is responsible
for clearing this set when changes have been applied.
>>> screen = Screen(80, 24)
>>> screen.dirty.clear()
>>> screen.draw("!")
>>> list(screen.dirty)
[0]
.. versionadded:: 0.7.0
.. attribute:: cursor
Reference to the :class:`~pyte.screens.Cursor` object, holding
cursor position and attributes.
.. attribute:: margins
Margins determine which screen lines move during scrolling
(see :meth:`index` and :meth:`reverse_index`). Characters added
outside the scrolling region do not make the screen to scroll.
The value is ``None`` if margins are set to screen boundaries,
otherwise -- a pair 0-based top and bottom line indices.
.. attribute:: charset
Current charset number; can be either ``0`` or ``1`` for `G0`
and `G1` respectively, note that `G0` is activated by default.
.. note::
According to ``ECMA-48`` standard, **lines and columns are
1-indexed**, so, for instance ``ESC [ 10;10 f`` really means
-- move cursor to position (9, 9) in the display matrix.
.. versionchanged:: 0.4.7
.. warning::
:data:`~pyte.modes.LNM` is reset by default, to match VT220
specification. Unfortunately this makes :mod:`pyte` fail
``vttest`` for cursor movement.
.. versionchanged:: 0.4.8
.. warning::
If `DECAWM` mode is set than a cursor will be wrapped to the
**beginning** of the next line, which is the behaviour described
in ``man console_codes``.
.. seealso::
`Standard ECMA-48, Section 6.1.1 \
<http://ecma-international.org/publications/standards/Ecma-048.htm>`_
for a description of the presentational component, implemented
by ``Screen``.
"""
@property
def default_char(self):
"""An empty character with default foreground and background colors."""
reverse = mo.DECSCNM in self.mode
return Char(data=" ", fg="default", bg="default", reverse=reverse)
def __init__(self, columns, lines):
self.savepoints = []
self.columns = columns
self.lines = lines
self.buffer = defaultdict(lambda: StaticDefaultDict(self.default_char))
self.dirty = set()
self.reset()
def __repr__(self):
return ("{0}({1}, {2})".format(self.__class__.__name__,
self.columns, self.lines))
@property
def display(self):
"""A :func:`list` of screen lines as unicode strings."""
def render(line):
is_wide_char = False
for x in range(self.columns):
if is_wide_char: # Skip stub
is_wide_char = False
continue
char = line[x].data
assert sum(map(wcwidth, char[1:])) == 0
is_wide_char = wcwidth(char[0]) == 2
yield char
return ["".join(render(self.buffer[y])) for y in range(self.lines)]
def reset(self):
"""Reset the terminal to its initial state.
* Scrolling margins are reset to screen boundaries.
* Cursor is moved to home location -- ``(0, 0)`` and its
attributes are set to defaults (see :attr:`default_char`).
* Screen is cleared -- each character is reset to
:attr:`default_char`.
* Tabstops are reset to "every eight columns".
* All lines are marked as :attr:`dirty`.
.. note::
Neither VT220 nor VT102 manuals mention that terminal modes
and tabstops should be reset as well, thanks to
:manpage:`xterm` -- we now know that.
"""
self.dirty.update(range(self.lines))
self.buffer.clear()
self.margins = None
self.mode = set([mo.DECAWM, mo.DECTCEM])
self.title = ""
self.icon_name = ""
self.charset = 0
self.g0_charset = cs.LAT1_MAP
self.g1_charset = cs.VT100_MAP
# From ``man terminfo`` -- "... hardware tabs are initially
# set every `n` spaces when the terminal is powered up. Since
# we aim to support VT102 / VT220 and linux -- we use n = 8.
self.tabstops = set(range(8, self.columns, 8))
self.cursor = Cursor(0, 0)
self.cursor_position()
self.saved_columns = None
def resize(self, lines=None, columns=None):
"""Resize the screen to the given size.
If the requested screen size has more lines than the existing
screen, lines will be added at the bottom. If the requested
size has less lines than the existing screen lines will be
clipped at the top of the screen. Similarly, if the existing
screen has less columns than the requested screen, columns will
be added at the right, and if it has more -- columns will be
clipped at the right.
:param int lines: number of lines in the new screen.
:param int columns: number of columns in the new screen.
.. versionchanged:: 0.7.0
If the requested screen size is identical to the current screen
size, the method does nothing.
"""
lines = lines or self.lines
columns = columns or self.columns
if lines == self.lines and columns == self.columns:
return # No changes.
self.dirty.update(range(lines))
if lines < self.lines:
self.save_cursor()
self.cursor_position(0, 0)
self.delete_lines(self.lines - lines) # Drop from the top.
self.restore_cursor()
if columns < self.columns:
for line in self.buffer.values():
for x in range(columns, self.columns):
line.pop(x, None)
self.lines, self.columns = lines, columns
self.set_margins()
def set_margins(self, top=None, bottom=None):
"""Select top and bottom margins for the scrolling region.
:param int top: the smallest line number that is scrolled.
:param int bottom: the biggest line number that is scrolled.
"""
if top is None and bottom is None:
self.margins = None
return
margins = self.margins or Margins(0, self.lines - 1)
# Arguments are 1-based, while :attr:`margins` are zero
# based -- so we have to decrement them by one. We also
# make sure that both of them is bounded by [0, lines - 1].
if top is None:
top = margins.top
else:
top = max(0, min(top - 1, self.lines - 1))
if bottom is None:
bottom = margins.bottom
else:
bottom = max(0, min(bottom - 1, self.lines - 1))
# Even though VT102 and VT220 require DECSTBM to ignore
# regions of width less than 2, some programs (like aptitude
# for example) rely on it. Practicality beats purity.
if bottom - top >= 1:
self.margins = Margins(top, bottom)
# The cursor moves to the home position when the top and
# bottom margins of the scrolling region (DECSTBM) changes.
self.cursor_position()
def set_mode(self, *modes, **kwargs):
"""Set (enable) a given list of modes.
:param list modes: modes to set, where each mode is a constant
from :mod:`pyte.modes`.
"""
# Private mode codes are shifted, to be distinguished from non
# private ones.
if kwargs.get("private"):
modes = [mode << 5 for mode in modes]
if mo.DECSCNM in modes:
self.dirty.update(range(self.lines))
self.mode.update(modes)
# When DECOLM mode is set, the screen is erased and the cursor
# moves to the home position.
if mo.DECCOLM in modes:
self.saved_columns = self.columns
self.resize(columns=132)
self.erase_in_display(2)
self.cursor_position()
# According to VT520 manual, DECOM should also home the cursor.
if mo.DECOM in modes:
self.cursor_position()
# Mark all displayed characters as reverse.
if mo.DECSCNM in modes:
for line in self.buffer.values():
line.default = self.default_char
for x in line:
line[x] = line[x]._replace(reverse=True)
self.select_graphic_rendition(7) # +reverse.
# Make the cursor visible.
if mo.DECTCEM in modes:
self.cursor.hidden = False
def reset_mode(self, *modes, **kwargs):
"""Reset (disable) a given list of modes.
:param list modes: modes to reset -- hopefully, each mode is a
constant from :mod:`pyte.modes`.
"""
# Private mode codes are shifted, to be distinguished from non
# private ones.
if kwargs.get("private"):
modes = [mode << 5 for mode in modes]
if mo.DECSCNM in modes:
self.dirty.update(range(self.lines))
self.mode.difference_update(modes)
# Lines below follow the logic in :meth:`set_mode`.
if mo.DECCOLM in modes:
if self.columns == 132 and self.saved_columns is not None:
self.resize(columns=self.saved_columns)
self.saved_columns = None
self.erase_in_display(2)
self.cursor_position()
if mo.DECOM in modes:
self.cursor_position()
if mo.DECSCNM in modes:
for line in self.buffer.values():
line.default = self.default_char
for x in line:
line[x] = line[x]._replace(reverse=False)
self.select_graphic_rendition(27) # -reverse.
# Hide the cursor.
if mo.DECTCEM in modes:
self.cursor.hidden = True
def define_charset(self, code, mode):
"""Define ``G0`` or ``G1`` charset.
:param str code: character set code, should be a character
from ``"B0UK"``, otherwise ignored.
:param str mode: if ``"("`` ``G0`` charset is defined, if
``")"`` -- we operate on ``G1``.
.. warning:: User-defined charsets are currently not supported.
"""
if code in cs.MAPS:
if mode == "(":
self.g0_charset = cs.MAPS[code]
elif mode == ")":
self.g1_charset = cs.MAPS[code]
def shift_in(self):
"""Select ``G0`` character set."""
self.charset = 0
def shift_out(self):
"""Select ``G1`` character set."""
self.charset = 1
def draw(self, data):
"""Display decoded characters at the current cursor position and
advances the cursor if :data:`~pyte.modes.DECAWM` is set.
:param str data: text to display.
.. versionchanged:: 0.5.0
Character width is taken into account. Specifically, zero-width
and unprintable characters do not affect screen state. Full-width
characters are rendered into two consecutive character containers.
"""
data = data.translate(
self.g1_charset if self.charset else self.g0_charset)
for char in data:
char_width = wcwidth(char)
# If this was the last column in a line and auto wrap mode is
# enabled, move the cursor to the beginning of the next line,
# otherwise replace characters already displayed with newly
# entered.
if self.cursor.x == self.columns:
if mo.DECAWM in self.mode:
self.dirty.add(self.cursor.y)
self.carriage_return()
self.linefeed()
elif char_width > 0:
self.cursor.x -= char_width
# If Insert mode is set, new characters move old characters to
# the right, otherwise terminal is in Replace mode and new
# characters replace old characters at cursor position.
if mo.IRM in self.mode and char_width > 0:
self.insert_characters(char_width)
line = self.buffer[self.cursor.y]
if char_width == 1:
line[self.cursor.x] = self.cursor.attrs._replace(data=char)
elif char_width == 2:
# A two-cell character has a stub slot after it.
line[self.cursor.x] = self.cursor.attrs._replace(data=char)
if self.cursor.x + 1 < self.columns:
line[self.cursor.x + 1] = self.cursor.attrs \
._replace(data="")
elif char_width == 0 and unicodedata.combining(char):
# A zero-cell character is combined with the previous
# character either on this or the preceding line.
if self.cursor.x:
last = line[self.cursor.x - 1]
normalized = unicodedata.normalize("NFC", last.data + char)
line[self.cursor.x - 1] = last._replace(data=normalized)
elif self.cursor.y:
last = self.buffer[self.cursor.y - 1][self.columns - 1]
normalized = unicodedata.normalize("NFC", last.data + char)
self.buffer[self.cursor.y - 1][self.columns - 1] = \
last._replace(data=normalized)
else:
break # Unprintable character or doesn't advance the cursor.
# .. note:: We can't use :meth:`cursor_forward()`, because that
# way, we'll never know when to linefeed.
if char_width > 0:
self.cursor.x = min(self.cursor.x + char_width, self.columns)
self.dirty.add(self.cursor.y)
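# Illustrative note (addition, not upstream documentation): a full-width
# character occupies two cells -- after draw() of a wide glyph at column 0,
# buffer[y][0] holds the glyph and buffer[y][1].data == "" (a stub), which
# keeps the ``display`` property column-aligned.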
def set_title(self, param):
"""Set terminal title.
.. note:: This is an XTerm extension supported by the Linux terminal.
"""
self.title = param
def set_icon_name(self, param):
"""Set icon name.
.. note:: This is an XTerm extension supported by the Linux terminal.
"""
self.icon_name = param
def carriage_return(self):
"""Move the cursor to the beginning of the current line."""
self.cursor.x = 0
def index(self):
"""Move the cursor down one line in the same column. If the
cursor is at the last line, create a new line at the bottom.
"""
top, bottom = self.margins or Margins(0, self.lines - 1)
if self.cursor.y == bottom:
# TODO: mark only the lines within margins?
self.dirty.update(range(self.lines))
for y in range(top, bottom):
self.buffer[y] = self.buffer[y + 1]
self.buffer.pop(bottom, None)
else:
self.cursor_down()
def reverse_index(self):
"""Move the cursor up one line in the same column. If the cursor
is at the first line, create a new line at the top.
"""
top, bottom = self.margins or Margins(0, self.lines - 1)
if self.cursor.y == top:
# TODO: mark only the lines within margins?
self.dirty.update(range(self.lines))
for y in range(bottom, top, -1):
self.buffer[y] = self.buffer[y - 1]
self.buffer.pop(top, None)
else:
self.cursor_up()
def linefeed(self):
"""Perform an index and, if :data:`~pyte.modes.LNM` is set, a
carriage return.
"""
self.index()
if mo.LNM in self.mode:
self.carriage_return()
def tab(self):
"""Move to the next tab space, or the end of the screen if there
aren't anymore left.
"""
for stop in sorted(self.tabstops):
if self.cursor.x < stop:
column = stop
break
else:
column = self.columns - 1
self.cursor.x = column
def backspace(self):
"""Move cursor to the left one or keep it in its position if
it's at the beginning of the line already.
"""
self.cursor_back()
def save_cursor(self):
"""Push the current cursor position onto the stack."""
self.savepoints.append(Savepoint(copy.copy(self.cursor),
self.g0_charset,
self.g1_charset,
self.charset,
mo.DECOM in self.mode,
mo.DECAWM in self.mode))
def restore_cursor(self):
"""Set the current cursor position to whatever cursor is on top
of the stack.
"""
if self.savepoints:
savepoint = self.savepoints.pop()
self.g0_charset = savepoint.g0_charset
self.g1_charset = savepoint.g1_charset
self.charset = savepoint.charset
if savepoint.origin:
self.set_mode(mo.DECOM)
if savepoint.wrap:
self.set_mode(mo.DECAWM)
self.cursor = savepoint.cursor
self.ensure_hbounds()
self.ensure_vbounds(use_margins=True)
else:
# If nothing was saved, the cursor moves to home position;
# origin mode is reset. :todo: DECAWM?
self.reset_mode(mo.DECOM)
self.cursor_position()
def insert_lines(self, count=None):
"""Insert the indicated # of lines at line with cursor. Lines
displayed **at** and below the cursor move down. Lines moved
past the bottom margin are lost.
:param count: number of lines to insert.
"""
count = count or 1
top, bottom = self.margins or Margins(0, self.lines - 1)
# If the cursor is outside the scrolling margins -- do nothing.
if top <= self.cursor.y <= bottom:
self.dirty.update(range(self.cursor.y, self.lines))
for y in range(bottom, self.cursor.y - 1, -1):
if y + count <= bottom and y in self.buffer:
self.buffer[y + count] = self.buffer[y]
self.buffer.pop(y, None)
self.carriage_return()
def delete_lines(self, count=None):
"""Delete the indicated # of lines, starting at line with
cursor. As lines are deleted, lines displayed below cursor
move up. Lines added to bottom of screen have spaces with same
character attributes as last line moved up.
:param int count: number of lines to delete.
"""
count = count or 1
top, bottom = self.margins or Margins(0, self.lines - 1)
# If the cursor is outside the scrolling margins -- do nothing.
if top <= self.cursor.y <= bottom:
self.dirty.update(range(self.cursor.y, self.lines))
for y in range(self.cursor.y, bottom + 1):
if y + count <= bottom:
if y + count in self.buffer:
self.buffer[y] = self.buffer.pop(y + count)
else:
self.buffer.pop(y, None)
self.carriage_return()
def insert_characters(self, count=None):
"""Insert the indicated # of blank characters at the cursor
position. The cursor does not move and remains at the beginning
of the inserted blank characters. Data on the line is shifted
forward.
:param int count: number of characters to insert.
"""
self.dirty.add(self.cursor.y)
count = count or 1
line = self.buffer[self.cursor.y]
for x in range(self.columns, self.cursor.x - 1, -1):
if x + count <= self.columns:
line[x + count] = line[x]
line.pop(x, None)
def delete_characters(self, count=None):
"""Delete the indicated # of characters, starting with the
character at cursor position. When a character is deleted, all
characters to the right of cursor move left. Character attributes
move with the characters.
:param int count: number of characters to delete.
"""
self.dirty.add(self.cursor.y)
count = count or 1
line = self.buffer[self.cursor.y]
for x in range(self.cursor.x, self.columns):
if x + count <= self.columns:
line[x] = line.pop(x + count, self.default_char)
else:
line.pop(x, None)
def erase_characters(self, count=None):
"""Erase the indicated # of characters, starting with the
character at cursor position. Character attributes are set
cursor attributes. The cursor remains in the same position.
:param int count: number of characters to erase.
.. note::
Using cursor attributes for character attributes may seem
illogical, but if you recall that a terminal emulator emulates
a typewriter, it starts to make sense. The only way a typewriter
could erase a character is by typing over it.
"""
self.dirty.add(self.cursor.y)
count = count or 1
line = self.buffer[self.cursor.y]
for x in range(self.cursor.x,
min(self.cursor.x + count, self.columns)):
line[x] = self.cursor.attrs
def erase_in_line(self, how=0, private=False):
"""Erase a line in a specific way.
Character attributes are set to cursor attributes.
:param int how: defines the way the line should be erased in:
* ``0`` -- Erases from cursor to end of line, including cursor
position.
* ``1`` -- Erases from beginning of line to cursor,
including cursor position.
* ``2`` -- Erases complete line.
:param bool private: when ``True`` only characters marked as
erasable are affected **not implemented**.
"""
self.dirty.add(self.cursor.y)
if how == 0:
interval = range(self.cursor.x, self.columns)
elif how == 1:
interval = range(self.cursor.x + 1)
elif how == 2:
interval = range(self.columns)
line = self.buffer[self.cursor.y]
for x in interval:
line[x] = self.cursor.attrs
def erase_in_display(self, how=0, private=False):
"""Erases display in a specific way.
Character attributes are set to cursor attributes.
:param int how: defines the way the line should be erased in:
* ``0`` -- Erases from cursor to end of screen, including
cursor position.
* ``1`` -- Erases from beginning of screen to cursor,
including cursor position.
* ``2`` and ``3`` -- Erases complete display. All lines
are erased and changed to single-width. Cursor does not
move.
:param bool private: when ``True`` only characters marked as
erasable are affected **not implemented**.
"""
if how == 0:
interval = range(self.cursor.y + 1, self.lines)
elif how == 1:
interval = range(self.cursor.y)
elif how == 2 or how == 3:
interval = range(self.lines)
self.dirty.update(interval)
for y in interval:
line = self.buffer[y]
for x in line:
line[x] = self.cursor.attrs
if how == 0 or how == 1:
self.erase_in_line(how)
def set_tab_stop(self):
"""Set a horizontal tab stop at cursor position."""
self.tabstops.add(self.cursor.x)
def clear_tab_stop(self, how=0):
"""Clear a horizontal tab stop.
:param int how: defines a way the tab stop should be cleared:
* ``0`` or nothing -- Clears a horizontal tab stop at cursor
position.
* ``3`` -- Clears all horizontal tab stops.
"""
if how == 0:
# Clears a horizontal tab stop at cursor position, if it's
# present, or silently fails if otherwise.
self.tabstops.discard(self.cursor.x)
elif how == 3:
self.tabstops = set() # Clears all horizontal tab stops.
def ensure_hbounds(self):
"""Ensure the cursor is within horizontal screen bounds."""
self.cursor.x = min(max(0, self.cursor.x), self.columns - 1)
def ensure_vbounds(self, use_margins=None):
"""Ensure the cursor is within vertical screen bounds.
:param bool use_margins: when ``True`` or when
:data:`~pyte.modes.DECOM` is set,
cursor is bounded by the top and bottom
margins, instead of ``[0; lines - 1]``.
"""
if (use_margins or mo.DECOM in self.mode) and self.margins is not None:
top, bottom = self.margins
else:
top, bottom = 0, self.lines - 1
self.cursor.y = min(max(top, self.cursor.y), bottom)
def cursor_up(self, count=None):
"""Move cursor up the indicated # of lines in same column.
Cursor stops at top margin.
:param int count: number of lines to skip.
"""
top, _bottom = self.margins or Margins(0, self.lines - 1)
self.cursor.y = max(self.cursor.y - (count or 1), top)
def cursor_up1(self, count=None):
"""Move cursor up the indicated # of lines to column 1. Cursor
stops at top margin.
:param int count: number of lines to skip.
"""
self.cursor_up(count)
self.carriage_return()
def cursor_down(self, count=None):
"""Move cursor down the indicated # of lines in same column.
Cursor stops at bottom margin.
:param int count: number of lines to skip.
"""
_top, bottom = self.margins or Margins(0, self.lines - 1)
self.cursor.y = min(self.cursor.y + (count or 1), bottom)
def cursor_down1(self, count=None):
"""Move cursor down the indicated # of lines to column 1.
Cursor stops at bottom margin.
:param int count: number of lines to skip.
"""
self.cursor_down(count)
self.carriage_return()
def cursor_back(self, count=None):
"""Move cursor left the indicated # of columns. Cursor stops
at left margin.
:param int count: number of columns to skip.
"""
# Handle the case when we've just drawn in the last column
# and would wrap the line on the next :meth:`draw()` call.
if self.cursor.x == self.columns:
self.cursor.x -= 1
self.cursor.x -= count or 1
self.ensure_hbounds()
def cursor_forward(self, count=None):
"""Move cursor right the indicated # of columns. Cursor stops
at right margin.
:param int count: number of columns to skip.
"""
self.cursor.x += count or 1
self.ensure_hbounds()
def cursor_position(self, line=None, column=None):
"""Set the cursor to a specific `line` and `column`.
Cursor is allowed to move out of the scrolling region only when
:data:`~pyte.modes.DECOM` is reset, otherwise -- the position
doesn't change.
:param int line: line number to move the cursor to.
:param int column: column number to move the cursor to.
"""
column = (column or 1) - 1
line = (line or 1) - 1
# If origin mode (DECOM) is set, line number are relative to
# the top scrolling margin.
if self.margins is not None and mo.DECOM in self.mode:
line += self.margins.top
# Cursor is not allowed to move out of the scrolling region.
if not self.margins.top <= line <= self.margins.bottom:
return
self.cursor.x = column
self.cursor.y = line
self.ensure_hbounds()
self.ensure_vbounds()
def cursor_to_column(self, column=None):
"""Move cursor to a specific column in the current line.
:param int column: column number to move the cursor to.
"""
self.cursor.x = (column or 1) - 1
self.ensure_hbounds()
def cursor_to_line(self, line=None):
"""Move cursor to a specific line in the current column.
:param int line: line number to move the cursor to.
"""
self.cursor.y = (line or 1) - 1
# If origin mode (DECOM) is set, line number are relative to
# the top scrolling margin.
if mo.DECOM in self.mode:
self.cursor.y += self.margins.top
# FIXME: should we also restrict the cursor to the scrolling
# region?
self.ensure_vbounds()
def bell(self, *args):
"""Bell stub -- the actual implementation should probably be
provided by the end-user.
"""
def alignment_display(self):
"""Fills screen with uppercase E's for screen focus and alignment."""
self.dirty.update(range(self.lines))
for y in range(self.lines):
for x in range(self.columns):
self.buffer[y][x] = self.buffer[y][x]._replace(data="E")
def select_graphic_rendition(self, *attrs):
"""Set display attributes.
:param list attrs: a list of display attributes to set.
"""
replace = {}
# Fast path for resetting all attributes.
if not attrs or attrs == (0, ):
self.cursor.attrs = self.default_char
return
else:
attrs = list(reversed(attrs))
while attrs:
attr = attrs.pop()
if attr == 0:
# Reset all attributes.
replace.update(self.default_char._asdict())
elif attr in g.FG_ANSI:
replace["fg"] = g.FG_ANSI[attr]
elif attr in g.BG:
replace["bg"] = g.BG_ANSI[attr]
elif attr in g.TEXT:
attr = g.TEXT[attr]
replace[attr[1:]] = attr.startswith("+")
elif attr in g.FG_AIXTERM:
replace.update(fg=g.FG_AIXTERM[attr], bold=True)
elif attr in g.BG_AIXTERM:
replace.update(bg=g.BG_AIXTERM[attr], bold=True)
elif attr in (g.FG_256, g.BG_256):
key = "fg" if attr == g.FG_256 else "bg"
try:
n = attrs.pop()
if n == 5: # 256.
m = attrs.pop()
replace[key] = g.FG_BG_256[m]
elif n == 2: # 24bit.
# This is somewhat non-standard but is nonetheless
# supported in quite a few terminals. See discussion
# here https://gist.github.com/XVilka/8346728.
replace[key] = "{0:02x}{1:02x}{2:02x}".format(
attrs.pop(), attrs.pop(), attrs.pop())
except IndexError:
pass
self.cursor.attrs = self.cursor.attrs._replace(**replace)
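# Illustrative sketch (addition): after ``screen.select_graphic_rendition(1,
# 31)`` subsequent draw() calls render bold red text, and calling it with 0
# (or with no arguments) resets the cursor attributes to ``default_char``.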
def report_device_attributes(self, mode=0, **kwargs):
"""Report terminal identity.
.. versionadded:: 0.5.0
.. versionchanged:: 0.7.0
If ``private`` keyword argument is set, the method does nothing.
This behaviour is consistent with VT220 manual.
"""
# We only implement "primary" DA which is the only DA request
# VT102 understood, see ``VT102ID`` in ``linux/drivers/tty/vt.c``.
if mode == 0 and not kwargs.get("private"):
self.write_process_input(ctrl.CSI + "?6c")
def report_device_status(self, mode):
"""Report terminal status or cursor position.
:param int mode: if 5 -- terminal status, 6 -- cursor position,
otherwise a noop.
.. versionadded:: 0.5.0
"""
if mode == 5: # Request for terminal status.
self.write_process_input(ctrl.CSI + "0n")
elif mode == 6: # Request for cursor position.
x = self.cursor.x + 1
y = self.cursor.y + 1
# "Origin mode (DECOM) selects line numbering."
if mo.DECOM in self.mode:
y -= self.margins.top
self.write_process_input(ctrl.CSI + "{0};{1}R".format(y, x))
def write_process_input(self, data):
"""Write data to the process running inside the terminal.
By default is a noop.
:param str data: text to write to the process ``stdin``.
.. versionadded:: 0.5.0
"""
def debug(self, *args, **kwargs):
"""Endpoint for unrecognized escape sequences.
By default is a noop.
"""
class DiffScreen(Screen):
"""
A screen subclass, which maintains a set of dirty lines in its
:attr:`dirty` attribute. The end user is responsible for emptying
a set, when a diff is applied.
.. deprecated:: 0.7.0
The functionality contained in this class has been merged into
:class:`~pyte.screens.Screen` and will be removed in 0.8.0.
Please update your code accordingly.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"The functionality of ``DiffScreen` has been merged into "
"``Screen`` and will be removed in 0.8.0. Please update "
"your code accordingly.", DeprecationWarning)
super(DiffScreen, self).__init__(*args, **kwargs)
History = namedtuple("History", "top bottom ratio size position")
class HistoryScreen(Screen):
"""A :class:~`pyte.screens.Screen` subclass, which keeps track
of screen history and allows pagination. This is not linux-specific,
but still useful; see page 462 of VT520 User's Manual.
:param int history: total number of history lines to keep; is split
between top and bottom queues.
:param int ratio: defines how much lines to scroll on :meth:`next_page`
and :meth:`prev_page` calls.
.. attribute:: history
A pair of history queues for top and bottom margins accordingly;
here's the overall screen structure::
[ 1: .......]
[ 2: .......] <- top history
[ 3: .......]
------------
[ 4: .......] s
[ 5: .......] c
[ 6: .......] r
[ 7: .......] e
[ 8: .......] e
[ 9: .......] n
------------
[10: .......]
[11: .......] <- bottom history
[12: .......]
.. note::
Don't forget to update :class:`~pyte.streams.Stream` class with
appropriate escape sequences -- you can use any, since pagination
protocol is not standardized, for example::
Stream.escape["N"] = "next_page"
Stream.escape["P"] = "prev_page"
"""
_wrapped = set(Stream.events)
_wrapped.update(["next_page", "prev_page"])
def __init__(self, columns, lines, history=100, ratio=.5):
self.history = History(deque(maxlen=history),
deque(maxlen=history),
float(ratio),
history,
history)
super(HistoryScreen, self).__init__(columns, lines)
def _make_wrapper(self, event, handler):
def inner(*args, **kwargs):
self.before_event(event)
result = handler(*args, **kwargs)
self.after_event(event)
return result
return inner
def __getattribute__(self, attr):
value = super(HistoryScreen, self).__getattribute__(attr)
if attr in HistoryScreen._wrapped:
return HistoryScreen._make_wrapper(self, attr, value)
else:
return value
def before_event(self, event):
"""Ensure a screen is at the bottom of the history buffer.
:param str event: event name, for example ``"linefeed"``.
"""
if event not in ["prev_page", "next_page"]:
while self.history.position < self.history.size:
self.next_page()
def after_event(self, event):
"""Ensure all lines on a screen have proper width (:attr:`columns`).
Extra characters are truncated, missing characters are filled
with whitespace.
:param str event: event name, for example ``"linefeed"``.
"""
if event in ["prev_page", "next_page"]:
for line in self.buffer.values():
for x in line:
if x > self.columns:
line.pop(x)
# If we're at the bottom of the history buffer and `DECTCEM`
# mode is set -- show the cursor.
self.cursor.hidden = not (
self.history.position == self.history.size and
mo.DECTCEM in self.mode
)
def _reset_history(self):
self.history.top.clear()
self.history.bottom.clear()
self.history = self.history._replace(position=self.history.size)
def reset(self):
"""Overloaded to reset screen history state: history position
is reset to bottom of both queues; queues themselves are
emptied.
"""
super(HistoryScreen, self).reset()
self._reset_history()
def erase_in_display(self, how=0):
"""Overloaded to reset history state."""
super(HistoryScreen, self).erase_in_display(how)
if how == 3:
self._reset_history()
def index(self):
"""Overloaded to update top history with the removed lines."""
top, bottom = self.margins or Margins(0, self.lines - 1)
if self.cursor.y == bottom:
self.history.top.append(self.buffer[top])
super(HistoryScreen, self).index()
def reverse_index(self):
"""Overloaded to update bottom history with the removed lines."""
top, bottom = self.margins or Margins(0, self.lines - 1)
if self.cursor.y == top:
self.history.bottom.append(self.buffer[bottom])
super(HistoryScreen, self).reverse_index()
def prev_page(self):
"""Move the screen page up through the history buffer. Page
size is defined by ``history.ratio``, so for instance
``ratio = .5`` means that half the screen is restored from
history on page switch.
"""
if self.history.position > self.lines and self.history.top:
mid = min(len(self.history.top),
int(math.ceil(self.lines * self.history.ratio)))
self.history.bottom.extendleft(
self.buffer[y]
for y in range(self.lines - 1, self.lines - mid - 1, -1))
self.history = self.history \
._replace(position=self.history.position - mid)
for y in range(self.lines - 1, mid - 1, -1):
self.buffer[y] = self.buffer[y - mid]
for y in range(mid - 1, -1, -1):
self.buffer[y] = self.history.top.pop()
self.dirty = set(range(self.lines))
def next_page(self):
"""Move the screen page down through the history buffer."""
if self.history.position < self.history.size and self.history.bottom:
mid = min(len(self.history.bottom),
int(math.ceil(self.lines * self.history.ratio)))
self.history.top.extend(self.buffer[y] for y in range(mid))
self.history = self.history \
._replace(position=self.history.position + mid)
for y in range(self.lines - mid):
self.buffer[y] = self.buffer[y + mid]
for y in range(self.lines - mid, self.lines):
self.buffer[y] = self.history.bottom.popleft()
self.dirty = set(range(self.lines))
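# Illustrative sketch (addition): with the default ``ratio=.5`` each
# prev_page()/next_page() call shifts the view by half a screen; feeding more
# than ``lines`` rows first fills ``history.top`` so prev_page() has lines to
# restore.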
class DebugEvent(namedtuple("Event", "name args kwargs")):
"""Event dispatched to :class:`~pyte.screens.DebugScreen`.
.. warning::
This is developer API with no backward compatibility guarantees.
Use at your own risk!
"""
@staticmethod
def from_string(line):
return DebugEvent(*json.loads(line))
def __str__(self):
return json.dumps(self)
def __call__(self, screen):
"""Execute this event on a given ``screen``."""
return getattr(screen, self.name)(*self.args, **self.kwargs)
class DebugScreen(object):
r"""A screen which dumps a subset of the received events to a file.
>>> import io
>>> with io.StringIO() as buf:
... stream = Stream(DebugScreen(to=buf))
... stream.feed("\x1b[1;24r\x1b[4l\x1b[24;1H\x1b[0;10m")
... print(buf.getvalue())
...
... # doctest: +NORMALIZE_WHITESPACE
["set_margins", [1, 24], {}]
["reset_mode", [4], {}]
["cursor_position", [24, 1], {}]
["select_graphic_rendition", [0, 10], {}]
:param file to: a file-like object to write debug information to.
:param list only: a list of events you want to debug (empty by
default, which means -- debug all events).
.. warning::
This is developer API with no backward compatibility guarantees.
Use at your own risk!
"""
def __init__(self, to=sys.stderr, only=()):
self.to = to
self.only = only
def only_wrapper(self, attr):
def wrapper(*args, **kwargs):
self.to.write(str(DebugEvent(attr, args, kwargs)))
self.to.write(str(os.linesep))
return wrapper
def __getattribute__(self, attr):
if attr not in Stream.events:
return super(DebugScreen, self).__getattribute__(attr)
elif not self.only or attr in self.only:
return self.only_wrapper(attr)
else:
return lambda *args, **kwargs: None
| mit | 3,143,114,599,980,431,000 | 34.434091 | 79 | 0.564834 | false |
suraj-jayakumar/lstm-rnn-ad | src/testdata/random_data_time_series/generate_data.py | 1 | 1042 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 23 11:15:12 2016
@author: suraj
"""
import random
import numpy as np
import pickle
import matplotlib.pyplot as plt
attachRateList = []
for i in range(3360):
attachRateList.append(random.uniform(4,6))
attachRateList = np.array(attachRateList)
encoded_attach_rate_list = np.fft.fft(attachRateList)
day_number_list = [i%7 for i in range(3360)]
encoded_day_number_list = np.fft.fft(day_number_list)
time_number_list = [i%96 for i in range(3360)]
encoded_time_number_list = np.fft.fft(time_number_list)
final_list_x = np.array([[encoded_day_number_list.real[i],encoded_day_number_list.imag[i],encoded_time_number_list.real[i],encoded_time_number_list.imag[i],encoded_attach_rate_list.real[i],encoded_attach_rate_list.imag[i]] for i in range(3360)])
final_list_y = [ (encoded_attach_rate_list[i].real,encoded_attach_rate_list[i].imag) for i in range(len(encoded_attach_rate_list)) ]
pickle.dump(final_list_x,open('x_att.p','wb'))
pickle.dump(final_list_y,open('y_att.p','wb'))
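# Illustrative sanity check (not part of the original script): the FFT
# encoding is lossless up to floating-point error, so the raw series
# round-trips through numpy's inverse transform.
recovered_attach_rate = np.fft.ifft(encoded_attach_rate_list).real
assert np.allclose(recovered_attach_rate, attachRateList)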
| apache-2.0 | -7,580,691,378,354,332,000 | 23.809524 | 245 | 0.716891 | false |
kalyptorisk/daversy | src/daversy/difflib_ext.py | 1 | 8527 | import re, difflib
def merge_group(list, func, start=True, end=True):
l, r, s = list[0]
first = ['',' class="first"'][start]
last = ['',' class="last"'][end]
if len(list) == 1:
if start and end:
return LINE_FORMAT % func(' class="first last"', l, r)
else:
return LINE_FORMAT % func(first+last, l, r)
html = LINE_FORMAT % func(first, l, r)
for i in range(1, len(list)-1):
l, r, s = list[i]
html += LINE_FORMAT % func('', l, r)
l, r, s = list[-1]
html += LINE_FORMAT % func(last, l, r)
return html
def make_table(table_id, header, fromlines, tolines, context=None, versions=['old', 'new']):
diff = list(difflib._mdiff(fromlines, tolines, context))
if not diff:
return None
same = lambda c, l, r: (c, l[0], r[0], 'l', format_line(l[1]))
add = lambda c, l, r: (c, '', r[0], 'r', format_line(r[1]))
sub = lambda c, l, r: (c, l[0], '', 'l', format_line(l[1]))
html = TABLE_HEADER % tuple([table_id, header] + versions)
for type, start, end in group_types(diff):
if type == 'same':
html += '<tbody>%s</tbody>\n' % \
merge_group(diff[start:end], same)
elif type == 'add':
html += '<tbody class="add">%s</tbody>\n' % \
merge_group(diff[start:end], add)
elif type == 'del':
html += '<tbody class="rem">%s</tbody>\n' % \
merge_group(diff[start:end], sub)
elif type == 'mod':
html += '<tbody class="mod">%s%s</tbody>\n' % \
(merge_group(diff[start:end], sub, end=False),
merge_group(diff[start:end], add, start=False))
elif type == 'skipped':
html += '<tbody class="skipped"><tr><th>...</th><th>...</th><td> </td></tr></tbody>\n'
html += TABLE_FOOTER
return html
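# Illustrative call (argument values are assumptions): render an inline diff
# of two versions of a text file into one <li> entry; wrap one or more such
# entries in HTML_HEADER/HTML_FOOTER below for a standalone page.
#   html = make_table('d0', 'FOO.TABLE', old.splitlines(True),
#                     new.splitlines(True), context=3, versions=['v1', 'v2'])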
def get_type(left, right, status):
if not status:
if left or right:
return 'same'
else:
return 'skipped'
l_num, l_line = left
r_num, r_line = right
if l_num and not r_num:
return 'del'
elif r_num and not l_num:
return 'add'
else:
return 'mod'
def group_types(diff):
items = [get_type(l,r,s) for l,r,s in diff]
group = []
if not items: # should not happen: make_table() returns early for empty diffs
print diff
start, current = 0, items[0]
for i in range(1, len(diff)):
if items[i] != current:
group.append( (current, start, i) )
current = items[i]
start = i
group.append( (current, start, len(diff)) )
return group
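# Illustrative sketch of the grouping, using synthetic _mdiff-style rows of
# (left, right, changed):
#   rows = [((1, 'a'), (1, 'a'), False),        # unchanged line
#           ((2, '\0-b\1'), ('', '\n'), True),  # deleted line
#           ((3, 'c'), (2, 'c'), False)]
#   group_types(rows) -> [('same', 0, 1), ('del', 1, 2), ('same', 2, 3)]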
REPLACE_CHARS = [
('&', '&'),
('<', '<'),
('>', '>'),
(' ', ' '),
('"', '"'),
('\0+', '<span class="ins">'),
('\0-', '<span class="del">'),
('\0^', '<span class="chg">'),
('\1', '</span>')
]
SINGLE_CHANGE = re.compile("^\0[\+\-\^]([^\0]+)\1\n?$")
def format_line(text):
text = text.replace('\n', '')
match = SINGLE_CHANGE.match(text)
if match:
text = match.group(1)
for src, replace in REPLACE_CHARS:
text = text.replace(src, replace)
return text
## the majority of the CSS and markup has been used from Trac
TABLE_HEADER = """
<li class='entry' id='%s'>
<h2>%s</h2>
<table class="inline" summary="Differences" cellspacing="0">
<colgroup><col class="lineno" /><col class="lineno" /><col class="content" /></colgroup>
<thead><th>%s</th><th>%s</th><th> </th></thead>
"""
TABLE_FOOTER = """
</table>
</li>
"""
LINE_FORMAT = "<tr%s><th>%s</th><th>%s</th><td class='%s'><span>%s</span> </td></tr>"
HTML_HEADER = """
<html><head><style type='text/css'>
/* Diff preferences */
#prefs fieldset { margin: 1em .5em .5em; padding: .5em 1em 0 }
/* Diff/change overview */
#overview {
line-height: 130%;
margin-top: 1em;
padding: .5em;
}
#overview dt {
font-weight: bold;
padding-right: .25em;
position: absolute;
left: 0;
text-align: right;
width: 7.75em;
}
#overview dd { margin-left: 8em }
/* Colors for change types */
#chglist .edit, #overview .mod, .diff #legend .mod { background: #fd8 }
#chglist .delete, #overview .rem, .diff #legend .rem { background: #f88 }
#chglist .add, #overview .add, .diff #legend .add { background: #bfb }
#chglist .copy, #overview .cp, .diff #legend .cp { background: #88f }
#chglist .move, #overview .mv, .diff #legend .mv { background: #ccc }
#chglist .unknown { background: #fff }
/* Legend for diff colors */
.diff #legend {
float: left;
font-size: 9px;
line-height: 1em;
margin: 1em 0;
padding: .5em;
}
.diff #legend h3 { display: none; }
.diff #legend dt {
background: #fff;
border: 1px solid #999;
float: left;
margin: .1em .5em .1em 2em;
overflow: hidden;
width: .8em; height: .8em;
}
.diff #legend dl, .diff #legend dd {
display: inline;
float: left;
padding: 0;
margin: 0;
margin-right: .5em;
}
/* Styles for the list of diffs */
.diff ul.entries { clear: both; margin: 0; padding: 0 }
.diff li.entry {
background: #f7f7f7;
border: 1px solid #d7d7d7;
list-style-type: none;
margin: 0 0 2em;
padding: 2px;
position: relative;
}
.diff h2 {
color: #333;
font-size: 14px;
letter-spacing: normal;
margin: 0 auto;
padding: .1em 0 .25em .5em;
}
/* Styles for the actual diff tables (side-by-side and inline) */
.diff table {
border: 1px solid #ddd;
border-spacing: 0;
border-top: 0;
empty-cells: show;
font-size: 12px;
line-height: 130%;
padding: 0;
margin: 0 auto;
width: 100%;
}
.diff table col.lineno { width: 4em }
.diff table th {
border-right: 1px solid #d7d7d7;
border-bottom: 1px solid #998;
font-size: 11px;
}
.diff table thead th {
background: #eee;
border-top: 1px solid #d7d7d7;
color: #999;
padding: 0 .25em;
text-align: center;
white-space: nowrap;
}
.diff table tbody th {
background: #eed;
color: #886;
font-weight: normal;
padding: 0 .5em;
text-align: right;
vertical-align: top;
}
.diff table tbody td {
background: #fff;
font: normal 11px monospace;
overflow: hidden;
padding: 1px 2px;
vertical-align: top;
}
.diff table tbody.skipped td {
background: #f7f7f7;
border: 1px solid #d7d7d7;
}
.diff table td span.del, .diff table td span.ins { text-decoration: none }
.diff table td span.del { color: #600 }
.diff table td span.ins { color: #060 }
/* Styles for the inline diff */
.diff table.inline tbody.mod td.l, .diff table.inline tbody.rem td.l {
background: #fdd;
border-color: #c00;
border-style: solid;
border-width: 0 1px 0 1px;
}
.diff table.inline tbody.mod td.r, .diff table.inline tbody.add td.r {
background: #dfd;
border-color: #0a0;
border-style: solid;
border-width: 0 1px 0 1px;
}
.diff table.inline tbody.mod tr.first td.l,
.diff table.inline tbody.rem tr.first td.l { border-top-width: 1px }
.diff table.inline tbody.mod tr.last td.l,
.diff table.inline tbody.rem tr.last td.l { border-bottom-width: 1px }
.diff table.inline tbody.mod tr.first td.r,
.diff table.inline tbody.add tr.first td.r { border-top-width: 1px }
.diff table.inline tbody.mod tr.last td.r,
.diff table.inline tbody.add tr.last td.r { border-bottom-width: 1px }
.diff table.inline tbody.mod td span.del { background: #e99; color: #000 }
.diff table.inline tbody.mod td span.ins { background: #9e9; color: #000 }
.diff table.inline tbody.mod td span.chg { background: #ee9; color: #000 }
/* Styles for the side-by-side diff */
.diff table.sidebyside colgroup.content { width: 50% }
.diff table.sidebyside tbody.mod td.l { background: #fe9 }
.diff table.sidebyside tbody.mod td.r { background: #fd8 }
.diff table.sidebyside tbody.add td.l { background: #dfd }
.diff table.sidebyside tbody.add td.r { background: #cfc }
.diff table.sidebyside tbody.rem td.l { background: #f88 }
.diff table.sidebyside tbody.rem td.r { background: #faa }
.diff table.sidebyside tbody.mod span.del, .diff table.sidebyside tbody.mod span.ins, .diff table.sidebyside tbody.mod span.chg {
background: #fc0;
}
/* Changeset overview */
#overview .files { padding-top: 2em }
#overview .files ul { margin: 0; padding: 0 }
#overview .files li { list-style-type: none }
#overview .files li .comment { display: none }
#overview .files li div {
border: 1px solid #999;
float: left;
margin: .2em .5em 0 0;
overflow: hidden;
width: .8em; height: .8em;
}
#overview div.add div, #overview div.cp div, #overview div.mv div {
border: 0;
margin: 0;
float: right;
width: .35em;
}
span.ver {font: normal 11px monospace;}
</style></head><body>
"""
HTML_FOOTER = """
</body>
</html>
"""
| gpl-2.0 | -9,172,895,471,794,657,000 | 26.156051 | 129 | 0.606661 | false |
sugarlabs/sugar-toolkit-gtk3 | src/sugar3/graphics/radiotoolbutton.py | 1 | 7756 | # Copyright (C) 2007, Red Hat, Inc.
# Copyright (C) 2007-2008, One Laptop Per Child
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
'''
Provides a RadioToolButton class, similar to a "push" button.
A group of RadioToolButtons can be set, so that only one can be
selected at a time. When a button is clicked, it depresses and
is shaded darker.
It is also possible to set a tooltip to be dispalyed when the
user scrolls over it with their cursor as well as an accelerator
keyboard shortcut.
Example:
.. literalinclude:: ../examples/radiotoolbutton.py
'''
from gi.repository import Gtk
from gi.repository import GObject
from sugar3.graphics.icon import Icon
from sugar3.graphics.palette import Palette, ToolInvoker
from sugar3.graphics import toolbutton
class RadioToolButton(Gtk.RadioToolButton):
'''
The RadioToolButton class manages a Gtk.RadioToolButton styled for
Sugar.
Args:
icon_name (string): name of icon to be used.
Keyword Args:
accelerator (string): keyboard shortcut to be used to
activate this button.
tooltip (string): tooltip to be displayed when user hovers
over button.
xo_color (sugar3.graphics.xocolor.XoColor): XoColor of button.
hide_tooltip_on_click (bool): Whether or not the tooltip
is hidden when user clicks on button.
'''
__gtype_name__ = 'SugarRadioToolButton'
def __init__(self, icon_name=None, **kwargs):
self._accelerator = None
self._tooltip = None
self._xo_color = None
self._hide_tooltip_on_click = True
self._palette_invoker = ToolInvoker()
GObject.GObject.__init__(self, **kwargs)
self._palette_invoker.attach_tool(self)
if icon_name:
self.set_icon_name(icon_name)
# HACK: stop Gtk from adding a label and expanding the size of
# the button. This happens when set_icon_widget is called
# if label_widget is None
self.props.label_widget = Gtk.Box()
self.connect('destroy', self.__destroy_cb)
def __destroy_cb(self, icon):
if self._palette_invoker is not None:
self._palette_invoker.detach()
def set_tooltip(self, tooltip):
'''
Set the tooltip.
Args:
tooltip (string): tooltip to be set.
'''
if self.palette is None or self._tooltip is None:
self.palette = Palette(tooltip)
elif self.palette is not None:
self.palette.set_primary_text(tooltip)
self._tooltip = tooltip
# Set label, shows up when toolbar overflows
Gtk.RadioToolButton.set_label(self, tooltip)
def get_tooltip(self):
'''
Return the tooltip.
'''
return self._tooltip
tooltip = GObject.Property(type=str, setter=set_tooltip,
getter=get_tooltip)
def set_accelerator(self, accelerator):
'''
Set keyboard shortcut that activates this button.
Args:
accelerator (string): accelerator to be set. Should be in
form <modifier>Letter.
'''
self._accelerator = accelerator
toolbutton.setup_accelerator(self)
def get_accelerator(self):
'''
Return accelerator string.
'''
return self._accelerator
accelerator = GObject.Property(type=str, setter=set_accelerator,
getter=get_accelerator)
def set_icon_name(self, icon_name):
'''
Set name of icon.
Args:
icon_name (string): name of icon
'''
icon = Icon(icon_name=icon_name,
xo_color=self._xo_color)
self.set_icon_widget(icon)
icon.show()
def get_icon_name(self):
'''
Return icon name, or None if there is no icon name.
'''
if self.props.icon_widget is not None:
return self.props.icon_widget.props.icon_name
else:
return None
icon_name = GObject.Property(type=str, setter=set_icon_name,
getter=get_icon_name)
def set_xo_color(self, xo_color):
'''
Set XoColor of button icon.
Args:
xo_color (sugar3.graphics.xocolor.XoColor): xocolor to be set.
'''
if self._xo_color != xo_color:
self._xo_color = xo_color
if self.props.icon_widget is not None:
self.props.icon_widget.props.xo_color = xo_color
def get_xo_color(self):
'''
Return xocolor.
'''
return self._xo_color
xo_color = GObject.Property(type=object, setter=set_xo_color,
getter=get_xo_color)
def create_palette(self):
return None
def get_palette(self):
return self._palette_invoker.palette
def set_palette(self, palette):
self._palette_invoker.palette = palette
palette = GObject.Property(
type=object, setter=set_palette, getter=get_palette)
def get_palette_invoker(self):
return self._palette_invoker
def set_palette_invoker(self, palette_invoker):
self._palette_invoker.detach()
self._palette_invoker = palette_invoker
palette_invoker = GObject.Property(
type=object, setter=set_palette_invoker, getter=get_palette_invoker)
def do_draw(self, cr):
'''
Implementation method for drawing the button.
'''
if self.palette and self.palette.is_up():
allocation = self.get_allocation()
            # draw a black background, as the engine has done before
cr.set_source_rgb(0, 0, 0)
cr.rectangle(0, 0, allocation.width, allocation.height)
cr.paint()
Gtk.RadioToolButton.do_draw(self, cr)
if self.palette and self.palette.is_up():
invoker = self.palette.props.invoker
invoker.draw_rectangle(cr, self.palette)
return False
def get_hide_tooltip_on_click(self):
'''
Return True if the tooltip is hidden when a user
clicks on the button, otherwise return False.
'''
return self._hide_tooltip_on_click
def set_hide_tooltip_on_click(self, hide_tooltip_on_click):
'''
Set whether or not the tooltip is hidden when a user
clicks on the button.
Args:
hide_tooltip_on_click (bool): True if the tooltip is
hidden on click, and False otherwise.
'''
if self._hide_tooltip_on_click != hide_tooltip_on_click:
self._hide_tooltip_on_click = hide_tooltip_on_click
hide_tooltip_on_click = GObject.Property(
type=bool, default=True, getter=get_hide_tooltip_on_click,
setter=set_hide_tooltip_on_click)
def do_clicked(self):
'''
Implementation method for hiding the tooltip when
the button is clicked.
'''
if self._hide_tooltip_on_click and self.palette:
self.palette.popdown(True)
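# A minimal usage sketch, not part of the original module: it assumes a
# running Gtk main loop, an existing toolbar, and hypothetical icon names.
# Passing group=... ties the buttons together so only one can be active.
#
#   view_a = RadioToolButton(icon_name='view-radial', tooltip='Radial view')
#   view_b = RadioToolButton(icon_name='view-list', tooltip='List view',
#                            group=view_a)
#   toolbar.insert(view_a, -1)
#   toolbar.insert(view_b, -1)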
| lgpl-2.1 | 3,167,136,973,559,909,400 | 29.777778 | 76 | 0.618747 | false |
hackthemarket/pystrat | sim.py | 1 | 10697 | # simple trading strategy simulator
import pandas as pd
from pandas.tools.plotting import autocorrelation_plot
from pandas.tools.plotting import scatter_matrix
import numpy as np
from scipy import stats
import sklearn
from sklearn import preprocessing as pp
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import interactive
interactive(True)
import sys
import time
import logging as log
log.basicConfig(level=log.DEBUG)
import glob
import os.path
import pickle
import random
import pdb
pd.set_option('display.width',500)
# define constant friction function
DefaultBPS = 10
def FrictionInBps(U, cfg, kvargs):
""" default FrictionInBps function just returns default,
but the interface receives all strategy info after
strategy is run, so one can create more realistic
impact models """
return DefaultBPS
""" default simulator cfg dictionary.
default keys/values:
FrictionInBps - function that takes same args as strategy.
by default, returns DefaultBps.
InitBal - in $s
Reinvest - should we reinvest our winnings or constantly assume we have InitBal?
Verbose
"""
DEF_SIM_CFG= { 'FrictionInBps': FrictionInBps,
'Verbose' : True,
'InitBal' : 1e7,
'Reinvest' : True }
# columns in prepped univ
SIM_COLS = ["Sym","Product","Instrument",
"Multiplier","Expiry","Strike",
"Open","High","Low","Close","Volume"]
SIM_COLS_OUT = ["Prev_Weight", "Weight", "Prev_Qty", "Qty",
"Trade_Qty", "Trade_Fric", "PNL", "NET_PNL"]
SIM_COL_BALS =[ "NAV","Friction","PNL","NET_PNL", "Longs","Shorts",
"Long_Dlrs","Short_Dlrs","Num_Trades","Turnover","NET_Return"]
def squarem( df, sym='Sym', min_pct=.9 ) :
    # squarem solves the common problem in which you have a large table of
# data grouped by symbols, some of which have missing data. You want to
# 'square' the data such that any symbol which is missing 'too much' data
# is expunged and the remaining data is filled appropriately, leaving you
# with a dataset which has the same # of observations for each symbol.
#
bysyms = df.groupby(sym).size()
idx = df.index.unique()
onumsyms = len(bysyms)
    minlen = int(round(len(idx) * min_pct))
keep = bysyms[bysyms > minlen]
u = df[ df[sym].isin(keep.index) ]
numsyms = len(keep)
    log.info('Got rid of %d/%d symbols', onumsyms - numsyms, onumsyms)
u.replace(0,np.nan,inplace=True)
u.replace([np.inf, -np.inf], np.nan,inplace=True)
u.sort_index(inplace=True)
uidx = u.index.unique()
# groupby and reindex magic
z = u.groupby(sym).apply(
lambda x: x.reindex(uidx).ffill()).reset_index(0,drop=True)
# badz = z[z.isnull().any(axis=1)]
# if len(badz.index) > 0 :
# badtimes = badz.index.unique().values
# z.drop( badtimes, inplace=True )
# for dt in badtimes:
# log.info('removed %s for NaNs',pd.to_datetime(str(dt)).strftime(
# '%Y-%m-%d'))
return z
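# Hedged mini-example of squarem (synthetic data; column and symbol names are
# illustrative). Symbol 'B' lacks one timestamp, so after squaring it is
# reindexed onto all dates and forward-filled:
#
#   idx = pd.to_datetime(['2016-01-04', '2016-01-05', '2016-01-06'])
#   df = pd.DataFrame({'Sym': ['A'] * 3 + ['B'] * 2,
#                      'Close': [10., 11., 12., 20., 21.]},
#                     index=list(idx) + list(idx[:2]))
#   squared = squarem(df, min_pct=.5)  # 'B' now spans all three dates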
def prep_univ( dateTime, symbol,
open, high, low, close, volume,
product, instrument='STK', multiplier=1.0,expiry=None,
strike=None,adv_days=20,sd_days=20, open2close_returns=True,
scaleAndCenter=False, **more_cols) :
# constructs universe appropriate for use with simulator; any additional columns
# passed-in via ellipsis will be added to table as named
#
U = pd.DataFrame({'Sym': symbol,
'Product' : product, 'Instrument':instrument,
'Multiplier': 1.0, 'Expiry': None, 'Strike':None,
'Open':open,'High':high, 'Low':low, 'Close':close,
'Volume':volume }, index=dateTime )
U = U[ SIM_COLS ]
if len(more_cols) > 0:
U = pd.concat( [U, pd.DataFrame(more_cols)], axis=1 )
U.reset_index( inplace=True)
U.sort_values(['Sym','Date'],inplace=True)
U.Date = pd.to_datetime(U.Date)
U.set_index('Date',inplace=True)
if scaleAndCenter :
log.debug('prep_univ: scaling & centering')
raw_scaled = U.groupby('Sym').transform(
lambda x : (x - x.mean())/x.std())
U = pd.concat([ u.Sym, raw_scaled], axis=1)
# calculate adv, returns, fwd_returns & change in volume
U['ADV'] = U.groupby('Sym')['Volume'].apply(
pd.rolling_mean, adv_days, 1).shift()
U['DeltaV'] = U.groupby('Sym')['Volume'].transform(
lambda x : np.log(x / x.shift()) )
U['Return'] = U.groupby('Sym')['Close'].transform(
lambda x : np.log(x / x.shift()) )
U['Fwd_Close'] = U.groupby('Sym')['Close'].shift(-1)
U['Fwd_Return'] = U.groupby('Sym')['Close'].transform(
lambda x : np.log(x / x.shift()).shift(-1) ) # fwd.returns
U['SD'] = U.groupby('Sym')['Return'].apply(
pd.rolling_std, sd_days, 1).shift()
if open2close_returns:
U['Fwd_Open'] = U.groupby('Sym')['Open'].shift(-1)
U['Fwd_COReturn'] = np.divide(np.add( U.Fwd_Open, -U.Close ),U.Close)
U.ffill(inplace=True)
U.sort_index(inplace=True)
return U
# simple, default strategy: equal weight universe on daily basis
def eq_wt( U, cfg, kvargs ) :
#pdb.set_trace()
U.Weight = 1/float(len(U.index))
return U
# given today's Universe U and Yesterday's Y, set U's
# Prev_Weight and Prev_Qty to Y's Weight & Qty
# TODO: clean-up
def _getprevs( U, Y ) :
# TODO: surely there's a cleaner way to do this...
wts = Y.reset_index()[['Sym','Weight']]
wts.columns = ['Sym','Prev_Weight']
pwts = U[['Sym']].merge( wts, on = 'Sym' )['Prev_Weight']
U.Prev_Weight=pwts.values
qts = Y.reset_index()[['Sym','Qty']]
qts.columns = ['Sym','Prev_Qty']
pqts = U[['Sym']].merge( qts, on = 'Sym' )['Prev_Qty']
U.Prev_Qty=pqts.values
# functor to run strategy each day and update tbls ...
# TODO: clean-up
def __sim ( U, FUN, cfg, B, kvargs) :
# run sim to set weights
U = FUN( U, cfg, kvargs)
# set prev values for weight & qty...
Y = kvargs.pop('_Y', None)
if Y is not None and not np.all(Y.index==U.index):
_getprevs(U,Y)
loop = 1 + int(kvargs.pop('_L'))
else:
loop = 0
kvargs['_L'] = loop
kvargs['_Y'] = U
bb = B.iloc[loop]
# fill-out trade details
NAV = bb.NAV
tospend = NAV/U.Weight
U.Qty = np.round((NAV*U.Weight) / (U.Multiplier*U.Close))
U.Trade_Qty = U.Qty - U.Prev_Qty
fbps = 1e-4 * cfg['FrictionInBps'](U,cfg,kvargs)
U.Trade_Fric = U.Trade_Qty * U.Close * U.Multiplier * fbps
U.PNL = (U.Fwd_Close - U.Close) * U.Qty * U.Multiplier
U.NET_PNL = U.PNL - U.Trade_Fric
# today's balances are based on yesterday's posns...
longs = U[U.Qty > 0]
shorts = U[U.Qty < 0]
trades = U[U.Trade_Qty != 0]
bb.Friction = U.Trade_Fric.sum()
bb.PNL = U.PNL.sum()
bb.NET_PNL = U.NET_PNL.sum()
bb.Longs = len(longs.index)
bb.Shorts = len(shorts.index)
bb.Long_Dlrs = (longs.Close * longs.Multiplier * longs.Qty).sum()
bb.Short_Dlrs = (shorts.Close * shorts.Multiplier * shorts.Qty).sum()
bb.Num_Trades = len(trades.index)
bb.Turnover = (trades.Close * trades.Multiplier
* trades.Trade_Qty.abs()).sum()/NAV
if loop > 0 :
yb = B.iloc[loop-1]
ynav = yb.NAV
tnav = ynav + yb.NET_PNL
bb.NAV = tnav
bb.NET_Return = (tnav-ynav)/ynav
B.iloc[loop] = bb
# pdb.set_trace()
return U
def sim( univ, sim_FUN=eq_wt, cfg=DEF_SIM_CFG.copy(), kvargs={} ) :
""" simulator: runs simulation and returns a table of activity and balances.
args:
univ - historical data that's been produced by prep_univ
sim_FUN - strategy function. by default, equal weights univ.
    cfg - cfg info; DEF_SIM_CFG by default.
kvargs - strat-specific extra data in a dict
"""
#
t0 = time.time()
all_times = univ.index.unique().values
# prepare writable/output side of universe
W = pd.DataFrame( columns=SIM_COLS_OUT, index = univ.index).fillna(0.0)
U = pd.concat( [univ, W], axis=1 )
# create balances table: one per day
B = pd.DataFrame( columns = SIM_COL_BALS, index = all_times ).fillna(0.0)
B.NAV = cfg['InitBal']
# 'daily' loop
Z = U.groupby(U.index).apply( __sim, FUN=sim_FUN,
cfg=cfg, B=B, kvargs=kvargs )
log.info('ran over %d days and %d rows in %d secs', len(all_times),
len(U.index),time.time()-t0)
# summarize results a bit more...?
#ts=xts(B$Net.Return,order.by=B$DateTime)
# return universe and balances
#list(U=U,B=B, ts=ts)
return Z, B
def sharpe(Returns) :
return np.sqrt(252) * np.mean(Returns)/np.std(Returns)
def random_strat( U, cfg, kvargs ) :
# random portfolio strategy: picks 'num_names' randomly
nnames = kvargs.get('num_names',10)
names = random.sample(U.Sym, nnames )
U.Weight = np.where( U.Sym.isin( names ), 1/float(nnames), 0 )
return U
def best_strat( U, cfg, kvargs ) :
# portfolio strategy: picks 'num_names' based on trailing return
nnames = kvargs.get('num_names',10)
#pdb.set_trace()
best = U.sort_values('Return',ascending=False,
na_position='last')['Sym'].head(10).values
U.Weight = np.where( U.Sym.isin( best ), 1/float(nnames), 0 )
return U
def worst_strat( U, cfg, kvargs ) :
# portfolio strategy: picks 'num_names' based on trailing return
nnames = kvargs.get('num_names',10)
#pdb.set_trace()
worst = U.sort_values('Return',ascending=True,
na_position='last')['Sym'].head(10).values
U.Weight = np.where( U.Sym.isin( worst ), 1/float(nnames), 0 )
return U
def rtest(U,FUN=random_strat, runs=10):
# run given strat repeatedly, plotting NAVs and Returning them
# nb: this only makes sense if the strategy is random...
# run random_strat 'runs' times and plot NAVs
N = None
for i in range(runs) :
_,b = sim( U, sim_FUN=FUN )
n = pd.DataFrame(b.NAV)
N = n if N is None else pd.concat([N,n],axis=1)
N.plot(legend=False)
return N
def sim_test():
# dev driver
f = 'U.pkl'
P = pickle.load(open(f))
log.info('loaded <%s>',f)
P.describe()
U = P[P.index >= '2005-01-01']
U.describe()
import sim
_,B = sim.sim(U)
#plot NAV
B.NAV.plot(title='Equal Weight Everyone')
return B
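def sharpe_test(pkl_file='U.pkl'):
    # Hedged sketch (assumes the same pickled universe file as sim_test):
    # run the trailing-return 'best' strategy and report the annualized
    # Sharpe ratio of the daily net returns.
    U = pickle.load(open(pkl_file))
    _, B = sim(U, sim_FUN=best_strat, kvargs={'num_names': 10})
    log.info('Sharpe: %.2f', sharpe(B.NET_Return))
    return B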
| gpl-3.0 | 3,906,262,475,805,889,000 | 32.015432 | 84 | 0.597177 | false |
douglaskastle/bootswatch | convert_bootswatch_mutara.py | 1 | 7972 | import re
import os
values = {
# 'uc': 'Grissom',
'lc': 'mutara',
'header': 'Michroma',
'body': 'Play',
'website': 'mavek_org',
# 'cl': '#116BB7',
}
def main():
src = 'cyborg'
cmd = 'cp -r {0}/* {1}'.format(src, values['lc'])
os.system(cmd)
infile = "{0}/bootswatch.less".format(src)
f = open(infile, 'r')
lines = f.readlines()
f.close()
outfile = values['lc'] + "/bootswatch.less"
f = open(outfile, 'w')
for line in lines:
line = re.sub(src.title(), values['lc'].title(), line)
if re.search("Roboto", line):
continue
if re.search("web-font-path", line):
line = '@web-font-path2: "https://fonts.googleapis.com/css?family={0}:400,700,400italic";\n'.format(values['body']) + line
line = '@web-font-path: "https://fonts.googleapis.com/css?family={0}:300italic,400italic,700italic,400,300,700";\n'.format(values['header']) + line
line = line + '.web-font(@web-font-path2);\n'
f.write(line)
f.close()
infile = "{0}/variables.less".format(src)
f = open(infile, 'r')
lines = f.readlines()
f.close()
swap_list = {
'@brand-primary:': '@brand-primary: #00ff00',
'@brand-success:': '@brand-success: #0000ff',
'@text-color:': '@text-color: #ffffff',
'@headings-color:': '@headings-color: #00ff00',
'@border-radius-base:': '@border-radius-base: 20px',
'@border-radius-large:': '@border-radius-large: 22px',
'@border-radius-small:': '@border-radius-small: 19px',
'@component-active-color:': '@component-active-color: #00ff00',
'@btn-default-color:': '@btn-default-color: #000',
'@btn-default-bg:': '@btn-default-bg: lighten(@gray-dark, 50%)',
'@input-bg:': '@input-bg: @gray-dark',
'@input-group-addon-bg:': '@input-group-addon-bg: @gray-lighter',
'@dropdown-border:': '@dropdown-border: rgba(0,255,0,0.1)',
'@dropdown-divider-bg:': '@dropdown-divider-bg: rgba(0,255,0,0.1)',
'@dropdown-link-color:': '@dropdown-link-color: #00ff00',
'@dropdown-link-hover-color:': '@dropdown-link-hover-color: #00ff00',
'@dropdown-link-active-color:': '@dropdown-link-active-color: #00ff00',
'@navbar-default-link-hover-color:': '@navbar-default-link-hover-color: #00ff00',
'@navbar-default-link-active-color:': '@navbar-default-link-active-color: #00ff00',
'@navbar-default-brand-color:': '@navbar-default-brand-color: #00ff00',
'@navbar-default-brand-hover-color:': '@navbar-default-brand-hover-color: #00ff00',
'@navbar-inverse-link-hover-color:': '@navbar-inverse-link-hover-color: #0000ff',
'@navbar-inverse-brand-color:': '@navbar-inverse-brand-color: #0000ff',
'@navbar-inverse-brand-hover-color:': '@navbar-inverse-brand-hover-color: #0000ff',
'@navbar-inverse-toggle-hover-bg:': '@navbar-inverse-toggle-hover-bg: #8080ff',
'@navbar-inverse-toggle-icon-bar-bg:': '@navbar-inverse-toggle-icon-bar-bg: #0000ff',
'@navbar-inverse-toggle-border-color:': '@navbar-inverse-toggle-border-color: #8080ff',
'@nav-tabs-active-link-hover-color:': '@nav-tabs-active-link-hover-color: #000',
'@pagination-color:': '@pagination-color: #000',
'@pagination-bg:': '@pagination-bg: @gray',
'@pagination-hover-color:': '@pagination-hover-color: #000',
'@pagination-active-color:': '@pagination-active-color: #000',
'@pagination-disabled-bg:': '@pagination-disabled-bg: @gray',
'@state-success-text:': '@state-success-text: #000',
'@state-info-text:': '@state-info-text: #000',
'@state-warning-text:': '@state-warning-text: #000',
'@state-danger-text:': '@state-danger-text: #000',
'@tooltip-bg:': '@tooltip-bg: #000',
'@popover-bg:': '@popover-bg: lighten(@body-bg, 10%)',
'@popover-fallback-border-color:': '@popover-fallback-border-color: #999',
'@popover-arrow-outer-color:': '@popover-arrow-outer-color: fadein(@popover-border-color, 5%)',
'@popover-arrow-outer-fallback-color:': '@popover-arrow-outer-fallback-color: darken(@popover-fallback-border-color, 20%)',
'@label-color:': '@label-color: #000',
'@label-link-hover-color:': '@label-link-hover-color: #000',
'@list-group-link-heading-color:': '@list-group-link-heading-color: #000',
'@panel-primary-text:': '@panel-primary-text: #000',
'@badge-color:': '@badge-color: #000',
'@badge-link-hover-color:': '@badge-link-hover-color: #000',
'@badge-active-bg:': '@badge-active-bg: #000',
'@breadcrumb-color:': '@breadcrumb-color: #00ff00',
'@carousel-control-color:': '@carousel-control-color: #000',
# '': '',
}
outfile = values['lc'] + "/variables.less"
f = open(outfile, 'w')
for line in lines:
line = re.sub(src.title(), values['lc'].title(), line)
line = re.sub(src, values['lc'], line)
#line = re.sub('Roboto', 'Michroma', line)
for s in swap_list.keys():
if re.search(s, line):
line = swap_list[s] + ";\n"
line = re.sub('headings-font-family: @font-family-base', 'headings-font-family: @font-family-header-sans-serif', line)
if re.search("Roboto", line):
line = re.sub('Roboto', '{0}'.format(values['body']), line)
line = '@font-family-header-sans-serif: "{0}", "Helvetica Neue", Helvetica, Arial, sans-serif;\n'.format(values['header']) + line
f.write(line)
f.close()
infile = "{0}/index.html".format(src)
f = open(infile, 'r')
lines = f.readlines()
f.close()
outfile = values['lc'] + "/index.html"
f = open(outfile, 'w')
for line in lines:
line = re.sub(src.title(), values['lc'].title(), line)
line = re.sub(src, values['lc'], line)
line = re.sub('UA-[0-9\-]+', '', line)
if re.search('bootstrap.css" media="screen"', line):
line = line + ' <link rel="stylesheet" href="./bootstrap_fixes.css" media="screen">\n'
f.write(line)
f.close()
grunt = "/cygdrive/c/Users/keeshand/AppData/Roaming/npm/grunt"
cmd = "{0} swatch:{1}".format(grunt, values['lc'])
os.system(cmd)
cmd = "cp {0}/bootstrap.min.css ../{1}/pelican-themes/bootstrap3/static/css/bootstrap.{0}.min.css".format(values['lc'], values['website'])
os.system(cmd)
cmd = "cp {0}/bootstrap_fixes.css ../{1}/pelican-themes/bootstrap3/static/css/bootstrap_fixes.{0}.css".format(values['lc'], values['website'])
os.system(cmd)
if __name__ == '__main__':
main()
| mit | -6,429,890,791,821,344,000 | 53.97931 | 159 | 0.49147 | false |
sony/nnabla | python/src/nnabla/models/imagenet/densenet.py | 1 | 2722 | # Copyright 2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from nnabla.utils.nnp_graph import NnpNetworkPass
from .base import ImageNetBase
class DenseNet(ImageNetBase):
"""
The following is a list of string that can be specified to ``use_up_to`` option in ``__call__`` method;
* ``'classifier'`` (default): The output of the final affine layer for classification.
* ``'pool'``: The output of the final global average pooling.
* ``'lastconv'``: The output from last denseblock.
* ``'lastconv+relu'``: Network up to ``'lastconv'`` followed by ReLU activation.
References:
* `Huang et al., Densely Connected Convolutional Networks.
<https://arxiv.org/abs/1608.06993>`_
"""
_KEY_VARIABLE = {
'classifier': 'DenseNet/Affine',
'pool': 'DenseNet/AveragePooling_4',
'lastconv': 'DenseNet/BatchNormalization_5',
'lastconv+relu': 'DenseNet/ReLU_5',
}
def __init__(self):
# Load nnp
self._load_nnp('DenseNet-161.nnp', 'DenseNet-161/DenseNet-161.nnp')
def _input_shape(self):
return (3, 224, 224)
def __call__(self, input_var=None, use_from=None, use_up_to='classifier', training=False, force_global_pooling=False, check_global_pooling=True, returns_net=False, verbose=0):
assert use_from is None, 'This should not be set because it is for forward compatibility.'
input_var = self.get_input_var(input_var)
callback = NnpNetworkPass(verbose)
callback.remove_and_rewire('ImageAugmentationX')
callback.set_variable('InputX', input_var)
self.configure_global_average_pooling(
callback, force_global_pooling, check_global_pooling, 'DenseNet/AveragePooling_4')
callback.set_batch_normalization_batch_stat_all(training)
self.use_up_to(use_up_to, callback)
if not training:
callback.fix_parameters()
batch_size = input_var.shape[0]
net = self.nnp.get_network(
'Train', batch_size=batch_size, callback=callback)
if returns_net:
return net
return list(net.outputs.values())[0]
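# Hedged usage sketch (the first call downloads the .nnp file; the import
# path and input_shape property follow the surrounding package layout):
#
#   import nnabla as nn
#   from nnabla.models.imagenet import DenseNet
#   model = DenseNet()
#   x = nn.Variable((1,) + model.input_shape)   # NCHW, (1, 3, 224, 224)
#   pooled = model(x, use_up_to='pool')         # global-average-pooled features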
| apache-2.0 | 6,252,650,887,576,425,000 | 38.449275 | 179 | 0.669361 | false |
robertostling/efselab | scripts/conll2tab.py | 1 | 1221 | # Script to convert CoNLL files with tag+morphology into the simple two-column
# format assumed by efselab.
#
# If only the tag is required, conversion can more easily be done like this:
#
# cut -f 2,4 file.conll >file.tab
"""
cat /home/corpora/SUC3.0/corpus/conll/blogs.conll /home/corpora/SUC3.0/corpus/conll/suc-train.conll | python3 conll2tab.py ne >../suc-data/suc-blogs-ne-train.tab
cat /home/corpora/SUC3.0/corpus/conll/suc-dev.conll | python3 conll2tab.py ne >../suc-data/suc-ne-dev.tab
cat /home/corpora/SUC3.0/corpus/conll/suc-test.conll | python3 conll2tab.py ne >../suc-data/suc-ne-test.tab
"""
import sys
include_ne = 'ne' in sys.argv[1:]
for line in sys.stdin:
fields = line.rstrip('\n').split('\t')
if len(fields) >= 6:
word = fields[1]
pos = fields[3]
if pos == 'LE': pos = 'IN'
tag = pos+'|'+fields[5] if (fields[5] and fields[5] != '_') else pos
if include_ne and len(fields) >= 12:
ne = fields[10] if fields[11] == '_' else (
'%s-%s' % (fields[10], fields[11]))
lemma = fields[2]
print(word+'\t'+lemma+'\t'+tag+'\t'+ne)
else:
print(word+'\t'+tag)
else:
print()
| gpl-3.0 | -248,659,985,629,635,300 | 34.911765 | 161 | 0.600328 | false |
grengojbo/st2 | st2actions/st2actions/config.py | 1 | 3073 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Configuration options registration and useful routines.
"""
import sys
from oslo_config import cfg
import st2common.config as common_config
from st2common.constants.system import VERSION_STRING
CONF = cfg.CONF
def parse_args(args=None):
CONF(args=args, version=VERSION_STRING)
def register_opts():
_register_common_opts()
_register_action_runner_opts()
def _register_common_opts():
common_config.register_opts()
def _register_action_runner_opts():
logging_opts = [
cfg.StrOpt('logging', default='conf/logging.conf',
help='location of the logging.conf file'),
cfg.StrOpt('python_binary', default=sys.executable,
help='Python binary which will be used by Python actions.')
]
CONF.register_opts(logging_opts, group='actionrunner')
db_opts = [
cfg.StrOpt('host', default='0.0.0.0', help='host of db server'),
cfg.IntOpt('port', default=27017, help='port of db server'),
cfg.StrOpt('db_name', default='st2', help='name of database')
]
CONF.register_opts(db_opts, group='database')
ssh_runner_opts = [
cfg.StrOpt('remote_dir',
default='/tmp',
help='Location of the script on the remote filesystem.'),
cfg.BoolOpt('allow_partial_failure',
default=False,
help='How partial success of actions run on multiple nodes should be treated.')
]
CONF.register_opts(ssh_runner_opts, group='ssh_runner')
mistral_opts = [
cfg.StrOpt('v2_base_url', default='http://localhost:8989/v2',
help='Mistral v2 API server root endpoint.'),
cfg.IntOpt('max_attempts', default=180,
help='Maximum no of attempts made to connect to Mistral.'),
cfg.IntOpt('retry_wait', default=5,
help='Time in seconds to wait before retrying connection to Mistral.')
]
CONF.register_opts(mistral_opts, group='mistral')
cloudslang_opts = [
cfg.StrOpt('home_dir', default='/opt/cslang',
help='CloudSlang home directory.'),
]
CONF.register_opts(cloudslang_opts, group='cloudslang')
def get_logging_config_path():
return CONF.actionrunner.logging
register_opts()
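# Hedged usage sketch: once parse_args() has run, the options registered
# above are available on CONF (the config file path below is illustrative):
#
#   parse_args(['--config-file', '/etc/st2/st2.conf'])
#   logging_conf = get_logging_config_path()
#   mistral_url = CONF.mistral.v2_base_url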
| apache-2.0 | -5,164,267,192,846,856,000 | 33.144444 | 99 | 0.663196 | false |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/nose-0.11.1-py2.7.egg/nose/proxy.py | 1 | 5945 | """
Result Proxy
------------
The result proxy wraps the result instance given to each test. It
performs two functions: enabling extended error/failure reporting
and calling plugins.
As each result event is fired, plugins are called with the same event;
however, plugins are called with the nose.case.Test instance that
wraps the actual test. So when a test fails and calls
result.addFailure(self, err), the result proxy calls
addFailure(self.test, err) for each plugin. This allows plugins to
have a single stable interface for all test types, and also to
manipulate the test object itself by setting the `test` attribute of
the nose.case.Test that they receive.
"""
import logging
from nose.config import Config
log = logging.getLogger(__name__)
def proxied_attribute(local_attr, proxied_attr, doc):
"""Create a property that proxies attribute ``proxied_attr`` through
the local attribute ``local_attr``.
"""
def fget(self):
return getattr(getattr(self, local_attr), proxied_attr)
def fset(self, value):
setattr(getattr(self, local_attr), proxied_attr, value)
def fdel(self):
delattr(getattr(self, local_attr), proxied_attr)
return property(fget, fset, fdel, doc)
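# Tiny hedged illustration of proxied_attribute (names are hypothetical, not
# part of nose): attribute access on the outer object is forwarded to the
# wrapped inner object.
#
#   class _Inner(object):
#       value = 1
#
#   class _Outer(object):
#       def __init__(self, inner):
#           self._inner = inner
#       value = proxied_attribute('_inner', 'value', "proxied 'value'")
#
#   o = _Outer(_Inner())
#   assert o.value == 1
#   o.value = 2   # writes through to the wrapped _Inner instance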
class ResultProxyFactory(object):
"""Factory for result proxies. Generates a ResultProxy bound to each test
and the result passed to the test.
"""
def __init__(self, config=None):
if config is None:
config = Config()
self.config = config
self.__prepared = False
self.__result = None
def __call__(self, result, test):
"""Return a ResultProxy for the current test.
On first call, plugins are given a chance to replace the
result used for the remaining tests. If a plugin returns a
value from prepareTestResult, that object will be used as the
result for all tests.
"""
if not self.__prepared:
self.__prepared = True
plug_result = self.config.plugins.prepareTestResult(result)
if plug_result is not None:
self.__result = result = plug_result
if self.__result is not None:
result = self.__result
return ResultProxy(result, test, config=self.config)
class ResultProxy(object):
"""Proxy to TestResults (or other results handler).
One ResultProxy is created for each nose.case.Test. The result
proxy calls plugins with the nose.case.Test instance (instead of
the wrapped test case) as each result call is made. Finally, the
real result method is called, also with the nose.case.Test
instance as the test parameter.
"""
def __init__(self, result, test, config=None):
if config is None:
config = Config()
self.config = config
self.plugins = config.plugins
self.result = result
self.test = test
def __repr__(self):
return repr(self.result)
def assertMyTest(self, test):
        # The test I was called with must be my .test, my
        # .test's .test, or my .test.test's .case
case = getattr(self.test, 'test', None)
assert (test is self.test
or test is case
or test is getattr(case, '_nose_case', None)), (
"ResultProxy for %r (%s) was called with test %r (%s)"
% (self.test, id(self.test), test, id(test)))
def afterTest(self, test):
self.assertMyTest(test)
self.plugins.afterTest(self.test)
if hasattr(self.result, "afterTest"):
self.result.afterTest(self.test)
def beforeTest(self, test):
self.assertMyTest(test)
self.plugins.beforeTest(self.test)
if hasattr(self.result, "beforeTest"):
self.result.beforeTest(self.test)
def addError(self, test, err):
self.assertMyTest(test)
plugins = self.plugins
plugin_handled = plugins.handleError(self.test, err)
if plugin_handled:
return
# test.passed is set in result, to account for error classes
formatted = plugins.formatError(self.test, err)
if formatted is not None:
err = formatted
plugins.addError(self.test, err)
self.result.addError(self.test, err)
if not self.result.wasSuccessful() and self.config.stopOnError:
self.shouldStop = True
def addFailure(self, test, err):
self.assertMyTest(test)
plugins = self.plugins
plugin_handled = plugins.handleFailure(self.test, err)
if plugin_handled:
return
self.test.passed = False
formatted = plugins.formatFailure(self.test, err)
if formatted is not None:
err = formatted
plugins.addFailure(self.test, err)
self.result.addFailure(self.test, err)
if self.config.stopOnError:
self.shouldStop = True
def addSuccess(self, test):
self.assertMyTest(test)
self.plugins.addSuccess(self.test)
self.result.addSuccess(self.test)
def startTest(self, test):
self.assertMyTest(test)
self.plugins.startTest(self.test)
self.result.startTest(self.test)
def stop(self):
self.result.stop()
def stopTest(self, test):
self.assertMyTest(test)
self.plugins.stopTest(self.test)
self.result.stopTest(self.test)
# proxied attributes
shouldStop = proxied_attribute('result', 'shouldStop',
"""Should the test run stop?""")
errors = proxied_attribute('result', 'errors',
"""Tests that raised an exception""")
failures = proxied_attribute('result', 'failures',
"""Tests that failed""")
testsRun = proxied_attribute('result', 'testsRun',
"""Number of tests run""")
| gpl-3.0 | 5,547,417,367,787,775,000 | 34.386905 | 77 | 0.622876 | false |
meowtec/page-navigator | example.py | 1 | 2364 | # coding:utf-8
html_tpl = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Document</title>
<style type="text/css">
.nav{
margin: 10px 0;
font-size: 12px;
font-family: "Helvetica", "Arial", sans-serif;
}
.nav a{
text-decoration: none;
color: #000;
}
.nav span{
color: #999;
}
.nav .item{
display: inline-block;
padding: 3px 8px;
margin: 0 3px;
}
.nav a.number:hover{
background: #99dddd;
color: #ffffff;
}
.nav span.current{
background: #9cc;
color: #fff;
}
.nav a.prev:hover, .nav a.next:hover{
color: #9cc;
}
h2{
margin-top: 2em;
}
</style>
</head>
<body>
<h2>Basic</h2>
<div class="nav">{{html_1_1}}</div>
<div class="nav">{{html_1_2}}</div>
<div class="nav">{{html_1_3}}</div>
<div class="nav">{{html_1_4}}</div>
<div class="nav">{{html_1_5}}</div>
<div class="nav">{{html_1_6}}</div>
<h2>Settings</h2>
<div class="nav">{{html_2_1}}</div>
<h2>Custom Helper</h2>
<div class="nav">{{html_3_1}}</div>
</body>
</html>
'''
from pagenavigator import PageNavigator
def string_replace(string, **data):
for key in data:
string = string.replace('{{' + key + '}}', str(data[key]))
return string
nav_1 = PageNavigator()
html_1_1 = nav_1.create(1, 5)
html_1_2 = nav_1.create(2, 5)
html_1_3 = nav_1.create(5, 5)
html_1_4 = nav_1.create(1, 6)
html_1_5 = nav_1.create(5, 6)
html_1_6 = nav_1.create(5, 10)
nav_2 = PageNavigator(link_helper='list.html?page={{page}}&from={{current}}&max={{max}}',
prev_text='←', next_text='→', more_text='……', size=9)
html_2_1 = nav_2.create(10, 20)
nav_3 = PageNavigator(number_helper='<button href="{{link}}" class="item number" data-page="{{page}}">{{page}}</button>',
current_helper='<button class="item number current" data-page="{{page}}" disabled="disabled">{{page}}</button>')
html_3_1 = nav_3.create(10, 20)
html = string_replace(html_tpl, html_1_1=html_1_1, html_1_2=html_1_2, html_1_3=html_1_3,
html_1_4=html_1_4, html_1_5=html_1_5, html_1_6=html_1_6,
html_2_1=html_2_1,
html_3_1=html_3_1
)
file_object = open('python_example.html', 'w')
file_object.write(html)
file_object.close()
| mit | -406,984,751,838,615,360 | 24.736264 | 134 | 0.551665 | false |
ehovind/extensible-ebook-converter | eecon_fetcher.py | 1 | 3151 | #!/usr/bin/env python
"""
This file is part of Extensible eBook Converter (EeCon),
an advanced ebook analysis and conversion tool.
Copyright (C) 2012 Espen Hovind <[email protected]>
EeCon is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
# EeCon is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with EeCon. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
from fetcher import fetcher as Fetcher
# ==============================================================================
# CONFIGURATION:
# ==============================================================================
WORKSPACE = "workspace/project_runeberg/"
VALID_DOMAINS = ("runeberg.org",)
# ==============================================================================
# FUNCTION:
# main()
# ==============================================================================
def main():
"""
    DESCRIPTION:
        Parse the command line, initialize the Fetcher workspace and
        process the requested publication.
    PARAMETERS:
        None
    RETURN:
        None
"""
# parse arguments
args = parse_command()
# fetch and initalize the workspace
fetcher = Fetcher.Fetcher(WORKSPACE, VALID_DOMAINS, args)
# process the arguments
fetcher.process()
# ==============================================================================
# FUNCTION:
# parse_command()
# ==============================================================================
def parse_command():
"""
DESCRIPTION:
Parse the user-provided command using argparse.
PARAMETERS:
None
RETURN:
Dictionary of command line options
"""
print "[STATUS] parsing arguments... ",
# create an ArgumentParser
parser = argparse.ArgumentParser()
# positional arguments
parser.add_argument("--auto-markup", action="store_true",
help="Automatic conversion from HTML to XHTML (best effort)")
parser.add_argument("--auto-populate", action="store_true",
help="Automatic population from Project Runeberg Pages files")
parser.add_argument("--auto-utf8", action="store_true",
help="auto convert publication files to UTF-8")
parser.add_argument("--patch", help="apply pre-made git patch")
parser.add_argument("--source",
help="fetch a ebook archive URL or filename")
parser.add_argument("--title",
help="title of publication")
# parse the command into a ArgumentParser object
args = parser.parse_args()
print "ok."
# return a dict with command line options
return vars(args)
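# Example invocation (placeholders, not real arguments):
#
#   python eecon_fetcher.py --source <runeberg-archive-url-or-file> \
#       --title <publication-title> --auto-utf8 --auto-markup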
# ==============================================================================
# MODULE:
# __name__
# ==============================================================================
if __name__ == "__main__":
main()
| gpl-3.0 | 6,881,548,638,896,659,000 | 29.009524 | 80 | 0.535386 | false |
albert12132/templar | templar/cli/templar.py | 1 | 2179 | """Command-line interface for templar."""
from templar.api import config
from templar.api import publish
from templar.exceptions import TemplarError
import templar
import argparse
import logging
import sys
LOGGING_FORMAT = '%(levelname)s %(filename)s:%(lineno)d> %(message)s'
logging.basicConfig(format=LOGGING_FORMAT)
log = logging.getLogger('templar')
def flags(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--source',
help='Path to a source file with Markdown content.')
parser.add_argument('-t', '--template',
help='Path to a Jinja template file')
parser.add_argument('-d', '--destination',
help='Path to the destination file.')
parser.add_argument('-c', '--config', default='config.py',
help='Path to a Templar configuration file.')
parser.add_argument('--print', action='store_true',
help='Forces printing of result to stdout, '
'even if --destination is specified')
parser.add_argument('--debug', action='store_true',
help='Enable debugging messages.')
parser.add_argument('--version', action='store_true',
help='Print the version number and exit')
if args is not None:
return parser.parse_args(args)
return parser.parse_args()
def run(args):
if args.version:
print('Templar version {}'.format(templar.__version__))
exit(0)
log.setLevel(logging.DEBUG if args.debug else logging.ERROR)
try:
configuration = config.import_config(args.config)
result = publish.publish(
configuration,
source=args.source,
template=args.template,
destination=args.destination,
no_write=args.print)
except TemplarError as e:
if args.debug:
raise
else:
print('{}: {}'.format(type(e).__name__, str(e)), file=sys.stderr)
exit(1)
else:
if not args.destination or args.print:
print(result)
def main():
run(flags())
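# Example invocation, assuming the installed console script is named
# 'templar' (paths are illustrative):
#
#   templar -s content.md -t template.html -d out/index.html -c config.py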
| mit | -8,073,542,425,977,757,000 | 33.046875 | 77 | 0.58972 | false |
hammerlab/immuno | immuno/immunogenicity.py | 1 | 5849 | # Copyright (c) 2014. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from os import environ, listdir
from os.path import exists, split, join
from mhc_common import compact_hla_allele_name
from peptide_binding_measure import IC50_FIELD_NAME
DEFAULT_PEPTIDE_DIR = environ.get(
"IMMUNO_THYMIC_PEPTIDES",
join(split(__file__)[0], "thymic_peptides"))
THYMIC_DELETION_FIELD_NAME = 'ThymicDeletion'
def _load_allele_mapping_dict(path):
"""
Since some alleles have identical peptide sets as others, we compress
the stored data by only retaining one allele from each equivalence class
and using a mappings file to figure out which allele is retained.
"""
result = {}
with open(path, 'r') as f:
for line in f.read().split("\n"):
if len(line) > 0:
k, v = line.split("\t")
result[k] = v
return result
class ImmunogenicityPredictor(object):
"""
Predict whether some T-cell in a person's circulating repertoire could
recognize a particular pattern. The subset of the 'self' proteome which
binds to an individual's HLA alleles tells us which T-cells were removed
by negative selection. T-cells inspect peptides more strongly along
interior residues (positions 3-8), so we restrict our query only to those
positions.
"""
def __init__(
self,
alleles,
data_path = DEFAULT_PEPTIDE_DIR,
binding_threshold = 500,
first_position = 3,
last_position = 8):
"""
Parameters
--------
alleles : list of strings
data_path : str, optional
first_position : int, optional
Start position for extracting substring of
query peptide (indexed starting from 1)
last_position : int, optional
Last position for extracting substring of
query peptide (indexed starting from 1)
"""
self.binding_threshold = binding_threshold
self.first_position = first_position
self.last_position = last_position
self.alleles = {
compact_hla_allele_name(allele) for allele in alleles
}
self.data_path = data_path
assert exists(self.data_path), \
"Directory with thymic peptides (%s) does not exist" % \
self.data_path
available_alleles = listdir(self.data_path)
mappings_file_path = join(self.data_path, 'mappings')
if exists(mappings_file_path):
self.allele_mappings = \
_load_allele_mapping_dict(mappings_file_path)
else:
self.allele_mappings = \
dict(zip(available_alleles, available_alleles))
self.peptide_sets = {}
for allele in self.alleles:
if allele not in self.allele_mappings:
logging.warn(
"No MHC peptide set available for HLA allele %s", allele)
continue
else:
logging.info(
"Loading thymic MHC peptide set for HLA allele %s", allele)
filename = self.allele_mappings[allele]
assert filename in available_alleles, \
"No MHC peptide set available for HLA allele %s (file = %s)" % \
(allele,filename)
with open(join(self.data_path, filename), 'r') as f:
peptide_set = {l for l in f.read().split("\n") if len(l) > 0}
self.peptide_sets[allele] = peptide_set
def predict(self, peptides_df):
"""
Determine whether 9-mer peptide is immunogenic by checking
1) that the epitope binds strongly to a particular MHC allele
2) the "core" of the peptide (positions 3-8) don't overlap with any
other peptides in the self/thymic MHC ligand sets of that HLA allele
Returns DataFrame with two extra columns:
- ThymicDeletion: Was this epitope deleted during thymic selection
(and thus can't be recognize by T-cells)?
- Immunogenic: Is this epitope a sufficiently strong binder that
wasn't deleted during thymic selection?
"""
thymic_peptide_sets = self.peptide_sets.values()
# assume a peptide is non-immunogenic unless not in thymic sets
# We do this in case some alleles are missing, resulting in all
# their associated ligands being considered non-immunogenic
peptides_df[THYMIC_DELETION_FIELD_NAME] = True
for i in xrange(len(peptides_df)):
row = peptides_df.ix[i]
peptide = row.Epitope
allele = compact_hla_allele_name(row.Allele)
if allele in self.peptide_sets:
# positions in the epitope are indexed starting from 1 to
# match immunology nomenclature
substring = \
peptide[self.first_position - 1 : self.last_position]
peptides_df[THYMIC_DELETION_FIELD_NAME].ix[i] = \
substring in self.peptide_sets[allele]
peptides_df["Immunogenic"] = \
~peptides_df[THYMIC_DELETION_FIELD_NAME] & \
(peptides_df[IC50_FIELD_NAME] <= self.binding_threshold)
return peptides_df
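# Hedged usage sketch (allele and epitope values are illustrative; the frame
# must carry the Epitope, Allele and IC50 columns referenced by predict()):
#
#   import pandas as pd
#   df = pd.DataFrame({'Epitope': ['SIINFEKLM'],
#                      'Allele': ['HLA-A*02:01'],
#                      IC50_FIELD_NAME: [120.0]})
#   pred = ImmunogenicityPredictor(alleles=['HLA-A*02:01'])
#   out = pred.predict(df)  # adds ThymicDeletion and Immunogenic columns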
| apache-2.0 | -1,123,639,031,035,863,000 | 35.786164 | 80 | 0.618396 | false |
Gabriel-p/mcs_rot_angles | aux_modules/validation_set.py | 1 | 10176 |
import os
from astropy.io import ascii
from astropy.table import Table
from astropy.coordinates import Distance, Angle, SkyCoord
from astropy import units as u
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
# Change path so that we can import functions from the 'modules/' folder.
sys.path.insert(0, sys.path[0].replace('aux_', ''))
import readData
import MCs_data
def zDist(N):
"""
This function generates a uniform spread of vertical distances, in the
range (-z_dist, +z_dist).
"""
# Define maximum vertical distance (in parsec)
z_dist = 5000.
# Generate N random z' vertical distances, in parsec.
# To generate the *same* values each time the code is executed, fix the
# random seed to any integer value.
# np.random.seed(12345)
z_prime = np.random.uniform(-z_dist, z_dist, N)
return z_prime
def invertDist(incl, theta, ra_0, dec_0, D_0, ra, dec, z_prime):
"""
Inverted distance in parsecs (D) from Eq (7) in
van der Marel & Cioni (2001) using Eqs (1), (2), (3).
"""
# Express everything in radians.
incl, theta = np.deg2rad(incl), np.deg2rad(theta)
ra_0, dec_0, ra, dec = ra_0.rad, dec_0.rad, np.deg2rad(ra), np.deg2rad(dec)
# cos(rho)
A = np.cos(dec) * np.cos(dec_0) * np.cos(ra - ra_0) +\
np.sin(dec) * np.sin(dec_0)
# sin(rho) * cos(phi)
B = -np.cos(dec) * np.sin(ra - ra_0)
# sin(rho) * sin(phi)
C = np.sin(dec) * np.cos(dec_0) -\
np.cos(dec) * np.sin(dec_0) * np.cos(ra - ra_0)
# Eq (7)
D = (z_prime - D_0.value * np.cos(incl)) /\
(np.sin(incl) * (C * np.cos(theta) - B * np.sin(theta)) -
A * np.cos(incl))
return D
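# Hedged sanity check for Eq (7) (numbers are illustrative): a point at the
# galaxy center with z' = 0 gives A = 1 and B = C = 0, so the recovered
# distance is exactly D_0.
#
#   ra_0, dec_0 = Angle('13d'), Angle('-73d')
#   D_0 = Distance(62., unit=u.kpc)
#   D = invertDist(60., 150., ra_0, dec_0, D_0,
#                  np.array([13.]), np.array([-73.]), np.array([0.]))
#   # D ~ [62.] (same units as D_0)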
def rho_phi(ra, dec, glx_ctr):
"""
Obtain the angular distance between (ra, dec) coordinates and the center
of the galaxy (rho), and its position angle (phi).
"""
# Store clusters' (ra, dec) coordinates in degrees.
coords = SkyCoord(list(zip(*[ra, dec])), unit=(u.deg, u.deg))
rho = coords.separation(glx_ctr)
# Position angle between center and coordinates. This is the angle between
# the positive y axis (North) counter-clockwise towards the negative x
# axis (East).
Phi = glx_ctr.position_angle(coords)
# This is the angle measured counter-clockwise from the x positive axis
# (West).
phi = Phi + Angle('90d')
return rho, phi
def xyz_coords(rho, phi, D_0, r_dist):
'''
Obtain coordinates in the (x,y,z) system of van der Marel & Cioni (2001),
Eq (5).
Values (x, y,z) returned in Kpc.
'''
d_kpc = Distance((10**(0.2 * (np.asarray(r_dist) + 5.))) / 1000.,
unit=u.kpc)
x = d_kpc * np.sin(rho.radian) * np.cos(phi.radian)
y = d_kpc * np.sin(rho.radian) * np.sin(phi.radian)
z = D_0.kpc * u.kpc - d_kpc * np.cos(rho.radian)
x, y, z = x.value, y.value, z.value
return np.array([x, y, z])
def outData(gal, gal_data, dist_mod, e_dm):
"""
Write data to output 'xxx_input_synth.dat' file ('xxx' stands for the
processed galaxy.)
"""
data = Table(
[gal_data['Name'], gal_data['ra'], gal_data['dec'], dist_mod, e_dm,
gal_data['log(age)']],
names=['Name', 'ra', 'dec', 'dist_mod', 'e_dm', 'log(age)'])
with open(gal.lower() + "_input_synth.dat", 'w') as f:
ascii.write(data, f, format='fixed_width', delimiter=' ')
def inv_trans_eqs(x_p, y_p, z_p, theta, inc):
"""
Inverse set of equations. Transform inclined plane system (x',y',z')
into face on sky system (x,y,z).
"""
x = x_p * np.cos(theta) - y_p * np.cos(inc) * np.sin(theta) -\
z_p * np.sin(inc) * np.sin(theta)
y = x_p * np.sin(theta) + y_p * np.cos(inc) * np.cos(theta) +\
z_p * np.sin(inc) * np.cos(theta)
z = -1. * y_p * np.sin(inc) + z_p * np.cos(inc)
return x, y, z
def make_plot(gal_name, incl, theta, cl_xyz, dm):
"""
Original link for plotting intersecting planes:
http://stackoverflow.com/a/14825951/1391441
"""
# Make plot.
fig = plt.figure()
ax = Axes3D(fig)
# Placement 0, 0 is the bottom left, 1, 1 is the top right.
ax.text2D(
0.4, 0.95, r"${}:\;(\Theta, i) = ({}, {})$".format(
gal_name, theta - 90., incl),
transform=ax.transAxes, fontsize=15, color='red')
# Express in radians for calculations.
incl, theta = np.deg2rad(incl), np.deg2rad(theta)
# Plot clusters.
x_cl, y_cl, z_cl = cl_xyz
SC = ax.scatter(x_cl, z_cl, y_cl, c=dm, s=50)
min_X, max_X = min(x_cl) - 2., max(x_cl) + 2.
min_Y, max_Y = min(y_cl) - 2., max(y_cl) + 2.
min_Z, max_Z = min(z_cl) - 2., max(z_cl) + 2.
# x,y plane.
X, Y = np.meshgrid([min_X, max_X], [min_Y, max_Y])
Z = np.zeros((2, 2))
# Plot x,y plane.
ax.plot_surface(X, Z, Y, color='gray', alpha=.1, linewidth=0, zorder=1)
# Axis of x,y plane.
# x axis.
ax.plot([min_X, max_X], [0., 0.], [0., 0.], ls='--', c='k', zorder=4)
# Arrow head pointing in the positive x direction.
ax.quiver(max_X, 0., 0., max_X, 0., 0., arrow_length_ratio=.5,
length=.1, color='k')
ax.text(max_X, 0., -.5, 'x', 'x')
# y axis.
ax.plot([0., 0.], [0., 0.], [0., max_Y], ls='--', c='k')
# Arrow head pointing in the positive y direction.
ax.quiver(0., 0., max_Y, 0., 0., max_Y, arrow_length_ratio=.8,
length=.1, color='k')
ax.plot([0., 0.], [0., 0.], [min_Y, 0.], ls='--', c='k')
ax.text(-.5, 0., max_Y, 'y', 'y')
#
# A plane is a*x+b*y+c*z+d=0, [a,b,c] is the normal.
a, b, c, d = -1. * np.sin(theta) * np.sin(incl),\
np.cos(theta) * np.sin(incl), np.cos(incl), 0.
# print('a/c,b/c,1,d/c:', a / c, b / c, 1., d / c)
# Rotated plane.
X2_t, Y2_t = np.meshgrid([min_X, max_X], [0, max_Y])
Z2_t = (-a * X2_t - b * Y2_t) / c
X2_b, Y2_b = np.meshgrid([min_X, max_X], [min_Y, 0])
Z2_b = (-a * X2_b - b * Y2_b) / c
# Top half of first x',y' inclined plane.
ax.plot_surface(X2_t, Z2_t, Y2_t, color='red', alpha=.1, lw=0, zorder=3)
# Bottom half of inclined plane.
ax.plot_surface(X2_t, Z2_b, Y2_b, color='red', alpha=.1, lw=0, zorder=-1)
# Axis of x',y' plane.
# x' axis.
x_min, y_min, z_min = inv_trans_eqs(min_X, 0., 0., theta, incl)
x_max, y_max, z_max = inv_trans_eqs(max_X, 0., 0., theta, incl)
ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='b')
# Arrow head pointing in the positive x' direction.
ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
arrow_length_ratio=.7)
ax.text(x_max, z_max, y_max - .5, "x'", 'x', color='b')
# y' axis.
x_min, y_min, z_min = inv_trans_eqs(0., min_Y, 0., theta, incl)
x_max, y_max, z_max = inv_trans_eqs(0., max_Y, 0., theta, incl)
ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='g')
# Arrow head pointing in the positive y' direction.
ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
arrow_length_ratio=.9, color='g')
ax.text(x_max - .5, z_max, y_max, "y'", 'y', color='g')
# # z' axis.
# x_min, y_min, z_min = inv_trans_eqs(0., 0, min_Z, theta, incl)
# x_max, y_max, z_max = inv_trans_eqs(0., 0, max_Z, theta, incl)
# ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='y')
# # Arrow head pointing in the positive z' direction.
# ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
# arrow_length_ratio=.9, color='y')
# ax.text(x_max - .5, z_max, y_max, "z'", 'z', color='y')
ax.set_xlabel('x (Kpc)')
ax.set_ylabel('z (Kpc)')
ax.set_ylim(max_Y, min_Y)
ax.set_zlabel('y (Kpc)')
plt.colorbar(SC, shrink=0.9, aspect=25)
ax.axis('equal')
ax.axis('tight')
# This controls the initial orientation of the displayed 3D plot.
    # 'elev' stores the elevation angle in the z plane. 'azim' stores the
    # azimuth angle in the x,y plane.
ax.view_init(elev=0., azim=-90.)
plt.show()
# plt.savefig()
def main():
"""
"""
# Define inclination angles (i, Theta) (SMC first, LMC second).
# 'Theta' is the PA (position angle) measured from the North (positive
# y axis in van der Marel et al. 2002, Fig 3)
rot_angles = ((60, 150.), (30, 140.))
# Root path.
r_path = os.path.realpath(__file__)[:-30]
# Read input data for both galaxies from file (smc_data, lmc_data)
gal_data = readData.main(r_path)
for gal, gal_name in enumerate(['SMC', 'LMC']):
print("Generating data for {}".format(gal_name))
incl, Theta = rot_angles[gal]
# 'theta' is the position angle measured from the West (positive
# x axis), used by Eq (7) in van der Marel & Cioni (2001).
theta = Theta + 90.
# Center coordinates and distance for this galaxy.
gal_center, D_0, e_gal_dist = MCs_data.MCs_data(gal)
ra_0, dec_0 = gal_center.ra, gal_center.dec
# Center coordinates for observed clusters in this galaxy.
ra, dec = gal_data[gal]['ra'], gal_data[gal]['dec']
# Generate N random vertical distances (z'), in parsec.
z_prime = zDist(len(ra))
# Distance to clusters in parsecs.
D = invertDist(incl, theta, ra_0, dec_0, D_0, ra, dec, z_prime)
# Convert to distance moduli.
dist_mod = np.round(-5. + 5. * np.log10(D), 2)
# This line below uses the actual distance moduli found by ASteCA.
# dist_mod = gal_data[gal]['dist_mod']
# Random errors for distance moduli.
e_dm = np.round(np.random.uniform(.03, .09, len(ra)), 2)
# Store data in output file.
outData(gal_name, gal_data[gal], dist_mod, e_dm)
print("Output data stored")
# Obtain angular projected distance and position angle for the
# clusters in the galaxy.
rho, phi = rho_phi(ra, dec, gal_center)
cl_xyz = xyz_coords(rho, phi, D_0, dist_mod)
make_plot(gal_name, incl, theta, cl_xyz, dist_mod)
print("Plot saved.")
if __name__ == '__main__':
main()
| gpl-3.0 | -6,003,738,110,879,360,000 | 34.552448 | 79 | 0.564123 | false |
wxgeo/geophar | wxgeometrie/sympy/polys/tests/test_numberfields.py | 4 | 28618 | """Tests for computational algebraic number field theory. """
from sympy import (S, Rational, Symbol, Poly, sqrt, I, oo, Tuple, expand,
pi, cos, sin, exp)
from sympy.utilities.pytest import raises, slow
from sympy.core.compatibility import range
from sympy.polys.numberfields import (
minimal_polynomial,
primitive_element,
is_isomorphism_possible,
field_isomorphism_pslq,
field_isomorphism,
to_number_field,
AlgebraicNumber,
isolate, IntervalPrinter,
)
from sympy.polys.polyerrors import (
IsomorphismFailed,
NotAlgebraic,
GeneratorsError,
)
from sympy.polys.polyclasses import DMP
from sympy.polys.domains import QQ
from sympy.polys.rootoftools import rootof
from sympy.polys.polytools import degree
from sympy.abc import x, y, z
Q = Rational
def test_minimal_polynomial():
assert minimal_polynomial(-7, x) == x + 7
assert minimal_polynomial(-1, x) == x + 1
assert minimal_polynomial( 0, x) == x
assert minimal_polynomial( 1, x) == x - 1
assert minimal_polynomial( 7, x) == x - 7
assert minimal_polynomial(sqrt(2), x) == x**2 - 2
assert minimal_polynomial(sqrt(5), x) == x**2 - 5
assert minimal_polynomial(sqrt(6), x) == x**2 - 6
assert minimal_polynomial(2*sqrt(2), x) == x**2 - 8
assert minimal_polynomial(3*sqrt(5), x) == x**2 - 45
assert minimal_polynomial(4*sqrt(6), x) == x**2 - 96
assert minimal_polynomial(2*sqrt(2) + 3, x) == x**2 - 6*x + 1
assert minimal_polynomial(3*sqrt(5) + 6, x) == x**2 - 12*x - 9
assert minimal_polynomial(4*sqrt(6) + 7, x) == x**2 - 14*x - 47
assert minimal_polynomial(2*sqrt(2) - 3, x) == x**2 + 6*x + 1
assert minimal_polynomial(3*sqrt(5) - 6, x) == x**2 + 12*x - 9
assert minimal_polynomial(4*sqrt(6) - 7, x) == x**2 + 14*x - 47
assert minimal_polynomial(sqrt(1 + sqrt(6)), x) == x**4 - 2*x**2 - 5
assert minimal_polynomial(sqrt(I + sqrt(6)), x) == x**8 - 10*x**4 + 49
assert minimal_polynomial(2*I + sqrt(2 + I), x) == x**4 + 4*x**2 + 8*x + 37
assert minimal_polynomial(sqrt(2) + sqrt(3), x) == x**4 - 10*x**2 + 1
assert minimal_polynomial(
sqrt(2) + sqrt(3) + sqrt(6), x) == x**4 - 22*x**2 - 48*x - 23
a = 1 - 9*sqrt(2) + 7*sqrt(3)
assert minimal_polynomial(
1/a, x) == 392*x**4 - 1232*x**3 + 612*x**2 + 4*x - 1
assert minimal_polynomial(
1/sqrt(a), x) == 392*x**8 - 1232*x**6 + 612*x**4 + 4*x**2 - 1
raises(NotAlgebraic, lambda: minimal_polynomial(oo, x))
raises(NotAlgebraic, lambda: minimal_polynomial(2**y, x))
raises(NotAlgebraic, lambda: minimal_polynomial(sin(1), x))
assert minimal_polynomial(sqrt(2)).dummy_eq(x**2 - 2)
assert minimal_polynomial(sqrt(2), x) == x**2 - 2
assert minimal_polynomial(sqrt(2), polys=True) == Poly(x**2 - 2)
assert minimal_polynomial(sqrt(2), x, polys=True) == Poly(x**2 - 2)
assert minimal_polynomial(sqrt(2), x, polys=True, compose=False) == Poly(x**2 - 2)
a = AlgebraicNumber(sqrt(2))
b = AlgebraicNumber(sqrt(3))
assert minimal_polynomial(a, x) == x**2 - 2
assert minimal_polynomial(b, x) == x**2 - 3
assert minimal_polynomial(a, x, polys=True) == Poly(x**2 - 2)
assert minimal_polynomial(b, x, polys=True) == Poly(x**2 - 3)
assert minimal_polynomial(sqrt(a/2 + 17), x) == 2*x**4 - 68*x**2 + 577
assert minimal_polynomial(sqrt(b/2 + 17), x) == 4*x**4 - 136*x**2 + 1153
a, b = sqrt(2)/3 + 7, AlgebraicNumber(sqrt(2)/3 + 7)
f = 81*x**8 - 2268*x**6 - 4536*x**5 + 22644*x**4 + 63216*x**3 - \
31608*x**2 - 189648*x + 141358
assert minimal_polynomial(sqrt(a) + sqrt(sqrt(a)), x) == f
assert minimal_polynomial(sqrt(b) + sqrt(sqrt(b)), x) == f
assert minimal_polynomial(
a**Q(3, 2), x) == 729*x**4 - 506898*x**2 + 84604519
# issue 5994
eq = S('''
-1/(800*sqrt(-1/240 + 1/(18000*(-1/17280000 +
sqrt(15)*I/28800000)**(1/3)) + 2*(-1/17280000 +
sqrt(15)*I/28800000)**(1/3)))''')
assert minimal_polynomial(eq, x) == 8000*x**2 - 1
ex = 1 + sqrt(2) + sqrt(3)
mp = minimal_polynomial(ex, x)
assert mp == x**4 - 4*x**3 - 4*x**2 + 16*x - 8
ex = 1/(1 + sqrt(2) + sqrt(3))
mp = minimal_polynomial(ex, x)
assert mp == 8*x**4 - 16*x**3 + 4*x**2 + 4*x - 1
p = (expand((1 + sqrt(2) - 2*sqrt(3) + sqrt(7))**3))**Rational(1, 3)
mp = minimal_polynomial(p, x)
assert mp == x**8 - 8*x**7 - 56*x**6 + 448*x**5 + 480*x**4 - 5056*x**3 + 1984*x**2 + 7424*x - 3008
p = expand((1 + sqrt(2) - 2*sqrt(3) + sqrt(7))**3)
mp = minimal_polynomial(p, x)
assert mp == x**8 - 512*x**7 - 118208*x**6 + 31131136*x**5 + 647362560*x**4 - 56026611712*x**3 + 116994310144*x**2 + 404854931456*x - 27216576512
assert minimal_polynomial(S("-sqrt(5)/2 - 1/2 + (-sqrt(5)/2 - 1/2)**2"), x) == x - 1
a = 1 + sqrt(2)
assert minimal_polynomial((a*sqrt(2) + a)**3, x) == x**2 - 198*x + 1
p = 1/(1 + sqrt(2) + sqrt(3))
assert minimal_polynomial(p, x, compose=False) == 8*x**4 - 16*x**3 + 4*x**2 + 4*x - 1
p = 2/(1 + sqrt(2) + sqrt(3))
assert minimal_polynomial(p, x, compose=False) == x**4 - 4*x**3 + 2*x**2 + 4*x - 2
assert minimal_polynomial(1 + sqrt(2)*I, x, compose=False) == x**2 - 2*x + 3
assert minimal_polynomial(1/(1 + sqrt(2)) + 1, x, compose=False) == x**2 - 2
assert minimal_polynomial(sqrt(2)*I + I*(1 + sqrt(2)), x,
compose=False) == x**4 + 18*x**2 + 49
# minimal polynomial of I
assert minimal_polynomial(I, x, domain=QQ.algebraic_field(I)) == x - I
K = QQ.algebraic_field(I*(sqrt(2) + 1))
assert minimal_polynomial(I, x, domain=K) == x - I
assert minimal_polynomial(I, x, domain=QQ) == x**2 + 1
assert minimal_polynomial(I, x, domain='QQ(y)') == x**2 + 1
def test_minimal_polynomial_hi_prec():
p = 1/sqrt(1 - 9*sqrt(2) + 7*sqrt(3) + S(1)/10**30)
mp = minimal_polynomial(p, x)
# checked with Wolfram Alpha
assert mp.coeff(x**6) == -1232000000000000000000000000001223999999999999999999999999999987999999999999999999999999999996000000000000000000000000000000
def test_minimal_polynomial_sq():
from sympy import Add, expand_multinomial
p = expand_multinomial((1 + 5*sqrt(2) + 2*sqrt(3))**3)
mp = minimal_polynomial(p**Rational(1, 3), x)
assert mp == x**4 - 4*x**3 - 118*x**2 + 244*x + 1321
p = expand_multinomial((1 + sqrt(2) - 2*sqrt(3) + sqrt(7))**3)
mp = minimal_polynomial(p**Rational(1, 3), x)
assert mp == x**8 - 8*x**7 - 56*x**6 + 448*x**5 + 480*x**4 - 5056*x**3 + 1984*x**2 + 7424*x - 3008
p = Add(*[sqrt(i) for i in range(1, 12)])
mp = minimal_polynomial(p, x)
assert mp.subs({x: 0}) == -71965773323122507776
def test_minpoly_compose():
# issue 6868
eq = S('''
-1/(800*sqrt(-1/240 + 1/(18000*(-1/17280000 +
sqrt(15)*I/28800000)**(1/3)) + 2*(-1/17280000 +
sqrt(15)*I/28800000)**(1/3)))''')
mp = minimal_polynomial(eq + 3, x)
assert mp == 8000*x**2 - 48000*x + 71999
# issue 5888
assert minimal_polynomial(exp(I*pi/8), x) == x**8 + 1
mp = minimal_polynomial(sin(pi/7) + sqrt(2), x)
assert mp == 4096*x**12 - 63488*x**10 + 351488*x**8 - 826496*x**6 + \
770912*x**4 - 268432*x**2 + 28561
mp = minimal_polynomial(cos(pi/7) + sqrt(2), x)
assert mp == 64*x**6 - 64*x**5 - 432*x**4 + 304*x**3 + 712*x**2 - \
232*x - 239
mp = minimal_polynomial(exp(I*pi/7) + sqrt(2), x)
assert mp == x**12 - 2*x**11 - 9*x**10 + 16*x**9 + 43*x**8 - 70*x**7 - 97*x**6 + 126*x**5 + 211*x**4 - 212*x**3 - 37*x**2 + 142*x + 127
mp = minimal_polynomial(exp(2*I*pi/7), x)
assert mp == x**6 + x**5 + x**4 + x**3 + x**2 + x + 1
mp = minimal_polynomial(exp(2*I*pi/15), x)
assert mp == x**8 - x**7 + x**5 - x**4 + x**3 - x + 1
mp = minimal_polynomial(cos(2*pi/7), x)
assert mp == 8*x**3 + 4*x**2 - 4*x - 1
mp = minimal_polynomial(sin(2*pi/7), x)
assert mp == 64*x**6 - 112*x**4 + 56*x**2 - 7
ex = (5*cos(2*pi/7) - 7)/(9*cos(pi/7) - 5*cos(3*pi/7))
mp = minimal_polynomial(ex, x)
assert mp == x**3 + 2*x**2 - x - 1
assert minimal_polynomial(-1/(2*cos(pi/7)), x) == x**3 + 2*x**2 - x - 1
assert minimal_polynomial(sin(2*pi/15), x) == \
256*x**8 - 448*x**6 + 224*x**4 - 32*x**2 + 1
assert minimal_polynomial(sin(5*pi/14), x) == 8*x**3 - 4*x**2 - 4*x + 1
assert minimal_polynomial(cos(pi/15), x) == 16*x**4 + 8*x**3 - 16*x**2 - 8*x + 1
ex = rootof(x**3 + 4*x + 1, 0)
mp = minimal_polynomial(ex, x)
assert mp == x**3 + 4*x + 1
mp = minimal_polynomial(ex + 1, x)
assert mp == x**3 - 3*x**2 + 7*x - 4
assert minimal_polynomial(exp(I*pi/3), x) == x**2 - x + 1
assert minimal_polynomial(exp(I*pi/4), x) == x**4 + 1
assert minimal_polynomial(exp(I*pi/6), x) == x**4 - x**2 + 1
assert minimal_polynomial(exp(I*pi/9), x) == x**6 - x**3 + 1
assert minimal_polynomial(exp(I*pi/10), x) == x**8 - x**6 + x**4 - x**2 + 1
assert minimal_polynomial(sin(pi/9), x) == 64*x**6 - 96*x**4 + 36*x**2 - 3
assert minimal_polynomial(sin(pi/11), x) == 1024*x**10 - 2816*x**8 + \
2816*x**6 - 1232*x**4 + 220*x**2 - 11
ex = 2**Rational(1, 3)*exp(Rational(2, 3)*I*pi)
assert minimal_polynomial(ex, x) == x**3 - 2
raises(NotAlgebraic, lambda: minimal_polynomial(cos(pi*sqrt(2)), x))
raises(NotAlgebraic, lambda: minimal_polynomial(sin(pi*sqrt(2)), x))
raises(NotAlgebraic, lambda: minimal_polynomial(exp(I*pi*sqrt(2)), x))
# issue 5934
ex = 1/(-36000 - 7200*sqrt(5) + (12*sqrt(10)*sqrt(sqrt(5) + 5) +
24*sqrt(10)*sqrt(-sqrt(5) + 5))**2) + 1
raises(ZeroDivisionError, lambda: minimal_polynomial(ex, x))
ex = sqrt(1 + 2**Rational(1,3)) + sqrt(1 + 2**Rational(1,4)) + sqrt(2)
mp = minimal_polynomial(ex, x)
assert degree(mp) == 48 and mp.subs({x:0}) == -16630256576
def test_minpoly_issue_7113():
# see discussion in https://github.com/sympy/sympy/pull/2234
from sympy.simplify.simplify import nsimplify
r = nsimplify(pi, tolerance=0.000000001)
mp = minimal_polynomial(r, x)
assert mp == 1768292677839237920489538677417507171630859375*x**109 - \
2734577732179183863586489182929671773182898498218854181690460140337930774573792597743853652058046464
def test_minpoly_issue_7574():
ex = -(-1)**Rational(1, 3) + (-1)**Rational(2,3)
assert minimal_polynomial(ex, x) == x + 1
def test_primitive_element():
assert primitive_element([sqrt(2)], x) == (x**2 - 2, [1])
assert primitive_element(
[sqrt(2), sqrt(3)], x) == (x**4 - 10*x**2 + 1, [1, 1])
assert primitive_element([sqrt(2)], x, polys=True) == (Poly(x**2 - 2), [1])
assert primitive_element([sqrt(
2), sqrt(3)], x, polys=True) == (Poly(x**4 - 10*x**2 + 1), [1, 1])
assert primitive_element(
[sqrt(2)], x, ex=True) == (x**2 - 2, [1], [[1, 0]])
assert primitive_element([sqrt(2), sqrt(3)], x, ex=True) == \
(x**4 - 10*x**2 + 1, [1, 1], [[Q(1, 2), 0, -Q(9, 2), 0], [-
Q(1, 2), 0, Q(11, 2), 0]])
assert primitive_element(
[sqrt(2)], x, ex=True, polys=True) == (Poly(x**2 - 2), [1], [[1, 0]])
assert primitive_element([sqrt(2), sqrt(3)], x, ex=True, polys=True) == \
(Poly(x**4 - 10*x**2 + 1), [1, 1], [[Q(1, 2), 0, -Q(9, 2),
0], [-Q(1, 2), 0, Q(11, 2), 0]])
assert primitive_element([sqrt(2)], polys=True) == (Poly(x**2 - 2), [1])
raises(ValueError, lambda: primitive_element([], x, ex=False))
raises(ValueError, lambda: primitive_element([], x, ex=True))
# Issue 14117
a, b = I*sqrt(2*sqrt(2) + 3), I*sqrt(-2*sqrt(2) + 3)
assert primitive_element([a, b, I], x) == (x**4 + 6*x**2 + 1, [1, 0, 0])
def test_field_isomorphism_pslq():
a = AlgebraicNumber(I)
b = AlgebraicNumber(I*sqrt(3))
raises(NotImplementedError, lambda: field_isomorphism_pslq(a, b))
a = AlgebraicNumber(sqrt(2))
b = AlgebraicNumber(sqrt(3))
c = AlgebraicNumber(sqrt(7))
d = AlgebraicNumber(sqrt(2) + sqrt(3))
e = AlgebraicNumber(sqrt(2) + sqrt(3) + sqrt(7))
assert field_isomorphism_pslq(a, a) == [1, 0]
assert field_isomorphism_pslq(a, b) is None
assert field_isomorphism_pslq(a, c) is None
assert field_isomorphism_pslq(a, d) == [Q(1, 2), 0, -Q(9, 2), 0]
assert field_isomorphism_pslq(
a, e) == [Q(1, 80), 0, -Q(1, 2), 0, Q(59, 20), 0]
assert field_isomorphism_pslq(b, a) is None
assert field_isomorphism_pslq(b, b) == [1, 0]
assert field_isomorphism_pslq(b, c) is None
assert field_isomorphism_pslq(b, d) == [-Q(1, 2), 0, Q(11, 2), 0]
assert field_isomorphism_pslq(b, e) == [-Q(
3, 640), 0, Q(67, 320), 0, -Q(297, 160), 0, Q(313, 80), 0]
assert field_isomorphism_pslq(c, a) is None
assert field_isomorphism_pslq(c, b) is None
assert field_isomorphism_pslq(c, c) == [1, 0]
assert field_isomorphism_pslq(c, d) is None
assert field_isomorphism_pslq(c, e) == [Q(
3, 640), 0, -Q(71, 320), 0, Q(377, 160), 0, -Q(469, 80), 0]
assert field_isomorphism_pslq(d, a) is None
assert field_isomorphism_pslq(d, b) is None
assert field_isomorphism_pslq(d, c) is None
assert field_isomorphism_pslq(d, d) == [1, 0]
assert field_isomorphism_pslq(d, e) == [-Q(
3, 640), 0, Q(71, 320), 0, -Q(377, 160), 0, Q(549, 80), 0]
assert field_isomorphism_pslq(e, a) is None
assert field_isomorphism_pslq(e, b) is None
assert field_isomorphism_pslq(e, c) is None
assert field_isomorphism_pslq(e, d) is None
assert field_isomorphism_pslq(e, e) == [1, 0]
f = AlgebraicNumber(3*sqrt(2) + 8*sqrt(7) - 5)
assert field_isomorphism_pslq(
f, e) == [Q(3, 80), 0, -Q(139, 80), 0, Q(347, 20), 0, -Q(761, 20), -5]
def test_field_isomorphism():
assert field_isomorphism(3, sqrt(2)) == [3]
assert field_isomorphism( I*sqrt(3), I*sqrt(3)/2) == [ 2, 0]
assert field_isomorphism(-I*sqrt(3), I*sqrt(3)/2) == [-2, 0]
assert field_isomorphism( I*sqrt(3), -I*sqrt(3)/2) == [-2, 0]
assert field_isomorphism(-I*sqrt(3), -I*sqrt(3)/2) == [ 2, 0]
assert field_isomorphism( 2*I*sqrt(3)/7, 5*I*sqrt(3)/3) == [ S(6)/35, 0]
assert field_isomorphism(-2*I*sqrt(3)/7, 5*I*sqrt(3)/3) == [-S(6)/35, 0]
assert field_isomorphism( 2*I*sqrt(3)/7, -5*I*sqrt(3)/3) == [-S(6)/35, 0]
assert field_isomorphism(-2*I*sqrt(3)/7, -5*I*sqrt(3)/3) == [ S(6)/35, 0]
assert field_isomorphism(
2*I*sqrt(3)/7 + 27, 5*I*sqrt(3)/3) == [ S(6)/35, 27]
assert field_isomorphism(
-2*I*sqrt(3)/7 + 27, 5*I*sqrt(3)/3) == [-S(6)/35, 27]
assert field_isomorphism(
2*I*sqrt(3)/7 + 27, -5*I*sqrt(3)/3) == [-S(6)/35, 27]
assert field_isomorphism(
-2*I*sqrt(3)/7 + 27, -5*I*sqrt(3)/3) == [ S(6)/35, 27]
p = AlgebraicNumber( sqrt(2) + sqrt(3))
q = AlgebraicNumber(-sqrt(2) + sqrt(3))
r = AlgebraicNumber( sqrt(2) - sqrt(3))
s = AlgebraicNumber(-sqrt(2) - sqrt(3))
pos_coeffs = [ S(1)/2, S(0), -S(9)/2, S(0)]
neg_coeffs = [-S(1)/2, S(0), S(9)/2, S(0)]
a = AlgebraicNumber(sqrt(2))
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == pos_coeffs
assert field_isomorphism(a, q, fast=True) == neg_coeffs
assert field_isomorphism(a, r, fast=True) == pos_coeffs
assert field_isomorphism(a, s, fast=True) == neg_coeffs
assert field_isomorphism(a, p, fast=False) == pos_coeffs
assert field_isomorphism(a, q, fast=False) == neg_coeffs
assert field_isomorphism(a, r, fast=False) == pos_coeffs
assert field_isomorphism(a, s, fast=False) == neg_coeffs
a = AlgebraicNumber(-sqrt(2))
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == neg_coeffs
assert field_isomorphism(a, q, fast=True) == pos_coeffs
assert field_isomorphism(a, r, fast=True) == neg_coeffs
assert field_isomorphism(a, s, fast=True) == pos_coeffs
assert field_isomorphism(a, p, fast=False) == neg_coeffs
assert field_isomorphism(a, q, fast=False) == pos_coeffs
assert field_isomorphism(a, r, fast=False) == neg_coeffs
assert field_isomorphism(a, s, fast=False) == pos_coeffs
pos_coeffs = [ S(1)/2, S(0), -S(11)/2, S(0)]
neg_coeffs = [-S(1)/2, S(0), S(11)/2, S(0)]
a = AlgebraicNumber(sqrt(3))
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == neg_coeffs
assert field_isomorphism(a, q, fast=True) == neg_coeffs
assert field_isomorphism(a, r, fast=True) == pos_coeffs
assert field_isomorphism(a, s, fast=True) == pos_coeffs
assert field_isomorphism(a, p, fast=False) == neg_coeffs
assert field_isomorphism(a, q, fast=False) == neg_coeffs
assert field_isomorphism(a, r, fast=False) == pos_coeffs
assert field_isomorphism(a, s, fast=False) == pos_coeffs
a = AlgebraicNumber(-sqrt(3))
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == pos_coeffs
assert field_isomorphism(a, q, fast=True) == pos_coeffs
assert field_isomorphism(a, r, fast=True) == neg_coeffs
assert field_isomorphism(a, s, fast=True) == neg_coeffs
assert field_isomorphism(a, p, fast=False) == pos_coeffs
assert field_isomorphism(a, q, fast=False) == pos_coeffs
assert field_isomorphism(a, r, fast=False) == neg_coeffs
assert field_isomorphism(a, s, fast=False) == neg_coeffs
pos_coeffs = [ S(3)/2, S(0), -S(33)/2, -S(8)]
neg_coeffs = [-S(3)/2, S(0), S(33)/2, -S(8)]
a = AlgebraicNumber(3*sqrt(3) - 8)
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == neg_coeffs
assert field_isomorphism(a, q, fast=True) == neg_coeffs
assert field_isomorphism(a, r, fast=True) == pos_coeffs
assert field_isomorphism(a, s, fast=True) == pos_coeffs
assert field_isomorphism(a, p, fast=False) == neg_coeffs
assert field_isomorphism(a, q, fast=False) == neg_coeffs
assert field_isomorphism(a, r, fast=False) == pos_coeffs
assert field_isomorphism(a, s, fast=False) == pos_coeffs
a = AlgebraicNumber(3*sqrt(2) + 2*sqrt(3) + 1)
pos_1_coeffs = [ S(1)/2, S(0), -S(5)/2, S(1)]
neg_5_coeffs = [-S(5)/2, S(0), S(49)/2, S(1)]
pos_5_coeffs = [ S(5)/2, S(0), -S(49)/2, S(1)]
neg_1_coeffs = [-S(1)/2, S(0), S(5)/2, S(1)]
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == pos_1_coeffs
assert field_isomorphism(a, q, fast=True) == neg_5_coeffs
assert field_isomorphism(a, r, fast=True) == pos_5_coeffs
assert field_isomorphism(a, s, fast=True) == neg_1_coeffs
assert field_isomorphism(a, p, fast=False) == pos_1_coeffs
assert field_isomorphism(a, q, fast=False) == neg_5_coeffs
assert field_isomorphism(a, r, fast=False) == pos_5_coeffs
assert field_isomorphism(a, s, fast=False) == neg_1_coeffs
a = AlgebraicNumber(sqrt(2))
b = AlgebraicNumber(sqrt(3))
c = AlgebraicNumber(sqrt(7))
assert is_isomorphism_possible(a, b) is True
assert is_isomorphism_possible(b, a) is True
assert is_isomorphism_possible(c, p) is False
assert field_isomorphism(sqrt(2), sqrt(3), fast=True) is None
assert field_isomorphism(sqrt(3), sqrt(2), fast=True) is None
assert field_isomorphism(sqrt(2), sqrt(3), fast=False) is None
assert field_isomorphism(sqrt(3), sqrt(2), fast=False) is None
def test_to_number_field():
assert to_number_field(sqrt(2)) == AlgebraicNumber(sqrt(2))
assert to_number_field(
[sqrt(2), sqrt(3)]) == AlgebraicNumber(sqrt(2) + sqrt(3))
a = AlgebraicNumber(sqrt(2) + sqrt(3), [S(1)/2, S(0), -S(9)/2, S(0)])
assert to_number_field(sqrt(2), sqrt(2) + sqrt(3)) == a
assert to_number_field(sqrt(2), AlgebraicNumber(sqrt(2) + sqrt(3))) == a
raises(IsomorphismFailed, lambda: to_number_field(sqrt(2), sqrt(3)))
def test_AlgebraicNumber():
minpoly, root = x**2 - 2, sqrt(2)
a = AlgebraicNumber(root, gen=x)
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
assert a.root == root
assert a.alias is None
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is False
assert a.coeffs() == [S(1), S(0)]
assert a.native_coeffs() == [QQ(1), QQ(0)]
a = AlgebraicNumber(root, gen=x, alias='y')
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
assert a.root == root
assert a.alias == Symbol('y')
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is True
a = AlgebraicNumber(root, gen=x, alias=Symbol('y'))
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
assert a.root == root
assert a.alias == Symbol('y')
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is True
assert AlgebraicNumber(sqrt(2), []).rep == DMP([], QQ)
assert AlgebraicNumber(sqrt(2), ()).rep == DMP([], QQ)
assert AlgebraicNumber(sqrt(2), (0, 0)).rep == DMP([], QQ)
assert AlgebraicNumber(sqrt(2), [8]).rep == DMP([QQ(8)], QQ)
assert AlgebraicNumber(sqrt(2), [S(8)/3]).rep == DMP([QQ(8, 3)], QQ)
assert AlgebraicNumber(sqrt(2), [7, 3]).rep == DMP([QQ(7), QQ(3)], QQ)
assert AlgebraicNumber(
sqrt(2), [S(7)/9, S(3)/2]).rep == DMP([QQ(7, 9), QQ(3, 2)], QQ)
assert AlgebraicNumber(sqrt(2), [1, 2, 3]).rep == DMP([QQ(2), QQ(5)], QQ)
a = AlgebraicNumber(AlgebraicNumber(root, gen=x), [1, 2])
assert a.rep == DMP([QQ(1), QQ(2)], QQ)
assert a.root == root
assert a.alias is None
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is False
assert a.coeffs() == [S(1), S(2)]
assert a.native_coeffs() == [QQ(1), QQ(2)]
a = AlgebraicNumber((minpoly, root), [1, 2])
assert a.rep == DMP([QQ(1), QQ(2)], QQ)
assert a.root == root
assert a.alias is None
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is False
a = AlgebraicNumber((Poly(minpoly), root), [1, 2])
assert a.rep == DMP([QQ(1), QQ(2)], QQ)
assert a.root == root
assert a.alias is None
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is False
assert AlgebraicNumber( sqrt(3)).rep == DMP([ QQ(1), QQ(0)], QQ)
assert AlgebraicNumber(-sqrt(3)).rep == DMP([ QQ(1), QQ(0)], QQ)
a = AlgebraicNumber(sqrt(2))
b = AlgebraicNumber(sqrt(2))
assert a == b
c = AlgebraicNumber(sqrt(2), gen=x)
d = AlgebraicNumber(sqrt(2), gen=x)
assert c == d
assert a == c
a = AlgebraicNumber(sqrt(2), [1, 2])
b = AlgebraicNumber(sqrt(2), [1, 3])
assert a != b and a != sqrt(2) + 3
assert (a == x) is False and (a != x) is True
a = AlgebraicNumber(sqrt(2), [1, 0])
b = AlgebraicNumber(sqrt(2), [1, 0], alias=y)
assert a.as_poly(x) == Poly(x)
assert b.as_poly() == Poly(y)
assert a.as_expr() == sqrt(2)
assert a.as_expr(x) == x
assert b.as_expr() == sqrt(2)
assert b.as_expr(x) == x
a = AlgebraicNumber(sqrt(2), [2, 3])
b = AlgebraicNumber(sqrt(2), [2, 3], alias=y)
p = a.as_poly()
assert p == Poly(2*p.gen + 3)
assert a.as_poly(x) == Poly(2*x + 3)
assert b.as_poly() == Poly(2*y + 3)
assert a.as_expr() == 2*sqrt(2) + 3
assert a.as_expr(x) == 2*x + 3
assert b.as_expr() == 2*sqrt(2) + 3
assert b.as_expr(x) == 2*x + 3
a = AlgebraicNumber(sqrt(2))
b = to_number_field(sqrt(2))
assert a.args == b.args == (sqrt(2), Tuple(1, 0))
b = AlgebraicNumber(sqrt(2), alias='alpha')
assert b.args == (sqrt(2), Tuple(1, 0), Symbol('alpha'))
a = AlgebraicNumber(sqrt(2), [1, 2, 3])
assert a.args == (sqrt(2), Tuple(1, 2, 3))
def test_to_algebraic_integer():
a = AlgebraicNumber(sqrt(3), gen=x).to_algebraic_integer()
assert a.minpoly == x**2 - 3
assert a.root == sqrt(3)
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
a = AlgebraicNumber(2*sqrt(3), gen=x).to_algebraic_integer()
assert a.minpoly == x**2 - 12
assert a.root == 2*sqrt(3)
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
a = AlgebraicNumber(sqrt(3)/2, gen=x).to_algebraic_integer()
assert a.minpoly == x**2 - 12
assert a.root == 2*sqrt(3)
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
a = AlgebraicNumber(sqrt(3)/2, [S(7)/19, 3], gen=x).to_algebraic_integer()
assert a.minpoly == x**2 - 12
assert a.root == 2*sqrt(3)
assert a.rep == DMP([QQ(7, 19), QQ(3)], QQ)
def test_IntervalPrinter():
ip = IntervalPrinter()
assert ip.doprint(x**Q(1, 3)) == "x**(mpi('1/3'))"
assert ip.doprint(sqrt(x)) == "x**(mpi('1/2'))"
def test_isolate():
assert isolate(1) == (1, 1)
assert isolate(S(1)/2) == (S(1)/2, S(1)/2)
assert isolate(sqrt(2)) == (1, 2)
assert isolate(-sqrt(2)) == (-2, -1)
assert isolate(sqrt(2), eps=S(1)/100) == (S(24)/17, S(17)/12)
assert isolate(-sqrt(2), eps=S(1)/100) == (-S(17)/12, -S(24)/17)
raises(NotImplementedError, lambda: isolate(I))
def test_minpoly_fraction_field():
assert minimal_polynomial(1/x, y) == -x*y + 1
assert minimal_polynomial(1 / (x + 1), y) == (x + 1)*y - 1
assert minimal_polynomial(sqrt(x), y) == y**2 - x
assert minimal_polynomial(sqrt(x + 1), y) == y**2 - x - 1
assert minimal_polynomial(sqrt(x) / x, y) == x*y**2 - 1
assert minimal_polynomial(sqrt(2) * sqrt(x), y) == y**2 - 2 * x
assert minimal_polynomial(sqrt(2) + sqrt(x), y) == \
y**4 + (-2*x - 4)*y**2 + x**2 - 4*x + 4
assert minimal_polynomial(x**Rational(1,3), y) == y**3 - x
assert minimal_polynomial(x**Rational(1,3) + sqrt(x), y) == \
y**6 - 3*x*y**4 - 2*x*y**3 + 3*x**2*y**2 - 6*x**2*y - x**3 + x**2
assert minimal_polynomial(sqrt(x) / z, y) == z**2*y**2 - x
assert minimal_polynomial(sqrt(x) / (z + 1), y) == (z**2 + 2*z + 1)*y**2 - x
assert minimal_polynomial(1/x, y, polys=True) == Poly(-x*y + 1, y)
assert minimal_polynomial(1 / (x + 1), y, polys=True) == \
Poly((x + 1)*y - 1, y)
assert minimal_polynomial(sqrt(x), y, polys=True) == Poly(y**2 - x, y)
assert minimal_polynomial(sqrt(x) / z, y, polys=True) == \
Poly(z**2*y**2 - x, y)
# this is (sqrt(1 + x**3)/x).integrate(x).diff(x) - sqrt(1 + x**3)/x
a = sqrt(x)/sqrt(1 + x**(-3)) - sqrt(x**3 + 1)/x + 1/(x**(S(5)/2)* \
(1 + x**(-3))**(S(3)/2)) + 1/(x**(S(11)/2)*(1 + x**(-3))**(S(3)/2))
assert minimal_polynomial(a, y) == y
raises(NotAlgebraic, lambda: minimal_polynomial(exp(x), y))
raises(GeneratorsError, lambda: minimal_polynomial(sqrt(x), x))
raises(GeneratorsError, lambda: minimal_polynomial(sqrt(x) - y, x))
raises(NotImplementedError, lambda: minimal_polynomial(sqrt(x), y, compose=False))
@slow
def test_minpoly_fraction_field_slow():
assert minimal_polynomial(minimal_polynomial(sqrt(x**Rational(1,5) - 1),
y).subs(y, sqrt(x**Rational(1,5) - 1)), z) == z
def test_minpoly_domain():
assert minimal_polynomial(sqrt(2), x, domain=QQ.algebraic_field(sqrt(2))) == \
x - sqrt(2)
assert minimal_polynomial(sqrt(8), x, domain=QQ.algebraic_field(sqrt(2))) == \
x - 2*sqrt(2)
assert minimal_polynomial(sqrt(Rational(3,2)), x,
domain=QQ.algebraic_field(sqrt(2))) == 2*x**2 - 3
raises(NotAlgebraic, lambda: minimal_polynomial(y, x, domain=QQ))
| gpl-2.0 | -7,219,002,142,231,568,000 | 37.005312 | 154 | 0.586379 | false |
nioinnovation/safepickle | safepickle/encoding.py | 1 | 1118 | from .types import TypesManager
def encode(obj):
""" Encodes an item preparing it to be json serializable
Encode relies on defined custom types to provide encoding, which in turn
are responsible for using the 'encode' function parameter passed to them
to recursively encode contained items.
Args:
obj: item to encode
Returns:
encoded item
"""
# handle basic types separately
if obj is None or isinstance(obj, (bool, int, float, str)):
return obj
for type_ in TypesManager.get_types():
if type_.can_encode(obj):
return type_.encode(obj, encode)
raise TypeError("Type: '{}' is not supported".format(type(obj)))
def decode(dct):
""" Object hook to use to decode object literals.
This function is called from within json loading mechanism
for every literal
Args:
dct (dict): literal to decode
Returns:
decoded literal
"""
for type_ in TypesManager.get_types():
if type_.can_decode(dct):
return type_.decode(dct)
return dct
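# Illustrative round trip (a sketch, not part of the library: it assumes
# TypesManager has a type registered for every non-basic value involved):
#
#   import json
#   payload = json.dumps(encode({'when': some_datetime}))
#   restored = json.loads(payload, object_hook=decode)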
| apache-2.0 | -3,217,787,463,624,583,700 | 23.844444 | 77 | 0.627907 | false |
pligor/predicting-future-product-prices | 04_time_series_prediction/gp_opt/price_history_27_gp_opt.py | 1 | 6918 | from models.model_21_price_history_seq2seq_dyn_dec_ins import PriceHistorySeq2SeqDynDecIns
import pickle
import dill
from os import path, remove
import numpy as np
from skopt.space.space import Integer, Real, Categorical
from skopt import gp_minimize
import tensorflow as tf
from mylibs.jupyter_notebook_helper import MyOptimizeResult
class PriceHistoryGpOpt(object):
NUM_GPUS = 1
LAMDA2_COUNT = 3
# (silly?) idea: instead of the full training dataset random instances on every bayesian optimization run (not sure about this)
def __init__(self, model, stats_npy_filename, cv_score_dict_npy_filename, res_gp_filename, bayes_opt_dir,
random_state=None, plotter=None, **kwargs):
super(PriceHistoryGpOpt, self).__init__()
self.model = model
self.static_params = kwargs
self.plotter = plotter
self.random_state = random_state
self.stats_filepath = bayes_opt_dir + '/' + stats_npy_filename + '.npy'
self.cv_score_dict_filepath = bayes_opt_dir + '/' + cv_score_dict_npy_filename + '.npy'
self.res_gp_filepath = bayes_opt_dir + '/{}.pickle'.format(res_gp_filename)
def run_opt(self, n_random_starts, n_calls):
if path.isfile(self.res_gp_filepath):
with open(self.res_gp_filepath) as fp: # Python 3: open(..., 'rb')
opt_res = pickle.load(fp)
else:
res_gp = self.gpOptimization(n_random_starts=n_random_starts, n_calls=n_calls)
opt_res = MyOptimizeResult(res_gp=res_gp)
with open(self.res_gp_filepath, 'w') as fp: # Python 3: open(..., 'wb')
pickle.dump(opt_res, fp)
return opt_res
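# Illustrative call (a sketch: file names and counts are made up, and any
# extra kwargs are forwarded to the model's get_cross_validation_score):
#
#   opt = PriceHistoryGpOpt(model=model, stats_npy_filename='stats',
#                           cv_score_dict_npy_filename='cv_scores',
#                           res_gp_filename='res_gp', bayes_opt_dir='bayes_opt',
#                           random_state=0)
#   opt_res = opt.run_opt(n_random_starts=10, n_calls=30)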
def objective(self, params): # Here we define the metric we want to minimise
params_str = "params: {}".format(params)
print 'num_units, keep_prob_rnn_out, keep_prob_readout, learning_rate, rnn_hidden_dim, mobile_attrs_dim, lamdas'
print params_str
# try:
cv_score, stats_list = self.get_or_calc(params=params)
# save every time in case it crashes
self.__save_dictionary(filepath=self.stats_filepath, key=params, val=stats_list)
self.__save_dictionary(filepath=self.cv_score_dict_filepath, key=params, val=cv_score)
if self.plotter is not None:
self.plotter(stats_list=stats_list, label_text=params_str)
# except AssertionError:
# cv_score = None
#
# return None
return cv_score # minimize validation error
def get_or_calc(self, params):
params = tuple(params)
if path.isfile(self.cv_score_dict_filepath):
cv_score_dict = np.load(self.cv_score_dict_filepath)[()]
if params in cv_score_dict:
stats_dic = np.load(self.stats_filepath)[()]
assert params in stats_dic, 'if you have created a cv score you must have saved the stats list before'
cv_score, stats_list = cv_score_dict[params], stats_dic[params]
else:
cv_score, stats_list = self.calc(params=params)
else:
cv_score, stats_list = self.calc(params=params)
return cv_score, stats_list
def calc(self, params):
num_units, keep_prob_rnn_out, keep_prob_readout, learning_rate, rnn_hidden_dim, mobile_attrs_dim = params[
:-self.LAMDA2_COUNT]
lamda2_list = params[-self.LAMDA2_COUNT:]
cv_score, stats_list = self.model.get_cross_validation_score(
enc_num_units=num_units,
dec_num_units=num_units,
keep_prob_rnn_out=keep_prob_rnn_out,
keep_prob_readout=keep_prob_readout,
learning_rate=learning_rate,
rnn_hidden_dim=rnn_hidden_dim,
mobile_attrs_dim=mobile_attrs_dim,
lamda2=lamda2_list,
# DO NOT TEST
decoder_first_input=PriceHistorySeq2SeqDynDecIns.DECODER_FIRST_INPUT.ZEROS,
batch_norm_enabled=True,
**self.static_params
)
return cv_score, stats_list
@staticmethod
def __save_dictionary(filepath, key, val):
if filepath is not None:
stats_dic = np.load(filepath)[()] if path.isfile(filepath) else dict()
stats_dic[tuple(key)] = val
np.save(filepath, stats_dic)
# def __clear_previously_saved_files(self):
# #filepaths = [self.stats_filepath, self.cv_score_dict_filepath]
# filepaths = [self.stats_filepath,]
# for filepath in filepaths:
# if path.isfile(filepath):
# remove(self.stats_filepath) # delete previously saved file
def gpOptimization(self, n_random_starts, n_calls):
# self.__clear_previously_saved_files()
# here we will exploit the information from the previous experiment to calibrate what we think are the best parameters
# best_params = [500, #50 was obviously small so we are going to range it from 300 to 700
# tf.nn.tanh, #activation we are not going to try and guess via gp opt, but just use this one
# 0.0001, #since we had as optimal the smallest one we are going to try and allow also smaller values
# 0.62488034788862112,
# 0.001]
num_units = Integer(300, 600) # the upper limit is mostly because of computational resources
rnn_hidden_dim = Integer(100, 300) # the upper limit is mostly because of computational resources
mobile_attrs_dim = Integer(100, 300) # the upper limit is mostly because of computational resources
keep_prob_rnn_out = Real(0.5, 1.0, prior='uniform') # uniform or log-uniform
keep_prob_readout = Real(0.5, 1.0, prior='uniform')
learning_rate = Real(1e-6, 1e-2, prior='log-uniform') # uniform or log-uniform
lamda2_list = [Real(1e-5, 1e0, prior='log-uniform')] * self.LAMDA2_COUNT # uniform or log-uniform
space = [num_units, keep_prob_rnn_out, keep_prob_readout, learning_rate, rnn_hidden_dim,
mobile_attrs_dim] + lamda2_list
return gp_minimize(
func=self.objective, # function that we wish to minimise
dimensions=space, # the search space for the hyper-parameters
# x0=x0, #inital values for the hyper-parameters
n_calls=n_calls, # number of times the function will be evaluated
random_state=self.random_state, # random seed
n_random_starts=n_random_starts, # before we start modelling the optimised function with a GP Regression
# model, we want to try a few random choices for the hyper-parameters.
# kappa=1.9, # trade-off between exploration vs. exploitation.
n_jobs=self.NUM_GPUS
)
| agpl-3.0 | -3,510,144,819,399,291,400 | 44.81457 | 131 | 0.616941 | false |
marcomaccio/python-docs-samples | scripts/auto_link_to_docs.py | 1 | 3864 | #!/usr/bin/env python
# Copyright (C) 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Process docs-links.json and updates all READMEs and replaces
<!-- auto-doc-link --><!-- end-auto-doc-link -->
With a generated list of documentation backlinks.
"""
from collections import defaultdict
import json
import os
import re
REPO_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__),
'..'))
DOC_SITE_ROOT = 'https://cloud.google.com'
AUTO_DOC_LINK_EXP = re.compile(
r'<!-- auto-doc-link -->.*?<!-- end-auto-doc-link -->\n',
re.DOTALL)
def invert_docs_link_map(docs_links):
"""
The docs links map is in this format:
{
"doc_path": [
"file_path",
]
}
This transforms it to:
{
"file_path": [
"doc_path",
]
}
"""
files_to_docs = defaultdict(list)
for doc, files in docs_links.iteritems():
for file in files:
files_to_docs[file].append(doc)
files_to_docs[file] = list(set(files_to_docs[file]))
return files_to_docs
def collect_docs_for_readmes(files_to_docs):
"""
There's a one-to-many relationship between readmes and files. This method
finds the readme for each file and consolidates all docs references.
"""
readmes_to_docs = defaultdict(list)
for file, docs in files_to_docs.iteritems():
readme = get_readme_path(file)
readmes_to_docs[readme].extend(docs)
readmes_to_docs[readme] = list(set(readmes_to_docs[readme]))
return readmes_to_docs
def linkify(docs):
"""Adds the documentation site root to doc paths, creating a full URL."""
return [DOC_SITE_ROOT + x for x in docs]
def replace_contents(file_path, regex, new_content):
with open(file_path, 'r+') as f:
content = f.read()
content = regex.sub(new_content, content)
f.seek(0)
f.write(content)
def get_readme_path(file_path):
"""Gets the readme for an associated sample file, basically just the
README.md in the same directory."""
dir = os.path.dirname(file_path)
readme = os.path.join(
REPO_ROOT, dir, 'README.md')
return readme
def generate_doc_link_statement(docs):
links = linkify(docs)
if len(links) == 1:
return """<!-- auto-doc-link -->
These samples are used on the following documentation page:
> {}
<!-- end-auto-doc-link -->
""".format(links.pop())
else:
return """<!-- auto-doc-link -->
These samples are used on the following documentation pages:
>
{}
<!-- end-auto-doc-link -->
""".format('\n'.join(['* {}'.format(x) for x in links]))
def update_readme(readme_path, docs):
if not os.path.exists(readme_path):
print('{} doesn\'t exist'.format(readme_path))
return
replace_contents(
readme_path,
AUTO_DOC_LINK_EXP,
generate_doc_link_statement(docs))
print('Updated {}'.format(readme_path))
def main():
docs_links = json.load(open(
os.path.join(REPO_ROOT, 'scripts', 'docs-links.json'), 'r'))
files_to_docs = invert_docs_link_map(docs_links)
readmes_to_docs = collect_docs_for_readmes(files_to_docs)
for readme, docs in readmes_to_docs.iteritems():
update_readme(readme, docs)
if __name__ == '__main__':
main()
| apache-2.0 | 8,503,063,250,779,012,000 | 25.833333 | 77 | 0.627329 | false |
mlgruby/mining | mining/utils/__init__.py | 1 | 1603 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import re
import os
import unicodedata
import ConfigParser
from bson import ObjectId
from datetime import datetime
from mining.settings import PROJECT_PATH
def slugfy(text):
slug = unicodedata.normalize("NFKD", text).encode("UTF-8", "ignore")
slug = re.sub(r"[^\w]+", " ", slug)
slug = "-".join(slug.lower().strip().split())
if not slug:
return None
return slug
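# Example: slugfy(u"Hello, World!") returns "hello-world", while a string with
# no word characters at all returns None.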
def conf(section, ini="mining.ini"):
config = ConfigParser.ConfigParser()
config.read(os.path.join(PROJECT_PATH, ini))
_dict = {}
options = config.options(section)
for option in options:
try:
_dict[option] = config.get(section, option)
except:
_dict[option] = None
if 'sql_conn_params' in options:
import ast
_dict['sql_conn_params'] = ast.literal_eval(_dict['sql_conn_params'])
else:
_dict['sql_conn_params'] = {}
return _dict
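# Illustrative use (assumes mining.ini actually contains the section asked for):
#
#   settings = conf("openmining")
#   settings["sql_conn_params"]  # parsed dict, or {} when the option is absent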
def log_it(s, name=u"core"):
with open("/tmp/openmining-{}.log".format(name), "a") as log:
msg = u"{} => {}\n".format(datetime.now(), s)
log.write(msg.encode('utf-8'))
def parse_dumps(obj):
if isinstance(obj, datetime):
return str(obj.strftime("%Y-%m-%d %H:%M:%S"))
if isinstance(obj, ObjectId):
return str(obj)
return json.JSONEncoder().default(obj)  # let json raise TypeError for anything else
def __from__(path):
try:
_import = path.split('.')[-1]
_from = u".".join(path.split('.')[:-1])
return getattr(__import__(_from, fromlist=[_import]), _import)
except:
return object
| mit | -5,695,078,317,404,336,000 | 24.046875 | 77 | 0.592639 | false |
inspirehep/inspire-schemas | tests/unit/test_job_builder.py | 1 | 16050 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2019 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
import pytest
from inspire_schemas.builders.jobs import JobBuilder
def test_no_data():
expected = {
'_collections': ['Jobs'],
'status': 'pending'
}
builder = JobBuilder()
assert builder.record == expected
def test_data_in_init():
expected = {
'_collections': ['Jobs'],
'status': 'pending',
'some_key': 'some_value',
'some_key_with_list': ['some', 'list'],
}
builder = JobBuilder(expected)
assert builder.record == expected
def test_ensure_field_no_field():
builder = JobBuilder()
assert 'test_field' not in builder.record
builder._ensure_field('test_field', default_value='test_value')
assert 'test_field' in builder.record
assert builder.record['test_field'] == 'test_value'
def test_ensure_field_existing_field():
builder = JobBuilder()
assert 'status' in builder.record
builder._ensure_field('status', 'other_status')
assert builder.record['status'] == 'pending'
def test_ensure_field_separate():
builder = JobBuilder()
obj = {'field_one': 'value'}
builder._ensure_field('test_field', default_value='test_value', obj=obj)
builder._ensure_field('field_one', 'wrong_value', obj=obj)
assert 'test_field' in obj
assert obj['test_field'] == 'test_value'
assert obj['field_one'] == 'value'
def test_ensure_list_field_missing():
builder = JobBuilder()
assert 'list_field' not in builder.record
builder._ensure_list_field('list_field')
assert 'list_field' in builder.record
assert builder.record['list_field'] == []
def test_prepare_url():
expected1 = {'value': 'http://url1.com'}
expected2 = {'description': 'Url description', 'value': 'http://url2.com'}
builder = JobBuilder()
url1 = builder._prepare_url('http://url1.com')
url2 = builder._prepare_url('http://url2.com', 'Url description')
with pytest.raises(TypeError):
builder._prepare_url(description='only description')
assert url1 == expected1
assert url2 == expected2
def test_ensure_list_on_existing():
builder = JobBuilder()
builder._ensure_list_field('_collections')
assert builder.record['_collections'] == ['Jobs']
def test_ensure_dict_field_missing():
builder = JobBuilder()
builder.record['existing_dict'] = {'some_dict': 'some_value'}
assert 'dict_field' not in builder.record
builder._ensure_dict_field('dict_field')
assert 'dict_field' in builder.record
assert builder.record['dict_field'] == {}
def test_ensure_dict_field_existing():
builder = JobBuilder()
builder.record['existing_dict'] = {'some_dict': 'some_value'}
builder._ensure_dict_field('existing_dict')
assert builder.record['existing_dict'] == {'some_dict': 'some_value'}
def test_sourced_dict_local_source():
builder = JobBuilder(source='global')
expected = {
'source': 'local',
'value': 'foo'
}
result = builder._sourced_dict('local', value='foo')
assert result == expected
def test_sourced_dict_global_source():
builder = JobBuilder(source='global')
expected = {
'source': 'global',
'value': 'foo'
}
result = builder._sourced_dict(None, value='foo')
assert result == expected
def test_sourced_dict_no_source():
builder = JobBuilder()
expected = {
'value': 'foo'
}
result = builder._sourced_dict(None, value='foo')
assert result == expected
def test_append_to_field_some_simple_data():
builder = JobBuilder()
builder._append_to('test_field', 'first_element')
assert 'test_field' in builder.record
assert builder.record['test_field'] == ['first_element']
builder._append_to('test_field', 'second_element')
assert builder.record['test_field'] == ['first_element', 'second_element']
def test_append_to_field_duplicated_simple_data():
builder = JobBuilder()
builder._append_to('test_field', 'first_element')
builder._append_to('test_field', 'second_element')
builder._append_to('test_field', 'first_element')
builder._append_to('test_field', 'second_element')
assert builder.record['test_field'] == ['first_element', 'second_element']
def test_append_to_field_complex_data():
element_one = {
'key': 'value',
'list_key': ['some', 'values'],
'dict_key': {
'key': 'another_value',
'something': 'else'
}
}
element_two = {
'key': 'value2',
'other_list_key': ['some', 'values'],
}
builder = JobBuilder()
builder._append_to('some_field', element_one)
assert builder.record['some_field'] == [element_one]
builder._append_to('some_field', element_two)
assert builder.record['some_field'] == [element_one, element_two]
def test_append_to_field_duplicated_complex_data():
element_one = {
'key': 'value',
'list_key': ['some', 'values'],
'dict_key': {
'key': 'another_value',
'something': 'else'
}
}
element_two = {
'key': 'value2',
'other_list_key': ['some', 'values'],
}
builder = JobBuilder()
builder._append_to('some_field', element_one)
builder._append_to('some_field', element_two)
builder._append_to('some_field', element_one)
builder._append_to('some_field', element_two)
assert builder.record['some_field'] == [element_one, element_two]
def test_append_to_field_from_kwargs():
element_one = {
'key': 'value',
'list_key': ['some', 'values'],
'dict_key': {
'key': 'another_value',
'something': 'else'
}
}
element_two = {
'key': 'value2',
'other_list_key': ['some', 'values'],
}
builder = JobBuilder()
builder._append_to('some_field', **element_one)
assert builder.record['some_field'] == [element_one]
builder._append_to('some_field', element_two)
assert builder.record['some_field'] == [element_one, element_two]
def test_add_private_note_with_source():
expected = {
'_collections': ['Jobs'], 'status': 'pending',
'_private_notes': [{'source': 'http://some/source', 'value': 'Note'}]
}
builder = JobBuilder()
builder.add_private_note("Note", "http://some/source")
assert builder.record == expected
def test_add_private_note_without_source():
expected = {
'_collections': ['Jobs'], 'status': 'pending',
'_private_notes': [{'value': 'Note'}]
}
builder = JobBuilder()
builder.add_private_note("Note", "")
assert builder.record == expected
def test_add_accelerator_experiment():
expected = {
'_collections': ['Jobs'],
'status': 'pending',
'accelerator_experiments': [{
'accelerator': 'accelerator',
'curated_relation': False,
'experiment': 'test1',
'institution': 'test2',
'legacy_name': 'test3',
'record': {'$ref': 'http://something'}
}]
}
builder = JobBuilder()
builder.add_accelerator_experiment(
'accelerator', False, 'test1', 'test2', 'test3', 'http://something'
)
assert builder.record == expected
def test_add_acquisition_source():
expected = {
'_collections': ['Jobs'],
'status': 'pending',
'acquisition_source': {
'source': 'source',
'submission_number': '12345',
'datetime': '1999-02-01',
'email': '[email protected]',
'method': 'method',
'orcid': 'orcid',
'internal_uid': 'uuid'
}
}
expected2 = {
'_collections': ['Jobs'],
'status': 'pending',
'acquisition_source': {'submission_number': 'None', 'email': '[email protected]'}
}
builder = JobBuilder()
builder.add_acquisition_source(
'1999-02-01', '[email protected]', 'uuid', 'method', 'orcid', 'source', 12345
)
assert builder.record == expected
builder.add_acquisition_source(email='[email protected]')
assert builder.record == expected2
def test_add_arxiv_category():
expected = {
'_collections': ['Jobs'], 'status': 'pending',
'arxiv_categories': ['cat1', 'cat2']
}
builder = JobBuilder()
builder.add_arxiv_category('cat1')
builder.add_arxiv_category('cat2')
builder.add_arxiv_category('other')
builder.add_arxiv_category(''.join(list('other')))
assert builder.record == expected
def test_add_contact():
expected = [
{
'name': 'name',
'email': 'email',
'curated_relation': True,
'record': {'$ref': 'http://nothing'}
},
{
'name': 'name2',
'email': 'email2'
},
{
'name': 'name3',
},
{
'email': 'email3'
}
]
builder = JobBuilder()
builder.add_contact(
name='name', email='email', curated_relation=True, record='http://nothing'
)
builder.add_contact(
name='name2',
email='email2'
)
builder.add_contact(name='name3')
builder.add_contact(email='email3')
assert builder.record['contact_details'] == expected
def test_add_external_system_identifiers():
expected = [
{'schema': 'schema1', 'value': 'value1'},
{'schema': 'schema2', 'value': 'value2'}
]
builder = JobBuilder()
builder.add_external_system_identifiers('value1', 'schema1')
builder.add_external_system_identifiers(schema='schema2', value='value2')
with pytest.raises(TypeError):
builder.add_external_system_identifiers('aaaaa')
assert builder.record['external_system_identifiers'] == expected
def test_add_institution():
expected = [
{
'value': 'value',
'curated_relation': False,
'record': {'$ref': 'http://xyz'}
},
{'value': 'value2'}
]
builder = JobBuilder()
builder.add_institution(
value='value',
curated_relation=False,
record={'$ref': 'http://xyz'}
)
builder.add_institution('value2')
with pytest.raises(TypeError):
builder.add_institution(record='blah')
assert builder.record['institutions'] == expected
def test_add_rank():
expected = ['Rank1', 'Rank2']
builder = JobBuilder()
builder.add_rank('Rank1')
builder.add_rank('Rank2')
assert builder.record['ranks'] == expected
def test_add_reference_emails():
expected = {'emails': ['[email protected]', '[email protected]']}
builder = JobBuilder()
builder.add_reference_email('[email protected]')
builder.add_reference_email('[email protected]')
builder.add_reference_email('')
assert builder.record['reference_letters'] == expected
def test_reference_urls():
expected = {
'urls': [
{'value': 'http://some_url.ch'},
{'value': 'http://other.url.com', 'description': 'url description'}
]
}
builder = JobBuilder()
builder.add_reference_url('http://some_url.ch')
builder.add_reference_url('http://other.url.com', "url description")
builder.add_reference_url('')
assert builder.record['reference_letters'] == expected
def test_add_reference_both():
expected = {
'emails': ['[email protected]', '[email protected]'],
'urls': [
{'value': 'https://jakas_strona.pl'},
{'value': 'http://xyz.uk', 'description': 'Some description'}
]
}
builder = JobBuilder()
builder.add_reference_email('[email protected]')
builder.add_reference_email('[email protected]')
builder.add_reference_url("https://jakas_strona.pl")
builder.add_reference_url('http://xyz.uk', 'Some description')
assert builder.record['reference_letters'] == expected
def test_add_region():
expected = ['Region1', 'Region2']
builder = JobBuilder()
builder.add_region('Region1')
builder.add_region('Region2')
assert builder.record['regions'] == expected
def test_add_url():
expected = [
{'value': 'http://url.com'},
{'value': 'https://url2.ch', 'description': 'Description for this url'}
]
builder = JobBuilder()
builder.add_url('http://url.com')
builder.add_url('https://url2.ch', 'Description for this url')
with pytest.raises(TypeError):
builder.add_url(description="some description")
assert builder.record['urls'] == expected
def test_set_deadline():
expected1 = "2099-02-15"
expected2 = "1099-09-20"
builder = JobBuilder()
builder.set_deadline(expected1)
assert builder.record['deadline_date'] == expected1
builder.set_deadline(expected2)
assert builder.record['deadline_date'] == expected2
def test_set_external_job_identifier():
expected1 = 'Identifier1'
expected2 = 'Other Identifier'
builder = JobBuilder()
builder.set_external_job_identifier(expected1)
assert builder.record['external_job_identifier'] == expected1
builder.set_external_job_identifier(expected2)
assert builder.record['external_job_identifier'] == expected2
def test_set_description():
expected1 = "Description"
expected2 = "Other Description"
builder = JobBuilder()
builder.set_description(expected1)
assert builder.record['description'] == expected1
builder.set_description(expected2)
assert builder.record['description'] == expected2
def test_set_status():
expected1 = 'pending'
expected2 = 'closed'
builder = JobBuilder()
builder.set_status(expected1)
assert builder.record['status'] == expected1
builder.set_status(expected2)
assert builder.record['status'] == expected2
def test_set_title():
expected1 = 'TITLE1'
expected2 = 'TITLE2'
builder = JobBuilder()
builder.set_title(expected1)
assert builder.record['position'] == expected1
builder.set_title(expected2)
assert builder.record['position'] == expected2
def test_process_reference_contact_list():
contacts = [
"[email protected]",
"http://some-url.com/other/?url=1&part=2",
"[email protected]"
]
builder = JobBuilder()
builder.add_reference_contacts(contacts)
expected_data = {
'emails': [
'[email protected]',
'[email protected]'
],
'urls': [
{'value': 'http://some-url.com/other/?url=1&part=2'}
]
}
assert builder.record['reference_letters'] == expected_data
def test_sanitization_of_description():
expected = '<div>Some text <em>emphasized</em> linking to <a href="http://example.com">'\
'http://example.com</a></div>'
description = '<div>Some <span>text</span> <em class="shiny">emphasized</em> linking to '\
'http://example.com</div>'
builder = JobBuilder()
builder.set_description(description)
assert builder.record['description'] == expected
| gpl-2.0 | 1,155,045,660,382,006,500 | 24.516693 | 94 | 0.607788 | false |
FireBladeNooT/Medusa_1_6 | lib/github/tests/IssueComment.py | 1 | 3235 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
import datetime
class IssueComment(Framework.TestCase):
def setUp(self):
Framework.TestCase.setUp(self)
self.comment = self.g.get_user().get_repo("PyGithub").get_issue(28).get_comment(5808311)
def testAttributes(self):
self.assertEqual(self.comment.body, "Comment created by PyGithub")
self.assertEqual(self.comment.created_at, datetime.datetime(2012, 5, 20, 11, 46, 42))
self.assertEqual(self.comment.id, 5808311)
self.assertEqual(self.comment.updated_at, datetime.datetime(2012, 5, 20, 11, 46, 42))
self.assertEqual(self.comment.url, "https://api.github.com/repos/jacquev6/PyGithub/issues/comments/5808311")
self.assertEqual(self.comment.user.login, "jacquev6")
self.assertEqual(self.comment.html_url, "https://github.com/jacquev6/PyGithub/issues/28#issuecomment-5808311")
# test __repr__() based on this attributes
self.assertEqual(self.comment.__repr__(), 'IssueComment(user=NamedUser(login="jacquev6"), id=5808311)')
def testEdit(self):
self.comment.edit("Comment edited by PyGithub")
self.assertEqual(self.comment.body, "Comment edited by PyGithub")
self.assertEqual(self.comment.updated_at, datetime.datetime(2012, 5, 20, 11, 53, 59))
def testDelete(self):
self.comment.delete()
| gpl-3.0 | 2,692,575,707,421,382,000 | 57.818182 | 118 | 0.51221 | false |
dominicelse/scipy | benchmarks/benchmarks/sparse.py | 1 | 10059 | """
Simple benchmarks for the sparse module
"""
from __future__ import division, print_function, absolute_import
import warnings
import time
import timeit
import numpy
import numpy as np
from numpy import ones, array, asarray, empty, random, zeros
try:
from scipy import sparse
from scipy.sparse import (csr_matrix, coo_matrix, dia_matrix, lil_matrix,
dok_matrix, rand, SparseEfficiencyWarning)
except ImportError:
pass
from .common import Benchmark
def random_sparse(m, n, nnz_per_row):
rows = numpy.arange(m).repeat(nnz_per_row)
cols = numpy.random.randint(0, n, size=nnz_per_row*m)
vals = numpy.random.random_sample(m*nnz_per_row)
return coo_matrix((vals, (rows, cols)), (m, n)).tocsr()
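# Note: duplicate (row, col) pairs are summed during the coo -> csr conversion,
# so a row can hold fewer than nnz_per_row stored entries; with n >> nnz_per_row
# collisions are rare. Quick sanity check:
#
#   A = random_sparse(100, 1000, 5)
#   assert A.shape == (100, 1000) and A.nnz <= 500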
# TODO move this to a matrix gallery and add unittests
def poisson2d(N, dtype='d', format=None):
"""
Return a sparse matrix for the 2D Poisson problem
with standard 5-point finite difference stencil on a
square N-by-N grid.
"""
if N == 1:
diags = asarray([[4]], dtype=dtype)
return dia_matrix((diags, [0]), shape=(1, 1)).asformat(format)
offsets = array([0, -N, N, -1, 1])
diags = empty((5, N**2), dtype=dtype)
diags[0] = 4 # main diagonal
diags[1:] = -1 # all offdiagonals
diags[3, N-1::N] = 0 # first lower diagonal
diags[4, N::N] = 0 # first upper diagonal
return dia_matrix((diags, offsets), shape=(N**2, N**2)).asformat(format)
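# Worked example: for N=2 the grid has 4 unknowns and the assembled operator is
# the classic 4x4 matrix below (easy to check against the 5-point stencil):
#
#   A = poisson2d(2, format='csr')
#   expected = [[4, -1, -1, 0],
#               [-1, 4, 0, -1],
#               [-1, 0, 4, -1],
#               [0, -1, -1, 4]]
#   assert (A.toarray() == expected).all()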
class Arithmetic(Benchmark):
param_names = ['format', 'XY', 'op']
params = [
['csr'],
['AA', 'AB', 'BA', 'BB'],
['__add__', '__sub__', 'multiply', '__mul__']
]
def setup(self, format, XY, op):
matrices = dict(A=poisson2d(250, format=format),
B=poisson2d(250, format=format)**2)
x = matrices[XY[0]]
self.y = matrices[XY[1]]
self.fn = getattr(x, op)
self.fn(self.y) # warmup
def time_arithmetic(self, format, XY, op):
self.fn(self.y)
class Sort(Benchmark):
params = ['Rand10', 'Rand25', 'Rand50', 'Rand100', 'Rand200']
param_names = ['matrix']
def setup(self, matrix):
n = 10000
if matrix.startswith('Rand'):
k = int(matrix[4:])
self.A = random_sparse(n, n, k)
self.A.has_sorted_indices = False
self.A.indices[:2] = 2, 1
else:
raise NotImplementedError()
def time_sort(self, matrix):
"""sort CSR column indices"""
self.A.sort_indices()
class Matvec(Benchmark):
params = [
['Identity', 'Poisson5pt', 'Block2x2', 'Block3x3'],
['dia', 'csr', 'csc', 'dok', 'lil', 'coo', 'bsr']
]
param_names = ['matrix', 'format']
def setup(self, matrix, format):
if matrix == 'Identity':
if format in ('lil', 'dok'):
raise NotImplementedError()
self.A = sparse.eye(10000, 10000, format=format)
elif matrix == 'Poisson5pt':
self.A = poisson2d(300, format=format)
elif matrix == 'Block2x2':
if format not in ('csr', 'bsr'):
raise NotImplementedError()
b = (2, 2)
self.A = sparse.kron(poisson2d(150),
ones(b)).tobsr(blocksize=b).asformat(format)
elif matrix == 'Block3x3':
if format not in ('csr', 'bsr'):
raise NotImplementedError()
b = (3, 3)
self.A = sparse.kron(poisson2d(100),
ones(b)).tobsr(blocksize=b).asformat(format)
else:
raise NotImplementedError()
self.x = ones(self.A.shape[1], dtype=float)
def time_matvec(self, matrix, format):
self.A * self.x
class Matvecs(Benchmark):
params = ['dia', 'coo', 'csr', 'csc', 'bsr']
param_names = ["format"]
def setup(self, format):
self.A = poisson2d(300, format=format)
self.x = ones((self.A.shape[1], 10), dtype=self.A.dtype)
def time_matvecs(self, format):
self.A * self.x
class Matmul(Benchmark):
def setup(self):
H1, W1 = 1, 100000
H2, W2 = W1, 1000
C1 = 10
C2 = 1000000
random.seed(0)
matrix1 = lil_matrix(zeros((H1, W1)))
matrix2 = lil_matrix(zeros((H2, W2)))
for i in range(C1):
matrix1[random.randint(H1), random.randint(W1)] = random.rand()
for i in range(C2):
matrix2[random.randint(H2), random.randint(W2)] = random.rand()
self.matrix1 = matrix1.tocsr()
self.matrix2 = matrix2.tocsr()
def time_large(self):
for i in range(100):
self.matrix1 * self.matrix2
class Construction(Benchmark):
params = [
['Empty', 'Identity', 'Poisson5pt'],
['lil', 'dok']
]
param_names = ['matrix', 'format']
def setup(self, name, format):
if name == 'Empty':
self.A = coo_matrix((10000, 10000))
elif name == 'Identity':
self.A = sparse.eye(10000, format='coo')
else:
self.A = poisson2d(100, format='coo')
formats = {'lil': lil_matrix, 'dok': dok_matrix}
self.cls = formats[format]
def time_construction(self, name, format):
T = self.cls(self.A.shape)
for i, j, v in zip(self.A.row, self.A.col, self.A.data):
T[i, j] = v
class Conversion(Benchmark):
params = [
['csr', 'csc', 'coo', 'dia', 'lil', 'dok'],
['csr', 'csc', 'coo', 'dia', 'lil', 'dok'],
]
param_names = ['from_format', 'to_format']
def setup(self, fromfmt, tofmt):
base = poisson2d(100, format=fromfmt)
try:
self.fn = getattr(base, 'to' + tofmt)
except:
def fn():
raise RuntimeError()
self.fn = fn
def time_conversion(self, fromfmt, tofmt):
self.fn()
class Getset(Benchmark):
params = [
[1, 10, 100, 1000, 10000],
['different', 'same'],
['csr', 'csc', 'lil', 'dok']
]
param_names = ['N', 'sparsity pattern', 'format']
unit = "seconds"
def setup(self, N, sparsity_pattern, format):
if format == 'dok' and N > 500:
raise NotImplementedError()
self.A = rand(1000, 1000, density=1e-5)
A = self.A
N = int(N)
# indices to assign to
i, j = [], []
while len(i) < N:
n = N - len(i)
ip = numpy.random.randint(0, A.shape[0], size=n)
jp = numpy.random.randint(0, A.shape[1], size=n)
i = numpy.r_[i, ip]
j = numpy.r_[j, jp]
v = numpy.random.rand(n)
if N == 1:
i = int(i)
j = int(j)
v = float(v)
base = A.asformat(format)
self.m = base.copy()
self.i = i
self.j = j
self.v = v
def _timeit(self, kernel, recopy):
min_time = 1e99
if not recopy:
kernel(self.m, self.i, self.j, self.v)
number = 1
start = time.time()
while time.time() - start < 0.1:
if recopy:
m = self.m.copy()
else:
m = self.m
while True:
duration = timeit.timeit(
lambda: kernel(m, self.i, self.j, self.v), number=number)
if duration > 1e-5:
break
else:
number *= 10
min_time = min(min_time, duration/number)
return min_time
def track_fancy_setitem(self, N, sparsity_pattern, format):
def kernel(A, i, j, v):
A[i, j] = v
with warnings.catch_warnings():
warnings.simplefilter('ignore', SparseEfficiencyWarning)
return self._timeit(kernel, sparsity_pattern == 'different')
def time_fancy_getitem(self, N, sparsity_pattern, format):
self.m[self.i, self.j]
class NullSlice(Benchmark):
params = [[0.05, 0.01], ['csr', 'csc', 'lil']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 100000
k = 1000
self.X = sparse.rand(n, k, format=format, density=density)
def time_getrow(self, density, format):
self.X.getrow(100)
def time_getcol(self, density, format):
self.X.getcol(100)
def time_3_rows(self, density, format):
self.X[[0, 100, 105], :]
def time_10000_rows(self, density, format):
self.X[np.arange(10000), :]
def time_3_cols(self, density, format):
self.X[:, [0, 100, 105]]
def time_100_cols(self, density, format):
self.X[:, np.arange(100)]
class Diagonal(Benchmark):
params = [[0.01, 0.1, 0.5], ['csr', 'csc', 'coo', 'lil', 'dok', 'dia']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 1000
if format == 'dok' and n * density >= 500:
raise NotImplementedError()
self.X = sparse.rand(n, n, format=format, density=density)
def time_diagonal(self, density, format):
self.X.diagonal()
class Sum(Benchmark):
params = [[0.01, 0.1, 0.5], ['csr', 'csc', 'coo', 'lil', 'dok', 'dia']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 1000
if format == 'dok' and n * density >= 500:
raise NotImplementedError()
self.X = sparse.rand(n, n, format=format, density=density)
def time_sum(self, density, format):
self.X.sum()
def time_sum_axis0(self, density, format):
self.X.sum(axis=0)
def time_sum_axis1(self, density, format):
self.X.sum(axis=1)
class Iteration(Benchmark):
params = [[0.05, 0.01], ['csr', 'csc', 'lil']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 500
k = 1000
self.X = sparse.rand(n, k, format=format, density=density)
def time_iteration(self, density, format):
for row in self.X:
pass
| bsd-3-clause | 3,352,419,571,991,209,000 | 27.019499 | 77 | 0.532856 | false |
googleapis/googleapis-gen | google/ads/googleads/v8/googleads-py/tests/unit/gapic/googleads.v8/services/test_custom_audience_service.py | 1 | 35288 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import mock
import grpc
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.ads.googleads.v8.enums.types import custom_audience_member_type
from google.ads.googleads.v8.enums.types import custom_audience_status
from google.ads.googleads.v8.enums.types import custom_audience_type
from google.ads.googleads.v8.resources.types import custom_audience
from google.ads.googleads.v8.services.services.custom_audience_service import CustomAudienceServiceClient
from google.ads.googleads.v8.services.services.custom_audience_service import transports
from google.ads.googleads.v8.services.types import custom_audience_service
from google.api_core import client_options
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert CustomAudienceServiceClient._get_default_mtls_endpoint(None) is None
assert CustomAudienceServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert CustomAudienceServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert CustomAudienceServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert CustomAudienceServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert CustomAudienceServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
def test_custom_audience_service_client_from_service_account_info():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = CustomAudienceServiceClient.from_service_account_info(info)
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_custom_audience_service_client_from_service_account_file():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = CustomAudienceServiceClient.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
client = CustomAudienceServiceClient.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_custom_audience_service_client_get_transport_class():
transport = CustomAudienceServiceClient.get_transport_class()
assert transport == transports.CustomAudienceServiceGrpcTransport
transport = CustomAudienceServiceClient.get_transport_class("grpc")
assert transport == transports.CustomAudienceServiceGrpcTransport
@mock.patch.object(CustomAudienceServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CustomAudienceServiceClient))
def test_custom_audience_service_client_client_options():
# Check that if channel is provided we won't create a new one.
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.CustomAudienceServiceClient.get_transport_class') as gtc:
transport = transports.CustomAudienceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials()
)
client = CustomAudienceServiceClient(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.CustomAudienceServiceClient.get_transport_class') as gtc:
client = CustomAudienceServiceClient(transport="grpc")
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CustomAudienceServiceClient(client_options=options)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT
# is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CustomAudienceServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CustomAudienceServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_MTLS_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = CustomAudienceServiceClient()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = CustomAudienceServiceClient()
@mock.patch.object(CustomAudienceServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CustomAudienceServiceClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
@pytest.mark.parametrize("use_client_cert_env", ["true", "false"])
def test_custom_audience_service_client_mtls_env_auto(use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
ssl_channel_creds = mock.Mock()
with mock.patch('grpc.ssl_channel_credentials', return_value=ssl_channel_creds):
grpc_transport.return_value = None
client = CustomAudienceServiceClient(client_options=options)
if use_client_cert_env == "false":
expected_ssl_channel_creds = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_ssl_channel_creds = ssl_channel_creds
expected_host = client.DEFAULT_MTLS_ENDPOINT
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
with mock.patch('google.auth.transport.grpc.SslCredentials.ssl_credentials', new_callable=mock.PropertyMock) as ssl_credentials_mock:
if use_client_cert_env == "false":
is_mtls_mock.return_value = False
ssl_credentials_mock.return_value = None
expected_host = client.DEFAULT_ENDPOINT
expected_ssl_channel_creds = None
else:
is_mtls_mock.return_value = True
ssl_credentials_mock.return_value = mock.Mock()
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_ssl_channel_creds = ssl_credentials_mock.return_value
grpc_transport.return_value = None
client = CustomAudienceServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
is_mtls_mock.return_value = False
grpc_transport.return_value = None
client = CustomAudienceServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_custom_audience_service_client_client_options_from_dict():
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CustomAudienceServiceClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_get_custom_audience(transport: str = 'grpc', request_type=custom_audience_service.GetCustomAudienceRequest):
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_custom_audience),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = custom_audience.CustomAudience(
resource_name='resource_name_value',
id=205,
status=custom_audience_status.CustomAudienceStatusEnum.CustomAudienceStatus.UNKNOWN,
name='name_value',
type_=custom_audience_type.CustomAudienceTypeEnum.CustomAudienceType.UNKNOWN,
description='description_value',
)
response = client.get_custom_audience(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == custom_audience_service.GetCustomAudienceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, custom_audience.CustomAudience)
assert response.resource_name == 'resource_name_value'
assert response.id == 205
assert response.status == custom_audience_status.CustomAudienceStatusEnum.CustomAudienceStatus.UNKNOWN
assert response.name == 'name_value'
assert response.type_ == custom_audience_type.CustomAudienceTypeEnum.CustomAudienceType.UNKNOWN
assert response.description == 'description_value'
def test_get_custom_audience_from_dict():
test_get_custom_audience(request_type=dict)
def test_get_custom_audience_field_headers():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = custom_audience_service.GetCustomAudienceRequest()
request.resource_name = 'resource_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_custom_audience),
'__call__') as call:
call.return_value = custom_audience.CustomAudience()
client.get_custom_audience(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'resource_name=resource_name/value',
) in kw['metadata']
def test_get_custom_audience_flattened():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_custom_audience),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = custom_audience.CustomAudience()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_custom_audience(
resource_name='resource_name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].resource_name == 'resource_name_value'
def test_get_custom_audience_flattened_error():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_custom_audience(
custom_audience_service.GetCustomAudienceRequest(),
resource_name='resource_name_value',
)
def test_mutate_custom_audiences(transport: str = 'grpc', request_type=custom_audience_service.MutateCustomAudiencesRequest):
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_custom_audiences),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = custom_audience_service.MutateCustomAudiencesResponse(
)
response = client.mutate_custom_audiences(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == custom_audience_service.MutateCustomAudiencesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, custom_audience_service.MutateCustomAudiencesResponse)
def test_mutate_custom_audiences_from_dict():
test_mutate_custom_audiences(request_type=dict)
def test_mutate_custom_audiences_field_headers():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = custom_audience_service.MutateCustomAudiencesRequest()
request.customer_id = 'customer_id/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_custom_audiences),
'__call__') as call:
call.return_value = custom_audience_service.MutateCustomAudiencesResponse()
client.mutate_custom_audiences(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'customer_id=customer_id/value',
) in kw['metadata']
def test_mutate_custom_audiences_flattened():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_custom_audiences),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = custom_audience_service.MutateCustomAudiencesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.mutate_custom_audiences(
customer_id='customer_id_value',
operations=[custom_audience_service.CustomAudienceOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].customer_id == 'customer_id_value'
assert args[0].operations == [custom_audience_service.CustomAudienceOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))]
def test_mutate_custom_audiences_flattened_error():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.mutate_custom_audiences(
custom_audience_service.MutateCustomAudiencesRequest(),
customer_id='customer_id_value',
operations=[custom_audience_service.CustomAudienceOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))],
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.CustomAudienceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.CustomAudienceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = CustomAudienceServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.CustomAudienceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.CustomAudienceServiceGrpcTransport,
)
@pytest.mark.parametrize("transport_class", [
transports.CustomAudienceServiceGrpcTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_custom_audience_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.CustomAudienceServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'get_custom_audience',
'mutate_custom_audiences',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_custom_audience_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default') as adc, mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.CustomAudienceServiceTransport()
adc.assert_called_once()
def test_custom_audience_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
CustomAudienceServiceClient()
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_custom_audience_service_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transports.CustomAudienceServiceGrpcTransport(host="squid.clam.whelk")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_custom_audience_service_host_no_port():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com'),
)
assert client.transport._host == 'googleads.googleapis.com:443'
def test_custom_audience_service_host_with_port():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com:8000'),
)
assert client.transport._host == 'googleads.googleapis.com:8000'
def test_custom_audience_service_grpc_transport_channel():
channel = grpc.insecure_channel('http://localhost/')
# Check that channel is used if provided.
transport = transports.CustomAudienceServiceGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize("transport_class", [transports.CustomAudienceServiceGrpcTransport])
def test_custom_audience_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize("transport_class", [transports.CustomAudienceServiceGrpcTransport,])
def test_custom_audience_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_custom_audience_path():
customer_id = "squid"
custom_audience_id = "clam"
expected = "customers/{customer_id}/customAudiences/{custom_audience_id}".format(customer_id=customer_id, custom_audience_id=custom_audience_id, )
actual = CustomAudienceServiceClient.custom_audience_path(customer_id, custom_audience_id)
assert expected == actual
def test_parse_custom_audience_path():
expected = {
"customer_id": "whelk",
"custom_audience_id": "octopus",
}
path = CustomAudienceServiceClient.custom_audience_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_custom_audience_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = CustomAudienceServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = CustomAudienceServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder, )
actual = CustomAudienceServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = CustomAudienceServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization, )
actual = CustomAudienceServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = CustomAudienceServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project, )
actual = CustomAudienceServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = CustomAudienceServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = CustomAudienceServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = CustomAudienceServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.CustomAudienceServiceTransport, '_prep_wrapped_messages') as prep:
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.CustomAudienceServiceTransport, '_prep_wrapped_messages') as prep:
transport_class = CustomAudienceServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
| apache-2.0 | -7,432,984,637,234,446,000 | 42.836025 | 217 | 0.682555 | false |
coronary/RandomEpisode | depends/Lib/site-packages/tmdbsimple/people.py | 1 | 7235 | # -*- coding: utf-8 -*-
"""
tmdbsimple.people
~~~~~~~~~~~~~~~~~
This module implements the People, Credits, and Jobs functionality
of tmdbsimple.
Created by Celia Oakley on 2013-10-31.
:copyright: (c) 2013-2017 by Celia Oakley
:license: GPLv3, see LICENSE for more details
"""
from .base import TMDB
class People(TMDB):
"""
People functionality.
See: http://docs.themoviedb.apiary.io/#people
"""
BASE_PATH = 'person'
URLS = {
'info': '/{id}',
'movie_credits': '/{id}/movie_credits',
'tv_credits': '/{id}/tv_credits',
'combined_credits': '/{id}/combined_credits',
'external_ids': '/{id}/external_ids',
'images': '/{id}/images',
'changes': '/{id}/changes',
'popular': '/popular',
'latest': '/latest',
}
def __init__(self, id=0):
super(People, self).__init__()
self.id = id
def info(self, **kwargs):
"""
Get the general person information for a specific id.
Args:
append_to_response: (optional) Comma separated, any person method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def movie_credits(self, **kwargs):
"""
Get the movie credits for a specific person id.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any person method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('movie_credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def tv_credits(self, **kwargs):
"""
Get the TV credits for a specific person id.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any person method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('tv_credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def combined_credits(self, **kwargs):
"""
Get the combined (movie and TV) credits for a specific person id.
To get the expanded details for each TV record, call the /credit method
with the provided credit_id. This will provide details about which
episode and/or season the credit is for.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any person method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('combined_credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def external_ids(self, **kwargs):
"""
Get the external ids for a specific person id.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('external_ids')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def images(self, **kwargs):
"""
Get the images for a specific person id.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('images')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def changes(self, **kwargs):
"""
Get the changes for a specific person id.
Changes are grouped by key, and ordered by date in descending order.
By default, only the last 24 hours of changes are returned. The maximum
number of days that can be returned in a single request is 14. The
language is present on fields that are translatable.
Args:
start_date: (optional) Expected format is 'YYYY-MM-DD'.
end_date: (optional) Expected format is 'YYYY-MM-DD'.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('changes')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def popular(self, **kwargs):
"""
Get the list of popular people on The Movie Database. This list
refreshes every day.
Args:
page: (optional) Minimum 1, maximum 1000.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('popular')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def latest(self, **kwargs):
"""
Get the latest person id.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('latest')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
class Credits(TMDB):
"""
Credits functionality.
See: http://docs.themoviedb.apiary.io/#credits
"""
BASE_PATH = 'credit'
URLS = {
'info': '/{credit_id}',
}
def __init__(self, credit_id):
super(Credits, self).__init__()
self.credit_id = credit_id
def info(self, **kwargs):
"""
Get the detailed information about a particular credit record. This is
currently only supported with the new credit model found in TV. These
ids can be found from any TV credit response as well as the tv_credits
and combined_credits methods for people.
The episodes object returns a list of episodes and are generally going
to be guest stars. The season array will return a list of season
numbers. Season credits are credits that were marked with the
"add to every season" option in the editing interface and are
assumed to be "season regulars".
Args:
language: (optional) ISO 639-1 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_credit_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
class Jobs(TMDB):
"""
Jobs functionality.
See: http://docs.themoviedb.apiary.io/#jobs
"""
BASE_PATH = 'job'
URLS = {
'list': '/list',
}
def list(self, **kwargs):
"""
Get a list of valid jobs.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('list')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
| mit | 481,468,421,621,754,600 | 27.710317 | 80 | 0.581617 | false |
ruslanloman/nova | nova/virt/libvirt/volume/volume.py | 1 | 16814 | # Copyright 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume drivers for libvirt."""
import os
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
import six
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import paths
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.IntOpt('num_iscsi_scan_tries',
default=5,
help='Number of times to rescan iSCSI target to find volume'),
cfg.IntOpt('num_iser_scan_tries',
default=5,
help='Number of times to rescan iSER target to find volume'),
cfg.StrOpt('rbd_user',
help='The RADOS client name for accessing rbd volumes'),
cfg.StrOpt('rbd_secret_uuid',
               help='The libvirt UUID of the secret for the rbd_user '
'volumes'),
cfg.StrOpt('nfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the NFS volume is mounted on the'
' compute node'),
cfg.StrOpt('nfs_mount_options',
               help='Mount options passed to the NFS client. See the '
                    'nfs man page for details'),
cfg.BoolOpt('iscsi_use_multipath',
default=False,
help='Use multipath connection of the iSCSI volume'),
cfg.BoolOpt('iser_use_multipath',
default=False,
help='Use multipath connection of the iSER volume'),
cfg.ListOpt('qemu_allowed_storage_drivers',
default=[],
help='Protocols listed here will be accessed directly '
'from QEMU. Currently supported protocols: [gluster]'),
cfg.StrOpt('iscsi_iface',
deprecated_name='iscsi_transport',
help='The iSCSI transport iface to use to connect to target in '
'case offload support is desired. Default format is of '
'the form <transport_name>.<hwaddress> where '
'<transport_name> is one of (be2iscsi, bnx2i, cxgb3i, '
                    'cxgb4i, qla4xxx, ocs) and <hwaddress> is the MAC address '
'of the interface and can be generated via the '
'iscsiadm -m iface command. Do not confuse the '
'iscsi_iface parameter to be provided here with the '
'actual transport name.'),
# iser is also supported, but use LibvirtISERVolumeDriver
# instead
]
CONF = cfg.CONF
CONF.register_opts(volume_opts, 'libvirt')
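# Example [libvirt] section consumed by the options above (values are
# illustrative only):
#
#   [libvirt]
#   num_iscsi_scan_tries = 5
#   iscsi_use_multipath = True
#   nfs_mount_point_base = /var/lib/nova/mnt
#   nfs_mount_options = vers=3,nolock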
class LibvirtBaseVolumeDriver(object):
"""Base class for volume drivers."""
def __init__(self, connection, is_block_dev):
self.connection = connection
self.is_block_dev = is_block_dev
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = vconfig.LibvirtConfigGuestDisk()
conf.driver_name = libvirt_utils.pick_disk_driver_name(
self.connection._host.get_version(),
self.is_block_dev
)
conf.source_device = disk_info['type']
conf.driver_format = "raw"
conf.driver_cache = "none"
conf.target_dev = disk_info['dev']
conf.target_bus = disk_info['bus']
conf.serial = connection_info.get('serial')
# Support for block size tuning
data = {}
if 'data' in connection_info:
data = connection_info['data']
if 'logical_block_size' in data:
conf.logical_block_size = data['logical_block_size']
if 'physical_block_size' in data:
conf.physical_block_size = data['physical_block_size']
# Extract rate_limit control parameters
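        # e.g. qos_specs = {'total_bytes_sec': '1048576'} (illustrative)
        # becomes conf.disk_total_bytes_sec = '1048576' below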
if 'qos_specs' in data and data['qos_specs']:
tune_opts = ['total_bytes_sec', 'read_bytes_sec',
'write_bytes_sec', 'total_iops_sec',
'read_iops_sec', 'write_iops_sec']
specs = data['qos_specs']
if isinstance(specs, dict):
for k, v in six.iteritems(specs):
if k in tune_opts:
new_key = 'disk_' + k
setattr(conf, new_key, v)
else:
LOG.warn(_LW('Unknown content in connection_info/'
'qos_specs: %s'), specs)
# Extract access_mode control parameters
if 'access_mode' in data and data['access_mode']:
access_mode = data['access_mode']
if access_mode in ('ro', 'rw'):
conf.readonly = access_mode == 'ro'
else:
LOG.error(_LE('Unknown content in '
'connection_info/access_mode: %s'),
access_mode)
raise exception.InvalidVolumeAccessMode(
access_mode=access_mode)
return conf
def _get_secret_uuid(self, conf, password=None):
secret = self.connection._host.find_secret(conf.source_protocol,
conf.source_name)
if secret is None:
secret = self.connection._host.create_secret(conf.source_protocol,
conf.source_name,
password)
return secret.UUIDString()
def _delete_secret_by_name(self, connection_info):
source_protocol = connection_info['driver_volume_type']
netdisk_properties = connection_info['data']
if source_protocol == 'rbd':
return
elif source_protocol == 'iscsi':
usage_type = 'iscsi'
usage_name = ("%(target_iqn)s/%(target_lun)s" %
netdisk_properties)
self.connection._host.delete_secret(usage_type, usage_name)
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
pass
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
pass
class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):
"""Class for volumes backed by local file."""
def __init__(self, connection):
super(LibvirtVolumeDriver,
self).__init__(connection, is_block_dev=True)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach fake volumes to libvirt."""
def __init__(self, connection):
super(LibvirtFakeVolumeDriver,
self).__init__(connection, is_block_dev=True)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtFakeVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "network"
conf.source_protocol = "fake"
conf.source_name = "fake"
return conf
class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtNetVolumeDriver,
self).__init__(connection, is_block_dev=False)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtNetVolumeDriver,
self).get_config(connection_info, disk_info)
netdisk_properties = connection_info['data']
conf.source_type = "network"
conf.source_protocol = connection_info['driver_volume_type']
conf.source_name = netdisk_properties.get('name')
conf.source_hosts = netdisk_properties.get('hosts', [])
conf.source_ports = netdisk_properties.get('ports', [])
auth_enabled = netdisk_properties.get('auth_enabled')
if (conf.source_protocol == 'rbd' and
CONF.libvirt.rbd_secret_uuid):
conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid
auth_enabled = True # Force authentication locally
if CONF.libvirt.rbd_user:
conf.auth_username = CONF.libvirt.rbd_user
if conf.source_protocol == 'iscsi':
try:
conf.source_name = ("%(target_iqn)s/%(target_lun)s" %
netdisk_properties)
target_portal = netdisk_properties['target_portal']
except KeyError:
raise exception.NovaException(_("Invalid volume source data"))
ip, port = utils.parse_server_string(target_portal)
if ip == '' or port == '':
                raise exception.NovaException(_("Invalid target_portal"))
conf.source_hosts = [ip]
conf.source_ports = [port]
if netdisk_properties.get('auth_method') == 'CHAP':
auth_enabled = True
conf.auth_secret_type = 'iscsi'
password = netdisk_properties.get('auth_password')
conf.auth_secret_uuid = self._get_secret_uuid(conf, password)
if auth_enabled:
conf.auth_username = (conf.auth_username or
netdisk_properties['auth_username'])
conf.auth_secret_type = (conf.auth_secret_type or
netdisk_properties['secret_type'])
conf.auth_secret_uuid = (conf.auth_secret_uuid or
netdisk_properties['secret_uuid'])
return conf
def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
super(LibvirtNetVolumeDriver,
self).disconnect_volume(connection_info, disk_dev)
self._delete_secret_by_name(connection_info)
class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtISCSIVolumeDriver, self).__init__(connection,
is_block_dev=True)
# Call the factory here so we can support
# more than x86 architectures.
self.connector = connector.InitiatorConnector.factory(
'ISCSI', utils._get_root_helper(),
use_multipath=CONF.libvirt.iscsi_use_multipath,
device_scan_attempts=CONF.libvirt.num_iscsi_scan_tries,
transport=self._get_transport())
def _get_transport(self):
if CONF.libvirt.iscsi_iface:
transport = CONF.libvirt.iscsi_iface
else:
transport = 'default'
return transport
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtISCSIVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
def connect_volume(self, connection_info, disk_info):
"""Attach the volume to instance_name."""
LOG.debug("Calling os-brick to attach iSCSI Volume")
device_info = self.connector.connect_volume(connection_info['data'])
LOG.debug("Attached iSCSI volume %s", device_info)
connection_info['data']['device_path'] = device_info['path']
def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach iSCSI Volume")
self.connector.disconnect_volume(connection_info['data'], None)
LOG.debug("Disconnected iSCSI Volume %s", disk_dev)
super(LibvirtISCSIVolumeDriver,
self).disconnect_volume(connection_info, disk_dev)
class LibvirtISERVolumeDriver(LibvirtISCSIVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtISERVolumeDriver, self).__init__(connection)
# Call the factory here so we can support
# more than x86 architectures.
self.connector = connector.InitiatorConnector.factory(
'ISER', utils._get_root_helper(),
use_multipath=CONF.libvirt.iser_use_multipath,
device_scan_attempts=CONF.libvirt.num_iser_scan_tries,
transport=self._get_transport())
def _get_transport(self):
return 'iser'
class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
"""Class implements libvirt part of volume driver for NFS."""
def __init__(self, connection):
"""Create back-end to nfs."""
super(LibvirtNFSVolumeDriver,
self).__init__(connection, is_block_dev=False)
def _get_device_path(self, connection_info):
path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(connection_info['data']['export']))
path = os.path.join(path, connection_info['data']['name'])
return path
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtNFSVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = 'file'
conf.source_path = connection_info['data']['device_path']
conf.driver_format = connection_info['data'].get('format', 'raw')
return conf
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
options = connection_info['data'].get('options')
self._ensure_mounted(connection_info['data']['export'], options)
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
export = connection_info['data']['export']
mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(export))
try:
utils.execute('umount', mount_path, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ('device is busy' in exc.message or
'target is busy' in exc.message):
LOG.debug("The NFS share %s is still in use.", export)
else:
LOG.exception(_LE("Couldn't unmount the NFS share %s"), export)
def _ensure_mounted(self, nfs_export, options=None):
"""@type nfs_export: string
@type options: string
"""
mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(nfs_export))
if not libvirt_utils.is_mounted(mount_path, nfs_export):
self._mount_nfs(mount_path, nfs_export, options, ensure=True)
return mount_path
def _mount_nfs(self, mount_path, nfs_share, options=None, ensure=False):
"""Mount nfs export to mount path."""
utils.execute('mkdir', '-p', mount_path)
# Construct the NFS mount command.
nfs_cmd = ['mount', '-t', 'nfs']
if CONF.libvirt.nfs_mount_options is not None:
nfs_cmd.extend(['-o', CONF.libvirt.nfs_mount_options])
if options:
nfs_cmd.extend(options.split(' '))
nfs_cmd.extend([nfs_share, mount_path])
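        # resulting command, with illustrative values only:
        #   mount -t nfs -o vers=3,nolock 192.168.0.1:/export/vols \
        #       /var/lib/nova/mnt/<md5 of export>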
try:
utils.execute(*nfs_cmd, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ensure and 'already mounted' in exc.message:
LOG.warn(_LW("%s is already mounted"), nfs_share)
else:
raise
| apache-2.0 | -3,995,553,806,361,658,000 | 40.210784 | 79 | 0.591352 | false |
Quihico/repository.spartacus | script.trtv/streaming.py | 1 | 24691 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Sean Poyser and Richard Dean ([email protected]) - With acknowledgement to some original code by twinther (Tommy Winther)
#
import xbmc
from xml.etree import ElementTree
from xml.parsers.expat import ExpatError
import ConfigParser
from collections import OrderedDict
import os
import re
import xbmcaddon
import urllib
import requests
import json
import dixie
ADDON = dixie.ADDON
SF_METALLIQ = dixie.GetSetting('SF_METALLIQ')
autoplay = dixie.GetSetting('autoplay')
LOCAL = dixie.GetSetting('local.ini') == 'true'
FTVINI = dixie.GetSetting('ftv.ini')
datapath = dixie.PROFILE
class StreamsService(object):
def __init__(self):
self.addonsParser = ConfigParser.ConfigParser(dict_type=OrderedDict)
self.addonsParser.optionxform = lambda option: option
iniFiles = self.getIniFiles()
for file in iniFiles:
try: self.addonsParser.read(file)
except: pass
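        # each ini file maps add-on ids (sections) to channel streams, e.g.
        # (illustrative):
        #   [plugin.video.example]
        #   BBC One = plugin://plugin.video.example/?channel=bbc1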
def getIniFiles(self):
        import glob
        ini = os.path.join(datapath, 'ini', '*.*')
        files = glob.glob(ini)
for i in range(10):
file = dixie.GetSetting('INI_%d' % i)
if len(file) > 0:
if file not in files:
files.append(file)
if os.path.exists(os.path.join(datapath,'addons.ini')):
files.append(os.path.join(datapath, 'addons.ini'))
return files
def loadFavourites(self):
entries = list()
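        # favourites.xml entries look like (illustrative):
        #   <favourite name="BBC One">PlayMedia("plugin://x/?ch=bbc1",return)</favourite>
        # and are reduced below to ('BBC One', 'plugin://x/?ch=bbc1')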
path = xbmc.translatePath('special://profile/favourites.xml')
if os.path.exists(path):
f = open(path)
xml = f.read()
f.close()
try:
doc = ElementTree.fromstring(xml)
for node in doc.findall('favourite'):
                    value = node.text.replace(',return','')
if value[0:11] == 'PlayMedia("':
value = value[11:-2]
elif value[0:10] == 'PlayMedia(':
value = value[10:-1]
elif value[0:22] == 'ActivateWindow(10025,"':
value = value[22:-2]
elif value[0:21] == 'ActivateWindow(10025,':
                        value = value[21:-1]
elif value[0:22] == 'ActivateWindow(10001,"':
value = value[22:-2]
elif value[0:21] == 'ActivateWindow(10001,':
                        value = value[21:-1]
else:
continue
entries.append((node.get('name'), value))
except ExpatError:
pass
return entries
def loadPlaylist(self):
iptv_type = dixie.GetSetting('playlist.type')
IPTV_URL = '0'
IPTV_FILE = '1'
entries = list()
label = ''
value = ''
if iptv_type == IPTV_FILE:
            path = dixie.GetSetting('playlist.file')
else:
url = dixie.GetSetting('playlist.url')
path = os.path.join(datapath, 'playlist.m3u')
try:
request = requests.get(url)
playlist = request.content
with open(path, 'wb') as f:
f.write(playlist)
except: pass
if os.path.exists(path):
f = open(path)
playlist = f.readlines()
f.close()
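            # playlist entries come in #EXTINF/url pairs, e.g. (illustrative):
            #   #EXTINF:-1,BBC One
            #   http://example.com/streams/bbc1.m3u8
            # parsed below into ('BBC One', 'http://example.com/streams/bbc1.m3u8')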
for line in playlist:
if line.startswith('#EXTINF:'):
label = line.split(',')[-1].strip()
            elif line.startswith(('rtmp', 'rtsp', 'http')):
value = line.replace('rtmp://$OPT:rtmp-raw=', '').replace('\n', '')
entries.append((label, value))
return entries
def locateSuperFavourites(self, title):
SUPERFAVES = 'plugin.program.super.favourites'
SF_INSTALLED = xbmc.getCondVisibility('System.HasAddon(%s)' % SUPERFAVES) == 1
if not SF_INSTALLED:
return None
sfAddon = xbmcaddon.Addon(id = SUPERFAVES)
# Detect the root folder for SF items, set to default if not already set
ROOT = sfAddon.getSetting('FOLDER')
if not ROOT:
ROOT = 'special://profile/addon_data/plugin.program.super.favourites'
folder = os.path.join(ROOT, 'Super Favourites')
items = []
self._locateSuperFavourites(title.upper(), folder, items)
return items
def _locateSuperFavourites(self, title, folder, items):
import sfile
import settings
import urllib
current, dirs, files = sfile.walk(folder)
for dir in dirs:
folder = os.path.join(current, dir)
# check against SF list, if it exists then match up
if dir.upper() == title:
# cfg = os.path.join(folder, 'folder.cfg')
# autoplay = settings.get('AUTOPLAY', cfg)
if autoplay == 'true':
uTitle = urllib.quote_plus(title)
mode = 5400
uFolder = urllib.quote_plus(folder)
toAdd = 'plugin://plugin.program.super.favourites/?label=%s&mode=%d&path=%s' % (uTitle, mode, uFolder)
else:
uTitle = urllib.quote_plus(title)
mode = 400
uFolder = urllib.quote_plus(folder)
toAdd = 'plugin://plugin.program.super.favourites/?label=%s&mode=%d&path=%s' % (uTitle, mode, uFolder)
toAdd = '__SF__ActivateWindow(10025,"%s",return)' % toAdd
xbmc.log('##### FOLDER: %s' % folder)
if os.path.exists(xbmc.translatePath(os.path.join(folder,'favourites.xml'))):
items.append(['SF_'+folder, toAdd])
self._locateSuperFavourites(title, folder, items)
def getAddons(self):
return self.addonsParser.sections()
def getAddonStreams(self, id):
return self.addonsParser.items(id)
def detectStream(self, channel, catchup=''):
"""
@param channel:
@type channel: source.Channel
"""
matches = list()
xbmc.log('CATCHUP: %s'%catchup)
# If user chooses to watch via catchup then call meta addons
if catchup != '':
catchup = catchup.replace(' ','+')
stream = ('plugin://plugin.video.metalliq/%s' % (catchup))
matches.append(('plugin.video.metalliq', 'Catchup', [str(stream)]))
# For a live tv selection grab valid ini files and present options
else:
# Get any Super Favourites with channel name
superFaves = self.locateSuperFavourites(channel.id)
xbmc.log('### SF: %s' % superFaves)
if superFaves:
if len(superFaves) == 1 and not '-metalliq' in superFaves[0][0]:
matches.append((superFaves[0][0], 'Social Share', superFaves[0][1]))
elif len(superFaves) == 1 and '-metalliq' in superFaves[0][0] and SF_METALLIQ == 'true':
matches.append((superFaves[0][0], 'MetalliQ', superFaves[0][1]))
else:
index = 0
for superFave in superFaves:
if '-metalliq' in superFave[0] and SF_METALLIQ == 'true':
label = 'MetalliQ'
matches.append((superFave[0], label, superFave[1]))
elif not '-metalliq' in superFave[0]:
if len(superFaves) == 2 and ('-metalliq' in superFaves[0][0] or '-metalliq' in superFaves[1][0]):
label = 'Social Share'
else:
index += 1
label = 'Social Share (%d)' % index
matches.append((superFave[0], label, superFave[1]))
# Get any Add-ons with channel name
for id in self.getAddons():
try:
xbmcaddon.Addon(id)
except Exception:
pass # ignore addons that are not installed
for (label, stream) in self.getAddonStreams(id):
label = label.upper()
label_temp = label.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(label_temp) > 9:
label_temp = label_temp.replace('CINEMA','').replace('MOVIES','')
channel.title = channel.title.upper().replace('_',' ')
channel_temp = channel.title.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(channel_temp) > 9:
channel_temp = channel_temp.replace('CINEMA','').replace('MOVIES','')
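                    # normalisation example (illustrative): 'SKY SPORTS 1 HD'
                    # and 'Sky_Sports_1' both collapse to 'SKYSPORTONE', so
                    # differently formatted labels and titles can still match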
# If meta is chosen we clean the name up a bit more
if SF_METALLIQ == 'false':
if id == "plugin.video.metalliq" or id == "plugin.video.meta":
label = channel.title
chanx = channel.title.replace(" ","+").replace("_","+")
if chanx.endswith("%20HDTV"):
chanx = chanx.replace("%20HDTV","")
if chanx.endswith("%20HD"):
chanx = chanx.replace("%20HD","")
if chanx.endswith("%20PLUS1"):
chanx = chanx.replace("%20PLUS1","")
stream = str(stream.replace("<channel>",'live/%s/None/en'% chanx))
xbmc.log('STREAM: %s'%stream)
if type(stream) is list:
stream = stream[0]
if (channel_temp in label_temp) or (label_temp in channel_temp):
# Workaround for getting clean id if ini contains badly formatted items
if stream.startswith('plugin://') and not 'plugin.program.super.favourites' in stream:
idtemp = stream.split('plugin://')[1]
xbmc.log('idtemp: %s' % idtemp)
id = idtemp.split('/')[0]
# Clean up badly formatted labels in the ini files
label = re.sub('[:\\/?\<>|"]', '', label)
label = label.strip()
                        try:
                            label = label.encode('ascii', 'ignore')
                        except Exception:
                            try:
                                label = label.decode('utf-8').encode('ascii', 'ignore')
                            except Exception:
                                pass
matches.append((id, label, stream))
# Get any Kodi Favourites with channel name
kodiFaves = self.loadFavourites()
if kodiFaves:
id = 'kodi-favourite'
for (label, stream) in kodiFaves:
label = label.upper()
                    label_temp = normalize(label)
                    channel.title = channel.title.upper()
                    channel_temp = normalize(channel.title)
if (channel_temp in label_temp) or (label_temp in channel_temp):
matches.append((id, label, stream))
# Get any Playlist entries with channel name
iptvPlaylist = self.loadPlaylist()
if iptvPlaylist:
id = 'iptv-playlist'
for (label, stream) in iptvPlaylist:
label = label.upper()
                    label_temp = normalize(label)
                    channel.title = channel.title.upper()
                    channel_temp = normalize(channel.title)
if (channel_temp in label_temp) or (label_temp in channel_temp):
matches.append((id, label, stream))
# Get entries from PVRchannels with channel name
import pvr
PVRchannels = pvr.getPVRChannels()
if PVRchannels:
id = 'xbmc.pvr'
for (label, stream) in PVRchannels:
label = label.upper()
                    label_temp = normalize(label)
                    channel.title = channel.title.upper()
                    channel_temp = normalize(channel.title)
if (channel_temp in label_temp) or (label_temp in channel_temp):
matches.append((id, label, stream))
xbmc.log('### matches length: %s' % len(matches))
# if len(matches) == 1:
# return [matches[0][0],matches[0][1],str(matches[0][2])]
# else:
return matches
class OrderedDict(dict):
# From: http://code.activestate.com/recipes/576693/
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # get_ident is imported locally here in case the module-level import
        # from the original recipe (thread.get_ident) is missing in this file
        try:
            from thread import get_ident as _get_ident
        except ImportError:
            from dummy_thread import get_ident as _get_ident
        call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
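# Usage sketch (hypothetical values), illustrating the insertion-order guarantee
# this recipe provides on Python 2:
#   od = OrderedDict([('b', 1), ('a', 2)])
#   od.keys()     # -> ['b', 'a'] (insertion order, unlike a plain dict here)
#   od.popitem()  # -> ('a', 2), LIFO by default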
| gpl-2.0 | 8,102,808,333,039,620,000 | 40.849153 | 403 | 0.507108 | false |
moorecoin/MooreCoinMiningAlgorithm | contrib/devtools/update-translations.py | 1 | 6779 | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
  - remove invalid control characters
  - remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
tx = 'tx'
# Name of source language file
source_lang = 'moorecoin_en.ts'
# Directory with locale files
locale_dir = 'src/qt/locale'
def check_at_repository_root():
if not os.path.exists('.git'):
        print('No .git directory found')
        print('Execute this script at the root of the repository', file=sys.stderr)
        exit(1)
def fetch_all_translations():
    if subprocess.call([tx, 'pull', '-f']):
        print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
    '''Find all format specifiers in a string.'''
    pos = 0
    specifiers = []
    while True:
percent = s.find('%', pos)
if percent < 0:
break
specifiers.append(s[percent+1])
pos = percent+2
return specifiers
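# For example (hypothetical input), find_format_specifiers('Loaded %d of %s')
# returns ['d', 's']: the single character following each '%'.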
def split_format_specifiers(specifiers):
    '''Split format specifiers between numeric (Qt) and others (strprintf).'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
    # Numeric (Qt) can be present in any order, others (strprintf) must be in specified order
    return set(numeric), other
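# For example, split_format_specifiers(['1', 's', '2']) returns ({'1', '2'}, ['s']):
# the numeric (Qt) specifiers as a set, the strprintf ones in their original order.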
def sanitize_string(s):
    '''Sanitize string for printing.'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors):
source_f = split_format_specifiers(find_format_specifiers(source))
    # Assert that no source messages contain both Qt and strprintf format specifiers
    # If this fails, go change the source as this is hacky and confusing!
    assert(not(source_f[0] and source_f[1]))
    try:
        translation_f = split_format_specifiers(find_format_specifiers(translation))
    except IndexError:
        errors.append("Parse error in translation '%s'" % sanitize_string(translation))
        return False
    else:
        if source_f != translation_f:
            errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
            return False
    return True
def all_ts_files(suffix=''):
for filename in os.listdir(locale_dir):
        # Process only language files, and do not process the source language
if not filename.endswith('.ts'+suffix) or filename == source_lang+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(locale_dir, filename)
yield(filename, filepath)
fix_re = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
    '''Remove invalid characters from translation string.'''
return fix_re.sub(b'', s)
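# For example, remove_invalid_characters(b'foo\x01bar') returns b'foobar'; the
# pattern strips control characters but preserves newlines (\x0a) and CR (\x0d).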
# Override CDATA escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disabled by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
    print('Checking and postprocessing...')
    if reduce_diff_hacks:
        global _orig_escape_cdata
        _orig_escape_cdata = ET._escape_cdata
        ET._escape_cdata = escape_cdata
    for (filename, filepath) in all_ts_files():
        os.rename(filepath, filepath + '.orig')
    have_errors = False
    for (filename, filepath) in all_ts_files('.orig'):
        # Pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood, only 'utf-8'
        with open(filepath + '.orig', 'rb') as f:
            data = f.read()
        # Remove control characters; this must be done over the entire file otherwise the XML parser will fail
        data = remove_invalid_characters(data)
        tree = ET.parse(io.BytesIO(data), parser=parser)
        # Iterate over all messages in file
        root = tree.getroot()
        for context in root.findall('context'):
            for message in context.findall('message'):
                numerus = message.get('numerus') == 'yes'
                source = message.find('source').text
                translation_node = message.find('translation')
                # Pick all numerusforms
                if numerus:
                    translations = [i.text for i in translation_node.findall('numerusform')]
                else:
                    translations = [translation_node.text]
                for translation in translations:
                    if translation is None:
                        continue
                    errors = []
                    valid = check_format_specifiers(source, translation, errors)
                    for error in errors:
                        print('%s: %s' % (filename, error))
                    if not valid: # set type to unfinished and clear string if invalid
                        translation_node.clear()
                        translation_node.set('type', 'unfinished')
                        have_errors = True
                # Remove location tags
                for location in message.findall('location'):
                    message.remove(location)
                # Remove entire message if it is an unfinished translation
                if translation_node.get('type') == 'unfinished':
                    context.remove(message)
        # Write fixed-up tree
        # If diff reduction requested, replace some XML to 'sanitize' to Qt formatting
if reduce_diff_hacks:
            out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| mit | -6,915,173,566,015,647,000 | 35.446237 | 121 | 0.618675 | false |
kssim/efp | making_decisions/python/bmi_calculator.py | 1 | 1254 | # Practice 19. BMI Calculator
# Output:
# Your BMI is 19.5.
# You are within the ideal weight range.
# Or
# Your BMI is 32.5.
# You are overweight. You should see your doctor.
# Formula:
# bmi = (weight / (height x height)) x 703
# Standard:
#   BMI 18.5 ~ 25 is normal weight.
# Constraint:
# - Ensure your program takes only numeric data.
# Don't let the user continue unless the data is valid.
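# Worked example with hypothetical inputs: weight 150 lb, height 65 in gives
#   bmi = (150 / (65 * 65)) * 703 ~= 24.96, which is within the 18.5 ~ 25 range.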
#!/usr/bin/env python
from __future__ import division
import sys
def input_process(in_question):
return input(in_question) if sys.version_info >= (3,0) else raw_input(in_question)
if __name__ == '__main__':
    # Keep prompting until the input is valid, per the constraint above
    while True:
        try:
            weight = int(input_process('What is your weight(pound)? '))
            height = int(input_process('What is your height(inch)? '))
            break
        except ValueError:
            print ('You must input only numbers.')
    bmi_convert_value = 703
    bmi_raw_data = float(weight / (height * height))
    bmi = bmi_raw_data * bmi_convert_value
    print ('Your BMI is %s' % bmi)
    if bmi < 18.5:
        print ('You are underweight. You should see your doctor.')
    elif bmi > 25:
        print ('You are overweight. You should see your doctor.')
    else:
        print ('You are within the ideal weight range.')
| mit | -8,714,691,810,336,679,000 | 28.162791 | 86 | 0.606858 | false |
barsnadcat/evegant | Process.py | 1 | 1289 |
from unittest import TestCase
from Schemes import Blueprint
from ItemStack import ItemStack
class TestProcess(TestCase):
def test_InitProcess(self):
scheme = Blueprint(0, "Name", 0, [ItemStack(0, 1)], ItemStack(0, 1))
process = Process(scheme)
assert process.inputs[0].ammount == 1
def test_SetRuns(self):
scheme = Blueprint(0, "Name", 0, [ItemStack(0, 1)], ItemStack(0, 2))
process = Process(scheme)
process.SetRuns(2)
assert process.inputs[0].ammount == 2
assert process.outputs[0].ammount == 4
from copy import copy
class Process:
def __init__(self, aScheme):
self.scheme = aScheme
self.runs = 1
self.inputs = [copy(inp) for inp in aScheme.GetInputs()]
self.outputs = [copy(out) for out in aScheme.GetOutputs()]
self.runsChangedCallback = None
self.manual = False
def SetRuns(self, aRuns):
if self.runs == aRuns:
return
self.runs = aRuns
schemeInputs = self.scheme.GetInputs()
for i in range(len(self.inputs)):
self.inputs[i].ammount = schemeInputs[i].ammount * aRuns
schemeOutputs = self.scheme.GetOutputs()
for i in range(len(self.outputs)):
self.outputs[i].ammount = schemeOutputs[i].ammount * aRuns
if self.manual and self.runsChangedCallback:
self.runsChangedCallback()
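# Usage sketch (Blueprint arguments follow the tests above):
#   process = Process(Blueprint(0, "Name", 0, [ItemStack(0, 1)], ItemStack(0, 2)))
#   process.SetRuns(3)  # input ammounts scale to 3, output ammounts to 6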
| gpl-3.0 | 3,894,875,797,590,188,500 | 22.436364 | 70 | 0.706749 | false |
redsolution/django-menu-proxy | menuproxy/utils.py | 1 | 10086 | # -*- coding: utf-8 -*-
from django import conf
from django.core.cache import cache
from importpath import importpath
METHODS = (
    'replace',  # Indicates that the point object should be replaced by the mounted object
    'insert',   # Indicates that the mounted object should be appended to the children of the inside-rule
    'children', # Indicates that the children of the mounted object should be appended to the children of the inside-rule
)
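# A hypothetical MENU_PROXY_RULES entry using these methods (keyword names
# follow MenuRule.__init__ below; the dotted paths are illustrative only):
#   {'name': 'news', 'method': 'children', 'inside': 'pages',
#    'proxy': 'menuproxy.proxies.MenuProxy', 'model': 'news.models.NewsItem'}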
def get_title(menu_proxy, object):
"""Correct value returned by menu_proxy.title function"""
result = menu_proxy.title(object)
if result is None:
return u''
return unicode(result)
def get_url(menu_proxy, object):
"""Correct value returned by menu_proxy.url function"""
result = menu_proxy.url(object)
if result is None:
return u''
return unicode(result)
def get_ancestors(menu_proxy, object):
"""Correct value returned by menu_proxy.ancestors function"""
result = menu_proxy.ancestors(object)
if result is None:
return []
return [value for value in result]
def get_children(menu_proxy, object, lasy):
"""
Call ``children`` or ``lasy_children`` function for ``menu_proxy``.
Pass to it ``object``.
Correct result.
"""
if lasy:
result = menu_proxy.lasy_children(object)
else:
result = menu_proxy.children(object)
if result is None:
return []
return [value for value in result]
class DoesNotDefined(object):
"""
    Class used to indicate that a value was not present in the rule.
"""
pass
def try_to_import(value, exception_text):
"""
    If ``value`` is not None and is not DoesNotDefined,
    then try to import the path specified by ``value``.
"""
if value is not DoesNotDefined and value is not None:
return importpath(value, exception_text)
return value
def get_rules():
"""Return dictionary of rules with settings"""
rules = cache.get('menuproxy.rules', None)
if rules is not None:
return rules
rules = {}
sequence = {None: []}
def add_to_sequence(rule, value):
if rule not in sequence:
sequence[rule] = []
sequence[rule].append(value)
rules[None] = MenuRule(name=None, method='replace', proxy=None, rules=rules)
for kwargs in getattr(conf.settings, 'MENU_PROXY_RULES', []):
rule = MenuRule(rules=rules, **kwargs)
rules[rule.name] = rule
add_to_sequence(rule.name, rule.name)
add_to_sequence(rule.inside, rule.name)
for name, rule in rules.iteritems():
rule.sequence = [rules[item] for item in sequence[name]]
cache.set('menuproxy.rules', rules)
return rules
def get_front_page(rules):
"""If MENU_PROXY_FRONT_PAGED is True and there is front page return MenuItem for it"""
front_page = cache.get('menuproxy.front_page', DoesNotDefined)
if front_page is not DoesNotDefined:
return front_page
front_page = None
if getattr(conf.settings, 'MENU_PROXY_FRONT_PAGED', True):
root = MenuItem(None, DoesNotDefined)
children = root.children(False)
if children:
front_page = children[0]
cache.set('menuproxy.front_page', front_page)
return front_page
class MenuRule(object):
"""Rule"""
def __init__(self, name, method, proxy, rules, inside=None,
model=DoesNotDefined, point=DoesNotDefined, object=DoesNotDefined,
point_function=DoesNotDefined, object_function=DoesNotDefined, **other):
self.name = name
self.method = method
        assert self.method in METHODS, 'menuproxy does not support method: %s' % self.method
self.inside = inside
self.model = try_to_import(model, 'model class')
self.point = try_to_import(point, 'mount point')
if callable(self.point) and self.point is not DoesNotDefined:
self.point = self.point()
if self.point is DoesNotDefined:
self.point_function = try_to_import(point_function, 'mount point function')
else:
self.point_function = DoesNotDefined
self.object = try_to_import(object, 'mount object')
if callable(self.object) and self.object is not DoesNotDefined:
self.object = self.object()
if self.object is DoesNotDefined:
self.object_function = try_to_import(object_function, 'mount object function')
else:
self.object_function = DoesNotDefined
self.proxy = try_to_import(proxy, 'MenuProxy class')
other.update(self.__dict__)
if callable(self.proxy) and self.proxy is not DoesNotDefined:
self.proxy = self.proxy(**other)
self.rules = rules
self.sequence = []
def _get_point(self, object, forward):
if self.point is not DoesNotDefined:
return self.point
elif self.point_function is not DoesNotDefined:
return self.point_function(object, forward)
else:
return DoesNotDefined
def _get_object(self, object, forward):
if self.object is not DoesNotDefined:
return self.object
elif self.object_function is not DoesNotDefined:
return self.object_function(object, forward)
else:
return DoesNotDefined
def forward_point(self, object):
return self._get_point(object, True)
def backward_point(self, object):
return self._get_point(object, False)
def forward_object(self, object):
return self._get_object(object, True)
def backward_object(self, object):
return self._get_object(object, False)
class MenuItem(object):
"""Objects of this class will be send to templates. Class provide to walk through nested rules"""
active = False
current = False
def __init__(self, name=None, object=None):
if isinstance(object, MenuItem):
self.rules = object.rules
self.name, self.object = object.name, object.object
else:
self.rules = get_rules()
for rule in self.rules[name].sequence:
if rule.name != name and rule.method == 'replace':
point = rule.forward_point(object)
if point is DoesNotDefined or point == object:
self.name, self.object = rule.name, rule.forward_object(object)
break
else:
self.name, self.object = name, object
self.front_paged_ancestors = False
def title(self):
"""Returns title for object"""
if hasattr(self, '_title'):
return getattr(self, '_title')
title = get_title(self.rules[self.name].proxy, self.object)
setattr(self, '_title', title)
return title
def url(self):
"""Returns url for object"""
if hasattr(self, '_url'):
return getattr(self, '_url')
url = get_url(self.rules[self.name].proxy, self.object)
setattr(self, '_url', url)
return url
def ancestors(self):
"""Returns ancestors for object, started from top level"""
if hasattr(self, '_ancestors'):
return getattr(self, '_ancestors')
ancestors = []
name = self.name
object = self.object
while True:
items = get_ancestors(self.rules[name].proxy, object)
until = self.rules[name].backward_object(object)
items.reverse()
for item in items:
ancestors.insert(0, MenuItem(name, item))
if item == until:
break
method, object, name = self.rules[name].method, self.rules[name].backward_point(object), self.rules[name].inside
if name is None:
break
if method != 'replace':
ancestors.insert(0, MenuItem(name, object))
front_page = get_front_page(self.rules)
if front_page is not None:
if not ancestors or ancestors[0].object != front_page.object:
if (front_page.name, front_page.object) != (self.name, self.object):
self.front_paged_ancestors = True
ancestors.insert(0, front_page)
setattr(self, '_ancestors', ancestors)
return ancestors
def ancestors_for_menu(self):
"""
Returns ancestors for show_menu tags.
        Ancestors will not contain the front page but will contain the object itself.
"""
ancestors = self.ancestors()
if self.front_paged_ancestors:
ancestors = ancestors[1:]
else:
ancestors = ancestors[:]
ancestors.append(self)
return ancestors
def children(self, lasy=False):
"""Returns children for object"""
if lasy:
field_name = '_children_lasy'
else:
field_name = '_children'
if hasattr(self, field_name):
return getattr(self, field_name)
children = []
for rule in self.rules[self.name].sequence:
point = rule.forward_point(self.object)
if rule.name == self.name:
children += [MenuItem(self.name, item) for item in get_children(
self.rules[self.name].proxy, self.object, lasy)
]
elif point is DoesNotDefined or point == self.object:
object = rule.forward_object(self.object)
if rule.method == 'insert' and not lasy:
children += [MenuItem(rule.name, object)]
elif rule.method == 'children':
children += [MenuItem(rule.name, item) for item in get_children(
rule.proxy, object, lasy)
]
setattr(self, field_name, children)
return children
| gpl-3.0 | 7,323,693,844,569,859,000 | 35.032727 | 124 | 0.598345 | false |
nirvn/QGIS | tests/src/python/test_provider_ogr_gpkg.py | 1 | 28594 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for the OGR/GPKG provider.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Even Rouault'
__date__ = '2016-04-21'
__copyright__ = 'Copyright 2016, Even Rouault'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import shutil
import sys
import tempfile
import time
import qgis # NOQA
from osgeo import gdal, ogr
from qgis.core import (QgsFeature, QgsFieldConstraints, QgsGeometry,
QgsRectangle, QgsSettings, QgsVectorLayer,
QgsVectorLayerExporter, QgsPointXY)
from qgis.PyQt.QtCore import QCoreApplication
from qgis.testing import start_app, unittest
def GDAL_COMPUTE_VERSION(maj, min, rev):
return ((maj) * 1000000 + (min) * 10000 + (rev) * 100)
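# For example, GDAL_COMPUTE_VERSION(2, 1, 2) == 2010200, which can be compared
# against int(gdal.VersionInfo('VERSION_NUM')).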
class ErrorReceiver():
def __init__(self):
self.msg = None
def receiveError(self, msg):
self.msg = msg
def count_opened_filedescriptors(filename_to_test):
count = -1
if sys.platform.startswith('linux'):
count = 0
open_files_dirname = '/proc/%d/fd' % os.getpid()
filenames = os.listdir(open_files_dirname)
for filename in filenames:
full_filename = open_files_dirname + '/' + filename
if os.path.exists(full_filename):
link = os.readlink(full_filename)
if os.path.basename(link) == os.path.basename(filename_to_test):
count += 1
return count
class TestPyQgsOGRProviderGpkg(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
QCoreApplication.setOrganizationName("QGIS_Test")
QCoreApplication.setOrganizationDomain("TestPyQgsOGRProviderGpkg.com")
QCoreApplication.setApplicationName("TestPyQgsOGRProviderGpkg")
QgsSettings().clear()
start_app()
# Create test layer
cls.basetestpath = tempfile.mkdtemp()
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
shutil.rmtree(cls.basetestpath, True)
QgsSettings().clear()
def testSingleToMultiPolygonPromotion(self):
tmpfile = os.path.join(self.basetestpath, 'testSingleToMultiPolygonPromotion.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
ds.CreateLayer('test', geom_type=ogr.wkbMultiPolygon)
ds = None
vl = QgsVectorLayer('{}|layerid=0'.format(tmpfile), 'test', 'ogr')
f = QgsFeature()
f.setGeometry(QgsGeometry.fromWkt('POLYGON ((0 0,0 1,1 1,0 0))'))
vl.dataProvider().addFeatures([f])
got = [feat for feat in vl.getFeatures()][0]
got_geom = got.geometry()
reference = QgsGeometry.fromWkt('MultiPolygon (((0 0, 0 1, 1 1, 0 0)))')
# The geometries must be binarily identical
self.assertEqual(got_geom.asWkb(), reference.asWkb(), 'Expected {}, got {}'.format(reference.asWkt(), got_geom.asWkt()))
def testCurveGeometryType(self):
tmpfile = os.path.join(self.basetestpath, 'testCurveGeometryType.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
ds.CreateLayer('test', geom_type=ogr.wkbCurvePolygon)
ds = None
vl = QgsVectorLayer('{}'.format(tmpfile), 'test', 'ogr')
self.assertEqual(vl.dataProvider().subLayers(), ['0:test:0:CurvePolygon:geom'])
f = QgsFeature()
f.setGeometry(QgsGeometry.fromWkt('POLYGON ((0 0,0 1,1 1,0 0))'))
vl.dataProvider().addFeatures([f])
got = [feat for feat in vl.getFeatures()][0]
got_geom = got.geometry()
reference = QgsGeometry.fromWkt('CurvePolygon (((0 0, 0 1, 1 1, 0 0)))')
# The geometries must be binarily identical
self.assertEqual(got_geom.asWkb(), reference.asWkb(), 'Expected {}, got {}'.format(reference.asWkt(), got_geom.asWkt()))
def internalTestBug15351(self, orderClosing):
tmpfile = os.path.join(self.basetestpath, 'testBug15351.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = None
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile), u'test', u'ogr')
self.assertTrue(vl.startEditing())
self.assertTrue(vl.changeGeometry(1, QgsGeometry.fromWkt('Point (3 50)')))
# Iterate over features (will open a new OGR connection), but do not
# close the iterator for now
it = vl.getFeatures()
f = QgsFeature()
it.nextFeature(f)
if orderClosing == 'closeIter_commit_closeProvider':
it = None
# Commit changes
cbk = ErrorReceiver()
vl.dataProvider().raiseError.connect(cbk.receiveError)
self.assertTrue(vl.commitChanges())
self.assertIsNone(cbk.msg)
# Close layer and iterator in different orders
if orderClosing == 'closeIter_commit_closeProvider':
vl = None
elif orderClosing == 'commit_closeProvider_closeIter':
vl = None
it = None
else:
assert orderClosing == 'commit_closeIter_closeProvider'
it = None
vl = None
# Test that we succeeded restoring default journal mode, and we
# are not let in WAL mode.
ds = ogr.Open(tmpfile)
lyr = ds.ExecuteSQL('PRAGMA journal_mode')
f = lyr.GetNextFeature()
res = f.GetField(0)
ds.ReleaseResultSet(lyr)
ds = None
self.assertEqual(res, 'delete')
# We need GDAL 2.0 to issue PRAGMA journal_mode
# Note: for that case, we don't strictly need turning on WAL
def testBug15351_closeIter_commit_closeProvider(self):
self.internalTestBug15351('closeIter_commit_closeProvider')
# We need GDAL 2.0 to issue PRAGMA journal_mode
def testBug15351_commit_closeProvider_closeIter(self):
self.internalTestBug15351('commit_closeProvider_closeIter')
# We need GDAL 2.0 to issue PRAGMA journal_mode
def testBug15351_commit_closeIter_closeProvider(self):
self.internalTestBug15351('commit_closeIter_closeProvider')
    @unittest.skipIf(int(gdal.VersionInfo('VERSION_NUM')) < GDAL_COMPUTE_VERSION(2, 1, 2), 'GDAL 2.1.2 required')
def testGeopackageExtentUpdate(self):
''' test https://issues.qgis.org/issues/15273 '''
tmpfile = os.path.join(self.basetestpath, 'testGeopackageExtentUpdate.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(1 1)'))
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(1 0.5)'))
lyr.CreateFeature(f)
f = None
gdal.ErrorReset()
ds.ExecuteSQL('RECOMPUTE EXTENT ON test')
has_error = gdal.GetLastErrorMsg() != ''
ds = None
if has_error:
print('Too old GDAL trunk version. Please update')
return
vl = QgsVectorLayer(u'{}'.format(tmpfile), u'test', u'ogr')
# Test moving a geometry that touches the bbox
self.assertTrue(vl.startEditing())
self.assertTrue(vl.changeGeometry(1, QgsGeometry.fromWkt('Point (0.5 0)')))
self.assertTrue(vl.commitChanges())
reference = QgsGeometry.fromRect(QgsRectangle(0.5, 0.0, 1.0, 1.0))
provider_extent = QgsGeometry.fromRect(vl.extent())
self.assertTrue(QgsGeometry.compare(provider_extent.asPolygon()[0], reference.asPolygon()[0], 0.00001),
provider_extent.asPolygon()[0])
# Test deleting a geometry that touches the bbox
self.assertTrue(vl.startEditing())
self.assertTrue(vl.deleteFeature(2))
self.assertTrue(vl.commitChanges())
reference = QgsGeometry.fromRect(QgsRectangle(0.5, 0.0, 1.0, 0.5))
provider_extent = QgsGeometry.fromRect(vl.extent())
self.assertTrue(QgsGeometry.compare(provider_extent.asPolygon()[0], reference.asPolygon()[0], 0.00001),
provider_extent.asPolygon()[0])
def testSelectSubsetString(self):
tmpfile = os.path.join(self.basetestpath, 'testSelectSubsetString.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbMultiPolygon)
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f['foo'] = 'bar'
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f['foo'] = 'baz'
lyr.CreateFeature(f)
f = None
ds = None
vl = QgsVectorLayer('{}|layerid=0'.format(tmpfile), 'test', 'ogr')
vl.setSubsetString("SELECT fid, foo FROM test WHERE foo = 'baz'")
got = [feat for feat in vl.getFeatures()]
self.assertEqual(len(got), 1)
def testStyle(self):
# First test with invalid URI
vl = QgsVectorLayer('/idont/exist.gpkg', 'test', 'ogr')
self.assertFalse(vl.dataProvider().isSaveAndLoadStyleToDatabaseSupported())
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, -1)
self.assertEqual(idlist, [])
self.assertEqual(namelist, [])
self.assertEqual(desclist, [])
self.assertNotEqual(errmsg, "")
qml, errmsg = vl.getStyleFromDatabase("1")
self.assertEqual(qml, "")
self.assertNotEqual(errmsg, "")
qml, success = vl.loadNamedStyle('/idont/exist.gpkg')
self.assertFalse(success)
errorMsg = vl.saveStyleToDatabase("name", "description", False, "")
self.assertNotEqual(errorMsg, "")
# Now with valid URI
tmpfile = os.path.join(self.basetestpath, 'testStyle.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbMultiPolygon)
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f['foo'] = 'bar'
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('test2', geom_type=ogr.wkbMultiPolygon)
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f['foo'] = 'bar'
lyr.CreateFeature(f)
f = None
ds = None
vl = QgsVectorLayer('{}|layername=test'.format(tmpfile), 'test', 'ogr')
self.assertTrue(vl.isValid())
vl2 = QgsVectorLayer('{}|layername=test2'.format(tmpfile), 'test2', 'ogr')
self.assertTrue(vl2.isValid())
self.assertTrue(vl.dataProvider().isSaveAndLoadStyleToDatabaseSupported())
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 0)
self.assertEqual(idlist, [])
self.assertEqual(namelist, [])
self.assertEqual(desclist, [])
self.assertNotEqual(errmsg, "")
qml, errmsg = vl.getStyleFromDatabase("not_existing")
self.assertEqual(qml, "")
self.assertNotEqual(errmsg, "")
qml, success = vl.loadNamedStyle('{}|layerid=0'.format(tmpfile))
self.assertFalse(success)
errorMsg = vl.saveStyleToDatabase("name", "description", False, "")
self.assertEqual(errorMsg, "")
qml, errmsg = vl.getStyleFromDatabase("not_existing")
self.assertEqual(qml, "")
self.assertNotEqual(errmsg, "")
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 1)
self.assertEqual(errmsg, "")
self.assertEqual(idlist, ['1'])
self.assertEqual(namelist, ['name'])
self.assertEqual(desclist, ['description'])
qml, errmsg = vl.getStyleFromDatabase("100")
self.assertEqual(qml, "")
self.assertNotEqual(errmsg, "")
qml, errmsg = vl.getStyleFromDatabase("1")
self.assertTrue(qml.startswith('<!DOCTYPE qgis'), qml)
self.assertEqual(errmsg, "")
# Try overwrite it but simulate answer no
settings = QgsSettings()
settings.setValue("/qgis/overwriteStyle", False)
errorMsg = vl.saveStyleToDatabase("name", "description_bis", False, "")
self.assertNotEqual(errorMsg, "")
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 1)
self.assertEqual(errmsg, "")
self.assertEqual(idlist, ['1'])
self.assertEqual(namelist, ['name'])
self.assertEqual(desclist, ['description'])
# Try overwrite it and simulate answer yes
settings = QgsSettings()
settings.setValue("/qgis/overwriteStyle", True)
errorMsg = vl.saveStyleToDatabase("name", "description_bis", False, "")
self.assertEqual(errorMsg, "")
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 1)
self.assertEqual(errmsg, "")
self.assertEqual(idlist, ['1'])
self.assertEqual(namelist, ['name'])
self.assertEqual(desclist, ['description_bis'])
errorMsg = vl2.saveStyleToDatabase("name_test2", "description_test2", True, "")
self.assertEqual(errorMsg, "")
errorMsg = vl.saveStyleToDatabase("name2", "description2", True, "")
self.assertEqual(errorMsg, "")
errorMsg = vl.saveStyleToDatabase("name3", "description3", True, "")
self.assertEqual(errorMsg, "")
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 3)
self.assertEqual(errmsg, "")
self.assertEqual(idlist, ['1', '3', '4', '2'])
self.assertEqual(namelist, ['name', 'name2', 'name3', 'name_test2'])
self.assertEqual(desclist, ['description_bis', 'description2', 'description3', 'name_test2'])
# Check that layers_style table is not list in subLayers()
vl = QgsVectorLayer(tmpfile, 'test', 'ogr')
sublayers = vl.dataProvider().subLayers()
self.assertEqual(len(sublayers), 2, sublayers)
def testDisablewalForSqlite3(self):
''' Test disabling walForSqlite3 setting '''
QgsSettings().setValue("/qgis/walForSqlite3", False)
tmpfile = os.path.join(self.basetestpath, 'testDisablewalForSqlite3.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint)
lyr.CreateField(ogr.FieldDefn('attr0', ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn('attr1', ogr.OFTInteger))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = None
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile), u'test', u'ogr')
# Test that we are using default delete mode and not WAL
ds = ogr.Open(tmpfile)
lyr = ds.ExecuteSQL('PRAGMA journal_mode')
f = lyr.GetNextFeature()
res = f.GetField(0)
ds.ReleaseResultSet(lyr)
ds = None
self.assertEqual(res, 'delete')
self.assertTrue(vl.startEditing())
feature = next(vl.getFeatures())
self.assertTrue(vl.changeAttributeValue(feature.id(), 1, 1001))
# Commit changes
cbk = ErrorReceiver()
vl.dataProvider().raiseError.connect(cbk.receiveError)
self.assertTrue(vl.commitChanges())
self.assertIsNone(cbk.msg)
vl = None
QgsSettings().setValue("/qgis/walForSqlite3", None)
def testSimulatedDBManagerImport(self):
uri = 'point?field=f1:int'
uri += '&field=f2:double(6,4)'
uri += '&field=f3:string(20)'
lyr = QgsVectorLayer(uri, "x", "memory")
self.assertTrue(lyr.isValid())
f = QgsFeature(lyr.fields())
f['f1'] = 1
f['f2'] = 123.456
f['f3'] = '12345678.90123456789'
f2 = QgsFeature(lyr.fields())
f2['f1'] = 2
lyr.dataProvider().addFeatures([f, f2])
tmpfile = os.path.join(self.basetestpath, 'testSimulatedDBManagerImport.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
ds = None
options = {}
options['update'] = True
options['driverName'] = 'GPKG'
options['layerName'] = 'my_out_table'
err = QgsVectorLayerExporter.exportLayer(lyr, tmpfile, "ogr", lyr.crs(), False, options)
self.assertEqual(err[0], QgsVectorLayerExporter.NoError,
'unexpected import error {0}'.format(err))
lyr = QgsVectorLayer(tmpfile + "|layername=my_out_table", "y", "ogr")
self.assertTrue(lyr.isValid())
features = lyr.getFeatures()
f = next(features)
self.assertEqual(f['f1'], 1)
self.assertEqual(f['f2'], 123.456)
self.assertEqual(f['f3'], '12345678.90123456789')
f = next(features)
self.assertEqual(f['f1'], 2)
features = None
# Test overwriting without overwrite option
err = QgsVectorLayerExporter.exportLayer(lyr, tmpfile, "ogr", lyr.crs(), False, options)
self.assertEqual(err[0], QgsVectorLayerExporter.ErrCreateDataSource)
# Test overwriting
lyr = QgsVectorLayer(uri, "x", "memory")
self.assertTrue(lyr.isValid())
f = QgsFeature(lyr.fields())
f['f1'] = 3
lyr.dataProvider().addFeatures([f])
options['overwrite'] = True
err = QgsVectorLayerExporter.exportLayer(lyr, tmpfile, "ogr", lyr.crs(), False, options)
self.assertEqual(err[0], QgsVectorLayerExporter.NoError,
'unexpected import error {0}'.format(err))
lyr = QgsVectorLayer(tmpfile + "|layername=my_out_table", "y", "ogr")
self.assertTrue(lyr.isValid())
features = lyr.getFeatures()
f = next(features)
self.assertEqual(f['f1'], 3)
features = None
def testGeopackageTwoLayerEdition(self):
''' test https://issues.qgis.org/issues/17034 '''
tmpfile = os.path.join(self.basetestpath, 'testGeopackageTwoLayerEdition.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('layer1', geom_type=ogr.wkbPoint)
lyr.CreateField(ogr.FieldDefn('attr', ogr.OFTInteger))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('layer2', geom_type=ogr.wkbPoint)
lyr.CreateField(ogr.FieldDefn('attr', ogr.OFTInteger))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(1 1)'))
lyr.CreateFeature(f)
f = None
ds = None
vl1 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=layer1", u'layer1', u'ogr')
vl2 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=layer2", u'layer2', u'ogr')
# Edit vl1, vl2 multiple times
self.assertTrue(vl1.startEditing())
self.assertTrue(vl2.startEditing())
self.assertTrue(vl1.changeGeometry(1, QgsGeometry.fromWkt('Point (2 2)')))
self.assertTrue(vl2.changeGeometry(1, QgsGeometry.fromWkt('Point (3 3)')))
self.assertTrue(vl1.commitChanges())
self.assertTrue(vl2.commitChanges())
self.assertTrue(vl1.startEditing())
self.assertTrue(vl2.startEditing())
self.assertTrue(vl1.changeAttributeValue(1, 1, 100))
self.assertTrue(vl2.changeAttributeValue(1, 1, 101))
self.assertTrue(vl1.commitChanges())
self.assertTrue(vl2.commitChanges())
self.assertTrue(vl1.startEditing())
self.assertTrue(vl2.startEditing())
self.assertTrue(vl1.changeGeometry(1, QgsGeometry.fromWkt('Point (4 4)')))
self.assertTrue(vl2.changeGeometry(1, QgsGeometry.fromWkt('Point (5 5)')))
self.assertTrue(vl1.commitChanges())
self.assertTrue(vl2.commitChanges())
vl1 = None
vl2 = None
# Check everything is as expected after re-opening
vl1 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=layer1", u'layer1', u'ogr')
vl2 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=layer2", u'layer2', u'ogr')
got = [feat for feat in vl1.getFeatures()][0]
got_geom = got.geometry()
self.assertEqual(got['attr'], 100)
reference = QgsGeometry.fromWkt('Point (4 4)')
self.assertEqual(got_geom.asWkb(), reference.asWkb(), 'Expected {}, got {}'.format(reference.asWkt(), got_geom.asWkt()))
got = [feat for feat in vl2.getFeatures()][0]
got_geom = got.geometry()
self.assertEqual(got['attr'], 101)
reference = QgsGeometry.fromWkt('Point (5 5)')
self.assertEqual(got_geom.asWkb(), reference.asWkb(), 'Expected {}, got {}'.format(reference.asWkt(), got_geom.asWkt()))
def testGeopackageManyLayers(self):
''' test opening more than 64 layers without running out of Spatialite connections '''
tmpfile = os.path.join(self.basetestpath, 'testGeopackageManyLayers.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
for i in range(70):
lyr = ds.CreateLayer('layer%d' % i, geom_type=ogr.wkbPoint)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(%d 0)' % i))
lyr.CreateFeature(f)
f = None
ds = None
vl_tab = []
for i in range(70):
layername = 'layer%d' % i
vl = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + layername, layername, u'ogr')
self.assertTrue(vl.isValid())
vl_tab += [vl]
count = count_opened_filedescriptors(tmpfile)
if count > 0:
self.assertEqual(count, 1)
for i in range(70):
got = [feat for feat in vl.getFeatures()]
self.assertTrue(len(got) == 1)
# We shouldn't have more than 2 file handles opened:
# one shared by the QgsOgrProvider object
# one shared by the feature iterators
count = count_opened_filedescriptors(tmpfile)
if count > 0:
self.assertEqual(count, 2)
# Re-open an already opened layers. We should get a new handle
layername = 'layer%d' % 0
vl_extra0 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + layername, layername, u'ogr')
self.assertTrue(vl_extra0.isValid())
countNew = count_opened_filedescriptors(tmpfile)
if countNew > 0:
self.assertLessEqual(countNew, 4) # for some reason we get 4 and not 3
layername = 'layer%d' % 1
vl_extra1 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + layername, layername, u'ogr')
self.assertTrue(vl_extra1.isValid())
countNew2 = count_opened_filedescriptors(tmpfile)
self.assertEqual(countNew2, countNew)
def testGeopackageRefreshIfTableListUpdated(self):
''' test that creating/deleting a layer is reflected when opening a new layer '''
tmpfile = os.path.join(self.basetestpath, 'testGeopackageRefreshIfTableListUpdated.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
ds.CreateLayer('test', geom_type=ogr.wkbPoint)
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
self.assertTrue(vl.extent().isNull())
time.sleep(1) # so timestamp gets updated
ds = ogr.Open(tmpfile, update=1)
ds.CreateLayer('test2', geom_type=ogr.wkbPoint)
ds = None
vl2 = QgsVectorLayer(u'{}'.format(tmpfile), 'test', u'ogr')
vl2.subLayers()
self.assertEqual(vl2.dataProvider().subLayers(), ['0:test:0:Point:geom', '1:test2:0:Point:geom'])
def testGeopackageLargeFID(self):
tmpfile = os.path.join(self.basetestpath, 'testGeopackageLargeFID.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint)
lyr.CreateField(ogr.FieldDefn('str_field', ogr.OFTString))
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
f = QgsFeature()
f.setAttributes([1234567890123, None])
self.assertTrue(vl.startEditing())
self.assertTrue(vl.dataProvider().addFeatures([f]))
self.assertTrue(vl.commitChanges())
got = [feat for feat in vl.getFeatures()][0]
self.assertEqual(got['fid'], 1234567890123)
self.assertTrue(vl.startEditing())
self.assertTrue(vl.changeGeometry(1234567890123, QgsGeometry.fromWkt('Point (3 50)')))
self.assertTrue(vl.changeAttributeValue(1234567890123, 1, 'foo'))
self.assertTrue(vl.commitChanges())
got = [feat for feat in vl.getFeatures()][0]
self.assertEqual(got['str_field'], 'foo')
got_geom = got.geometry()
self.assertIsNotNone(got_geom)
self.assertTrue(vl.startEditing())
self.assertTrue(vl.deleteFeature(1234567890123))
self.assertTrue(vl.commitChanges())
def test_AddFeatureNullFid(self):
"""Test gpkg feature with NULL fid can be added"""
tmpfile = os.path.join(self.basetestpath, 'testGeopackageSplitFeatures.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPolygon)
lyr.CreateField(ogr.FieldDefn('str_field', ogr.OFTString))
ds = None
layer = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
# Check that pk field has unique constraint
fields = layer.fields()
pkfield = fields.at(0)
self.assertTrue(pkfield.constraints().constraints() & QgsFieldConstraints.ConstraintUnique)
# Test add feature with default Fid (NULL)
layer.startEditing()
f = QgsFeature()
feat = QgsFeature(layer.fields())
feat.setGeometry(QgsGeometry.fromWkt('Polygon ((0 0, 0 1, 1 1, 1 0, 0 0))'))
feat.setAttribute(1, 'test_value')
layer.addFeature(feat)
self.assertTrue(layer.commitChanges())
self.assertEqual(layer.featureCount(), 1)
def test_SplitFeature(self):
"""Test gpkg feature can be split"""
tmpfile = os.path.join(self.basetestpath, 'testGeopackageSplitFeatures.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPolygon)
lyr.CreateField(ogr.FieldDefn('str_field', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON ((0 0,0 1,1 1,1 0,0 0))'))
lyr.CreateFeature(f)
f = None
ds = None
# Split features
layer = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
self.assertTrue(layer.isValid())
self.assertTrue(layer.isSpatial())
self.assertEqual([f for f in layer.getFeatures()][0].geometry().asWkt(), 'Polygon ((0 0, 0 1, 1 1, 1 0, 0 0))')
layer.startEditing()
self.assertEqual(layer.splitFeatures([QgsPointXY(0.5, 0), QgsPointXY(0.5, 1)], 0), 0)
self.assertTrue(layer.commitChanges())
self.assertEqual(layer.featureCount(), 2)
layer = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
self.assertEqual(layer.featureCount(), 2)
self.assertEqual([f for f in layer.getFeatures()][0].geometry().asWkt(), 'Polygon ((0.5 0, 0.5 1, 1 1, 1 0, 0.5 0))')
self.assertEqual([f for f in layer.getFeatures()][1].geometry().asWkt(), 'Polygon ((0.5 1, 0.5 0, 0 0, 0 1, 0.5 1))')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 7,914,798,326,296,936,000 | 40.44058 | 128 | 0.627474 | false |
laurentb/weboob | weboob/core/ouiboube.py | 1 | 19192 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2014 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import os
from weboob.core.bcall import BackendsCall
from weboob.core.modules import ModulesLoader, RepositoryModulesLoader
from weboob.core.backendscfg import BackendsConfig
from weboob.core.requests import RequestsManager
from weboob.core.repositories import Repositories, PrintProgress
from weboob.core.scheduler import Scheduler
from weboob.tools.backend import Module
from weboob.tools.compat import basestring, unicode
from weboob.tools.config.iconfig import ConfigError
from weboob.tools.log import getLogger
from weboob.exceptions import ModuleLoadError
__all__ = ['WebNip', 'Weboob']
class VersionsMismatchError(ConfigError):
pass
class WebNip(object):
"""
Weboob in Non Integrated Programs
It provides methods to build backends or call methods on all loaded
backends.
    You should use this class when you want to build an application
    using Weboob as a library, without using the standard modules nor
    the automatic module download and update mechanism. When using
    WebNip, you have to explicitly provide module paths and deal
    with backend configuration yourself.
:param modules_path: path to directory containing modules.
:type modules_path: :class:`basestring`
:param storage: provide a storage where backends can save data
:type storage: :class:`weboob.tools.storage.IStorage`
:param scheduler: what scheduler to use; default is :class:`weboob.core.scheduler.Scheduler`
:type scheduler: :class:`weboob.core.scheduler.IScheduler`
"""
VERSION = '2.1'
def __init__(self, modules_path=None, storage=None, scheduler=None):
self.logger = getLogger('weboob')
self.backend_instances = {}
self.requests = RequestsManager()
if modules_path is None:
import pkg_resources
# Package weboob_modules is provided by
# https://git.weboob.org/weboob/modules
# and should be pip-installed separately.
# Note that Weboob users should rather install Weboob modules
# through https://updates.weboob.org/.
modules_path = pkg_resources.resource_filename('weboob_modules', '')
if modules_path:
self.modules_loader = ModulesLoader(modules_path, self.VERSION)
if scheduler is None:
scheduler = Scheduler()
self.scheduler = scheduler
self.storage = storage
def __deinit__(self):
self.deinit()
def deinit(self):
"""
        Call this method when you stop using Weboob, to
        properly unload everything.
"""
self.unload_backends()
def build_backend(self, module_name, params=None, storage=None, name=None, nofail=False, logger=None):
"""
Create a backend.
It does not load it into the Weboob object, so you are responsible for
deinitialization and calls.
:param module_name: name of module
:param params: parameters to give to backend
:type params: :class:`dict`
:param storage: storage to use
:type storage: :class:`weboob.tools.storage.IStorage`
:param name: name of backend
:type name: :class:`basestring`
:rtype: :class:`weboob.tools.backend.Module`
:param nofail: if true, this call can't fail
:type nofail: :class:`bool`
"""
module = self.modules_loader.get_or_load_module(module_name)
backend_instance = module.create_instance(self, name or module_name, params or {}, storage, nofail, logger=logger or self.logger)
return backend_instance
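    # Usage sketch (module name and credentials hypothetical):
    #   backend = webnip.build_backend('mymodule', params={'login': 'x', 'password': 'y'})
    #   ...use backend..., then call backend.deinit() when done (caller's responsibility)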
class LoadError(Exception):
"""
        Raised when a backend is unable to load.
:param backend_name: name of backend we can't load
:param exception: exception object
"""
def __init__(self, backend_name, exception):
super(WebNip.LoadError, self).__init__(unicode(exception))
self.backend_name = backend_name
def load_backend(self, module_name, name, params=None, storage=None):
"""
Load a backend.
:param module_name: name of module to load
:type module_name: :class:`basestring`:
:param name: name of instance
:type name: :class:`basestring`
:param params: parameters to give to backend
:type params: :class:`dict`
:param storage: storage to use
:type storage: :class:`weboob.tools.storage.IStorage`
:rtype: :class:`weboob.tools.backend.Module`
"""
if name is None:
name = module_name
if name in self.backend_instances:
raise self.LoadError(name, 'A loaded backend already named "%s"' % name)
backend = self.build_backend(module_name, params, storage, name)
self.backend_instances[name] = backend
return backend
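    # Usage sketch (names hypothetical): loads and registers an instance so it
    # is returned by get_backend()/iter_backends() afterwards:
    #   webnip.load_backend('mymodule', 'myinstance', params={'login': 'x'})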
def unload_backends(self, names=None):
"""
Unload backends.
        :param names: if specified, only unload those backends
:type names: :class:`list`
"""
unloaded = {}
if isinstance(names, basestring):
names = [names]
elif names is None:
names = list(self.backend_instances.keys())
for name in names:
backend = self.backend_instances.pop(name)
with backend:
backend.deinit()
unloaded[backend.name] = backend
return unloaded
def __getitem__(self, name):
"""
Alias for :func:`WebNip.get_backend`.
"""
return self.get_backend(name)
def get_backend(self, name, **kwargs):
"""
Get a backend from its name.
:param name: name of backend to get
:type name: str
:param default: if specified, get this value when the backend is not found
:type default: whatever you want
:raises: :class:`KeyError` if not found.
"""
try:
return self.backend_instances[name]
except KeyError:
if 'default' in kwargs:
return kwargs['default']
else:
raise
def count_backends(self):
"""
Get number of loaded backends.
"""
return len(self.backend_instances)
def iter_backends(self, caps=None, module=None):
"""
        Iterate over each backend.
Note: each backend is locked when it is returned.
:param caps: optional list of capabilities to select backends
:type caps: tuple[:class:`weboob.capabilities.base.Capability`]
:param module: optional name of module
:type module: :class:`basestring`
:rtype: iter[:class:`weboob.tools.backend.Module`]
"""
for _, backend in sorted(self.backend_instances.items()):
if (caps is None or backend.has_caps(caps)) and \
(module is None or backend.NAME == module):
with backend:
yield backend
def __getattr__(self, name):
def caller(*args, **kwargs):
return self.do(name, *args, **kwargs)
return caller
def do(self, function, *args, **kwargs):
r"""
        Do calls on loaded backends with specified arguments, in separate
        threads.
        This function has two modes:
        - If *function* is a string, it calls the method with this name on
          each backend with the specified arguments;
        - If *function* is a callable, it calls it in a separate thread with
          the locked backend instance as first argument, plus \*args and
          \*\*kwargs.
:param function: backend's method name, or a callable object
:type function: :class:`str`
:param backends: list of backends to iterate on
:type backends: list[:class:`str`]
:param caps: iterate on backends which implement this caps
:type caps: list[:class:`weboob.capabilities.base.Capability`]
:rtype: A :class:`weboob.core.bcall.BackendsCall` object (iterable)
"""
backends = list(self.backend_instances.values())
_backends = kwargs.pop('backends', None)
if _backends is not None:
if isinstance(_backends, Module):
backends = [_backends]
elif isinstance(_backends, basestring):
if len(_backends) > 0:
try:
backends = [self.backend_instances[_backends]]
except (ValueError, KeyError):
backends = []
elif isinstance(_backends, (list, tuple, set)):
backends = []
for backend in _backends:
if isinstance(backend, basestring):
try:
backends.append(self.backend_instances[backend])
except (ValueError, KeyError):
pass
else:
backends.append(backend)
else:
self.logger.warning(u'The "backends" value isn\'t supported: %r', _backends)
if 'caps' in kwargs:
caps = kwargs.pop('caps')
backends = [backend for backend in backends if backend.has_caps(caps)]
# The return value MUST BE the BackendsCall instance. Please never iterate
# here on this object, because caller might want to use other methods, like
# wait() on callback_thread().
# Thanks a lot.
return BackendsCall(backends, function, *args, **kwargs)
def schedule(self, interval, function, *args):
"""
Schedule an event.
:param interval: delay before calling the function
:type interval: int
:param function: function to call
        :type function: callable
:param args: arguments to give to function
        :returns: an event identifier
"""
return self.scheduler.schedule(interval, function, *args)
def repeat(self, interval, function, *args):
"""
Repeat a call to a function
:param interval: interval between two calls
:type interval: int
:param function: function to call
:type function: callable
:param args: arguments to give to function
        :returns: an event identifier
"""
return self.scheduler.repeat(interval, function, *args)
def cancel(self, ev):
"""
Cancel an event
        :param ev: the event identifier
"""
return self.scheduler.cancel(ev)
def want_stop(self):
"""
Plan to stop the scheduler.
"""
return self.scheduler.want_stop()
def loop(self):
"""
Run the scheduler loop
"""
return self.scheduler.run()
def load_or_install_module(self, module_name):
""" Load a backend, but can't install it """
return self.modules_loader.get_or_load_module(module_name)
class Weboob(WebNip):
"""
The main class of Weboob, used to manage backends, modules repositories and
call methods on all loaded backends.
:param workdir: optional parameter to set path of the working directory
:type workdir: str
:param datadir: optional parameter to set path of the data directory
:type datadir: str
:param backends_filename: name of the *backends* file, where configuration of
backends is stored
:type backends_filename: str
:param storage: provide a storage where backends can save data
:type storage: :class:`weboob.tools.storage.IStorage`
"""
BACKENDS_FILENAME = 'backends'
def __init__(self, workdir=None, datadir=None, backends_filename=None, scheduler=None, storage=None):
super(Weboob, self).__init__(modules_path=False, scheduler=scheduler, storage=storage)
# Create WORKDIR
if workdir is None:
if 'WEBOOB_WORKDIR' in os.environ:
workdir = os.environ['WEBOOB_WORKDIR']
else:
workdir = os.path.join(os.environ.get('XDG_CONFIG_HOME', os.path.join(os.path.expanduser('~'), '.config')), 'weboob')
self.workdir = os.path.realpath(workdir)
self._create_dir(workdir)
# Create DATADIR
if datadir is None:
if 'WEBOOB_DATADIR' in os.environ:
datadir = os.environ['WEBOOB_DATADIR']
elif 'WEBOOB_WORKDIR' in os.environ:
datadir = os.environ['WEBOOB_WORKDIR']
else:
datadir = os.path.join(os.environ.get('XDG_DATA_HOME', os.path.join(os.path.expanduser('~'), '.local', 'share')), 'weboob')
_datadir = os.path.realpath(datadir)
self._create_dir(_datadir)
# Modules management
self.repositories = Repositories(workdir, _datadir, self.VERSION)
self.modules_loader = RepositoryModulesLoader(self.repositories)
# Backend instances config
if not backends_filename:
backends_filename = os.environ.get('WEBOOB_BACKENDS', os.path.join(self.workdir, self.BACKENDS_FILENAME))
elif not backends_filename.startswith('/'):
backends_filename = os.path.join(self.workdir, backends_filename)
self.backends_config = BackendsConfig(backends_filename)
def _create_dir(self, name):
if not os.path.exists(name):
os.makedirs(name)
elif not os.path.isdir(name):
self.logger.error(u'"%s" is not a directory', name)
def update(self, progress=PrintProgress()):
"""
Update modules from repositories.
"""
self.repositories.update(progress)
modules_to_check = set([module_name for _, module_name, _ in self.backends_config.iter_backends()])
for module_name in modules_to_check:
minfo = self.repositories.get_module_info(module_name)
if minfo and not minfo.is_installed():
self.repositories.install(minfo, progress)
def build_backend(self, module_name, params=None, storage=None, name=None, nofail=False):
"""
Create a single backend which is not listed in configuration.
:param module_name: name of module
:param params: parameters to give to backend
:type params: :class:`dict`
:param storage: storage to use
:type storage: :class:`weboob.tools.storage.IStorage`
:param name: name of backend
:type name: :class:`basestring`
:rtype: :class:`weboob.tools.backend.Module`
:param nofail: if true, this call can't fail
:type nofail: :class:`bool`
"""
minfo = self.repositories.get_module_info(module_name)
if minfo is None:
raise ModuleLoadError(module_name, 'Module does not exist.')
if not minfo.is_installed():
self.repositories.install(minfo)
return super(Weboob, self).build_backend(module_name, params, storage, name, nofail)
def load_backends(self, caps=None, names=None, modules=None, exclude=None, storage=None, errors=None):
"""
Load backends listed in config file.
:param caps: load backends which implement all of specified caps
:type caps: tuple[:class:`weboob.capabilities.base.Capability`]
:param names: load backends in list
:type names: tuple[:class:`str`]
:param modules: load backends which module is in list
:type modules: tuple[:class:`str`]
:param exclude: do not load backends in list
:type exclude: tuple[:class:`str`]
:param storage: use this storage if specified
:type storage: :class:`weboob.tools.storage.IStorage`
:param errors: if specified, store every errors in this list
:type errors: list[:class:`LoadError`]
:returns: loaded backends
:rtype: dict[:class:`str`, :class:`weboob.tools.backend.Module`]
"""
loaded = {}
if storage is None:
storage = self.storage
if not self.repositories.check_repositories():
self.logger.error(u'Repositories are not consistent with the sources.list')
raise VersionsMismatchError(u'Versions mismatch, please run "weboob-config update"')
for backend_name, module_name, params in self.backends_config.iter_backends():
if '_enabled' in params and not params['_enabled'].lower() in ('1', 'y', 'true', 'on', 'yes') or \
names is not None and backend_name not in names or \
modules is not None and module_name not in modules or \
exclude is not None and backend_name in exclude:
continue
minfo = self.repositories.get_module_info(module_name)
if minfo is None:
self.logger.warning(u'Backend "%s" is referenced in %s but was not found. '
u'Perhaps a missing repository or a removed module?', module_name, self.backends_config.confpath)
continue
if caps is not None and not minfo.has_caps(caps):
continue
if not minfo.is_installed():
self.repositories.install(minfo)
module = None
try:
module = self.modules_loader.get_or_load_module(module_name)
except ModuleLoadError as e:
self.logger.error(u'Unable to load module "%s": %s', module_name, e)
continue
if backend_name in self.backend_instances:
self.logger.warning(u'Oops, the backend "%s" is already loaded. Unload it before reloading...', backend_name)
self.unload_backends(backend_name)
try:
backend_instance = module.create_instance(self, backend_name, params, storage)
except Module.ConfigError as e:
if errors is not None:
errors.append(self.LoadError(backend_name, e))
else:
self.backend_instances[backend_name] = loaded[backend_name] = backend_instance
return loaded
def load_or_install_module(self, module_name):
""" Load a backend, and install it if not done before """
try:
return self.modules_loader.get_or_load_module(module_name)
except ModuleLoadError:
self.repositories.install(module_name)
return self.modules_loader.get_or_load_module(module_name)
| lgpl-3.0 | 7,523,653,314,922,785,000 | 37.079365 | 139 | 0.616298 | false |
samuel-phan/mssh-copy-id | msshcopyid/cli.py | 1 | 9466 | from __future__ import unicode_literals
import argparse
import datetime
import logging
import os
import socket
import sys
import traceback
import paramiko
import msshcopyid
from msshcopyid.constants import DEFAULT_KNOWN_HOSTS
from msshcopyid.constants import DEFAULT_SSH_DSA
from msshcopyid.constants import DEFAULT_SSH_RSA
from msshcopyid.errors import CopySSHKeyError, CopySSHKeysError
from msshcopyid.log import format_exception, format_error
from msshcopyid import utils
logger = logging.getLogger(__name__)
def main():
start_dt = datetime.datetime.now()
mc = Main()
mc.init()
try:
mc.run()
rc = 0
    except Exception:
        rc = 1
logger.debug('Elapsed time: %s', datetime.datetime.now() - start_dt)
sys.exit(rc)
class Main(object):
def __init__(self):
self.args = None
self.hosts = None
self.ssh_config = None
self.sshcopyid = None
def init(self, argv=sys.argv):
# Parse input arguments
parser = self.get_parser()
self.args = parser.parse_args(argv[1:])
# Init logging
self.init_log(self.args.verbose)
# Check input arguments
self.check_ssh_key_exists()
self.check_add_remove_options_exclusion()
# Get the password
default_password = self.args.password or utils.get_password(from_stdin_only=True)
# Load ~/.ssh/config if it exists
self.ssh_config = utils.load_ssh_config()
# Init `SSHCopyId` object
self.sshcopyid = msshcopyid.SSHCopyId(priv_key=self.args.identity, ssh_config=self.ssh_config,
default_password=default_password)
# Parse the hosts to extract the username if given
self.hosts = utils.parse_hosts(self.args.hosts, ssh_port=self.args.port, ssh_config=self.ssh_config)
def init_log(self, verbose):
root_logger = logging.getLogger()
sh = logging.StreamHandler()
root_logger.addHandler(sh)
if verbose:
sh.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] [%(name)s] %(message)s"))
root_logger.setLevel(logging.DEBUG)
else:
sh.setFormatter(logging.Formatter('%(message)s'))
root_logger.setLevel(logging.INFO)
paramiko_logger = logging.getLogger('paramiko')
paramiko_logger.setLevel(logging.ERROR)
def check_ssh_key_exists(self):
error_msg = None
if not self.args.identity:
if os.path.exists(DEFAULT_SSH_RSA):
self.args.identity = DEFAULT_SSH_RSA
elif os.path.exists(DEFAULT_SSH_DSA):
self.args.identity = DEFAULT_SSH_DSA
else:
error_msg = 'Cannot find any SSH keys "{0}" and "{1}".'.format(DEFAULT_SSH_RSA, DEFAULT_SSH_DSA)
elif not os.path.exists(self.args.identity):
error_msg = 'Cannot find the SSH key "{0}".'.format(self.args.identity)
if error_msg:
logger.error(format_error(error_msg))
sys.exit(1)
else:
logger.debug('Found SSH key: %s', self.args.identity)
def check_add_remove_options_exclusion(self):
if self.args.add and self.args.remove:
logger.error(format_error('argument -a/--add not allowed with argument -r/--remove.'))
sys.exit(1)
def get_parser(self):
parser = argparse.ArgumentParser(description='Copy SSH keys to multiple servers.')
parser.add_argument('hosts', metavar='host', nargs='+',
help='the remote hosts to copy the keys to. Syntax: [user@]hostname')
parser.add_argument('-k', '--known-hosts', default=DEFAULT_KNOWN_HOSTS,
help='the known_hosts file to use. Default: ~/.ssh/known_hosts')
parser.add_argument('-n', '--dry', action='store_true', help='do a dry run. Do not change anything')
parser.add_argument('-v', '--verbose', action='store_true', help='enable verbose mode.')
parser.add_argument('--version', action='version', version=msshcopyid.__version__)
copy_group = parser.add_argument_group('Copy SSH keys')
copy_group.add_argument('-A', '--no-add-host', action='store_true',
help='don\'t add automatically new hosts into "known_hosts" file')
copy_group.add_argument('-c', '--clear', action='store_true',
help='clear the hosts from the "known_hosts" file before copying the SSH keys')
copy_group.add_argument('-i', '--identity', help='the SSH identity file. Default: {0} or {1}'
.format(DEFAULT_SSH_RSA, DEFAULT_SSH_DSA))
copy_group.add_argument('-p', '--port', type=int, help='the SSH port for the remote hosts')
copy_group.add_argument('-P', '--password',
help='the password to log into the remote hosts. It is NOT SECURED to set the '
'password that way, since it stays in the bash history. Password can also be sent '
'on the STDIN.')
known_host_group = parser.add_argument_group('Manage the "known_host" file only')
known_host_group.add_argument('-a', '--add', action='store_true',
help='don\'t copy the SSH keys, but instead, add the given hosts to the '
'"known_hosts" file')
known_host_group.add_argument('-r', '--remove', action='store_true',
help='don\'t copy the SSH keys, but instead, remove the given hosts from the '
'"known_hosts" file')
return parser
def run(self):
# Check dry run
if self.args.dry:
logger.info('Dry run: nothing will be changed.')
# Check the action to perform
if self.args.add or self.args.remove:
# Action on the known_hosts file
# Check that known_hosts file exists
if not os.path.exists(self.args.known_hosts):
with open(self.args.known_hosts, 'w'):
pass
if self.args.add:
self.sshcopyid.add_to_known_hosts(self.hosts, known_hosts=self.args.known_hosts, dry=self.args.dry)
else:
self.sshcopyid.remove_from_known_hosts(self.hosts, known_hosts=self.args.known_hosts, dry=self.args.dry)
else:
# Copy the SSH keys to the hosts
if self.args.clear:
# Clear the hosts from the known_hosts file
self.sshcopyid.remove_from_known_hosts(self.hosts, known_hosts=self.args.known_hosts, dry=self.args.dry)
# Read the public key
if not self.sshcopyid.pub_key_content:
self.sshcopyid.read_pub_key()
try:
self.copy_ssh_keys_to_hosts(self.hosts, known_hosts=self.args.known_hosts, dry=self.args.dry)
except CopySSHKeysError as ex:
logger.error(format_error(format_exception(ex)))
raise
def copy_ssh_keys_to_hosts(self, hosts, known_hosts=DEFAULT_KNOWN_HOSTS, dry=False):
"""
Copy the SSH keys to the given hosts.
:param hosts: the list of `Host` objects to copy the SSH keys to.
:param known_hosts: the `known_hosts` file to store the SSH public keys.
:param dry: perform a dry run.
:raise msshcopyid.errors.CopySSHKeysError:
"""
exceptions = [] # list of `CopySSHKeyError`
for host in hosts:
logger.info('[%s] Copy the SSH public key [%s]...', host.hostname, self.sshcopyid.pub_key)
if not dry:
try:
self.copy_ssh_keys_to_host(host, known_hosts=known_hosts)
except (paramiko.ssh_exception.SSHException, socket.error) as ex:
logger.error(format_error(format_exception(ex)))
logger.debug(traceback.format_exc())
exceptions.append(CopySSHKeyError(host=host, exception=ex))
if exceptions:
raise CopySSHKeysError(exceptions=exceptions)
def copy_ssh_keys_to_host(self, host, known_hosts=DEFAULT_KNOWN_HOSTS):
"""
Copy the SSH keys to the given host.
:param host: the `Host` object to copy the SSH keys to.
:param known_hosts: the `known_hosts` file to store the SSH public keys.
:raise paramiko.ssh_exception.AuthenticationException:
"""
password = host.password or self.sshcopyid.default_password
try:
self.sshcopyid.copy_ssh_keys_to_host(host, password=password, no_add_host=self.args.no_add_host,
known_hosts=known_hosts)
except paramiko.ssh_exception.AuthenticationException:
if password:
# A password was given, and it is wrong
raise
else:
# Ask for password
password = utils.get_password()
self.sshcopyid.default_password = password
# Try to connect again
self.sshcopyid.copy_ssh_keys_to_host(host, password=password, no_add_host=self.args.no_add_host,
known_hosts=known_hosts)
| mit | -8,442,804,895,174,885,000 | 41.63964 | 120 | 0.584196 | false |
oneraghavan/portcache | setup.py | 1 | 2150 | from setuptools import setup
setup(name='portcache',
      version='0.3',
description='A simple cache for port from remote service',
url='https://github.com/oneraghavan/portcache',
author='Raghavan',
author_email='[email protected]',
license='MIT',
packages=['portcache'],
install_requires=[
          'web.py', 'PyYAML', 'requests'
],
zip_safe=False,
entry_points={
'console_scripts': ['portcache=portcache.command_line:main'],
})
print "___________________________________"
print "|@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ |"
print "|@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ |"
print "| Succesfully installed portcache |"
print "|@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ |"
print "|@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ |"
print "|_________________________________|"
print "\nportcache is a cache for remote calls . In microservices world, we have to work with lots of services which are needed to run our service and \n" \
"its a pain if the list of these service list grows big .portcache gives you the ability to point to a remote service instance and also cache \n" \
"the responses for you calls.\n\n" \
"To start : portcache <config yml file> \n\n" \
"The config file requires three params localport , remote , cache_file .\n" \
"localport - The port you want to run your cache service . you will point your dependent app/service to this port \n" \
"remote - The remote url with port that corresponds to the service you would like to cache \n" \
"cache_file - The location of the cache you want to save \n\n" \
"A sample config yml file looks like this \n\n" \
"localport: 9090 \n" \
"remote: http://myremoteserviceurl.com \n" \
"cache_file: \"/data/tmp/merch \n\n" \
"Starting with this config file, starts a server at port 9090.Whenever a request comes to the localhost:9090, it \n" \
"will check if this request has been already cached ,if yes then it will serve from cache file, else it will call \n" \
"the http://myremoteserviceurl.com with the request, cache and return the response"
| mit | 51,269,814,623,588,696 | 50.190476 | 156 | 0.596279 | false |
phil65/KodiDevKit | script.py | 1 | 3245 | # -*- coding: utf8 -*-
# Copyright (C) 2017 - Philipp Temminghoff <[email protected]>
# This program is Free Software see LICENSE file for details
import os
import sys
import codecs
import logging
RESULTS_FILE = "results.txt"
settings = {"kodi_path": "C:/Kodi",
"portable_mode": True,
"language_folders": ["resource.language.en_gb", "English"]}
def check_tags(check_type):
"""
triggers of test of type "check_type", then formats and logs them
"""
errors = INFOS.get_check_listitems(check_type)
for e in errors:
logging.info(e["message"])
path = "/".join(e["file"].split(os.sep)[-2:])
logging.info("%s: line %s\n" % (path, e["line"]))
if __name__ == "__main__":
from libs import utils
from libs.infoprovider import InfoProvider
from libs import chardet
from libs.eol import eol
INFOS = InfoProvider()
open(RESULTS_FILE, 'w').close()
INFOS.load_settings(settings)
INFOS.load_data()
    filehandler = logging.FileHandler(RESULTS_FILE, mode="w")
formatter = logging.Formatter('%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
filehandler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(filehandler)
project_folder = sys.argv[1] if len(sys.argv) >= 2 else input("Enter Path to skin: ")
INFOS.init_addon(project_folder)
if len(sys.argv) < 3:
repo = input('Enter Kodi version (%s): ' % " / ".join([item["name"] for item in INFOS.addon.RELEASES]))
else:
repo = sys.argv[2]
INFOS.check_xml_files()
for path in INFOS.addon.get_xml_files():
if utils.check_bom(path):
logging.info("found BOM. File: " + path)
try:
with codecs.open(path, "rb", encoding='utf-8', errors="strict") as f:
text = f.read()
except Exception:
logging.info("Error when trying to read %s as UTF-8" % path)
with codecs.open(path, "rb", errors="ignore") as f:
rawdata = f.read()
encoding = chardet.detect(rawdata)
logging.info("detected encoding: %s" % encoding["encoding"])
with codecs.open(path, "rb", encoding=encoding["encoding"]) as f:
text = f.read()
result = eol.eol_info_from_path_patterns([project_folder],
recursive=True,
includes=[],
excludes=['.svn', '.git'])
for item in result:
        if item[1] in ('\n', None):
continue
elif item[1] == '\r':
logging.info("MAC Line Endings detected in " + item[0])
else:
logging.info("Windows Line Endings detected in " + item[0])
logging.info("ADDON DEPENDENCY CHECK")
INFOS.check_dependencies()
logging.info("INCLUDE CHECK")
check_tags("include")
logging.info("VARIABLE CHECK")
check_tags("variable")
logging.info("FONT CHECK")
check_tags("font")
logging.info("LABEL CHECK")
check_tags("label")
logging.info("ID CHECK")
check_tags("id")
logging.info("CHECK FOR COMMON MISTAKES")
check_tags("general")
| gpl-3.0 | -1,780,904,815,994,852,400 | 35.460674 | 111 | 0.572265 | false |
Abjad/abjad | tests/test_NumberedPitchClass.py | 1 | 1452 | import typing
import pytest
import abjad
values: typing.List[typing.Tuple] = []
values.extend((x / 2, (x / 2) % 12) for x in range(-48, 49))
values.extend(
[
("bf,", 10),
("c'", 0),
("cs'", 1),
("gff''", 5),
("", 0),
("dss,,", 4),
("fake", ValueError),
(("bf", 2), 10),
(("c", 4), 0),
(("cs", 4), 1),
(("dss", 1), 4),
(("gff", 5), 5),
(abjad.NamedPitch("bs'"), 0),
(abjad.NamedPitch("c"), 0),
(abjad.NamedPitch("cf,"), 11),
(abjad.NamedPitch(), 0),
(abjad.NamedPitchClass("cs'"), 1),
(abjad.NamedPitchClass("c"), 0),
(abjad.NamedPitchClass("cf,"), 11),
(None, 0),
(abjad.NumberedPitch("bs'"), 0),
(abjad.NumberedPitch("c"), 0),
(abjad.NumberedPitch("cf,"), 11),
(abjad.NumberedPitch(), 0),
(abjad.NumberedPitchClass("bs'"), 0),
(abjad.NumberedPitchClass("c"), 0),
(abjad.NumberedPitchClass("cf,"), 11),
]
)
@pytest.mark.parametrize("input_, expected_semitones", values)
def test_init(input_, expected_semitones):
if isinstance(expected_semitones, type) and issubclass(
expected_semitones, Exception
):
with pytest.raises(expected_semitones):
abjad.NumberedPitchClass(input_)
return
instance = abjad.NumberedPitchClass(input_)
assert float(instance) == expected_semitones
| gpl-3.0 | -7,251,316,322,670,107,000 | 27.470588 | 62 | 0.522039 | false |
seecr/meresco-solr | meresco/solr/fields2solrdoc.py | 1 | 2921 | ## begin license ##
#
# "Meresco Solr" is a set of components and tools
# to integrate Solr into "Meresco."
#
# Copyright (C) 2011-2013 Seecr (Seek You Too B.V.) http://seecr.nl
# Copyright (C) 2012 SURF http://www.surf.nl
# Copyright (C) 2012-2013 Stichting Kennisnet http://www.kennisnet.nl
#
# This file is part of "Meresco Solr"
#
# "Meresco Solr" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Solr" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Solr"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from meresco.core import Observable
from xml.sax.saxutils import escape as escapeXml
from itertools import chain
class Fields2SolrDoc(Observable):
def __init__(self, transactionName, partname="solr", singularValueFields=None, isSingularValueField=None):
Observable.__init__(self)
self._transactionName = transactionName
self._partname = partname
if singularValueFields and isSingularValueField:
raise ValueError("Use either 'singularValueFields' or 'isSingularValueField'")
self._isSingularValueField = isSingularValueField
if singularValueFields:
singularValueFields = set(singularValueFields)
self._isSingularValueField = lambda name: name in singularValueFields
def begin(self, name):
if name != self._transactionName:
return
tx = self.ctx.tx
tx.join(self)
def addField(self, name, value):
tx = self.ctx.tx
valueList = tx.objectScope(self).setdefault(name, [])
        if self._isSingularValueField is not None:
if len(valueList) == 1 and self._isSingularValueField(name):
return
valueList.append(value)
def commit(self, id):
tx = self.ctx.tx
fields = tx.objectScope(self)
if not fields:
return
recordIdentifier = tx.locals["id"]
specialFields = [
('__id__', recordIdentifier),
]
def fieldStatement(key, value):
return '<field name="%s">%s</field>' % (escapeXml(key), escapeXml(value))
allFields = ((k, v) for k, vs in fields.items() for v in vs)
xml = "<doc xmlns=''>%s</doc>" % ''.join(fieldStatement(*args) for args in chain(iter(specialFields), allFields))
yield self.all.add(identifier=recordIdentifier, partname=self._partname, data=xml)
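# A document produced by commit() looks like this (identifier and field values
# are hypothetical):
# <doc xmlns=''><field name="__id__">record:1</field><field name="title">A title</field></doc>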
| gpl-2.0 | 5,766,375,609,882,647,000 | 40.140845 | 121 | 0.6734 | false |
lasote/conan | conans/client/cmd/new.py | 1 | 9249 | import re
from conans.errors import ConanException
from conans.model.ref import ConanFileReference
from conans.client.cmd.new_ci import ci_get_files
conanfile = """from conans import ConanFile, CMake, tools
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
license = "<Put the package license here>"
url = "<Package recipe repository url here, for issues about the package>"
description = "<Description of {package_name} here>"
settings = "os", "compiler", "build_type", "arch"
options = {{"shared": [True, False]}}
default_options = "shared=False"
generators = "cmake"
def source(self):
self.run("git clone https://github.com/memsharded/hello.git")
self.run("cd hello && git checkout static_shared")
# This small hack might be useful to guarantee proper /MT /MD linkage in MSVC
# if the packaged project doesn't have variables to set it properly
tools.replace_in_file("hello/CMakeLists.txt", "PROJECT(MyHello)", '''PROJECT(MyHello)
include(${{CMAKE_BINARY_DIR}}/conanbuildinfo.cmake)
conan_basic_setup()''')
def build(self):
cmake = CMake(self)
cmake.configure(source_dir="%s/hello" % self.source_folder)
cmake.build()
# Explicit way:
# self.run('cmake %s/hello %s' % (self.source_folder, cmake.command_line))
# self.run("cmake --build . %s" % cmake.build_config)
def package(self):
self.copy("*.h", dst="include", src="hello")
self.copy("*hello.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.dylib", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["hello"]
"""
conanfile_bare = """from conans import ConanFile
from conans import tools
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
settings = "os", "compiler", "build_type", "arch"
description = "<Description of {package_name} here>"
url = "None"
license = "None"
def package(self):
self.copy("*")
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
"""
conanfile_sources = """from conans import ConanFile, CMake
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
license = "<Put the package license here>"
url = "<Package recipe repository url here, for issues about the package>"
description = "<Description of {package_name} here>"
settings = "os", "compiler", "build_type", "arch"
options = {{"shared": [True, False]}}
default_options = "shared=False"
generators = "cmake"
exports_sources = "src/*"
def build(self):
cmake = CMake(self)
cmake.configure(source_dir="%s/src" % self.source_folder)
cmake.build()
# Explicit way:
# self.run('cmake %s/src %s' % (self.source_folder, cmake.command_line))
# self.run("cmake --build . %s" % cmake.build_config)
def package(self):
self.copy("*.h", dst="include", src="src")
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.dylib*", dst="lib", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["hello"]
"""
conanfile_header = """from conans import ConanFile, tools
import os
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
license = "<Put the package license here>"
url = "<Package recipe repository url here, for issues about the package>"
description = "<Description of {package_name} here>"
# No settings/options are necessary, this is header only
def source(self):
'''retrieval of the source code here. Remember you can also put the code in the folder and
use exports instead of retrieving it with this source() method
'''
#self.run("git clone ...") or
#tools.download("url", "file.zip")
#tools.unzip("file.zip" )
def package(self):
self.copy("*.h", "include")
"""
test_conanfile = """from conans import ConanFile, CMake
import os
class {package_name}TestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
def build(self):
cmake = CMake(self)
# Current dir is "test_package/build/<build_id>" and CMakeLists.txt is in "test_package"
cmake.configure(source_dir=self.conanfile_directory, build_dir="./")
cmake.build()
def imports(self):
self.copy("*.dll", dst="bin", src="bin")
self.copy("*.dylib*", dst="bin", src="lib")
self.copy('*.so*', dst='bin', src='lib')
def test(self):
os.chdir("bin")
self.run(".%sexample" % os.sep)
"""
test_cmake = """project(PackageTest CXX)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
add_executable(example example.cpp)
target_link_libraries(example ${CONAN_LIBS})
# CTest is a testing tool that can be used to test your project.
# enable_testing()
# add_test(NAME example
# WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/bin
# COMMAND example)
"""
test_main = """#include <iostream>
#include "hello.h"
int main() {
hello();
}
"""
hello_h = """#pragma once
#ifdef WIN32
#define HELLO_EXPORT __declspec(dllexport)
#else
#define HELLO_EXPORT
#endif
HELLO_EXPORT void hello();
"""
hello_cpp = """#include <iostream>
#include "hello.h"
void hello(){
#ifdef NDEBUG
std::cout << "Hello World Release!" <<std::endl;
#else
std::cout << "Hello World Debug!" <<std::endl;
#endif
}
"""
cmake = """project(MyHello CXX)
cmake_minimum_required(VERSION 2.8)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
add_library(hello hello.cpp)
"""
gitignore_template = """
*.pyc
test_package/build
"""
def cmd_new(ref, header=False, pure_c=False, test=False, exports_sources=False, bare=False,
visual_versions=None, linux_gcc_versions=None, linux_clang_versions=None, osx_clang_versions=None,
shared=None, upload_url=None, gitignore=None, gitlab_gcc_versions=None, gitlab_clang_versions=None):
try:
tokens = ref.split("@")
name, version = tokens[0].split("/")
if len(tokens) == 2:
user, channel = tokens[1].split("/")
else:
user, channel = "user", "channel"
pattern = re.compile('[\W_]+')
package_name = pattern.sub('', name).capitalize()
except ValueError:
raise ConanException("Bad parameter, please use full package name,"
"e.g: MyLib/1.2.3@user/testing")
# Validate it is a valid reference
ConanFileReference(name, version, user, channel)
if header and exports_sources:
raise ConanException("'header' and 'sources' are incompatible options")
if pure_c and (header or exports_sources):
raise ConanException("'pure_c' is incompatible with 'header' and 'sources'")
if bare and (header or exports_sources):
raise ConanException("'bare' is incompatible with 'header' and 'sources'")
if header:
files = {"conanfile.py": conanfile_header.format(name=name, version=version,
package_name=package_name)}
elif exports_sources:
files = {"conanfile.py": conanfile_sources.format(name=name, version=version,
package_name=package_name),
"src/hello.cpp": hello_cpp,
"src/hello.h": hello_h,
"src/CMakeLists.txt": cmake}
elif bare:
files = {"conanfile.py": conanfile_bare.format(name=name, version=version,
package_name=package_name)}
else:
files = {"conanfile.py": conanfile.format(name=name, version=version,
package_name=package_name)}
if pure_c:
config = "\n def configure(self):\n del self.settings.compiler.libcxx"
files["conanfile.py"] = files["conanfile.py"] + config
if test:
files["test_package/conanfile.py"] = test_conanfile.format(name=name, version=version,
user=user, channel=channel,
package_name=package_name)
files["test_package/CMakeLists.txt"] = test_cmake
files["test_package/example.cpp"] = test_main
if gitignore:
files[".gitignore"] = gitignore_template
files.update(ci_get_files(name, version, user, channel, visual_versions,
linux_gcc_versions, linux_clang_versions,
osx_clang_versions, shared, upload_url,
gitlab_gcc_versions, gitlab_clang_versions))
return files
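# Usage sketch: cmd_new('MyLib/1.2.3@user/testing', test=True, gitignore=True)
# returns a dict mapping relative file paths to generated file contents.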
| mit | -2,767,009,680,822,622,700 | 32.51087 | 112 | 0.601362 | false |
grschafer/BejeweledBot | train/agent.py | 1 | 2406 | from pybrain.rl.agents.logging import LoggingAgent
from pybrain.rl.agents.learning import LearningAgent
from scipy import where
from random import choice
class BejeweledAgent(LearningAgent):
def getAction(self):
# get best action for every state observation
# overlay all action values for every state observation, pick best
LoggingAgent.getAction(self)
# for each color, get best action, then pick highest-value action
# among those actions
actions = []
values = []
# TODO: why are same values printed many times in a row here?
#print '========== in agent =========='
#print 'states:', [[i] for i in self.lastobs.flatten()]
for state in self.lastobs:
#print 'state:', state
actions.append(self.module.activate(state))
values.append(self.module.lastMaxActionValue)
#self.module.printState(state)
#print ' best:', actions[-1], 'value:', values[-1]
actionIdx = where(values == max(values))[0]
ch = choice(actionIdx)
self.lastaction = actions[ch]
self.bestState = self.lastobs[ch]
#print 'assigning reward to state', self.bestState
#print 'chosen action:', self.lastaction, 'value:', max(values)
# add a chance to pick a random other action
if self.learning:
self.lastaction = self.learner.explore(self.lastobs, self.lastaction)
#print 'after explorer:', self.lastaction
#print '============= end ============'
return self.lastaction
def giveReward(self, r):
"""Step 3: store observation, action and reward in the history dataset. """
# step 3: assume that state and action have been set
        assert self.lastobs is not None
        assert self.lastaction is not None
        assert self.lastreward is None
self.lastreward = r
# store state, action and reward in dataset if logging is enabled
if self.logging:
# TODO: assigning reward to only best estimate for now
#for state in self.lastobs:
# TODO: assign reward to state correctly? NO because we're in
# the learner -- learning will be slower though, because of
# false positives for every obs
self.history.addSample(self.bestState, self.lastaction, self.lastreward)
| mit | 1,512,328,622,695,829,200 | 41.210526 | 88 | 0.618038 | false |
annahs/atmos_research | LEO_calc_coating_from_meas_scat_amp_and_write_to_db.py | 1 | 3857 | import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
from pprint import pprint
import sqlite3
import calendar
from datetime import datetime
#id INTEGER PRIMARY KEY AUTOINCREMENT,
#sp2b_file TEXT,
#file_index INT,
#instr TEXT,
#instr_locn TEXT,
#particle_type TEXT,
#particle_dia FLOAT,
#unix_ts_utc FLOAT,
#actual_scat_amp FLOAT,
#actual_peak_pos INT,
#FF_scat_amp FLOAT,
#FF_peak_pos INT,
#FF_gauss_width FLOAT,
#zeroX_to_peak FLOAT,
#LF_scat_amp FLOAT,
#incand_amp FLOAT,
#lag_time_fit_to_incand FLOAT,
#LF_baseline_pct_diff FLOAT,
#rBC_mass_fg FLOAT,
#coat_thickness_nm FLOAT,
#coat_thickness_from_actual_scat_amp FLOAT
#UNIQUE (sp2b_file, file_index, instr)
#connect to database
conn = sqlite3.connect('C:/projects/dbs/SP2_data.db')
c = conn.cursor()
c2 = conn.cursor()
instrument = 'UBCSP2'
instrument_locn = 'WHI'
type_particle = 'incand'
start_date = '20110105'
end_date = '20120601'
lookup_file = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/lookup_tables/coating_lookup_table_WHI_2012_UBCSP2-nc(2p26,1p26).lupckl'
rBC_density = 1.8
incand_sat = 3750
lookup = open(lookup_file, 'r')
lookup_table = pickle.load(lookup)
lookup.close()
c.execute('''SELECT * FROM SP2_coating_analysis''')
names = [description[0] for description in c.description]
pprint(names)
begin_data = calendar.timegm(datetime.strptime(start_date,'%Y%m%d').timetuple())
end_data = calendar.timegm(datetime.strptime(end_date,'%Y%m%d').timetuple())
def get_rBC_mass(incand_pk_ht, year):
if year == 2012:
rBC_mass = 0.003043*incand_pk_ht + 0.24826 #AD corrected linear calibration for UBCSP2 at WHI 2012
if year == 2010:
rBC_mass = 0.01081*incand_pk_ht - 0.32619 #AD corrected linear calibration for ECSP2 at WHI 2010
return rBC_mass
def get_coating_thickness(BC_VED,scat_amp,coating_lookup_table):
#get the coating thicknesses from the lookup table which is a dictionary of dictionaries, the 1st keyed with BC core size and the second being coating thicknesses keyed with calc scat amps
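	#table shape, e.g. (values are hypothetical):
	#  {160.0: {500.0: 20.0, 800.0: 40.0}, 180.0: {...}}
	#outer keys: core VED in nm; inner keys: calculated scattering amplitude -> coating thickness in nm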
core_diameters = sorted(coating_lookup_table.keys())
prev_diameter = core_diameters[0]
for core_diameter in core_diameters:
if core_diameter > BC_VED:
core_dia_to_use = prev_diameter
break
prev_diameter = core_diameter
#now get the coating thickness for the scat_amp this is the coating thickness based on the raw scattering max
scattering_amps = sorted(coating_lookup_table[core_dia_to_use].keys())
prev_amp = scattering_amps[0]
for scattering_amp in scattering_amps:
if scat_amp < scattering_amp:
scat_amp_to_use = prev_amp
break
prev_amp = scattering_amp
scat_coating_thickness = coating_lookup_table[core_dia_to_use].get(scat_amp_to_use, np.nan) # returns value for the key, or none
return scat_coating_thickness
LOG_EVERY_N = 10000
i = 0
for row in c.execute('''SELECT incand_amp, LF_scat_amp, unix_ts_utc, sp2b_file, file_index, instr FROM SP2_coating_analysis
WHERE instr=? and instr_locn=? and particle_type=? and incand_amp<? and unix_ts_utc>=? and unix_ts_utc<?''',
(instrument,instrument_locn,type_particle,incand_sat,begin_data,end_data)):
incand_amp = row[0]
LF_amp = row[1]
event_time = datetime.utcfromtimestamp(row[2])
file = row[3]
index = row[4]
instrt = row[5]
rBC_mass = get_rBC_mass(incand_amp, event_time.year)
if rBC_mass >= 0.25:
rBC_VED = (((rBC_mass/(10**15*rBC_density))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
coat_th = get_coating_thickness(rBC_VED,LF_amp,lookup_table)
else:
rBC_VED = None
coat_th = None
c2.execute('''UPDATE SP2_coating_analysis SET coat_thickness_from_actual_scat_amp=? WHERE sp2b_file=? and file_index=? and instr=?''', (coat_th, file,index,instrt))
i+=1
if (i % LOG_EVERY_N) == 0:
print 'record: ', i
conn.commit()
conn.close()
| mit | -3,604,193,547,735,604,000 | 29.611111 | 207 | 0.721027 | false |
LPgenerator/django-cacheops | cacheops/redis.py | 1 | 3494 | from __future__ import absolute_import
import warnings
from contextlib import contextmanager
import six
from funcy import decorator, identity, memoize, LazyObject
import redis
from redis.sentinel import Sentinel
from .conf import settings
if settings.CACHEOPS_DEGRADE_ON_FAILURE:
@decorator
def handle_connection_failure(call):
try:
return call()
except redis.ConnectionError as e:
warnings.warn("The cacheops cache is unreachable! Error: %s" % e, RuntimeWarning)
except redis.TimeoutError as e:
warnings.warn("The cacheops cache timed out! Error: %s" % e, RuntimeWarning)
else:
handle_connection_failure = identity
LOCK_TIMEOUT = 60
class CacheopsRedis(redis.StrictRedis):
get = handle_connection_failure(redis.StrictRedis.get)
@contextmanager
def getting(self, key, lock=False):
if not lock:
yield self.get(key)
else:
locked = False
try:
data = self._get_or_lock(key)
locked = data is None
yield data
finally:
if locked:
self._release_lock(key)
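    # Usage sketch:
    #   with redis_client.getting(key, lock=True) as data:
    #       if data is None:  # we hold the lock; compute and SET the value, or let the lock expire
    #           ...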
@handle_connection_failure
def _get_or_lock(self, key):
self._lock = getattr(self, '_lock', self.register_script("""
local locked = redis.call('set', KEYS[1], 'LOCK', 'nx', 'ex', ARGV[1])
if locked then
redis.call('del', KEYS[2])
end
return locked
"""))
signal_key = key + ':signal'
while True:
data = self.get(key)
if data is None:
if self._lock(keys=[key, signal_key], args=[LOCK_TIMEOUT]):
return None
elif data != b'LOCK':
return data
# No data and not locked, wait
self.brpoplpush(signal_key, signal_key, timeout=LOCK_TIMEOUT)
@handle_connection_failure
def _release_lock(self, key):
self._unlock = getattr(self, '_unlock', self.register_script("""
if redis.call('get', KEYS[1]) == 'LOCK' then
redis.call('del', KEYS[1])
end
redis.call('lpush', KEYS[2], 1)
redis.call('expire', KEYS[2], 1)
"""))
signal_key = key + ':signal'
self._unlock(keys=[key, signal_key])
@LazyObject
def redis_client():
if settings.CACHEOPS_SENTINEL and isinstance(settings.CACHEOPS_SENTINEL, dict):
sentinel = Sentinel(
settings.CACHEOPS_SENTINEL['location'],
socket_timeout=settings.CACHEOPS_SENTINEL.get('socket_timeout')
)
return sentinel.master_for(
settings.CACHEOPS_SENTINEL['service_name'],
redis_class=CacheopsRedis,
db=settings.CACHEOPS_SENTINEL.get('db') or 0
)
# Allow client connection settings to be specified by a URL.
if isinstance(settings.CACHEOPS_REDIS, six.string_types):
return CacheopsRedis.from_url(settings.CACHEOPS_REDIS)
else:
return CacheopsRedis(**settings.CACHEOPS_REDIS)
### Lua script loader
import re
import os.path
STRIP_RE = re.compile(r'TOSTRIP.*/TOSTRIP', re.S)
@memoize
def load_script(name, strip=False):
filename = os.path.join(os.path.dirname(__file__), 'lua/%s.lua' % name)
with open(filename) as f:
code = f.read()
if strip:
code = STRIP_RE.sub('', code)
return redis_client.register_script(code)
| bsd-3-clause | -5,793,251,046,439,883,000 | 29.382609 | 93 | 0.591013 | false |
rhhayward/podcast_generator | podcast_generator/PodcastCreator.py | 1 | 4935 | import urllib.request as urllib
from lxml import etree
import os
from os.path import basename
from urllib.parse import urlparse
### PodcastCreator is the class that
### takes a set of downloaders,
### sets their settings, takes
### their downloaded files and
### makes them into an rss file
### for use with podcast
### aggregators.
class PodcastCreator:
""" takes a list of files, creates an output xml file for use with podcatcher """
def __init__(self):
self.files = []
self.outputFile = ""
self.title = ""
self.link = ""
self.enclosureBaseUrl = ""
self.db = None
self.destFolder = None
self.maxCount = None
self.downloaders = []
os.chdir("/tmp")
### addDownloader takes a PodcastDownloader
### object, sets its dest folder and
### db, and adds it to the list of
### available downloaders.
def addDownloader(self, Downloader):
if not self.destFolder is None:
Downloader.setDestFolder(self.destFolder)
if not self.db is None:
Downloader.useDb(self.db)
self.downloaders.append(Downloader)
### getFiles iterates through all
### the available downloaders,
### set their maxCount to our
### maxCount, and decrement our
### maxCount by however many
### the downloader got.
def getFiles(self):
downloadedCount=0
for downloader in self.downloaders:
if(self.maxCount is not None and downloader.maxCount is None):
downloader.setMaxCount(self.maxCount)
count = downloader.getFiles()
downloadedCount += count
if(self.maxCount is not None):
self.maxCount -= count
return downloadedCount
### setMaxCount is an accessor function
### for the maxCount which regulates
### the number of files to download.
def setMaxCount(self, count):
self.maxCount = count;
### setDestFolder takes a destionation
### folder to move files to after
### they've been downloaded.
def setDestFolder(self, destFolder):
self.destFolder = destFolder
### useDb is an accessor function
### for the podcast database object.
def useDb(self, db):
self.db = db
### setLink is used in the rss file for
### the rss link tag.
def setLink(self, link):
self.link = link
### setEnclosureBaseUrl is where the
### files will be avilable for http
### download.
def setEnclosureBaseUrl(self, enclosureBaseUrl):
self.enclosureBaseUrl = enclosureBaseUrl
### setOutputXmlFile is the location
### where the rss file will be written.
def setOutputXmlFile(self, updatedOutputFile):
self.outputFile = updatedOutputFile
### setTitle sets the title of the rss
### file.
def setTitle(self, title):
self.title = title
### writeOutputFile generates the output
### xml file.
def writeOutputFile(self):
self.podcasts = self.db.getPodcastsFromDb()
fh = open(self.outputFile, "wb")
rss = etree.Element("rss")
channel = etree.SubElement(rss, "channel")
etree.SubElement(channel, "title").text = self.title
etree.SubElement(channel, "description").text = self.title
etree.SubElement(channel, "link").text = self.link
etree.SubElement(channel, "language").text = "en-us"
etree.SubElement(channel, "copyright").text = "Copyright 2999"
for podcast in self.podcasts:
file = podcast.getFileName()
pubDate = podcast.getDate()
item = etree.SubElement(channel, "item")
etree.SubElement(item, "enclosure").set("url", self.enclosureBaseUrl + urllib.quote(file))
etree.SubElement(item, "category").text = "Podcasts"
etree.SubElement(item, "pubDate").text = pubDate
etree.SubElement(item, "guid").text = self.enclosureBaseUrl + urllib.quote(file)
titleAdded = False
for field in podcast.getAdditionalFields():
if field['fieldName'] == "title":
titleAdded = True
etree.SubElement(item, field['fieldName']).text = field['fieldValue']
if titleAdded == False:
etree.SubElement(item, "title").text = file
fh.write(etree.tostring(rss, encoding='UTF-8', xml_declaration=True, pretty_print=True))
fh.close()
### cleanupFiles takes a number of days before
### today to remove files from the fs and db
### Returns count of files removeD
def cleanupFiles(self, count):
files = self.db.cleanupFiles(count)
for file in files:
try:
os.unlink(self.destFolder+file)
            except Exception:
                print("there was a problem removing file " + self.destFolder + file)
| gpl-3.0 | 4,101,729,319,851,635,700 | 34.25 | 102 | 0.61459 | false |
RedHatQE/cfme_tests | cfme/tests/automate/custom_button/test_service_objects.py | 1 | 25666 | import fauxfactory
import pytest
from widgetastic_patternfly import Dropdown
from cfme.services.myservice import MyService
from cfme.tests.automate.custom_button import CustomButtonSSUIDropdwon
from cfme.tests.automate.custom_button import log_request_check
from cfme.tests.automate.custom_button import TextInputDialogSSUIView
from cfme.tests.automate.custom_button import TextInputDialogView
from cfme.utils.appliance import ViaREST
from cfme.utils.appliance import ViaSSUI
from cfme.utils.appliance import ViaUI
from cfme.utils.appliance.implementations.ssui import navigate_to as ssui_nav
from cfme.utils.appliance.implementations.ui import navigate_to as ui_nav
from cfme.utils.blockers import BZ
from cfme.utils.wait import TimedOutError
from cfme.utils.wait import wait_for
pytestmark = [pytest.mark.tier(2)]
OBJECTS = ["SERVICE", "GENERIC"]
DISPLAY_NAV = {
"Single entity": ["Details"],
"List": ["All"],
"Single and list": ["All", "Details"],
}
SUBMIT = ["Submit all", "One by one"]
TEXT_DISPLAY = {
"group": {"group_display": False, "btn_display": True},
"button": {"group_display": True, "btn_display": False},
}
@pytest.fixture(scope="module")
def service(appliance):
service_name = "service_{}".format(fauxfactory.gen_numeric_string(3))
service = appliance.rest_api.collections.services.action.create(
name=service_name, display=True
)[0]
yield service
service.action.delete()
@pytest.fixture(scope="module")
def definition(appliance):
with appliance.context.use(ViaREST):
definition = appliance.collections.generic_object_definitions.create(
name="generic_class_{}".format(fauxfactory.gen_numeric_string(3)),
description="Generic Object Definition",
attributes={"addr01": "string"},
associations={"services": "Service"},
methods=["add_vm", "remove_vm"],
)
yield definition
if definition.exists:
definition.delete()
@pytest.fixture(scope="module")
def objects(appliance, definition, service):
with appliance.context.use(ViaREST):
instance = appliance.collections.generic_objects.create(
name="generic_instance_{}".format(fauxfactory.gen_numeric_string(3)),
definition=definition,
attributes={"addr01": "Test Address"},
associations={"services": [service]},
)
service.action.add_resource(
resource=appliance.rest_api.collections.generic_objects.find_by(name=instance.name)[
0
]._ref_repr()
)
instance.my_service = MyService(appliance, name=service.name)
obj_dest = {
"GENERIC": {
"All": (instance.my_service, "GenericObjectInstance"),
"Details": (instance, "MyServiceDetails"),
},
"SERVICE": {
"All": (instance.my_service, "All"),
"Details": (instance.my_service, "Details"),
},
}
yield obj_dest
if instance.exists:
instance.delete()
@pytest.fixture(params=OBJECTS, ids=[obj.capitalize() for obj in OBJECTS], scope="module")
def button_group(appliance, request):
with appliance.context.use(ViaUI):
collection = appliance.collections.button_groups
button_gp = collection.create(
text=fauxfactory.gen_alphanumeric(),
hover=fauxfactory.gen_alphanumeric(),
type=getattr(collection, request.param),
)
yield button_gp, request.param
button_gp.delete_if_exists()
@pytest.fixture(params=TEXT_DISPLAY, scope="module")
def serv_button_group(appliance, request):
with appliance.context.use(ViaUI):
collection = appliance.collections.button_groups
button_gp = collection.create(
text="group_{}".format(fauxfactory.gen_numeric_string(3)),
hover="hover_{}".format(fauxfactory.gen_alphanumeric(3)),
display=TEXT_DISPLAY[request.param]["group_display"],
type=getattr(collection, "SERVICE"),
)
button = button_gp.buttons.create(
text="btn_{}".format(fauxfactory.gen_numeric_string(3)),
hover="hover_{}".format(fauxfactory.gen_alphanumeric(3)),
display=TEXT_DISPLAY[request.param]["btn_display"],
display_for="Single and list",
system="Request",
request="InspectMe",
)
yield button, button_gp
button_gp.delete_if_exists()
button.delete_if_exists()
@pytest.mark.tier(1)
@pytest.mark.parametrize("context", [ViaUI, ViaSSUI])
@pytest.mark.parametrize(
"display", DISPLAY_NAV.keys(), ids=[item.replace(" ", "_") for item in DISPLAY_NAV.keys()]
)
@pytest.mark.uncollectif(
lambda context, button_group: context == ViaSSUI and "GENERIC" in button_group,
reason="Generic object custom button not supported by SSUI",
)
@pytest.mark.meta(
blockers=[
BZ(
1650066,
unblock=lambda display, context: not (
context is ViaSSUI and display in ["List", "Single and list"]
),
)
]
)
def test_custom_button_display_service_obj(
request, appliance, context, display, objects, button_group
):
""" Test custom button display on a targeted page
Polarion:
assignee: ndhandre
initialEstimate: 1/4h
caseimportance: critical
caseposneg: positive
testtype: functional
startsin: 5.8
casecomponent: CustomButton
tags: custom_button
testSteps:
1. Create custom button group with the Object type
2. Create a custom button with specific display
3. Navigate to object type page as per display selected [For service SSUI]
4. Single entity: Details page of the entity
5. List: All page of the entity
6. Single and list: Both All and Details page of the entity
7. Check for button group and button
Bugzilla:
1650066
"""
group, obj_type = button_group
with appliance.context.use(ViaUI):
button = group.buttons.create(
text=fauxfactory.gen_alphanumeric(),
hover=fauxfactory.gen_alphanumeric(),
display_for=display,
system="Request",
request="InspectMe",
)
request.addfinalizer(button.delete_if_exists)
with appliance.context.use(context):
navigate_to = ssui_nav if context is ViaSSUI else ui_nav
for destination in DISPLAY_NAV[display]:
obj = objects[obj_type][destination][0]
dest_name = objects[obj_type][destination][1]
view = navigate_to(obj, dest_name)
custom_button_group = Dropdown(view, group.text)
assert custom_button_group.is_displayed
assert custom_button_group.has_item(button.text)
@pytest.mark.parametrize("context", [ViaUI, ViaSSUI])
@pytest.mark.parametrize("submit", SUBMIT, ids=[item.replace(" ", "_") for item in SUBMIT])
@pytest.mark.uncollectif(
lambda context, button_group: context == ViaSSUI and "GENERIC" in button_group,
reason="Generic object custom button not supported by SSUI",
)
def test_custom_button_automate_service_obj(
request, appliance, context, submit, objects, button_group
):
""" Test custom button for automate and requests count as per submit
Polarion:
assignee: ndhandre
initialEstimate: 1/4h
caseimportance: high
caseposneg: positive
testtype: functional
startsin: 5.9
casecomponent: CustomButton
tags: custom_button
testSteps:
1. Create custom button group with the Object type
2. Create a custom button with specific submit option and Single and list display
3. Navigate to object type pages (All and Details)
4. Check for button group and button
5. Select/execute button from group dropdown for selected entities
6. Check for the proper flash message related to button execution
7. Check automation log requests. Submitted as per selected submit option or not.
8. Submit all: single request for all entities execution
9. One by one: separate requests for all entities execution
Bugzilla:
1650066
"""
group, obj_type = button_group
with appliance.context.use(ViaUI):
button = group.buttons.create(
text=fauxfactory.gen_alphanumeric(),
hover=fauxfactory.gen_alphanumeric(),
display_for="Single and list",
submit=submit,
system="Request",
request="InspectMe",
)
request.addfinalizer(button.delete_if_exists)
with appliance.context.use(context):
navigate_to = ssui_nav if context is ViaSSUI else ui_nav
# BZ-1650066: no custom button on All page
destinations = (
["Details"]
if context == ViaSSUI and BZ(1650066).blocks
else ["All", "Details"]
)
for destination in destinations:
obj = objects[obj_type][destination][0]
dest_name = objects[obj_type][destination][1]
view = navigate_to(obj, dest_name)
custom_button_group = Dropdown(view, group.text)
assert custom_button_group.has_item(button.text)
# Entity count depends on the destination for `All` available entities and
# `Details` means a single entity.
if destination == "All":
try:
paginator = view.paginator
except AttributeError:
paginator = view.entities.paginator
entity_count = min(paginator.items_amount, paginator.items_per_page)
view.entities.paginator.check_all()
else:
entity_count = 1
# Clear the automation log
assert appliance.ssh_client.run_command(
'echo -n "" > /var/www/miq/vmdb/log/automation.log'
)
custom_button_group.item_select(button.text)
# SSUI not support flash messages
if context is ViaUI:
diff = "executed" if appliance.version < "5.10" else "launched"
view.flash.assert_message('"{btn}" was {diff}'.format(btn=button.text, diff=diff))
# Submit all: single request for all entity execution
# One by one: separate requests for all entity execution
expected_count = 1 if submit == "Submit all" else entity_count
try:
wait_for(
log_request_check,
[appliance, expected_count],
timeout=600,
message="Check for expected request count",
delay=20,
)
except TimedOutError:
assert False, "Expected {count} requests not found in automation log".format(
count=str(expected_count)
)
@pytest.mark.meta(
blockers=[BZ(1659452, unblock=lambda serv_button_group: "group" not in serv_button_group)]
)
@pytest.mark.parametrize("context", [ViaUI, ViaSSUI])
def test_custom_button_text_display(appliance, context, serv_button_group, service):
""" Test custom button text display on option
Polarion:
assignee: ndhandre
initialEstimate: 1/6h
caseimportance: medium
caseposneg: positive
testtype: functional
startsin: 5.9
casecomponent: CustomButton
tags: custom_button
testSteps:
1. Appliance with Service
2. Create custom button `Group` or `Button` without display option
3. Check Group/Button text display or not on UI and SSUI.
Bugzilla:
1650066
1659452
"""
my_service = MyService(appliance, name=service.name)
button, group = serv_button_group
with appliance.context.use(context):
navigate_to = ssui_nav if context is ViaSSUI else ui_nav
destinations = (
["Details"]
if (BZ(1650066).blocks and context is ViaSSUI)
else ["All", "Details"]
)
for destination in destinations:
view = navigate_to(my_service, destination)
custom_button_group = Dropdown(view, group.text)
if group.display is True:
assert "" in custom_button_group.items
else:
assert custom_button_group.read() == ""
@pytest.fixture(params=["enablement", "visibility"], scope="module")
def vis_enb_button(request, appliance, button_group):
"""Create custom button with enablement/visibility expression"""
group, _ = button_group
exp = {request.param: {"tag": "My Company Tags : Department", "value": "Engineering"}}
with appliance.context.use(ViaUI):
button = group.buttons.create(
text=fauxfactory.gen_alphanumeric(),
hover=fauxfactory.gen_alphanumeric(),
display_for="Single entity",
system="Request",
request="InspectMe",
**exp
)
yield button, request.param
button.delete_if_exists()
@pytest.mark.tier(0)
@pytest.mark.parametrize("context", [ViaUI, ViaSSUI])
@pytest.mark.uncollectif(
lambda context, button_group: "GENERIC" in button_group,
reason="Generic object custom button not supported by SSUI",
)
def test_custom_button_expression_service_obj(
appliance, context, objects, button_group, vis_enb_button
):
""" Test custom button as per expression enablement/visibility.
Polarion:
assignee: ndhandre
initialEstimate: 1/4h
caseimportance: medium
caseposneg: positive
testtype: functional
casecomponent: CustomButton
startsin: 5.9
testSteps:
1. Create custom button group with the Object type
2. Create a custom button with expression (Tag)
a. Enablement Expression
b. Visibility Expression
3. Navigate to object Detail page
4. Check: button should not enable/visible without tag
5. Check: button should enable/visible with tag
Bugzilla:
1509959, 1513498
"""
# ToDo: Add support for Generic Object by adding tagging ability from All page.
group, obj_type = button_group
button, expression = vis_enb_button
obj = objects[obj_type]["Details"][0]
dest_name = objects[obj_type]["Details"][1]
navigate_to = ssui_nav if context is ViaSSUI else ui_nav
tag_cat = appliance.collections.categories.instantiate(
name="department", display_name="Department"
)
tag = tag_cat.collections.tags.instantiate(name="engineering", display_name="Engineering")
# Check without tag
with appliance.context.use(ViaUI):
if tag.display_name in [item.display_name for item in obj.get_tags()]:
obj.remove_tag(tag)
with appliance.context.use(context):
view = navigate_to(obj, dest_name, wait_for_view=15)
custom_button_group = (
CustomButtonSSUIDropdwon(view, group.text)
if context is ViaSSUI
else Dropdown(view, group.text)
)
if expression == "enablement":
            # Note: SSUI still follows the 5.9 enablement behaviour. In the latest
            # version, if a dropdown has a single disabled button, the dropdown
            # itself is disabled.
if appliance.version < "5.10" or (context is ViaSSUI):
assert not custom_button_group.item_enabled(button.text)
else:
assert not custom_button_group.is_enabled
elif expression == "visibility":
assert not custom_button_group.is_displayed
# Check with tag
with appliance.context.use(ViaUI):
if tag.display_name not in [item.display_name for item in obj.get_tags()]:
obj.add_tag(tag)
with appliance.context.use(context):
view = navigate_to(obj, dest_name)
custom_button_group = (
CustomButtonSSUIDropdwon(view, group.text)
if context is ViaSSUI
else Dropdown(view, group.text)
)
if expression == "enablement":
assert custom_button_group.item_enabled(button.text)
elif expression == "visibility":
assert button.text in custom_button_group.items
@pytest.mark.manual
@pytest.mark.tier(2)
@pytest.mark.parametrize("context", [ViaSSUI])
def test_custom_button_on_vm_resource_detail(context):
""" Test custom button on SSUI vm resource detail page
Polarion:
assignee: ndhandre
initialEstimate: 1/2h
caseimportance: medium
caseposneg: positive
testtype: functional
startsin: 5.9
casecomponent: CustomButton
tags: custom_button
setup:
1. Add Provider
2. Refresh Provider; Data should be collected.
3. Create Simple Dialog for catalog
* Navigate to Automation > Automate > Customization
* Click on All Dialogs from a sidebar
* From toolbar select "Add a new Dialog"
* Fill Dialog's name and description
* Drag TextBox (we can select other as well)
* Save changes
4. Create Catalog
* Navigate to Services > Catalogs
* Click on Catalogs from a sidebar
* From toolbar Configuration select "Add New Catalog"
* Fill name and description
* Save changes
5. Create a Catalog item
* Navigate to Services > Catalogs
* From sidebar select All Catalogs > catalog (created above)
* From toolbar select Add New catalog item
* Select Provider
* Fill Name, description, catalog and dialog (created above)
* Select VM name proper template etc...
6. Order Catalog
* Navigate to Services > Catalogs
* Click on Service catalogs from a sidebar
* Order catalog from this Page
testSteps:
1. Add custom button group for VM/Instance object from automation
* Navigate to Automate > Automation > Customization
* Click on Buttons from a sidebar
* Select VM/Instance
* From configuration select Add new Button Group
* Fill text, hover, icon, icon color
* Save change
2. Add custom button in above group
* Under this Group Click on configuration and select Add new Button
* Fill text, hover, icon, icon color, dialog, method
* Save changes
3. Navigate to SSUI (https://hostname/ui/service/login) (Credentials as Normal UI)
4. Click on My Services and select service (as per catalog item name)
5. Click on Instance which ordered by service.
6. Click on Group and select button
7. Fill dialog and submit it.
expectedResults:
1. A group should be created (is_displayed)
            2. A button should be created (is_displayed)
3.
4.
5. Check button displayed in a toolbar or not (Details page of an instance)
6. Dialog should display
7. Check for the flash message "Order Request was Submitted" and
               check automation log for the request (note: the request corresponds to the
               method attached to the button in step-1).
Bugzilla:
1427430, 1450473, 1454910
"""
pass
@pytest.mark.manual
@pytest.mark.parametrize("context", [ViaUI, ViaSSUI])
def test_custom_button_role_access_service(context):
"""Test custom button for role access of SSUI
Polarion:
assignee: ndhandre
initialEstimate: 1/4h
caseimportance: medium
caseposneg: positive
testtype: functional
startsin: 5.9
casecomponent: CustomButton
tags: custom_button
testSteps:
1. Create role by copying EvmRole-user_self_service
2. Create Group and respective user for role
3. Create custom button group
4. Create custom button with role
            5. Check user is able to access custom button or not
"""
pass
@pytest.mark.manual
def test_custom_button_on_catalog_item():
"""
Polarion:
assignee: ndhandre
initialEstimate: 1/8h
caseimportance: medium
caseposneg: positive
testtype: functional
startsin: 5.9
casecomponent: CustomButton
tags: custom_button
testSteps:
1. Add catalog_item
2. Goto catalog detail page and select `add button` from toolbar
3. Fill info and save button
"""
pass
@pytest.mark.manual
def test_custom_button_dialog_service_archived():
""" From Service OPS check if archive vms"s dialog invocation via custom button. ref: BZ1439883
Polarion:
assignee: ndhandre
initialEstimate: 1/8h
caseimportance: medium
caseposneg: positive
testtype: functional
startsin: 5.9
casecomponent: CustomButton
tags: custom_button
testSteps:
1. Create a button at the service level with InspectMe method
2. Create a service that contains 1 VM
3. Remove this VM from the provider, resulting in a VM state of 'Archived'
4. Go to the service and try to execute the button
Bugzilla:
1439883
"""
pass
@pytest.mark.parametrize("context", [ViaUI, ViaSSUI])
@pytest.mark.uncollectif(
lambda context, button_group: context == ViaSSUI and "GENERIC" in button_group,
reason="Generic object custom button not supported by SSUI",
)
def test_custom_button_dialog_service_obj(
appliance, dialog, request, context, objects, button_group
):
""" Test custom button with dialog and InspectMe method
Polarion:
assignee: ndhandre
initialEstimate: 1/4h
caseimportance: medium
caseposneg: positive
testtype: functional
startsin: 5.9
casecomponent: CustomButton
tags: custom_button
testSteps:
1. Create custom button group with the Object type
2. Create a custom button with service dialog
3. Navigate to object Details page
4. Check for button group and button
5. Select/execute button from group dropdown for selected entities
6. Fill dialog and submit
7. Check for the proper flash message related to button execution
Bugzilla:
1574774
"""
group, obj_type = button_group
with appliance.context.use(ViaUI):
button = group.buttons.create(
text="btn_{}".format(fauxfactory.gen_alphanumeric(3)),
hover="btn_hover_{}".format(fauxfactory.gen_alphanumeric(3)),
dialog=dialog,
system="Request",
request="InspectMe",
)
request.addfinalizer(button.delete_if_exists)
with appliance.context.use(context):
navigate_to = ssui_nav if context is ViaSSUI else ui_nav
obj = objects[obj_type]["Details"][0]
dest_name = objects[obj_type]["Details"][1]
view = navigate_to(obj, dest_name)
custom_button_group = Dropdown(view, group.text)
assert custom_button_group.has_item(button.text)
# Clear the automation log
assert appliance.ssh_client.run_command(
'echo -n "" > /var/www/miq/vmdb/log/automation.log'
)
custom_button_group.item_select(button.text)
_dialog_view = TextInputDialogView if context is ViaUI else TextInputDialogSSUIView
dialog_view = view.browser.create_view(_dialog_view, wait="10s")
assert dialog_view.service_name.fill("Custom Button Execute")
dialog_view.submit.click()
        # SSUI does not support flash messages
if context is ViaUI:
view.flash.assert_message("Order Request was Submitted")
# check request in log
try:
wait_for(
log_request_check,
[appliance, 1],
timeout=600,
message="Check for expected request count",
delay=20,
)
except TimedOutError:
assert False, "Expected {count} requests not found in automation log".format(
count=str(1)
)
@pytest.mark.manual
def test_custom_button_open_url_service_obj(objects, button_group):
""" Test Open url functionality of custom button.
Polarion:
assignee: ndhandre
initialEstimate: 1/2h
caseimportance: high
caseposneg: positive
testtype: functional
startsin: 5.11
casecomponent: CustomButton
tags: custom_button
testSteps:
1. Create ruby method for url functionality
2. Create custom button group with the Object type
3. Create a custom button with open_url option and respective method
4. Navigate to object Detail page
5. Execute custom button
expectedResults:
1.
2.
3.
4.
5. New tab should open with respective url
Bugzilla:
1550002
"""
pass
| gpl-2.0 | 6,226,643,836,394,651,000 | 35.149296 | 99 | 0.614237 | false |
cefn/firmware-codesign-readinglog | ui/index.py | 1 | 2487 | #!/usr/bin/python
import sys,os,glob,urlparse,urllib,subprocess
def setcwd():
realpath = os.path.realpath(sys.argv[0])
dname = os.path.dirname(realpath)
os.chdir(dname)
# sets working directory based on path to index.py
setcwd()
# loads local python modules, relative to index.py
sys.path.append(os.path.realpath('py'))
from logx import Viewer,Editor,debug_trace
'''
from PyQt5 import uic
from PyQt5.QtWidgets import QApplication
'''
from PyQt4 import uic
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import QObject,pyqtSlot
notesdir = "../notes"
pdfdir = "../papers"
startquery = "./xq/index.xq"
class PdfAdaptor(QObject):
@pyqtSlot(str)
def loadid(self, pdfid):
pdfid = str(pdfid)
pdfpath = pdfdir + os.sep + pdfid + '.pdf'
self.loadpdf(pdfpath)
@pyqtSlot(str)
def loadpdf(self, pdfpath):
pdfpath = str(pdfpath)
pdfpath = os.path.realpath(pdfpath)
subprocess.Popen(['xdg-open', pdfpath])
def path2url(path):
return urlparse.urljoin(
'file:', urllib.pathname2url(path))
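# Illustrative behaviour (hypothetical input, shown for clarity):
#   path2url('/home/user/notes') -> 'file:///home/user/notes'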
def main(argv):
querypath = os.path.realpath(startquery)
sourcedir = os.path.realpath(notesdir)
sourcepaths = glob.glob(sourcedir + "/*.html")
# for PyQt4
sourceurls = ",".join([("file://" + path) for path in sourcepaths])
# for PyQt5
#sourceurls = ",".join([path2url(path) for path in sourcepaths])
xquerynames = [
['sourceurls', sourceurls,'http://cefn.com/logx']
]
javascriptnames = dict()
# create application context
app = QApplication(sys.argv)
ui = uic.loadUi('index.ui')
editor = Editor(focuspath=None,view=ui.editView,javascriptnames=javascriptnames,xquerynames=xquerynames)
viewer = Viewer(querypath=querypath,view=ui.navView,javascriptnames=javascriptnames,xquerynames=xquerynames)
pdf = PdfAdaptor()
javascriptnames['editor']=editor
javascriptnames['viewer']=viewer
javascriptnames['pdf']=pdf
# subscribe viewer to refresh whenever source files refresh
# implicitly bound through 'sourcepaths' xquery name
for sourcepath in sourcepaths:
viewer.registersource(sourcepath)
ui.show()
# edit a notes file, if specified
if len(argv) > 0:
editor.focuspath = os.path.realpath(argv[0])
# load the view
viewer.render()
sys.exit(app.exec_())
if __name__ == "__main__":
main(sys.argv[1:]) | mit | -1,832,972,551,694,617,600 | 24.916667 | 112 | 0.657821 | false |
jason-neal/spectrum_overload | spectrum_overload/differential.py | 1 | 1536 | # -*- coding: utf-8 -*-
"""Differential Class which takes the difference between two spectra."""
from typing import Any, Dict, Optional
from spectrum_overload.spectrum import Spectrum
# TODO: Add in s-profile from
# Ferluga 1997: Separating the spectra of binary stars-I. A simple method: Secondary reconstruction
class DifferentialSpectrum(object):
"""A differential spectrum."""
def __init__(self, Spectrum1: Spectrum, Spectrum2: Spectrum) -> None:
"""Initialise class with both spectra."""
if not (Spectrum1.calibrated and Spectrum2.calibrated):
raise ValueError("Input spectra are not calibrated.")
self.spec1 = Spectrum1
self.spec2 = Spectrum2
self.params = None # type: Optional[Dict[str, Any]]
def barycentric_correct(self):
"""Barycentric correct each spectra."""
pass
def rest_frame(self, frame):
"""Change rest frame to one of the spectra."""
pass
def diff(self):
"""Calculate difference between the two spectra."""
# TODO: Access interpolations
return self.spec1 - self.spec2
def sort(self, method: str = "time"):
"""Sort spectra in specific order. e.g. time, reversed."""
pass
def swap(self):
"""Swap order of the two spectra."""
self.spec1, self.spec2 = self.spec2, self.spec1
def add_orbital_params(self, params: Dict[str, Any]):
"""A dictionary of orbital parameters to use for shifting frames."""
self.params = params
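# Minimal usage sketch (the spectrum names `spec_a` and `spec_b` are
# illustrative assumptions, not part of this module):
#
#     diff_spec = DifferentialSpectrum(spec_a, spec_b)
#     residual = diff_spec.diff()  # Spectrum holding spec_a - spec_b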
| mit | 1,857,235,720,947,687,000 | 31.680851 | 99 | 0.646484 | false |
sidrakesh93/grpc-tools | benchmarking/performance_db/performance_db_frontend/app/views.py | 1 | 3289 | #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""View for the front-end."""
from django import shortcuts
from user_data import UserData
user_data = UserData()
def display_performance_database(request):
"""View for performance database table page."""
data = user_data.get_all_users_data()
return shortcuts.render(request, 'data_table.html', {'all_users_data': data})
def display_configs(request):
"""View for config page."""
return shortcuts.render(request, 'configs.html', {})
def display_general_statistic(request, metric):
"""View for general statistic page."""
return general_statistic_renderer(request, metric)
def general_statistic_renderer(request, metric):
"""General statistic page renderer."""
data = user_data.get_all_users_single_metric_data(metric)
return shortcuts.render(
request, 'general_plots.html',
{'metric': get_metric_full_desc(metric),
'all_users_data': data})
def display_user_metrics(request, username):
"""View for user metrics page."""
complete_data = user_data.get_single_user_data(username)
return shortcuts.render(
request, 'user_plots.html',
{'username': complete_data[0],
'user_data': complete_data[1]})
def get_metric_full_desc(metric):
"""Returns full metric name."""
metric_name = {
'qps': 'Queries Per Second',
'qpspercore': 'QPS Per Core',
'perc50': '50th Percentile Latency',
'perc90': '90th Percentile Latency',
'perc95': '95th Percentile Latency',
'perc99': '99th Percentile Latency',
'perc99point9': '99.9th Percentile Latency',
'serversystime': 'Server System Time',
'serverusertime': 'Server User Time',
'clientsystime': 'Client System Time',
'clientusertime': 'Client User Time'
}[metric]
return metric_name
| bsd-3-clause | 2,365,342,280,328,738,000 | 35.544444 | 79 | 0.725753 | false |
great-expectations/great_expectations | great_expectations/expectations/core/expect_column_values_to_be_in_type_list.py | 1 | 17690 | import logging
from typing import Dict, Optional
import numpy as np
import pandas as pd
from great_expectations.core import ExpectationConfiguration
from great_expectations.exceptions import InvalidExpectationConfigurationError
from great_expectations.execution_engine import (
ExecutionEngine,
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.core.expect_column_values_to_be_of_type import (
_get_dialect_type_module,
_native_type_type_map,
)
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.registry import get_metric_kwargs
from great_expectations.expectations.util import render_evaluation_parameter_string
from great_expectations.render.renderer.renderer import renderer
from great_expectations.render.types import RenderedStringTemplateContent
from great_expectations.render.util import (
num_to_str,
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
from great_expectations.validator.validation_graph import MetricConfiguration
logger = logging.getLogger(__name__)
try:
import pyspark.sql.types as sparktypes
except ImportError as e:
logger.debug(str(e))
logger.debug(
"Unable to load spark context; install optional spark dependency for support."
)
class ExpectColumnValuesToBeInTypeList(ColumnMapExpectation):
"""
Expect a column to contain values from a specified type list.
expect_column_values_to_be_in_type_list is a \
:func:`column_map_expectation <great_expectations.execution_engine.execution_engine.MetaExecutionEngine
.column_map_expectation>` for typed-column backends,
and also for PandasDataset where the column dtype provides an unambiguous constraints (any dtype except
'object'). For PandasDataset columns with dtype of 'object' expect_column_values_to_be_of_type is a
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>` and will
independently check each row's type.
Args:
column (str): \
The column name.
type_list (str): \
A list of strings representing the data type that each column should have as entries. Valid types are
defined by the current backend implementation and are dynamically loaded. For example, valid types for
PandasDataset include any numpy dtype values (such as 'int64') or native python types (such as 'int'),
whereas valid types for a SqlAlchemyDataset include types named by the current driver such as 'INTEGER'
in most SQL dialects and 'TEXT' in dialects such as postgresql. Valid types for SparkDFDataset include
'StringType', 'BooleanType' and other pyspark-defined type names.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See also:
:func:`expect_column_values_to_be_of_type \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_of_type>`
"""
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "production",
"package": "great_expectations",
"tags": ["core expectation", "column map expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
}
map_metric = "column_values.in_type_list"
success_keys = (
"type_list",
"mostly",
)
default_kwarg_values = {
"type_list": None,
"mostly": 1,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
super().validate_configuration(configuration)
try:
assert "type_list" in configuration.kwargs, "type_list is required"
assert (
isinstance(configuration.kwargs["type_list"], (list, dict))
or configuration.kwargs["type_list"] is None
), "type_list must be a list or None"
if isinstance(configuration.kwargs["type_list"], dict):
assert (
"$PARAMETER" in configuration.kwargs["type_list"]
), 'Evaluation Parameter dict for type_list kwarg must have "$PARAMETER" key.'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "type_list", "mostly", "row_condition", "condition_parser"],
)
if params["type_list"] is not None:
for i, v in enumerate(params["type_list"]):
params["v__" + str(i)] = v
values_string = " ".join(
["$v__" + str(i) for i, v in enumerate(params["type_list"])]
)
if params["mostly"] is not None:
params["mostly_pct"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
if include_column_name:
template_str = (
"$column value types must belong to this set: "
+ values_string
+ ", at least $mostly_pct % of the time."
)
else:
template_str = (
"value types must belong to this set: "
+ values_string
+ ", at least $mostly_pct % of the time."
)
else:
if include_column_name:
template_str = (
"$column value types must belong to this set: "
+ values_string
+ "."
)
else:
template_str = (
"value types must belong to this set: " + values_string + "."
)
else:
if include_column_name:
template_str = "$column value types may be any value, but observed value will be reported"
else:
template_str = (
"value types may be any value, but observed value will be reported"
)
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = conditional_template_str + ", then " + template_str
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
def _validate_pandas(
self,
actual_column_type,
expected_types_list,
):
if expected_types_list is None:
success = True
else:
comp_types = []
for type_ in expected_types_list:
try:
comp_types.append(np.dtype(type_).type)
comp_types.append(np.dtype(type_))
except TypeError:
try:
pd_type = getattr(pd, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
try:
pd_type = getattr(pd.core.dtypes.dtypes, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
native_type = _native_type_type_map(type_)
if native_type is not None:
comp_types.extend(native_type)
success = actual_column_type in comp_types
return {
"success": success,
"result": {"observed_value": actual_column_type.type.__name__},
}
def _validate_sqlalchemy(
self, actual_column_type, expected_types_list, execution_engine
):
# Our goal is to be as explicit as possible. We will match the dialect
# if that is possible. If there is no dialect available, we *will*
# match against a top-level SqlAlchemy type.
#
# This is intended to be a conservative approach.
#
# In particular, we *exclude* types that would be valid under an ORM
# such as "float" for postgresql with this approach
if expected_types_list is None:
success = True
else:
types = []
type_module = _get_dialect_type_module(execution_engine=execution_engine)
for type_ in expected_types_list:
try:
type_class = getattr(type_module, type_)
types.append(type_class)
except AttributeError:
logger.debug("Unrecognized type: %s" % type_)
if len(types) == 0:
logger.warning(
"No recognized sqlalchemy types in type_list for current dialect."
)
types = tuple(types)
success = isinstance(actual_column_type, types)
return {
"success": success,
"result": {"observed_value": type(actual_column_type).__name__},
}
def _validate_spark(
self,
actual_column_type,
expected_types_list,
):
if expected_types_list is None:
success = True
else:
types = []
for type_ in expected_types_list:
try:
type_class = getattr(sparktypes, type_)
types.append(type_class)
except AttributeError:
logger.debug("Unrecognized type: %s" % type_)
if len(types) == 0:
raise ValueError("No recognized spark types in expected_types_list")
types = tuple(types)
success = isinstance(actual_column_type, types)
return {
"success": success,
"result": {"observed_value": type(actual_column_type).__name__},
}
def get_validation_dependencies(
self,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
# This calls TableExpectation.get_validation_dependencies to set baseline dependencies for the aggregate version
# of the expectation.
# We need to keep this as super(ColumnMapExpectation, self), which calls
# TableExpectation.get_validation_dependencies instead of ColumnMapExpectation.get_validation_dependencies.
# This is because the map version of this expectation is only supported for Pandas, so we want the aggregate
# version for the other backends.
dependencies = super(ColumnMapExpectation, self).get_validation_dependencies(
configuration, execution_engine, runtime_configuration
)
# Only PandasExecutionEngine supports the column map version of the expectation.
if isinstance(execution_engine, PandasExecutionEngine):
column_name = configuration.kwargs.get("column")
expected_types_list = configuration.kwargs.get("type_list")
metric_kwargs = get_metric_kwargs(
configuration=configuration,
metric_name="table.column_types",
runtime_configuration=runtime_configuration,
)
metric_domain_kwargs = metric_kwargs.get("metric_domain_kwargs")
metric_value_kwargs = metric_kwargs.get("metric_value_kwargs")
table_column_types_configuration = MetricConfiguration(
"table.column_types",
metric_domain_kwargs=metric_domain_kwargs,
metric_value_kwargs=metric_value_kwargs,
)
actual_column_types_list = execution_engine.resolve_metrics(
[table_column_types_configuration]
)[table_column_types_configuration.id]
actual_column_type = [
type_dict["type"]
for type_dict in actual_column_types_list
if type_dict["name"] == column_name
][0]
# only use column map version if column dtype is object
if (
actual_column_type.type.__name__ == "object_"
and expected_types_list is not None
):
# this resets dependencies using ColumnMapExpectation.get_validation_dependencies
dependencies = super().get_validation_dependencies(
configuration, execution_engine, runtime_configuration
)
# this adds table.column_types dependency for both aggregate and map versions of expectation
column_types_metric_kwargs = get_metric_kwargs(
metric_name="table.column_types",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
dependencies["metrics"]["table.column_types"] = MetricConfiguration(
metric_name="table.column_types",
metric_domain_kwargs=column_types_metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=column_types_metric_kwargs["metric_value_kwargs"],
)
return dependencies
def _validate(
self,
configuration: ExpectationConfiguration,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
column_name = configuration.kwargs.get("column")
expected_types_list = configuration.kwargs.get("type_list")
actual_column_types_list = metrics.get("table.column_types")
actual_column_type = [
type_dict["type"]
for type_dict in actual_column_types_list
if type_dict["name"] == column_name
][0]
if isinstance(execution_engine, PandasExecutionEngine):
# only PandasExecutionEngine supports map version of expectation and
# only when column type is object
if (
actual_column_type.type.__name__ == "object_"
and expected_types_list is not None
):
# this calls ColumnMapMetric._validate
return super()._validate(
configuration, metrics, runtime_configuration, execution_engine
)
return self._validate_pandas(
actual_column_type=actual_column_type,
expected_types_list=expected_types_list,
)
elif isinstance(execution_engine, SqlAlchemyExecutionEngine):
return self._validate_sqlalchemy(
actual_column_type=actual_column_type,
expected_types_list=expected_types_list,
execution_engine=execution_engine,
)
elif isinstance(execution_engine, SparkDFExecutionEngine):
return self._validate_spark(
actual_column_type=actual_column_type,
expected_types_list=expected_types_list,
)
| apache-2.0 | -3,555,744,648,507,275,300 | 40.721698 | 120 | 0.585246 | false |
SU-ECE-17-7/ibeis | ibeis/algo/hots/word_index.py | 1 | 9442 | # -*- coding: utf-8 -*-
"""
TODO: DEPRECATE OR REFACTOR INTO SMK
python -c "import doctest, ibeis; print(doctest.testmod(ibeis.algo.hots.word_index))"
python -m doctest -v ibeis/algo/hots/word_index.py
python -m doctest ibeis/algo/hots/word_index.py
"""
from __future__ import absolute_import, division, print_function
# Standard
import six
#from itertools import chain
# Science
import numpy as np
# UTool
import vtool
import utool
# VTool
import vtool.nearest_neighbors as nntool
(print, print_, printDBG, rrr_, profile) = utool.inject(__name__, '[entroid_index]')
NOCACHE_WORD = utool.get_argflag('--nocache-word')
# TODO:
class NeighborAssignment():
def __init__(asgn):
pass
def test_windex():
from ibeis.algo.hots.query_request import new_ibeis_query_request
import ibeis
daid_list = [7, 8, 9, 10, 11]
ibs = ibeis.opendb(db='testdb1')
qreq_ = new_ibeis_query_request(ibs, daid_list, daid_list)
windex = new_ibeis_windex(ibs, qreq_.get_internal_daids())
return windex, qreq_, ibs
def new_word_index(aid_list=[], vecs_list=[], flann_params={},
flann_cachedir=None, indexer_cfgstr='', hash_rowids=True,
use_cache=not NOCACHE_WORD, use_params_hash=True):
print('[windex] building WordIndex object')
_check_input(aid_list, vecs_list)
# Create indexes into the input aids
ax_list = np.arange(len(aid_list))
idx2_vec, idx2_ax, idx2_fx = invert_index(vecs_list, ax_list)
if hash_rowids:
# Fingerprint
aids_hashstr = utool.hashstr_arr(aid_list, '_AIDS')
cfgstr = aids_hashstr + indexer_cfgstr
else:
# Dont hash rowids when given enough info in indexer_cfgstr
cfgstr = indexer_cfgstr
# Build/Load the flann index
flann = nntool.flann_cache(idx2_vec, **{
'cache_dir': flann_cachedir,
'cfgstr': cfgstr,
'flann_params': flann_params,
'use_cache': use_cache,
'use_params_hash': use_params_hash})
ax2_aid = np.array(aid_list)
windex = WordIndex(ax2_aid, idx2_vec, idx2_ax, idx2_fx, flann)
return windex
def new_ibeis_windex(ibs, daid_list):
"""
IBEIS interface into word_index
>>> from ibeis.algo.hots.word_index import * # NOQA
>>> windex, qreq_, ibs = test_windex()
"""
daids_hashid = ibs.get_annot_hashid_visual_uuid(daid_list, 'D')
flann_cfgstr = ibs.cfg.query_cfg.flann_cfg.get_cfgstr()
feat_cfgstr = ibs.cfg.query_cfg._feat_cfg.get_cfgstr()
indexer_cfgstr = daids_hashid + flann_cfgstr + feat_cfgstr
try:
# Grab the keypoints names and image ids before query time
flann_params = ibs.cfg.query_cfg.flann_cfg.get_flann_params()
# Get annotation descriptors that will be searched
# FIXME; qreq_
vecs_list = ibs.get_annot_vecs(daid_list)
flann_cachedir = ibs.get_flann_cachedir()
windex = new_word_index(
daid_list, vecs_list, flann_params, flann_cachedir,
indexer_cfgstr, hash_rowids=False, use_params_hash=False)
return windex
except Exception as ex:
utool.printex(ex, True, msg_='cannot build inverted index', key_list=['ibs.get_infostr()'])
raise
def _check_input(aid_list, vecs_list):
assert len(aid_list) == len(vecs_list), 'invalid input'
assert len(aid_list) > 0, ('len(aid_list) == 0.'
'Cannot invert index without features!')
@six.add_metaclass(utool.ReloadingMetaclass)
class WordIndex(object):
"""
Abstract wrapper around flann
Example:
>>> from ibeis.algo.hots.word_index import * # NOQA
>>> windex, qreq_, ibs = test_windex()
"""
def __init__(windex, ax2_aid, idx2_vec, idx2_ax, idx2_fx, flann):
windex.ax2_aid = ax2_aid # (A x 1) Mapping to original annot ids
windex.idx2_vec = idx2_vec # (M x D) Descriptors to index
windex.idx2_ax = idx2_ax # (M x 1) Index into the aid_list
windex.idx2_fx = idx2_fx # (M x 1) Index into the annot's features
windex.flann = flann # Approximate search structure
def knn(windex, qfx2_vec, K, checks=1028):
"""
Args:
qfx2_vec (ndarray): (N x D) array of N, D-dimensional query vectors
K (int): number of approximate nearest words to find
Returns:
tuple of (qfx2_idx, qfx2_dist)
qfx2_idx (ndarray): (N x K) qfx2_idx[n][k] is the index of the kth
approximate nearest data vector w.r.t qfx2_vec[n]
qfx2_dist (ndarray): (N x K) qfx2_dist[n][k] is the distance to the kth
approximate nearest data vector w.r.t. qfx2_vec[n]
Example:
>>> from ibeis.algo.hots.word_index import * # NOQA
>>> windex, qreq_, ibs = test_windex()
>>> new_aid_list = [2, 3, 4]
>>> qfx2_vec = ibs.get_annot_vecs(1, config2_=qreq_.get_internal_query_config2())
>>> K = 2
>>> checks = 1028
>>> (qfx2_idx, qfx2_dist) = windex.knn(qfx2_vec, K, checks=checks)
"""
(qfx2_idx, qfx2_dist) = windex.flann.nn_index(qfx2_vec, K, checks=checks)
return (qfx2_idx, qfx2_dist)
def empty_words(K):
qfx2_idx = np.empty((0, K), dtype=np.int32)
qfx2_dist = np.empty((0, K), dtype=np.float64)
return (qfx2_idx, qfx2_dist)
def add_points(windex, new_aid_list, new_vecs_list):
"""
Example:
>>> from ibeis.algo.hots.word_index import * # NOQA
>>> windex, qreq_, ibs = test_windex()
>>> new_aid_list = [2, 3, 4]
>>> qfx2_vec = ibs.get_annot_vecs(1, config2_=qreq_.get_internal_query_config2())
>>> new_vecs_list = ibs.get_annot_vecs(new_aid_list, config2_=qreq_.get_internal_data_config2())
>>> K = 2
>>> checks = 1028
>>> (qfx2_idx1, qfx2_dist1) = windex.knn(qfx2_vec, K, checks=checks)
>>> windex.add_points(new_aid_list, new_vecs_list)
>>> (qfx2_idx2, qfx2_dist2) = windex.knn(qfx2_vec, K, checks=checks)
>>> assert qfx2_idx2.max() > qfx2_idx1.max()
"""
nAnnots = windex.num_indexed_annots()
nNew = len(new_aid_list)
new_ax_list = np.arange(nAnnots, nAnnots + nNew)
new_idx2_vec, new_idx2_ax, new_idx2_fx = \
invert_index(new_vecs_list, new_ax_list)
# Stack inverted information
_ax2_aid = np.hstack((windex.ax2_aid, new_aid_list))
_idx2_ax = np.hstack((windex.idx2_ax, new_idx2_ax))
_idx2_fx = np.hstack((windex.idx2_fx, new_idx2_fx))
_idx2_vec = np.vstack((windex.idx2_vec, new_idx2_vec))
windex.ax2_aid = _ax2_aid
windex.idx2_ax = _idx2_ax
windex.idx2_vec = _idx2_vec
windex.idx2_fx = _idx2_fx
#windex.idx2_kpts = None
#windex.idx2_oris = None
# Add new points to flann structure
windex.flann.add_points(new_idx2_vec)
def num_indexed_vecs(windex):
return len(windex.idx2_vec)
def num_indexed_annots(windex):
return len(windex.ax2_aid)
def get_nn_axs(windex, qfx2_nnidx):
#return windex.idx2_ax[qfx2_nnidx]
return windex.idx2_ax.take(qfx2_nnidx)
def get_nn_aids(windex, qfx2_nnidx):
"""
Args:
qfx2_nnidx (ndarray): (N x K) qfx2_idx[n][k] is the index of the kth
approximate nearest data vector
Returns:
ndarray: qfx2_aid - (N x K) qfx2_fx[n][k] is the annotation id index
of the kth approximate nearest data vector
"""
#qfx2_ax = windex.idx2_ax[qfx2_nnidx]
#qfx2_aid = windex.ax2_aid[qfx2_ax]
qfx2_ax = windex.idx2_ax.take(qfx2_nnidx)
qfx2_aid = windex.ax2_aid.take(qfx2_ax)
return qfx2_aid
def get_nn_featxs(windex, qfx2_nnidx):
"""
Args:
qfx2_nnidx (ndarray): (N x K) qfx2_idx[n][k] is the index of the kth
approximate nearest data vector
Returns:
ndarray: qfx2_fx - (N x K) qfx2_fx[n][k] is the feature index (w.r.t
the source annotation) of the kth approximate nearest data vector
"""
#return windex.idx2_fx[qfx2_nnidx]
return windex.idx2_fx.take(qfx2_nnidx)
def invert_index(vecs_list, ax_list):
"""
Aggregates descriptors of input annotations and returns inverted information
"""
if utool.NOT_QUIET:
print('[hsnbrx] stacking descriptors from %d annotations'
% len(ax_list))
try:
idx2_vec, idx2_ax, idx2_fx = nntool.invertible_stack(vecs_list, ax_list)
assert idx2_vec.shape[0] == idx2_ax.shape[0]
assert idx2_vec.shape[0] == idx2_fx.shape[0]
except MemoryError as ex:
utool.printex(ex, 'cannot build inverted index', '[!memerror]')
raise
if utool.NOT_QUIET:
print('stacked nVecs={nVecs} from nAnnots={nAnnots}'.format(
nVecs=len(idx2_vec), nAnnots=len(ax_list)))
return idx2_vec, idx2_ax, idx2_fx
def vlad(qfx2_vec, qfx2_cvec):
    """ Aggregate the residuals between descriptors and their assigned word
    (cluster center) vectors into a single L2-normalized VLAD vector. """
    qfx2_rvec = qfx2_cvec - qfx2_vec  # per-descriptor residual vectors
    aggvlad = qfx2_rvec.sum(axis=0)  # sum residuals over all descriptors
    aggvlad_norm = vtool.l2normalize(aggvlad)
    return aggvlad_norm
#if __name__ == '__main__':
# #python -m doctest -v ibeis/algo/hots/word_index.py
# import doctest
# doctest.testmod()
| apache-2.0 | -6,176,846,014,270,310,000 | 35.7393 | 108 | 0.596484 | false |
faneshion/MatchZoo | matchzoo/engine/base_preprocessor.py | 1 | 4116 | """:class:`BasePreprocessor` defines input and output for processors."""
import abc
import functools
import typing
from pathlib import Path
import dill
import matchzoo as mz
def validate_context(func):
"""Validate context in the preprocessor."""
@functools.wraps(func)
def transform_wrapper(self, *args, **kwargs):
if not self.context:
raise ValueError('Please call `fit` before calling `transform`.')
return func(self, *args, **kwargs)
return transform_wrapper
class BasePreprocessor(metaclass=abc.ABCMeta):
"""
    :class:`BasePreprocessor` to handle input data.
    A preprocessor should be used in two steps: first `fit`, then
`transform`. `fit` collects information into `context`, which includes
everything the preprocessor needs to `transform` together with other
useful information for later use. `fit` will only change the
preprocessor's inner state but not the input data. In contrast,
`transform` returns a modified copy of the input data without changing
the preprocessor's inner state.
"""
DATA_FILENAME = 'preprocessor.dill'
def __init__(self):
"""Initialization."""
self._context = {}
@property
def context(self):
"""Return context."""
return self._context
@abc.abstractmethod
def fit(
self,
data_pack: 'mz.DataPack',
verbose: int = 1
) -> 'BasePreprocessor':
"""
Fit parameters on input data.
        This method is an abstract base method and needs to be
implemented in the child class.
This method is expected to return itself as a callable
object.
        :param data_pack: :class:`DataPack` object to be fitted.
:param verbose: Verbosity.
"""
@abc.abstractmethod
def transform(
self,
data_pack: 'mz.DataPack',
verbose: int = 1
) -> 'mz.DataPack':
"""
Transform input data to expected manner.
        This method is an abstract base method and needs to be
implemented in the child class.
:param data_pack: :class:`DataPack` object to be transformed.
:param verbose: Verbosity.
"""
def fit_transform(
self,
data_pack: 'mz.DataPack',
verbose: int = 1
) -> 'mz.DataPack':
"""
Call fit-transform.
:param data_pack: :class:`DataPack` object to be processed.
:param verbose: Verbosity.
"""
return self.fit(data_pack, verbose=verbose) \
.transform(data_pack, verbose=verbose)
def save(self, dirpath: typing.Union[str, Path]):
"""
Save the :class:`DSSMPreprocessor` object.
A saved :class:`DSSMPreprocessor` is represented as a directory with
the `context` object (fitted parameters on training data), it will
be saved by `pickle`.
:param dirpath: directory path of the saved :class:`DSSMPreprocessor`.
"""
dirpath = Path(dirpath)
data_file_path = dirpath.joinpath(self.DATA_FILENAME)
if data_file_path.exists():
raise FileExistsError(
f'{data_file_path} instance exist, fail to save.')
elif not dirpath.exists():
dirpath.mkdir()
dill.dump(self, open(data_file_path, mode='wb'))
@classmethod
def _default_units(cls) -> list:
"""Prepare needed process units."""
return [
mz.preprocessors.units.tokenize.Tokenize(),
mz.preprocessors.units.lowercase.Lowercase(),
mz.preprocessors.units.punc_removal.PuncRemoval(),
]
def load_preprocessor(dirpath: typing.Union[str, Path]) -> 'mz.DataPack':
"""
Load the fitted `context`. The reverse function of :meth:`save`.
:param dirpath: directory path of the saved model.
:return: a :class:`DSSMPreprocessor` instance.
"""
dirpath = Path(dirpath)
data_file_path = dirpath.joinpath(BasePreprocessor.DATA_FILENAME)
return dill.load(open(data_file_path, 'rb'))
| apache-2.0 | -928,447,038,971,826,800 | 28.191489 | 78 | 0.622449 | false |
bolkedebruin/airflow | airflow/operators/hive_stats_operator.py | 1 | 1212 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.apache.hive.operators.hive_stats`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.apache.hive.operators.hive_stats import HiveStatsCollectionOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.apache.hive.operators.hive_stats`.",
DeprecationWarning, stacklevel=2
)
| apache-2.0 | 2,665,886,250,090,306,000 | 40.793103 | 98 | 0.768977 | false |
domeger/SplunkTAforPuppetEnterprise | bin/SplunkTAforPuppetEnterprise_rh_puppet_enterprise_overview_enforcement.py | 1 | 1978 |
import splunktaforpuppetenterprise_declare
from splunktaucclib.rest_handler.endpoint import (
field,
validator,
RestModel,
DataInputModel,
)
from splunktaucclib.rest_handler import admin_external, util
from splunk_aoblib.rest_migration import ConfigMigrationHandler
util.remove_http_proxy_env_vars()
fields = [
field.RestField(
'interval',
required=True,
encrypted=False,
default=None,
validator=validator.Pattern(
regex=r"""^\-[1-9]\d*$|^\d*$""",
)
),
field.RestField(
'index',
required=True,
encrypted=False,
default='default',
validator=validator.String(
min_len=1,
max_len=80,
)
),
field.RestField(
'puppet_enterprise_server_',
required=True,
encrypted=False,
default=None,
validator=validator.String(
min_len=0,
max_len=8192,
)
),
field.RestField(
'server_',
required=True,
encrypted=False,
default=None,
validator=validator.String(
min_len=0,
max_len=8192,
)
),
field.RestField(
'token_',
required=True,
encrypted=True,
default=None,
validator=validator.String(
min_len=0,
max_len=8192,
)
),
field.RestField(
'port_',
required=True,
encrypted=False,
default=None,
validator=validator.String(
min_len=0,
max_len=8192,
)
),
field.RestField(
'disabled',
required=False,
validator=None
)
]
model = RestModel(fields, name=None)
endpoint = DataInputModel(
'puppet_enterprise_overview_enforcement',
model,
)
if __name__ == '__main__':
admin_external.handle(
endpoint,
handler=ConfigMigrationHandler,
)
| apache-2.0 | 2,639,743,791,885,354,000 | 19.183673 | 63 | 0.5364 | false |
hugdiniz/anuarioDjango | yearbook/migrations/0005_auto_20141214_0017.py | 1 | 1444 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('yearbook', '0004_auto_20141212_1558'),
]
operations = [
migrations.RemoveField(
model_name='lotacao',
name='comentarios',
),
migrations.AddField(
model_name='pessoa',
name='historico',
field=models.ManyToManyField(related_name=b'lotacoes_anteriores', to='yearbook.Lotacao', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='unidade_organizacional',
name='localidade_sala',
field=models.ForeignKey(blank=True, to='yearbook.Sala', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='lotacao',
name='funcao',
field=models.ForeignKey(blank=True, to='yearbook.Funcao', null=True),
),
migrations.AlterField(
model_name='pessoa',
name='ferias_fim',
field=models.DateTimeField(null=True, verbose_name=b'fim das ferias', blank=True),
),
migrations.AlterField(
model_name='pessoa',
name='ferias_inicio',
field=models.DateTimeField(null=True, verbose_name=b'inicio das ferias', blank=True),
),
]
| gpl-2.0 | -4,371,260,729,932,835,000 | 31.088889 | 113 | 0.572022 | false |
MeteorKepler/RICGA | ricga/eval_tools/pycocoevalcap/tokenizer/ptbtokenizer.py | 1 | 2833 | #!/usr/bin/env python
#
# File Name : ptbtokenizer.py
#
# Description : Do the PTB Tokenization and remove punctuations.
#
# Creation Date : 29-12-2014
# Last Modified : Thu Mar 19 09:53:35 2015
# Authors : Hao Fang <[email protected]> and Tsung-Yi Lin <[email protected]>
import os
import subprocess
import tempfile
# path to the stanford corenlp jar
STANFORD_CORENLP_3_4_1_JAR = 'stanford-corenlp-3.4.1.jar'
# punctuations to be removed from the sentences
PUNCTUATIONS = ["''", "'", "``", "`", "-LRB-", "-RRB-", "-LCB-", "-RCB-", \
".", "?", "!", ",", ":", "-", "--", "...", ";"]
class PTBTokenizer:
"""Python wrapper of Stanford PTBTokenizer"""
def tokenize(self, captions_for_image):
cmd = ['java', '-cp', STANFORD_CORENLP_3_4_1_JAR, \
'edu.stanford.nlp.process.PTBTokenizer', \
'-preserveLines', '-lowerCase']
# ======================================================
# prepare data for PTB Tokenizer
# ======================================================
final_tokenized_captions_for_image = {}
image_id = [k for k, v in captions_for_image.items() for _ in range(len(v))]
sentences = '\n'.join([c['caption'].replace('\n', ' ') for k, v in captions_for_image.items() for c in v])
# ======================================================
# save sentences to temporary file
# ======================================================
path_to_jar_dirname = os.path.dirname(os.path.abspath(__file__))
tmp_file = tempfile.NamedTemporaryFile(delete=False, dir=path_to_jar_dirname)
tmp_file.write(sentences)
tmp_file.close()
# ======================================================
# tokenize sentence
# ======================================================
cmd.append(os.path.basename(tmp_file.name))
p_tokenizer = subprocess.Popen(cmd, cwd=path_to_jar_dirname, \
stdout=subprocess.PIPE)
token_lines = p_tokenizer.communicate(input=sentences.rstrip())[0]
lines = token_lines.split('\n')
# remove temp file
os.remove(tmp_file.name)
# ======================================================
# create dictionary for tokenized captions
# ======================================================
for k, line in zip(image_id, lines):
if not k in final_tokenized_captions_for_image:
final_tokenized_captions_for_image[k] = []
tokenized_caption = ' '.join([w for w in line.rstrip().split(' ') \
if w not in PUNCTUATIONS])
final_tokenized_captions_for_image[k].append(tokenized_caption)
return final_tokenized_captions_for_image
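# Minimal usage sketch (assumes the CoreNLP jar sits next to this file and the
# input mirrors the {image_id: [{'caption': ...}]} structure consumed above):
#
#   tokenizer = PTBTokenizer()
#   tokens = tokenizer.tokenize({1: [{'caption': 'A man rides a horse.'}]})
#   # -> {1: ['a man rides a horse']}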
| apache-2.0 | 4,010,289,009,320,253,400 | 41.283582 | 114 | 0.484998 | false |
frederica07/Dragon_Programming_Process | PyOpenGL-3.0.2/OpenGL/GL/ARB/robustness_isolation.py | 1 | 1333 | '''OpenGL extension ARB.robustness_isolation
This module customises the behaviour of the
OpenGL.raw.GL.ARB.robustness_isolation to provide a more
Python-friendly API
Overview (from the spec)
GL_ARB_robustness and supporting window system extensions allow
creating an OpenGL context supporting graphics reset notification
behavior. GL_ARB_robustness_isolation provides stronger
guarantees about the possible side-effects of a graphics reset.
It is expected that there may be a performance cost associated
with isolating an application or share group from other contexts
on the GPU. For this reason, GL_ARB_robustness_isolation is
phrased as an opt-in mechanism, with a new context creation bit
defined in the window system bindings. It is expected that
implementations might only advertise the strings in this extension
if both the implementation supports the desired isolation
properties, and the context was created with the appropriate reset
isolation bit.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/robustness_isolation.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.ARB.robustness_isolation import *
### END AUTOGENERATED SECTION | bsd-2-clause | -7,856,028,671,895,230,000 | 40.6875 | 67 | 0.814704 | false |
guoxiaoyong/simple-useful | cxx_learn/cronx/spider/spider_daily_ftse100.py | 2 | 2199 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2;
import re;
import string;
import sys;
from BeautifulSoup import BeautifulSoup
month_num = {
'Jan' : '01',
'Feb' : '02',
'Mar' : '03',
'Apr' : '04',
'May' : '05',
'Jun' : '06',
'Jul' : '07',
'Aug' : '08',
'Sep' : '09',
'Oct' : '10',
'Nov' : '11',
'Dec' : '12'
};
def process_date(raw_date):
global month_num;
raw_list=raw_date.split(' ');
month_str=month_num[raw_list[0]];
day_list=raw_list[1].split(',');
if len(day_list[0]) == 1:
day_str = '0' + day_list[0];
else:
day_str = day_list[0];
year_str = raw_list[2];
return year_str + '-' + month_str + '-' + day_str;
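# Illustrative example (input format assumed from the Yahoo history table):
#   process_date('Jan 5, 2015') -> '2015-01-05'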
def process_num(raw_num):
raw_list=raw_num.split(',');
sz = len(raw_list);
str_num=raw_list[0];
for i in range(1,sz):
str_num = str_num+raw_list[i];
return str_num;
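# Illustrative example: process_num('6,557.52') -> '6557.52'
# (thousands separators stripped)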
str_url = "http://finance.yahoo.com/q/hp?s=%5EFTSE+Historical+Prices";
req=urllib2.Request(str_url);
resp=urllib2.urlopen(req);
respHtml=resp.read();
HtmlEncoding = "UTF-8";
soup = BeautifulSoup(respHtml, fromEncoding=HtmlEncoding);
tag_top = soup.find('table', {"class":"yfnc_datamodoutline1"});
tag_body = tag_top.contents[0].contents[0].contents[0];
str_date = process_date(tag_body.contents[1].contents[0].contents[0]);
open_price = process_num(tag_body.contents[1].contents[1].contents[0]);
high_price = process_num(tag_body.contents[1].contents[2].contents[0]);
low_price = process_num(tag_body.contents[1].contents[3].contents[0]);
close_price = process_num(tag_body.contents[1].contents[4].contents[0]);
volume = process_num(tag_body.contents[1].contents[5].contents[0]);
if volume != "0":
daily_file = sys.argv[1];
history_file = sys.argv[2];
daily_fp = open(daily_file, 'w');
history_fp = open(history_file, 'a');
title_str = "Date,Open Price,High Price,Low Price,Close Price,Volume(GBP)\n";
daily_fp.write(title_str);
day_market_data = str_date+","+open_price+","+high_price+","+low_price+","+close_price+","+volume+'\n';
daily_fp.write(day_market_data);
history_fp.write(day_market_data);
daily_fp.close();
history_fp.close();
| cc0-1.0 | -2,231,879,005,828,900,900 | 29.123288 | 108 | 0.612551 | false |
YAmikep/django-xmlmapping | setup.py | 1 | 3687 | """
Based entirely on Django's own ``setup.py`` for now.
"""
from distutils.core import setup
from distutils.command.install_data import install_data
from distutils.command.install import INSTALL_SCHEMES
import os
import sys
class osx_install_data(install_data):
# On MacOS, the platform-specific lib dir is /System/Library/Framework/Python/.../
# which is wrong. Python 2.5 supplied with MacOS 10.5 has an Apple-specific fix
# for this in distutils.command.install_data#306. It fixes install_lib but not
# install_data, which is why we roll our own install_data class.
def finalize_options(self):
# By the time finalize_options is called, install.install_lib is set to the
# fixed directory, so we set the installdir to install_lib. The
# install_data class uses ('install_data', 'install_dir') instead.
self.set_undefined_options('install', ('install_lib', 'install_dir'))
install_data.finalize_options(self)
if sys.platform == "darwin":
cmdclasses = {'install_data': osx_install_data}
else:
cmdclasses = {'install_data': install_data}
def fullsplit(path, result=None):
"""
Split a pathname into components (the opposite of os.path.join) in a
platform-neutral way.
"""
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
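# Illustrative example (shown with '/' as separator; the real separator is
# OS-specific): fullsplit('xmlmapping/utils/x.py') -> ['xmlmapping', 'utils', 'x.py']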
# Tell distutils not to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
xmlmapping_dir = 'xmlmapping'
for dirpath, dirnames, filenames in os.walk(xmlmapping_dir):
# Ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'):
del dirnames[i]
if '__init__.py' in filenames:
packages.append('.'.join(fullsplit(dirpath)))
elif filenames:
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
# Small hack for working with bdist_wininst.
# See http://mail.python.org/pipermail/distutils-sig/2004-August/004134.html
if len(sys.argv) > 1 and sys.argv[1] == 'bdist_wininst':
for file_info in data_files:
file_info[0] = '\\PURELIB\\%s' % file_info[0]
# Dynamically calculate the version based on xmlmapping's version.
version = __import__('xmlmapping').get_version()
setup(
name = "django-xmlmapping",
version = version,
url = 'https://github.com/YAmikep/django-xmlmapping',
author = 'Michael Palumbo',
author_email = '[email protected]',
description = 'Library to map XML data to a Django data model and persist the data in the data base.',
packages = packages,
cmdclass = cmdclasses,
data_files = data_files,
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities',
'Topic :: Internet'
],
)
| bsd-3-clause | 3,709,194,845,001,403,000 | 35.622449 | 106 | 0.652834 | false |
aewallin/openvoronoi | python_examples/line-segment/lineseg_3.py | 1 | 4439 | import openvoronoi as ovd
import ovdvtk
import time
import vtk
import datetime
import math
import random
import os
def drawLine(myscreen, p1, p2):
myscreen.addActor(ovdvtk.Line(p1=(p1.x, p1.y, 0), p2=(p2.x, p2.y, 0), color=ovdvtk.yellow))
def writeFrame(w2if, lwr, n):
w2if.Modified()
current_dir = os.getcwd()
filename = current_dir + "/frames/vd500_zoomout" + ('%05d' % n) + ".png"
lwr.SetFileName(filename)
# lwr.Write()
def regularGridGenerators(far, Nmax):
# REGULAR GRID
rows = int(math.sqrt(Nmax))
print "rows= ", rows
gpos = [-0.7 * far, 1.4 * far / float(rows - 1)] # start, stride
plist = []
for n in range(rows):
for m in range(rows):
x = gpos[0] + gpos[1] * n
y = gpos[0] + gpos[1] * m
# rotation
# alfa = 0
# xt=x
# yt=y
# x = xt*math.cos(alfa)-yt*math.sin(alfa)
# y = xt*math.sin(alfa)+yt*math.cos(alfa)
plist.append(ovd.Point(x, y))
random.shuffle(plist)
return plist
def randomGenerators(far, Nmax):
pradius = (1.0 / math.sqrt(2)) * far
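    # far/sqrt(2) is the half-side of the largest square inscribed in the
    # far circle, so every sampled point stays inside the far boundary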
plist = []
for n in range(Nmax):
x = -pradius + 2 * pradius * random.random()
y = -pradius + 2 * pradius * random.random()
plist.append(ovd.Point(x, y))
return plist
def circleGenerators(far, Nmax):
# POINTS ON A CIRCLE
# """
# cpos=[50,50]
# npts = 100
dalfa = float(2 * math.pi) / float(Nmax - 1)
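    # angular step of 2*pi/(Nmax-1): the first and last of the Nmax points
    # land on the same spot of the circle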
# dgamma= 10*2*math.pi/npts
# alfa=0
# ofs=10
plist = []
radius = 0.81234 * float(far)
for n in range(Nmax):
x = float(radius) * math.cos(float(n) * float(dalfa))
y = float(radius) * math.sin(float(n) * float(dalfa))
plist.append(ovd.Point(x, y))
# random.shuffle(plist)
return plist
if __name__ == "__main__":
# print ocl.revision()
myscreen = ovdvtk.VTKScreen(width=1024, height=720) # (width=1920, height=1080)
ovdvtk.drawOCLtext(myscreen, rev_text=ovd.version())
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInputConnection(w2if.GetOutputPort())
# w2if.Modified()
# lwr.SetFileName("tux1.png")
scale = 1
myscreen.render()
random.seed(42)
far = 1
camPos = far
zmult = 4
# camPos/float(1000)
myscreen.camera.SetPosition(0, -camPos / float(1000), zmult * camPos)
myscreen.camera.SetClippingRange(-(zmult + 1) * camPos, (zmult + 1) * camPos)
myscreen.camera.SetFocalPoint(0.0, 0, 0)
vd = ovd.VoronoiDiagram(far, 120)
print ovd.version()
# for vtk visualization
vod = ovdvtk.VD(myscreen, vd, float(scale), textscale=0.01, vertexradius=0.003)
vod.drawFarCircle()
# vod.clearance_disk=1
vod.vertexRadius = 0.005
vod.textScale = 0.02
Nmax = 20
plist = randomGenerators(far, Nmax)
# plist = regularGridGenerators(far, Nmax)
# plist = circleGenerators(far, Nmax)
# plist = randomGenerators(far, Nmax)
# plist = []
# plist.append( ovd.Point(0.0,0.1) )
# plist.append( ovd.Point(0,0.9) )
# plist.append( ovd.Point(-0.15, -0.15) )
# + regularGridGenerators(far, Nmax) + circleGenerators(far, Nmax)
# plist = [ovd.Point(0,0)]
t_before = time.time()
n = 0
id_list = []
for p in plist:
print n, " adding ", p
id_list.append(vd.addVertexSite(p))
n = n + 1
Nsegs = 0
# print "returned: ",vd.addLineSite(69,105,10)
"""
vd.addLineSite(83,35)
vd.addLineSite(63,153)
vd.addLineSite(48,20)
vd.addLineSite(74,143)
vd.addLineSite(125,173)
vd.addLineSite(165,91)
"""
# segs=[]
# for n in range(Nsegs*2):
# ids.append( id_list[n] )
# segs.append( [17,13] )
# segs.append( [21,34] )
# segs.append( [26,44] )
# id1 = id_list[0]
# id2 = id_list[1]
# id3 = id_list[2]
# id4 = id_list[3]
# for seg in segs:
# id1= seg[0]
# id2= seg[1]
# print "add segment ",id1, " to ", id2
# vd.addLineSite( id1, id2 , 20)
# vd.addLineSite( id3, id4 )
t_after = time.time()
calctime = t_after - t_before
if Nmax == 0:
Nmax = 1
print " VD done in ", calctime, " s, ", calctime / Nmax, " s per generator"
vod.setAll()
myscreen.render()
print "PYTHON All DONE."
myscreen.render()
myscreen.iren.Start()
| lgpl-2.1 | 1,067,751,821,632,725,200 | 24.80814 | 95 | 0.573327 | false |
ebrelsford/django-phillydata | phillydata/opa/migrations/0001_initial.py | 1 | 3517 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('owners', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AccountOwner',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=256, verbose_name='name')),
('owner', models.ForeignKey(verbose_name='owner', blank=True, to='owners.Owner', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BillingAccount',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('external_id', models.CharField(help_text='The OPA account number (also called "BRT number")', unique=True, max_length=50, verbose_name='external id')),
('property_address', models.CharField(help_text='The address of the property this account is associated with', max_length=300, null=True, verbose_name='property address', blank=True)),
('improvement_description', models.CharField(help_text='The improvement description according to OPA', max_length=300, null=True, verbose_name='improvement description', blank=True)),
('sale_date', models.DateField(help_text='The date of the last sale of this property according to the OPA', null=True, verbose_name='sale date', blank=True)),
('land_area', models.DecimalField(decimal_places=3, max_digits=20, blank=True, help_text='The land area of the property according to the OPA in square feet', null=True, verbose_name='land area (sq ft)')),
('improvement_area', models.IntegerField(help_text='The improvement area of the property according to the OPA', null=True, verbose_name='improvement area', blank=True)),
('assessment', models.DecimalField(decimal_places=2, max_digits=20, blank=True, help_text='The assessment of the property according to the OPA', null=True, verbose_name='assessment')),
('mailing_name', models.CharField(help_text='The name on the mailing address for this account.', max_length=300, null=True, verbose_name='mailing name', blank=True)),
('mailing_address', models.CharField(help_text='The mailing address for this account.', max_length=300, null=True, verbose_name='mailing address', blank=True)),
('mailing_postal_code', models.CharField(max_length=10, null=True, verbose_name='mailing postal code', blank=True)),
('mailing_city', models.CharField(max_length=50, null=True, verbose_name='mailing city', blank=True)),
('mailing_state_province', models.CharField(max_length=40, null=True, verbose_name='mailing state/province', blank=True)),
('mailing_country', models.CharField(default=b'USA', max_length=40, null=True, verbose_name='mailing country', blank=True)),
('last_updated', models.DateTimeField(auto_now=True, verbose_name='last updated')),
('account_owner', models.ForeignKey(verbose_name='account owner', blank=True, to='opa.AccountOwner', null=True)),
],
options={
},
bases=(models.Model,),
),
]
| bsd-3-clause | -6,894,110,242,630,896,000 | 70.77551 | 220 | 0.637475 | false |
ncullen93/pyBN | pyBN/inference/marginal_exact/exact_bp.py | 1 | 2676 |
__author__ = """N. Cullen <[email protected]>"""
from pyBN.classes.factor import Factor
from pyBN.classes.factorization import Factorization
from pyBN.utils.graph import *
from copy import deepcopy, copy
import numpy as np
import json
def exact_bp(bn, target=None, evidence=None, downward_pass=False):
"""
Perform Belief Propagation (Message Passing) over a Clique Tree. This
is sometimes referred to as the "Junction Tree Algorithm" or
the "Hugin Algorithm".
    It involves an Upward Pass (see [1] pg. 353) along with a
    Downward Pass (Calibration) ([1] pg. 357) if the target involves
    multiple random variables - i.e. is a list
Steps Involved:
1. Build a Clique Tree from a Bayesian Network
a. Moralize the BN
b. Triangulate the graph
c. Find maximal cliques and collapse into nodes
d. Create complete graph and make edge weights = sepset cardinality
e. Using Max Spanning Tree to create a tree of cliques
2. Assign each factor to only one clique
3. Compute the initial potentials of each clique
- multiply all of the clique's factors together
4. Perform belief propagation based on message passing protocol.
Arguments
---------
*bn* : a BayesNet object
    Returns
    -------
    *marginal_target* : the marginal distribution over *target*, read off
        the calibrated root clique
Notes
-----
"""
# 1: Moralize the graph
    # 2: Triangulate
# 3: Build a clique tree using max spanning
# 4: Propagation of probabilities using message passing
# creates clique tree and assigns factors, thus satisfying steps 1-3
ctree = CliqueTree(bn) # might not be initialized?
#G = ctree.G
#cliques = copy.copy(ctree.V)
# select a clique as root where target is in scope of root
root = ctree.V[0]
if target is not None:
for v in ctree.V:
if target in ctree[v].scope:
root = v
break
clique_ordering = ctree.dfs_postorder(root=root)
# UPWARD PASS
# send messages up the tree from the leaves to the single root
for i in clique_ordering:
#clique = ctree[i]
for j in ctree.parents(i):
ctree[i] >> ctree[j]
#clique.send_message(ctree[j])
# if root node, collect its beliefs
#if len(ctree.parents(i)) == 0:
#ctree[root].collect_beliefs()
ctree[root].collect_beliefs()
marginal_target = ctree[root].marginalize_over(target)
# DOWNWARD PASS
if downward_pass == True:
# send messages down the tree from the root to the leaves
# (not needed unless *target* involves more than one variable)
new_ordering = list(reversed(clique_ordering))
for j in new_ordering:
for i in ctree.children(j):
ctree[j] >> ctree[i]
# if leaf node, collect its beliefs
if len(ctree.children(j)) == 0:
ctree[j].collect_beliefs()
return marginal_target
# beliefs hold the answers | mit | -26,576,409,045,976,944 | 26.885417 | 70 | 0.705531 | false |
clody23/MToolBox | MToolBox/mt-classifier.py | 1 | 13234 | #!/usr/bin/env python
import getopt, sys, re, os, glob, csv
from classifier import tree, NGclassify, consts, datatypes, parse_mhcs
from bioinf.seqs import SeqList
import io_modules.csv
import io_modules.old_table
import io_modules.serialize
import os.path
# folder where to find data for haplogroup classification and functional annotation
data_file = os.path.dirname(sys.argv[0])
def usage_old():
print """\nAssigns haplogroup to contigs and performs functional annotation
Options:
-i Contig file [mtDNAassembly-Contigs.fasta]
-g GMAP executable PATH [/usr/local/bin/gmap]
-D GMAP mt sequences database location [/usr/local/share/gmapdb]
-m GMAP mt sequences database [mt_mhcss]
-t GMAP threads [2]
-b basename for output files
"""
def usage():
print """\nAssigns haplogroup to contigs and performs functional annotation
Options:
-i Contig file [mtDNAassembly-Contigs.fasta]
-m MUSCLE executable PATH [/usr/local/bin/muscle]
-b basename for output files
-s file with most reliable haplogroup prediction
"""
def pickle_csv(csvfile, pickle_fname=None):
tree_file = csv.reader(open(csvfile, 'rb'))
if pickle_fname is None:
pickle_fname = csvfile + '.pickle'
aplo_list = io_modules.csv.parse_csv(tree_file)
htree = tree.HaplogroupTree(aplo_list=aplo_list)
pickle_file = open(pickle_fname, 'wb')
pickle_file.write(htree.serialize())
def write_old_table(pickle_fname, out_fname):
htree = tree.HaplogroupTree(pickle_data=open(pickle_fname, 'rb').read())
fh = csv.writer(open(out_fname, 'wb'))
for haplo_name in htree:
io_modules.old_table.write_haplogroup(fh, '', htree[haplo_name])
def parse_gmapf9_line(line):
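	# each tab-separated GMAP "-f 9" line carries the contig index and base
	# in the second field and the reference position and base in the third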
parsed = line.split('\t')
last_field = re.findall(r"[\w']+", parsed[2])
seq_nuc = parsed[1].partition(' ')[2]
seq_index = parsed[1].partition(' ')[0]
ref_pos = int(last_field[1])
ref_nuc = parsed[2][-1]
return ref_pos, ref_nuc, seq_nuc, seq_index
def parse_gmapf9_file(inhandle):
contigs_mappings = [[]]
h = inhandle.readlines()
c = 0
mutations = []
while c < len(h):
# end coordinate of last contig
if c == len(h)-1:
contigs_mappings[-1].append(parse_gmapf9_line(h[c])[0])
if h[c][0] != '>':
ref_pos, ref_nuc, seq_nuc, seq_index = parse_gmapf9_line(h[c])
# insertion
if ref_nuc == ' ' and seq_nuc != ' ':
# gmap assigns the position of the next nucleotide to the insertion
pos_ins = ref_pos - 1
ins = [seq_nuc]
c += 1
ref_pos, ref_nuc, seq_nuc, seq_index = parse_gmapf9_line(h[c])
while c < len(h) and (ref_nuc == ' ' and seq_nuc != ' '):
ins.append(seq_nuc)
c += 1
ref_pos, ref_nuc, seq_nuc, seq_index = parse_gmapf9_line(h[c])
mut = datatypes.Insertion("%d.%s" % (pos_ins, ''.join(ins)))
mutations.append(mut)
#print "%d.%s" % (pos_ins, ''.join(ins))
# deletion
elif ref_nuc != ' ' and seq_nuc == ' ':
pos_del = ref_pos
c += 1
ref_pos, ref_nuc, seq_nuc, seq_index = parse_gmapf9_line(h[c])
while c < len(h) and (ref_nuc != ' ' and seq_nuc == ' '):
c += 1
ref_pos, ref_nuc, seq_nuc, seq_index = parse_gmapf9_line(h[c])
if pos_del == ref_pos-1:
print "%dd" % (pos_del)
mut = datatypes.Deletion("%dd" % pos_del)
mutations.append(mut)
else:
print "%d-%dd" % (pos_del, ref_pos-1)
mut = datatypes.Deletion("%d-%dd" % (pos_del, ref_pos-1))
mutations.append(mut)
# mismatch
elif ref_nuc != seq_nuc:
if seq_nuc != 'N':
# Transition
if (ref_nuc in consts.PUR and seq_nuc in consts.PUR) or (ref_nuc in consts.PYR and seq_nuc in consts.PYR):
print "%d%s" % (ref_pos, seq_nuc)
mut = datatypes.Transition(ref_pos)
mutations.append(mut)
# Transversion
if (ref_nuc in consts.PUR and seq_nuc in consts.PYR) or (ref_nuc in consts.PYR and seq_nuc in consts.PUR):
mut = datatypes.Transversion("%d%s" % (ref_pos, seq_nuc))
mutations.append(mut)
c += 1
else:
c += 1
else:
# first contig
if len(contigs_mappings) == 1 and len(contigs_mappings[-1]) == 0:
contigs_mappings[-1].append(parse_gmapf9_line(h[c+1])[0])
# all the others
else:
contigs_mappings[-1].append(parse_gmapf9_line(h[c-1])[0])
contigs_mappings.append([parse_gmapf9_line(h[c+1])[0]])
c += 1
	# don't know if contig coordinate sorting is needed but I'll do it anyway
contigs_mappings.sort()
return mutations, contigs_mappings
def merge_tables(f, g, h):
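	# f, g and h are the RSRS-, MHCS- and rCRS-based variant lists; build a
	# yes/no presence table with one row per distinct variant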
fgh = f + g + h
mergedlist = []
for jj in fgh:
if jj not in mergedlist:
mergedlist.append(jj)
o = []
o.append(["", "RSRS", "MHCS", "rCRS"])
y = "yes"
n = ""
for i in mergedlist:
if i in f and i in g and i in h:
o.append([i.pprint(),y,y,y])
elif i in f and i in g:
o.append([i.pprint(),y,y,n])
elif i in f and i in h:
o.append([i.pprint(),y,n,y])
elif i in g and i in h:
o.append([i.pprint(),n,y,y])
elif i in f:
o.append([i.pprint(),y,n,n])
elif i in g:
o.append([i.pprint(),n,y,n])
elif i in h:
o.append([i.pprint(),n,n,y])
return o
def align_sequence(muscle_exe, sequence, rif=None, ):
"""sequence is a datatypes.Sequence, rif"""
if rif is None:
rif = datatypes.Sequence('RSRS', consts.RCRS)
seq_diff = NGclassify.SequenceDiff()
#print "Aligning sequence %s" % sequence.name
seq_diff.gen_diff(muscle_exe, rif, datatypes.Sequence(sequence.name, str(sequence)))
#print "-"*30
return seq_diff
def h_analysis(htrees, seq_diff, regions, mhcs_dict):
a = NGclassify.Classify()
#print "Classification of sequence %s" % seq_diff.obj.name
for htree, name in htrees:
print "Classification according to tree:", name
a.classify_by_tree(htree, seq_diff, regions)
#print "start is ", seq_diff.start
#print "end is ", seq_diff.end
#print "haplo_stats: ", a.haplo_stats
print "genome_state is ", a.get_genome_state()
(haplo_stats_sorted, haplo_best) = a.prediction_sorting()
print haplo_best
#print "haplo_stats_sorted is:\n", haplo_stats_sorted
print "="*20
#print "haplo_best is: ", haplo_best
#print "finding MHCS for sequence %s" % seq_diff.obj.name
mhcss = a.get_mhcss(mhcs_dict)
#print "MHCS ID for sequence %s is %s" % (seq_diff.obj.name, ','.join(list(mhcss)))
# PROVA PRINT
# print "stat_list is:"
# print type(a.__dict__['stat_list'])
#print a.__dict__
print '-'*30
#print a.seq_diff.obj.name
#print a.haplo_stats
#pdb.set_trace()
return a
def load_sequences(fname):
a = SeqList()
a.load_file(fname)
print "Loaded %d contig sequences" % len(a)
return a
def write_output(class_obj, seq_diff, seq_diff_mhcs, seq_diff_rcrs, merged_tables, outfile):
print "Writing results for sequence %s" % outfile
class_obj.pprint(open(outfile + '.csv', 'w'))
class_obj.pprint_sorted(open(outfile + '.sorted.csv', 'w'))
#seq_diff.print_alg(open(outfile + '_alg.txt','w'))
#seq_diff.pprint(open(outfile + '_diff.txt','w'))
#seq_diff_mhcs.pprint(open(outfile + '_mhcs_diff.txt','w'))
#seq_diff_mhcs.print_alg(open(outfile + '_mhcs_alg.txt','w'))
#seq_diff_rcrs.pprint(open(outfile + '_rcrs_diff.txt','w'))
#seq_diff_rcrs.print_alg(open(outfile + '_rcrs_alg.txt','w'))
merged_tables_file = open(outfile + '_merged_diff.csv', 'w')
for row in merged_tables:
merged_tables_file.write(','.join(row)+'\n')
def main_mt_hpred():
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:m:b:s:")
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit()
#print opts, args
contig_file = 'mtDNAassembly-contigs.fasta'
muscle_exe='/usr/local/bin/muscle'
basename='mtDNAassembly-contigs'
best_results_file = 'mt_classification_best_results.csv'
#print opts
for o,a in opts:
#print "option", o, "argument", a
if o == "-h":
usage()
sys.exit()
elif o == "-i": contig_file = a
elif o == "-m": muscle_exe = a
elif o == "-b": basename = a
elif o == "-s": best_results_file = a
else:
assert False, "Unhandled option."
print "Your best results file is ", best_results_file
# sample name
f = os.path.abspath(contig_file)
#sample_name = f.split('/')[-2].split('_')[-1]
sample_name = contig_file.split('-')[0]
# haplogroup tree parsing
htrees = [(tree.HaplogroupTree(pickle_data=open(data_file + '/data/phylotree_r16.pickle', 'rb').read()), data_file + '/data/phylotree_r16.pickle')]
# mhcs parsing
mhcs_dict = parse_mhcs.parse2mhcs_dict(data_file + '/data/mhcs.tab')
print "\nLoading contig sequences from file %s" % contig_file
contig_array = load_sequences(contig_file)
	contig_array_seqdiff = [] # list of lists
	contig_total_seqdiff = [] # list of variants
contig_array_mappings = []
print "\nAligning Contigs to mtDNA reference genome...\n"
# update each contig's SeqDiff
for x,contig in enumerate(contig_array):
if x == 0:
contig_seq_diff = align_sequence(muscle_exe, contig)
contig_seq_diff.find_segment() # avoid having long gaps at 5' and 3' (not actual gaps but due to the alignment)
contig_seq_diff.regions.append([contig_seq_diff.start, contig_seq_diff.end])
else:
incoming_seqdiff = align_sequence(muscle_exe, contig)
incoming_seqdiff.find_segment()
contig_seq_diff.diff_list.extend(incoming_seqdiff.diff_list)
contig_seq_diff.regions.append([incoming_seqdiff.start, incoming_seqdiff.end])
print "\nSequence haplogroup assignment\n"
seq_classify = h_analysis(htrees, contig_seq_diff, contig_seq_diff.regions, mhcs_dict)
seq_classify.sample_name = sample_name
#print "\nSequence functional annotation\n"
print "Contig alignment to MHCS and rCRS"
m = list(seq_classify.mhcss)[0]
print "Aligning contigs to MHCS SeqDiff object"
its_mhcs = datatypes.Sequence(m, mhcs_dict[m])
#contig_mhcs_total_seqdiff = []
for x, contig in enumerate(contig_array):
if x == 0:
contig_mhcs_seq_diff = align_sequence(muscle_exe, contig, its_mhcs)
contig_mhcs_seq_diff.find_segment()
contig_mhcs_seq_diff.regions.append([contig_seq_diff.start, contig_seq_diff.end])
else:
incoming_mhcs_seqdiff = align_sequence(muscle_exe, contig, its_mhcs)
incoming_mhcs_seqdiff.find_segment()
contig_mhcs_seq_diff.diff_list.extend(incoming_mhcs_seqdiff.diff_list)
contig_mhcs_seq_diff.regions.append([incoming_mhcs_seqdiff.start, incoming_mhcs_seqdiff.end])
print "rCRS SeqDiff object"
rcrs = datatypes.Sequence('rCRS', consts.rcrs)
#contig_rcrs_total_seqdiff = []
for x, contig in enumerate(contig_array):
if x == 0:
contig_rcrs_seq_diff = align_sequence(muscle_exe, contig, rcrs)
contig_rcrs_seq_diff.find_segment()
contig_rcrs_seq_diff.regions.append([contig_seq_diff.start, contig_seq_diff.end])
else:
incoming_rcrs_seqdiff = align_sequence(muscle_exe, contig, rcrs)
incoming_rcrs_seqdiff.find_segment()
contig_rcrs_seq_diff.diff_list.extend(incoming_rcrs_seqdiff.diff_list)
contig_rcrs_seq_diff.regions.append([incoming_rcrs_seqdiff.start, incoming_rcrs_seqdiff.end])
# try gathering diff from reference sequences
#print "type(seq_diff) is", type(seq_diff.diff_list)
print "Merging seq_diffs..."
mergedtables = merge_tables(contig_seq_diff.diff_list, contig_mhcs_seq_diff.diff_list, contig_rcrs_seq_diff.diff_list)
#print mergedtables
# OUTPUTS
write_output(seq_classify, contig_seq_diff.diff_list, contig_mhcs_seq_diff.diff_list, contig_rcrs_seq_diff.diff_list, mergedtables, basename)
#open(os.path.join(folder,'mt_classification_best_results'), 'a').write(','.join([seq_diff.obj.name, ';'.join([i[0] for i in class_obj.haplo_best.items()])])+'\n')
#open(os.path.join('../', best_results_file), 'a').write(','.join([seq_classify.sample_name, ';'.join([i[0] for i in seq_classify.haplo_best.items()])])+'\n')
open(os.path.join('../', best_results_file), 'a').write(','.join([basename, ';'.join([i[0] for i in seq_classify.haplo_best.items()])])+'\n')
#align_cmd = '%s -D %s -d %s -c chrRSRS -f 9 -B 5 -t 2 %s > %s.coords' % (gmapexe, gmapdb, mtdb, contig_file, basename)
#print align_cmd
# os.system(align_cmd) DON'T YOU FORGET ABOUT ME!!!
# Parsing gmap output
#mutations, contigs_mappings = parse_gmapf9_file(open("%s.coords" % basename, 'r'))
#print "mutations, ", mutations
#print "contig mappings: "
#for i in contigs_mappings:
# print i
if __name__ == "__main__":
main_mt_hpred()
# path = os.getcwd()
# for infile in glob.glob(os.path.join(path, 'OUT_*')):
# main_mt_hpred()
# print "\nHERE COMES THE FUNCTIONAL ANNOTATION...\n"
# path = os.getcwd()
# for infile in glob.glob(os.path.join(path, folder, '*', '*_merged_diff.csv')):
# (PATH, FILENAME) = os.path.split(infile)
# print infile
# diff_file = infile
# file_file = os.path.join(data_file, 'patho_table.txt')
# site_file = os.path.join(data_file, 'sitevar_modified.txt')
# bestres_file = os.path.join(path, 'mt_classification_best_results')
# haptab_file = os.path.join(data_file, 'haplogroups.txt')
# variants_functional_annotation.main_functional_analysis(diff_file, file_file, site_file, bestres_file, haptab_file, PATH, FILENAME)
| gpl-3.0 | -3,761,935,509,628,710,000 | 37.58309 | 164 | 0.65241 | false |
ianrust/coinbase_autotrader | automated_bittrader.py | 1 | 4459 | import json,urllib2,csv,time,smtplib,string,os
os.chdir('/home/ian/Documents')
# Buy and sell urls
sell_url = "https://coinbase.com/api/v1/sells"
buy_url = "https://coinbase.com/api/v1/buys"
sell_price_url = "https://coinbase.com/api/v1/prices/sell"
buy_price_url = "https://coinbase.com/api/v1/prices/buy"
headers = {'content-type': 'application/json'}
price_payload={'qty':1.0}
# gmail login info
gmailUser='[email protected]'
gmailPassword='' #password omitting *facepalm*
#function for interacting with coinbase
def req_and_ret(url,req_input,header,url_type='GET'):
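    # POST: JSON-encode req_input as the request body; for GET the payload
    # argument is ignored and the bare URL is fetched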
if url_type=='POST':
url = urllib2.Request(url, json.dumps(req_input), header)
f = urllib2.urlopen(url)
json_response = f.read()
list_response = json.loads(json_response)
f.close()
return list_response,json_response
#Reading in current state
with open('trader_state.csv','r') as trader_state:
trader_state_csv=csv.reader(trader_state,delimiter=',')
for line in trader_state_csv:
if line[0]=='api_key':
vars()[line[0]]=line[1]
else:
vars()[line[0]]=float(line[1])
trader_state.close()
#Get Current Bitcoin Prices for buy/sell
buy_price_response,throwaway = req_and_ret(buy_price_url,price_payload,headers)
buy_price=buy_price_response['subtotal']['amount']
sell_price_response,throwaway = req_and_ret(sell_price_url,price_payload,headers)
sell_price=sell_price_response['subtotal']['amount']
# Assembling Message
transaction_payload = {'api_key':api_key,'qty':amount_to_trade}
# Decide to make transaction
transaction_type=''
make_transaction=False
current_unix_time=time.time()
if current_unix_time-time_between_transactions>last_transaction_time:
#decide on type of transaction
if coins==amount_to_trade and sell_price>=(1.0+percent_swing)*last_price:
transaction_type='sell'
make_transaction=True
elif coins==0 and buy_price<=(1.0-percent_swing)*last_price:
transaction_type='buy'
make_transaction=True
#transact
success=False
transaction_response={'success':'False'}
trans_resp_string=''
last_price_new=last_price
coins_new=coins
if make_transaction:
if transaction_type=='sell':
transaction_response,trans_resp_string=req_and_ret(sell_url,transaction_payload,headers,'POST')
coins_new=0
last_price_new=sell_price
else:
transaction_response,trans_resp_string=req_and_ret(buy_url,transaction_payload,headers,'POST')
coins_new=amount_to_trade
last_price_new=buy_price
success=transaction_response['success']
errors=''
if not success:
    # use .get(): when no transaction was attempted, the stub response
    # dict has no 'errors' key
    errors=transaction_response.get('errors','')
# if there are problems, send an email to Ian Rust. Likewise, if there is a succesful transaction, tell Ian Rust
subject=""
to_addr="[email protected]"
from_addr="[email protected]"
text=''
mailServer = smtplib.SMTP('smtp.gmail.com', 587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(gmailUser, gmailPassword)
if make_transaction:
if not success:
subject="Got Problems With Your Bitcoin Trader"
text="Hello Sir \n\n I just had trouble making an api based "+transaction_type+" bitcoin transaction on coinbase. Coinbase gave the following error: \r\n "+str(errors)+"\r\n You have 1 day from the time these email was sent to fix the problem. \n\n Yours Truly, \n\n RPI BitTrader \r\n PS This is the whole response: \r\n" +str(trans_resp_string)
else:
subject="Successful "+transaction_type+" On the Part of Your Bitcoin Trader"
text="Hello Sir \n\n I just made a "+transaction_type+" order successfully on coinbase. \r\n The price was "+str(last_price)+" for "+str(amount_to_trade)+"BTC \n\n Yours Truly, \n\n RPI BitTrader"
body=string.join(("From: %s" % from_addr,"To: %s" % to_addr,"Subject: %s" % subject ,"",text), "\r\n")
mailServer.sendmail(from_addr, [to_addr], body)
mailServer.close()
# record the state
with open('trader_state.csv','w') as trader_state:
last_transaction_time_towrite=last_transaction_time
last_price_towrite=last_price
coins_towrite=coins
if make_transaction and success:
last_transaction_time_towrite=current_unix_time
last_price_towrite=last_price_new
coins_towrite=coins_new
trader_state.write('last_price,'+str(last_price_towrite)+'\nlast_transaction_time,'+str(int(last_transaction_time_towrite))+'\ncoins,'+str(coins_towrite)+'\namount_to_trade,'+str(amount_to_trade)+'\npercent_swing,'+str(percent_swing)+'\ntime_between_transactions,'+str(time_between_transactions)+'\napi_key,'+str(api_key)+'\nlast_check_time,'+str(int(current_unix_time)))
| mit | 3,535,470,929,085,376,500 | 35.85124 | 372 | 0.742095 | false |
EmanueleCannizzaro/scons | src/engine/SCons/Tool/JavaCommonTests.py | 1 | 14898 | #
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/JavaCommonTests.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os.path
import sys
import unittest
import TestUnit
import SCons.Scanner.IDL
import SCons.Tool.JavaCommon
# Adding trace=trace to any of the parse_java() calls below will cause
# the parser to spit out trace messages of the tokens it sees and the
# attendant transitions.
def trace(token, newstate):
from SCons.Debug import Trace
statename = newstate.__class__.__name__
Trace('token = %s, state = %s\n' % (repr(token), statename))
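# e.g. (assuming parse_java's optional trace parameter):
#   SCons.Tool.JavaCommon.parse_java(input, '1.4', trace=trace)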
class parse_javaTestCase(unittest.TestCase):
def test_bare_bones(self):
"""Test a bare-bones class"""
input = """\
package com.sub.bar;
public class Foo
{
public static void main(String[] args)
{
/* This tests a former bug where strings would eat later code. */
String hello1 = new String("Hello, world!");
}
}
"""
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
assert pkg_dir == os.path.join('com', 'sub', 'bar'), pkg_dir
assert classes == ['Foo'], classes
def test_dollar_sign(self):
"""Test class names with $ in them"""
input = """\
public class BadDep {
public void new$rand () {}
}
"""
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
assert pkg_dir is None, pkg_dir
assert classes == ['BadDep'], classes
def test_inner_classes(self):
"""Test parsing various forms of inner classes"""
input = """\
class Empty {
}
interface Listener {
public void execute();
}
public
class
Test implements Listener {
class Inner {
void go() {
use(new Listener() {
public void execute() {
System.out.println("In Inner");
}
});
}
String s1 = "class A";
String s2 = "new Listener() { }";
/* class B */
/* new Listener() { } */
}
class Inner2 {
Inner2() { Listener l = new Listener(); }
}
/* Make sure this class doesn't get interpreted as an inner class of the previous one, when "new" is used in the previous class. */
class Inner3 {
}
public static void main(String[] args) {
new Test().run();
}
void run() {
use(new Listener() {
public void execute() {
use(new Listener( ) {
public void execute() {
System.out.println("Inside execute()");
}
});
}
});
new Inner().go();
}
void use(Listener l) {
l.execute();
}
}
class Private {
void run() {
new Listener() {
public void execute() {
}
};
}
}
"""
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.4')
assert pkg_dir is None, pkg_dir
expect = [
'Empty',
'Listener',
'Test$1',
'Test$Inner',
'Test$Inner2',
'Test$Inner3',
'Test$2',
'Test$3',
'Test',
'Private$1',
'Private',
]
assert classes == expect, classes
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.5')
assert pkg_dir is None, pkg_dir
expect = [
'Empty',
'Listener',
'Test$Inner$1',
'Test$Inner',
'Test$Inner2',
'Test$Inner3',
'Test$1',
'Test$1$1',
'Test',
'Private$1',
'Private',
]
assert classes == expect, (expect, classes)
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '5')
assert pkg_dir is None, pkg_dir
expect = [
'Empty',
'Listener',
'Test$Inner$1',
'Test$Inner',
'Test$Inner2',
'Test$Inner3',
'Test$1',
'Test$1$1',
'Test',
'Private$1',
'Private',
]
assert classes == expect, (expect, classes)
def test_comments(self):
"""Test a class with comments"""
input = """\
package com.sub.foo;
import java.rmi.Naming;
import java.rmi.RemoteException;
import java.rmi.RMISecurityManager;
import java.rmi.server.UnicastRemoteObject;
public class Example1 extends UnicastRemoteObject implements Hello {
public Example1() throws RemoteException {
super();
}
public String sayHello() {
return "Hello World!";
}
public static void main(String args[]) {
if (System.getSecurityManager() == null) {
System.setSecurityManager(new RMISecurityManager());
}
// a comment
try {
Example1 obj = new Example1();
Naming.rebind("//myhost/HelloServer", obj);
System.out.println("HelloServer bound in registry");
} catch (Exception e) {
System.out.println("Example1 err: " + e.getMessage());
e.printStackTrace();
}
}
}
"""
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
assert pkg_dir == os.path.join('com', 'sub', 'foo'), pkg_dir
assert classes == ['Example1'], classes
def test_arrays(self):
"""Test arrays of class instances"""
input = """\
public class Test {
MyClass abc = new MyClass();
MyClass xyz = new MyClass();
MyClass _array[] = new MyClass[] {
abc,
xyz
}
}
"""
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
assert pkg_dir is None, pkg_dir
assert classes == ['Test'], classes
def test_backslash(self):
"""Test backslash handling"""
input = """\
public class MyTabs
{
private class MyInternal
{
}
private final static String PATH = "images\\\\";
}
"""
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
assert pkg_dir is None, pkg_dir
assert classes == ['MyTabs$MyInternal', 'MyTabs'], classes
def test_enum(self):
"""Test the Java 1.5 enum keyword"""
input = """\
package p;
public enum a {}
"""
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
assert pkg_dir == 'p', pkg_dir
assert classes == ['a'], classes
def test_anon_classes(self):
"""Test anonymous classes"""
input = """\
public abstract class TestClass
{
public void completed()
{
new Thread()
{
}.start();
new Thread()
{
}.start();
}
}
"""
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
assert pkg_dir is None, pkg_dir
assert classes == ['TestClass$1', 'TestClass$2', 'TestClass'], classes
def test_closing_bracket(self):
"""Test finding a closing bracket instead of an anonymous class"""
input = """\
class TestSCons {
public static void main(String[] args) {
Foo[] fooArray = new Foo[] { new Foo() };
}
}
class Foo { }
"""
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
assert pkg_dir is None, pkg_dir
assert classes == ['TestSCons', 'Foo'], classes
def test_dot_class_attributes(self):
"""Test handling ".class" attributes"""
input = """\
public class Test extends Object
{
static {
Class c = Object[].class;
Object[] s = new Object[] {};
}
}
"""
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
assert classes == ['Test'], classes
input = """\
public class A {
public class B {
public void F(Object[] o) {
F(new Object[] {Object[].class});
}
public void G(Object[] o) {
F(new Object[] {});
}
}
}
"""
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input)
assert pkg_dir is None, pkg_dir
assert classes == ['A$B', 'A'], classes
def test_anonymous_classes_with_parentheses(self):
"""Test finding anonymous classes marked by parentheses"""
input = """\
import java.io.File;
public class Foo {
public static void main(String[] args) {
File f = new File(
new File("a") {
public String toString() {
return "b";
}
} to String()
) {
public String toString() {
return "c";
}
};
}
}
"""
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.4')
assert classes == ['Foo$1', 'Foo$2', 'Foo'], classes
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.5')
assert classes == ['Foo$1', 'Foo$1$1', 'Foo'], classes
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '6')
assert classes == ['Foo$1', 'Foo$1$1', 'Foo'], classes
def test_nested_anonymous_inner_classes(self):
"""Test finding nested anonymous inner classes"""
input = """\
// import java.util.*;
public class NestedExample
{
public NestedExample()
{
Thread t = new Thread() {
public void start()
{
Thread t = new Thread() {
public void start()
{
try {Thread.sleep(200);}
catch (Exception e) {}
}
};
while (true)
{
try {Thread.sleep(200);}
catch (Exception e) {}
}
}
};
}
public static void main(String argv[])
{
NestedExample e = new NestedExample();
}
}
"""
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.4')
expect = [ 'NestedExample$1', 'NestedExample$2', 'NestedExample' ]
assert expect == classes, (expect, classes)
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.5')
expect = [ 'NestedExample$1', 'NestedExample$1$1', 'NestedExample' ]
assert expect == classes, (expect, classes)
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '6')
expect = [ 'NestedExample$1', 'NestedExample$1$1', 'NestedExample' ]
assert expect == classes, (expect, classes)
def test_private_inner_class_instantiation(self):
"""Test anonymous inner class generated by private instantiation"""
input = """\
class test
{
test()
{
super();
new inner();
}
static class inner
{
private inner() {}
}
}
"""
# This is what we *should* generate, apparently due to the
# private instantiation of the inner class, but don't today.
#expect = [ 'test$1', 'test$inner', 'test' ]
# What our parser currently generates, which doesn't match
# what the Java compiler actually generates.
expect = [ 'test$inner', 'test' ]
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.4')
assert expect == classes, (expect, classes)
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.5')
assert expect == classes, (expect, classes)
def test_floating_point_numbers(self):
"""Test floating-point numbers in the input stream"""
input = """
// Broken.java
class Broken
{
/**
* Detected.
*/
Object anonymousInnerOK = new Runnable() { public void run () {} };
/**
* Detected.
*/
class InnerOK { InnerOK () { } }
{
System.out.println("a number: " + 1000.0 + "");
}
/**
* Not detected.
*/
Object anonymousInnerBAD = new Runnable() { public void run () {} };
/**
* Not detected.
*/
class InnerBAD { InnerBAD () { } }
}
"""
expect = ['Broken$1', 'Broken$InnerOK', 'Broken$2', 'Broken$InnerBAD', 'Broken']
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.4')
assert expect == classes, (expect, classes)
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.5')
assert expect == classes, (expect, classes)
    def test_generics(self):
"""Test that generics don't interfere with detecting anonymous classes"""
input = """\
import java.util.Date;
import java.util.Comparator;
public class Foo
{
public void foo()
{
Comparator<Date> comp = new Comparator<Date>()
{
static final long serialVersionUID = 1L;
public int compare(Date lhs, Date rhs)
{
return 0;
}
};
}
}
"""
expect = [ 'Foo$1', 'Foo' ]
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '1.6')
assert expect == classes, (expect, classes)
pkg_dir, classes = SCons.Tool.JavaCommon.parse_java(input, '6')
assert expect == classes, (expect, classes)
if __name__ == "__main__":
suite = unittest.TestSuite()
tclasses = [ parse_javaTestCase ]
for tclass in tclasses:
names = unittest.getTestCaseNames(tclass, 'test_')
suite.addTests(list(map(tclass, names)))
TestUnit.run(suite)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | 2,604,112,111,457,939,000 | 24.597938 | 133 | 0.541616 | false |
drcoms/jlu-drcom-client | jlu-drcom-py3/newclinet-py3.py | 1 | 12355 | #!/usr/bin/env python
# coding: utf-8
# license: AGPL-V3
import re
import socket
import struct
import time
from hashlib import md5
import sys
import os
import random
import platform
# CONFIG
server = '10.100.61.3'
username = b'XXXXX'  # username
password = b'XXXXX'  # password
host_ip = '100.100.100.100'  # IP address
mac = 0x112288776655  # MAC address
host_name = b'YOURPCNAME'  # computer name
host_os = b'Windows 10'  # operating system
CONTROLCHECKSTATUS = b'\x20'
ADAPTERNUM = b'\x03'
IPDOG = b'\x01'
PRIMARY_DNS = '10.10.10.10'
dhcp_server = '0.0.0.0'
AUTH_VERSION = b'\x68\x00'
KEEP_ALIVE_VERSION = b'\xdc\x02'
nic_name = ''  # Indicate your nic, e.g. 'eth0.2'
bind_ip = '0.0.0.0'
# CONFIG_END
keep_alive_times = 0
class ChallengeException (Exception):
def __init__(self):
pass
class LoginException (Exception):
def __init__(self):
pass
def bind_nic():
    try:
        import fcntl

        def get_ip_address(ifname):
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            return socket.inet_ntoa(fcntl.ioctl(
                s.fileno(),
                0x8915,  # SIOCGIFADDR
                struct.pack('256s', ifname[:15].encode())  # pack() needs bytes on py3
            )[20:24])
        return get_ip_address(nic_name)
    except ImportError:
        print('The nic-binding feature requires a Unix-based system.')
        return '0.0.0.0'
    except IOError:
        print(nic_name + ' is unacceptable !')
        return '0.0.0.0'
if nic_name != '':
bind_ip = bind_nic()
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((bind_ip, 61440))
s.settimeout(3)
SALT = ''
IS_TEST = True
# specified fields based on version
CONF = "/etc/drcom.conf"
UNLIMITED_RETRY = True
EXCEPTION = False
DEBUG = False # log saves to file
LOG_PATH = '/var/log/drcom_client.log'
if IS_TEST:
DEBUG = True
LOG_PATH = 'drcom_client.log'
def log(*args, **kwargs):
    print(*args, **kwargs)
    if DEBUG and platform.uname().system != 'Windows':
        with open(LOG_PATH, 'a') as f:
            # join the printed arguments into a single log line
            f.write(' '.join(str(a) for a in args) + '\n')
def challenge(svr, ran):
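    # handshake: send a 0x01 "challenge" request and read back the packet
    # whose bytes 4:8 are the salt later used to hash the password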
while True:
t = struct.pack("<H", int(ran) % (0xFFFF))
s.sendto(b"\x01\x02" + t + b"\x09" + b"\x00"*15, (svr, 61440))
try:
data, address = s.recvfrom(1024)
log('[challenge] recv', data.hex())
except:
log('[challenge] timeout, retrying...')
continue
if address == (svr, 61440):
break
else:
log(f"Wrong address: {address}")
exit()
log('[DEBUG] challenge:\n' + data.hex())
if data[0] != 2:
raise ChallengeException
log('[challenge] challenge packet sent.')
return data[4:8]
def md5sum(s):
m = md5()
m.update(s)
return m.digest()
def dump(n):
s = '%x' % n
if len(s) & 1:
s = '0' + s
return bytes.fromhex(s)
def ror(md5 : bytes, pwd : bytes):
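    # per byte: XOR with the password, then rotate the byte left by 3 bits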
ret = b''
for i in range(len(pwd)):
x = md5[i] ^ pwd[i]
ret += (((x << 3) & 0xFF) + (x >> 5)).to_bytes(1, 'big')
return ret
def keep_alive_package_builder(number, random, tail: bytes, type=1, first=False):
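    # assembles a 0x07 keep-alive frame: sequence number, type, version
    # bytes (0x0f27 only for the very first packet), the 4-byte tail and,
    # for type 3, the host IP plus a zero-filled CRC slot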
data = b'\x07' + number.to_bytes(1, 'big') + b'\x28\x00\x0b' + type.to_bytes(1, 'big')
if first:
data += b'\x0f\x27'
else:
data += KEEP_ALIVE_VERSION
data += b'\x2f\x12' + b'\x00' * 6
data += tail
data += b'\x00' * 4
    #data += struct.pack("!H",0xdc02)
if type == 3:
foo = b''.join([int(i).to_bytes(1, 'big') for i in host_ip.split('.')]) # host_ip
# CRC
# edited on 2014/5/12, filled zeros to checksum
# crc = packet_CRC(data+foo)
crc = b'\x00' * 4
#data += struct.pack("!I",crc) + foo + b'\x00' * 8
data += crc + foo + b'\x00' * 8
else: # packet type = 1
data += b'\x00' * 16
return data
def keep_alive2(*args):
tail = b''
packet = b''
svr = server
ran = random.randint(0, 0xFFFF)
ran += random.randint(1, 10)
# 2014/10/15 add by latyas, maybe svr sends back a file packet
svr_num = 0
packet = keep_alive_package_builder(svr_num, dump(ran), b'\x00'*4, 1, True)
while True:
log('[keep-alive2] send1', packet.hex())
s.sendto(packet, (svr, 61440))
data, address = s.recvfrom(1024)
log('[keep-alive2] recv1', data.hex())
if data.startswith(b'\x07\x00\x28\x00') or data.startswith(b'\x07' + svr_num.to_bytes(1, 'big') + b'\x28\x00'):
break
elif data[0] == 0x07 and data[2] == 0x10:
log('[keep-alive2] recv file, resending..')
svr_num = svr_num + 1
packet = keep_alive_package_builder(
svr_num, dump(ran), b'\x00'*4, 1, False)
else:
log('[keep-alive2] recv1/unexpected', data.hex())
#log('[keep-alive2] recv1',data.hex())
ran += random.randint(1, 10)
packet = keep_alive_package_builder(svr_num, dump(ran), b'\x00' * 4, 1, False)
log('[keep-alive2] send2', packet.hex())
s.sendto(packet, (svr, 61440))
while True:
data, address = s.recvfrom(1024)
if data[0] == 7:
svr_num = svr_num + 1
break
else:
log('[keep-alive2] recv2/unexpected', data.hex())
log('[keep-alive2] recv2', data.hex())
tail = data[16:20]
ran += random.randint(1, 10)
packet = keep_alive_package_builder(svr_num, dump(ran), tail, 3, False)
log('[keep-alive2] send3', packet.hex())
s.sendto(packet, (svr, 61440))
while True:
data, address = s.recvfrom(1024)
if data[0] == 7:
svr_num = svr_num + 1
break
else:
log('[keep-alive2] recv3/unexpected', data.hex())
log('[keep-alive2] recv3', data.hex())
tail = data[16:20]
log("[keep-alive2] keep-alive2 loop was in daemon.")
i = svr_num
while True:
try:
ran += random.randint(1, 10)
packet = keep_alive_package_builder(i, dump(ran), tail, 1, False)
#log('DEBUG: keep_alive2,packet 4\n',packet.hex())
log('[keep_alive2] send', str(i), packet.hex())
s.sendto(packet, (svr, 61440))
data, address = s.recvfrom(1024)
log('[keep_alive2] recv', data.hex())
tail = data[16:20]
#log('DEBUG: keep_alive2,packet 4 return\n',data.hex())
ran += random.randint(1, 10)
packet = keep_alive_package_builder(i+1, dump(ran), tail, 3, False)
#log('DEBUG: keep_alive2,packet 5\n',packet.hex())
s.sendto(packet, (svr, 61440))
log('[keep_alive2] send', str(i+1), packet.hex())
data, address = s.recvfrom(1024)
log('[keep_alive2] recv', data.hex())
tail = data[16:20]
#log('DEBUG: keep_alive2,packet 5 return\n',data.hex())
i = (i+2) % 0xFF
time.sleep(20)
keep_alive1(*args)
except:
continue
def checksum(s):
ret = 1234
for i in re.findall(b'....', s):
ret ^= int(i[::-1].hex(), 16)
ret = (1968 * ret) & 0xffffffff
return struct.pack('<I', ret)
def mkpkt(salt, usr, pwd, mac):
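    # assembles the 0x03 0x01 login packet: salted MD5 of the password,
    # username, MAC xor'd with part of the digest, host/IP/DNS info, the
    # ror()-encoded password digest, a checksum and the MAC trailer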
data = b'\x03\x01\x00'+ (len(usr)+20).to_bytes(1, 'big')
data += md5sum(b'\x03\x01'+salt+pwd)
data += usr.ljust(36, b'\x00')
data += CONTROLCHECKSTATUS
data += ADAPTERNUM
data += dump(int(data[4:10].hex(), 16) ^
mac).rjust(6, b'\x00') # mac xor md51
data += md5sum(b"\x01" + pwd + salt + b'\x00'*4) # md52
data += b'\x01' # number of ip
data += b''.join([int(x).to_bytes(1,'big') for x in host_ip.split('.')])
data += b'\x00'*4 # your ipaddress 2
data += b'\x00'*4 # your ipaddress 3
data += b'\x00'*4 # your ipaddress 4
data += md5sum(data + b'\x14\x00\x07\x0b')[:8] # md53
data += IPDOG
data += b'\x00'*4 # delimeter
data += host_name.ljust(32, b'\x00')
data += b''.join([ int(i).to_bytes(1, 'big') for i in PRIMARY_DNS.split('.')]) # primary dns
data += b''.join([ int(i).to_bytes(1, 'big') for i in dhcp_server.split('.')]) # DHCP dns
data += b'\x00\x00\x00\x00' # secondary dns:0.0.0.0
data += b'\x00' * 8 # delimeter
data += b'\x94\x00\x00\x00' # unknow
data += b'\x06\x00\x00\x00' # os major
data += b'\x02\x00\x00\x00' # os minor
data += b'\xf0\x23\x00\x00' # OS build
data += b'\x02\x00\x00\x00' # os unknown
data += b'\x44\x72\x43\x4f\x4d\x00\xcf\x07\x68'
data += b'\x00' * 55 # unknown string
data += b'\x33\x64\x63\x37\x39\x66\x35\x32\x31\x32\x65\x38\x31\x37\x30\x61\x63\x66\x61\x39\x65\x63\x39\x35\x66\x31\x64\x37\x34\x39\x31\x36\x35\x34\x32\x62\x65\x37\x62\x31'
data += b'\x00' * 24
data += AUTH_VERSION
data += b'\x00' + len(pwd).to_bytes(1, 'big')
data += ror(md5sum(b'\x03\x01'+salt+pwd), pwd)
data += b'\x02\x0c'
data += checksum(data+b'\x01\x26\x07\x11\x00\x00'+dump(mac))
data += b'\x00\x00' # delimeter
data += dump(mac)
if (len(pwd) / 4) != 4:
        data += b'\x00' * (len(pwd) // 4) # strange...
data += b'\x60\xa2' # unknown, filled numbers randomly =w=
data += b'\x00' * 28
log('[mkpkt]', data.hex())
return data
def login(usr, pwd, svr):
global SALT
i = 0
while True:
salt = challenge(svr, time.time()+random.randint(0xF, 0xFF))
SALT = salt
log('[salt] ', SALT)
        packet = mkpkt(salt, usr, pwd, mac)  # build the login packet
log('[login] send', packet.hex())
s.sendto(packet, (svr, 61440))
data, address = s.recvfrom(1024)
log('[login] recv', data.hex())
log('[login] packet sent.')
if address == (svr, 61440):
if data[0] == 4:
log('[login] loged in')
break
else:
log(f'[login] login failed. data[0] = {data[0]} type={type(data[0])}')
exit(2)
else:
if i >= 5 and UNLIMITED_RETRY == False:
log('[login] exception occured.')
sys.exit(1)
else:
exit(2)
log('[login] login sent')
# 0.8 changed:
return data[23:39]
# return data[-22:-6]
def keep_alive1(salt, tail, pwd, svr):
foo = struct.pack('!H', int(time.time()) % 0xFFFF)
data = b'\xff' + md5sum(b'\x03\x01'+salt+pwd) + b'\x00\x00\x00'
data += tail
data += foo + b'\x00\x00\x00\x00'
log('[keep_alive1] send', data.hex())
s.sendto(data, (svr, 61440))
while True:
data, address = s.recvfrom(1024)
if data[0] == 7:
break
else:
log('[keep-alive1]recv/not expected', data.hex())
log('[keep-alive1] recv', data.hex())
def empty_socket_buffer():
    # empty buffer for some fucking schools
    log('starting to empty socket buffer')
    try:
        while True:
            data, address = s.recvfrom(1024)
            log('received sth unexpected', data.hex())
            if data == b'':
                break
    except socket.timeout as timeout_err:
        # a timeout means the buffer has been drained
        log(f'exception in empty_socket_buffer {timeout_err}')
    log('emptied')
def daemon():
    if platform.uname().system != 'Windows':
        with open('/var/run/jludrcom.pid', 'w') as f:
            f.write(str(os.getpid()))
def main():
    if not IS_TEST:
        daemon()
        # execfile() is Python 2 only; exec the config file instead
        exec(open(CONF).read(), globals())
    log("auth svr:", server, "\nusername:", username,
        "\npassword:", password, "\nmac:", str(hex(mac)))
    log(bind_ip)
    # flow: login -> keep alive
while True:
try:
package_tail = login(username, password, server)
except LoginException:
log("登录失败!")
break
log('package_tail', package_tail.hex())
# keep_alive1 is fucking bullshit!
        # ↑↑↑ seconded ↑↑↑
empty_socket_buffer()
keep_alive1(SALT, package_tail, password, server)
keep_alive2(SALT, package_tail, password, server)
if __name__ == "__main__":
main()
| agpl-3.0 | -4,541,838,971,522,397,000 | 30.159898 | 175 | 0.531237 | false |
lizardsystem/lizard-layers | lizard_layers/migrations/0005_auto__add_field_areavalue_flag__add_field_areavalue_comment.py | 1 | 13178 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AreaValue.flag'
db.add_column('lizard_layers_areavalue', 'flag', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
# Adding field 'AreaValue.comment'
db.add_column('lizard_layers_areavalue', 'comment', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'AreaValue.flag'
db.delete_column('lizard_layers_areavalue', 'flag')
# Deleting field 'AreaValue.comment'
db.delete_column('lizard_layers_areavalue', 'comment')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lizard_area.area': {
'Meta': {'ordering': "('name',)", 'object_name': 'Area', '_ormbases': ['lizard_area.Communique']},
'area_class': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'area_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'communique_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_area.Communique']", 'unique': 'True', 'primary_key': 'True'}),
'data_administrator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.DataAdministrator']", 'null': 'True', 'blank': 'True'}),
'data_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataSet']", 'null': 'True', 'blank': 'True'}),
'dt_created': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2012, 3, 28, 11, 32, 38, 519893)'}),
'dt_latestchanged': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'dt_latestsynchronized': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'})
},
'lizard_area.communique': {
'Meta': {'object_name': 'Communique', '_ormbases': ['lizard_geo.GeoObject']},
'areasort': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'areasort_krw': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'dt_latestchanged_krw': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'edited_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'edited_by': ('django.db.models.fields.TextField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'geoobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_geo.GeoObject']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'surface': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '1', 'blank': 'True'}),
'watertype_krw': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'lizard_area.dataadministrator': {
'Meta': {'object_name': 'DataAdministrator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'lizard_fewsnorm.parametercache': {
'Meta': {'ordering': "('ident',)", 'object_name': 'ParameterCache'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'shortname': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'lizard_geo.geoobject': {
'Meta': {'object_name': 'GeoObject'},
'geo_object_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_geo.GeoObjectGroup']"}),
'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'lizard_geo.geoobjectgroup': {
'Meta': {'object_name': 'GeoObjectGroup'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'source_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_layers.areavalue': {
'Meta': {'object_name': 'AreaValue'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'flag': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_layers.ValueType']", 'null': 'True', 'blank': 'True'})
},
'lizard_layers.parametertype': {
'Meta': {'object_name': 'ParameterType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'measuring_rod': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasuringRod']", 'null': 'True', 'blank': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_fewsnorm.ParameterCache']", 'null': 'True', 'blank': 'True'}),
'value_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_layers.ValueType']", 'null': 'True', 'blank': 'True'})
},
'lizard_layers.servermapping': {
'Meta': {'object_name': 'ServerMapping'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'external_server': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'relative_path': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
},
'lizard_layers.valuetype': {
'Meta': {'object_name': 'ValueType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.measuringrod': {
'Meta': {'object_name': 'MeasuringRod'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'measuring_rod': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'measuring_rod_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasuringRod']", 'null': 'True', 'blank': 'True'}),
'sign': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'sub_measuring_rod': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_security.dataset': {
'Meta': {'ordering': "['name']", 'object_name': 'DataSet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'})
}
}
complete_apps = ['lizard_layers']
| gpl-3.0 | -7,289,027,439,874,857,000 | 76.976331 | 182 | 0.550159 | false |
fametrano/BitcoinBlockchainTechnology | btclib/rfc6979.py | 1 | 4053 | #!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"""Deterministic generation of the ephemeral key following RFC6979.
https://tools.ietf.org/html/rfc6979:
ECDSA and ECSSA need to produce, for each signature generation,
a fresh random value (ephemeral key, hereafter designated as k).
For effective security, k must be chosen randomly and uniformly
from a set of modular integers, using a cryptographically secure
process. Even slight biases in that process may be turned into
attacks on the signature schemes.
The need for a cryptographically secure source of randomness proves
to be a hindrance and makes implementations harder to test.
Moreover, reusing the same ephemeral key for a different message
signed with the same private key reveals the private key!
RFC6979 turns ECDSA into a deterministic scheme by using a
deterministic process for generating the "random" value k.
The process fulfills the cryptographic characteristics in order to
maintain the properties of verifiability and unforgeability
expected from signature schemes; namely, for whoever does not know
the signature private key, the mapping from input messages to the
corresponding k values is computationally indistinguishable from
what a randomly and uniformly chosen function (from the set of
messages to the set of possible k values) would return.
"""
import hmac
from hashlib import sha256
from .alias import HashF, PrvKey, String
from .curve import Curve
from .curves import secp256k1
from .to_prvkey import int_from_prvkey
from .utils import int_from_bits
def rfc6979(
msg: String, prvkey: PrvKey, ec: Curve = secp256k1, hf: HashF = sha256
) -> int:
"""Return a deterministic ephemeral key following RFC 6979."""
# the following is strictly equivalent to dsa._challenge
if isinstance(msg, str):
msg = msg.encode()
# Steps numbering follows SEC 1 v.2 section 4.1.3
h = hf()
h.update(msg)
mhd = h.digest() # 4
# leftmost ec.nlen bits %= ec.n
c = int_from_bits(mhd, ec.nlen) % ec.n # 5
q = int_from_prvkey(prvkey, ec)
return _rfc6979(c, q, ec, hf)
def _rfc6979(c: int, q: int, ec: Curve, hf: HashF) -> int:
# https://tools.ietf.org/html/rfc6979 section 3.2
# c = hf(m) # 3.2.a
# convert the private key q to an octet sequence of size nsize
bprv = q.to_bytes(ec.nsize, "big")
# truncate and/or expand c: encoding size is driven by nsize
bc = c.to_bytes(ec.nsize, "big")
bprvbm = bprv + bc
hsize = hf().digest_size
V = b"\x01" * hsize # 3.2.b
K = b"\x00" * hsize # 3.2.c
K = hmac.new(K, V + b"\x00" + bprvbm, hf).digest() # 3.2.d
V = hmac.new(K, V, hf).digest() # 3.2.e
K = hmac.new(K, V + b"\x01" + bprvbm, hf).digest() # 3.2.f
V = hmac.new(K, V, hf).digest() # 3.2.g
while True: # 3.2.h
T = b"" # 3.2.h.1
while len(T) < ec.nsize: # 3.2.h.2
V = hmac.new(K, V, hf).digest()
T += V
# The following line would introduce a bias
# k = int.from_bytes(T, 'big') % ec.n
# In general, taking a uniformly random integer (like those
# obtained from a hash function in the random oracle model)
# modulo the curve order n would produce a biased result.
# However, if the order n is sufficiently close to 2^hlen,
# then the bias is not observable: e.g.
# for secp256k1 and sha256 1-n/2^256 it is about 1.27*2^-128
k = int_from_bits(T, ec.nlen) # candidate k # 3.2.h.3
if 0 < k < ec.n: # acceptable values for k
return k # successful candidate
K = hmac.new(K, V + b"\x00", hf).digest()
V = hmac.new(K, V, hf).digest()
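# A minimal usage sketch (illustrative only; the message and the private key
# below are made-up values, assuming an integer key is accepted by
# int_from_prvkey). The point is determinism: the same (msg, prvkey, hf)
# triple always yields the same nonce, so signing becomes reproducible and
# testable.
#
#   >>> k1 = rfc6979("Satoshi Nakamoto", 0x01)
#   >>> k2 = rfc6979("Satoshi Nakamoto", 0x01)
#   >>> k1 == k2
#   True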
| mit | -3,713,050,999,322,462,700 | 37.6 | 77 | 0.668887 | false |
spaghetti-/rosdep | src/rosdep2/platforms/arch.py | 1 | 3009 | #!/usr/bin/env python
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/[email protected]
import subprocess
from ..installers import PackageManagerInstaller
from .source import SOURCE_INSTALLER
ARCH_OS_NAME = 'arch'
PACMAN_INSTALLER = 'pacman'
def register_installers(context):
context.set_installer(PACMAN_INSTALLER, PacmanInstaller())
def register_platforms(context):
context.add_os_installer_key(ARCH_OS_NAME, SOURCE_INSTALLER)
context.add_os_installer_key(ARCH_OS_NAME, PACMAN_INSTALLER)
context.set_default_os_installer_key(ARCH_OS_NAME, lambda self: PACMAN_INSTALLER)
def pacman_detect_single(p):
return not subprocess.call(['pacman', '-T', p], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def pacman_detect(packages):
return [p for p in packages if pacman_detect_single(p)]
class PacmanInstaller(PackageManagerInstaller):
def __init__(self):
super(PacmanInstaller, self).__init__(pacman_detect)
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
packages = self.get_packages_to_install(resolved, reinstall=reinstall)
if not packages:
return []
command = ['pacman', '-S']
if not interactive:
command.append('--noconfirm')
if not reinstall:
command.append('--needed')
if quiet:
command.append('-q')
return [self.elevate_priv(command + packages)]
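# Illustrative behaviour (a sketch: 'foo' is a made-up package name, and the
# exact privilege-elevation prefix depends on elevate_priv and the calling
# user):
#
#   >>> PacmanInstaller().get_install_command(['foo'], interactive=False)
#   [['sudo', 'pacman', '-S', '--noconfirm', '--needed', 'foo']]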
| bsd-3-clause | 6,367,772,519,631,742,000 | 40.791667 | 99 | 0.730808 | false |
vlas-sokolov/multicube | multicube/astro_toolbox.py | 1 | 5386 | import numpy as np
from astropy.io import fits
import os
# TODO: rewrite this to have multiple components generated here.
# Having a bundle of test filaments would be very nice.
def make_test_cube(shape=(30,9,9), outfile='test.fits',
sigma=None, seed=0, writeSN=False ):
"""
Generates a simple gaussian cube with noise of
given shape and writes it as a fits file.
"""
from astropy.convolution import Gaussian1DKernel, Gaussian2DKernel
if sigma is None:
sigma1d, sigma2d = shape[0]/10., np.mean(shape[1:])/5.
else:
sigma1d, sigma2d = sigma
gauss1d = Gaussian1DKernel(stddev=sigma1d, x_size=shape[0])
gauss2d = Gaussian2DKernel(stddev=sigma2d, x_size=shape[1],
y_size=shape[2])
signal_cube = gauss1d.array[:,None,None] * gauss2d.array
    signal_cube = signal_cube / signal_cube.max()
# adding noise:
np.random.seed(seed)
noise_cube = (np.random.random(signal_cube.shape)-.5)* \
np.median(signal_cube.std(axis=0))
test_cube = signal_cube+noise_cube
true_rms = noise_cube.std()
# making a simple header for the test cube:
test_hdu = fits.PrimaryHDU(test_cube)
# the strange cdelt values are a workaround
# for what seems to be a bug in wcslib:
# https://github.com/astropy/astropy/issues/4555
cdelt1, cdelt2, cdelt3 = -(4e-3+1e-8), 4e-3+1e-8, -0.1
keylist = {'CTYPE1': 'RA---GLS', 'CTYPE2': 'DEC--GLS', 'CTYPE3': 'VRAD',
'CDELT1': cdelt1, 'CDELT2': cdelt2, 'CDELT3': cdelt3,
'CRVAL1': 0, 'CRVAL2': 0, 'CRVAL3': 5,
'CRPIX1': 9, 'CRPIX2': 0, 'CRPIX3': 5,
'CUNIT1': 'deg', 'CUNIT2': 'deg', 'CUNIT3': 'km s-1',
'BUNIT' : 'K', 'EQUINOX': 2000.0}
# write out some values used to generate the cube:
keylist['SIGMA' ] = abs(sigma1d*cdelt3), 'in units of CUNIT3'
keylist['RMSLVL'] = true_rms
keylist['SEED' ] = seed
test_header = fits.Header()
test_header.update(keylist)
test_hdu = fits.PrimaryHDU(data=test_cube, header=test_header)
test_hdu.writeto(outfile, clobber=True, checksum=True)
if writeSN:
signal_hdu = fits.PrimaryHDU(data=signal_cube, header=test_header)
noise_hdu = fits.PrimaryHDU(data=noise_cube , header=test_header)
signame, noiname = [outfile.split('.fits')[0]+'-'+i+'.fits'
for i in ['signal','noise']]
signal_hdu.writeto(signame, clobber=True, checksum=True)
noise_hdu.writeto( noiname, clobber=True, checksum=True)
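# Example (illustrative): build a default 30x9x9 cube and also write the
# noiseless signal and pure noise companions next to it.
#
#   >>> make_test_cube((30, 9, 9), outfile='test.fits', writeSN=True)
#   # -> writes test.fits, test-signal.fits and test-noise.fits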
def download_test_cube(outfile='test.fits'):
"""
Downloads a sample fits file from Dropbox (325kB).
"""
from astropy.utils.data import download_file
test_cube_url = 'https://db.tt/i0jWA7DU'
tmp_path = download_file(test_cube_url)
try:
os.rename(tmp_path, outfile)
except OSError:
# os.rename doesn't like cross-device links
import shutil
shutil.move(tmp_path, outfile)
def get_ncores():
"""
Try to get the number of cpu cores
"""
try:
import multiprocessing
ncores = multiprocessing.cpu_count()
except ImportError:
ncores = 1
return ncores
def in_ipynb():
"""
Taken from Adam Ginsburg's SO answer here:
http://stackoverflow.com/a/24937408/4118756
"""
try:
cfg = get_ipython().config
if cfg['IPKernelApp']['parent_appname'] == 'ipython-notebook':
return True
else:
return False
except NameError:
return False
def tinker_ring_parspace(parseed, xy_shape, parindices=[], paramps=[]):
"""
    An oscillating radial structure is introduced to selected parameters.
"""
xy_pars = np.empty((len(parseed),) + xy_shape)
xy_pars[:] = np.array(parseed)[:,None,None]
yarr, xarr = np.indices(xy_shape)
cent = (np.array(xy_shape)-1)/2.
arm = (min(xy_shape)-1)/2.
dist_norm = np.sqrt(((np.array([xarr,yarr]) -
cent[:,None,None])**2).sum(axis=0)) / arm
    # a pretty distortion function
c = 1.5*np.pi # normalization constant for radial distance
f = lambda x: (np.sinc(x*c)**2 + np.cos(x*c)**2)
for par_idx, par_amp in zip(parindices, paramps):
xy_pars[par_idx] += (f(dist_norm)-1) * par_amp
return xy_pars
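# Example (illustrative values): start from a seed of three parameters on a
# 9x9 grid and superimpose the ring-like perturbation, with amplitude 0.5,
# on the first parameter only.
#
#   >>> xy_pars = tinker_ring_parspace([1.0, 0.2, 5.0], (9, 9),
#   ...                                parindices=[0], paramps=[0.5])
#   >>> xy_pars.shape
#   (3, 9, 9)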
def write_skycoord_table(data, cube_ref, **kwargs):
"""
Writes out a text file with flattened coordinates of the cube
stacked with input array data. Additional arguments are passed
to astropy's text writing function.
TODO: add a useful `names` keyword?
See astropy.io.ascii.write docstring for more info.
Parameters
----------
data : array-like structure of the same xy-grid as cube_ref.
cube_ref : a cube file to get the coordinate grid from.
"""
from astropy.table import Table
from astropy.io import ascii
from spectral_cube import SpectralCube
cube = SpectralCube.read(cube_ref)
flat_coords = [cube.spatial_coordinate_map[i].flatten() for i in [1,0]]
# TODO: finish this up for multiple components
#n_repeat = np.prod(np.array(data).shape)%np.prod(cube.shape[1:])+1
table = Table(np.vstack(flat_coords +
[np.array(xy_slice).flatten() for xy_slice in data]).T)
ascii.write(table, **kwargs)
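# Example (a sketch; `my_map` and the file names are made up, and any extra
# keyword arguments are passed straight through to astropy's ascii.write):
#
#   >>> write_skycoord_table([my_map], 'cube.fits',
#   ...                      output='map_with_coords.txt', overwrite=True)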
| mit | 4,295,004,884,687,607,000 | 34.202614 | 76 | 0.615856 | false |
mitodl/micromasters | micromasters/settings.py | 1 | 21620 | """
Django settings for MicroMasters.
"""
import logging
import os
import platform
from urllib.parse import urljoin
import dj_database_url
from celery.schedules import crontab
from django.core.exceptions import ImproperlyConfigured
from micromasters.envs import (
get_any,
get_bool,
get_int,
get_list_of_str,
get_string,
)
from micromasters.sentry import init_sentry
VERSION = "0.199.4"
# initialize Sentry before doing anything else so we capture any config errors
ENVIRONMENT = get_string('MICROMASTERS_ENVIRONMENT', 'dev')
SENTRY_DSN = get_string("SENTRY_DSN", "")
SENTRY_LOG_LEVEL = get_string("SENTRY_LOG_LEVEL", "ERROR")
init_sentry(
dsn=SENTRY_DSN, environment=ENVIRONMENT, version=VERSION, log_level=SENTRY_LOG_LEVEL
)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_string(
'SECRET_KEY',
'36boam8miiz0c22il@3&gputb=wrqr2plah=0#0a_bknw9(2^r'
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = get_bool('DEBUG', False)
if DEBUG:
# Disabling the protection added in 1.10.3 against a DNS rebinding vulnerability:
# https://docs.djangoproject.com/en/1.10/releases/1.10.3/#dns-rebinding-vulnerability-when-debug-true
# Because we never debug against production data, we are not vulnerable
# to this problem.
ALLOWED_HOSTS = ['*']
else:
ALLOWED_HOSTS = get_list_of_str('ALLOWED_HOSTS', [])
SECURE_SSL_REDIRECT = get_bool('MICROMASTERS_SECURE_SSL_REDIRECT', True)
WEBPACK_LOADER = {
'DEFAULT': {
'CACHE': not DEBUG,
'BUNDLE_DIR_NAME': 'bundles/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
'POLL_INTERVAL': 0.1,
'TIMEOUT': None,
'IGNORE': [
r'.+\.hot-update\.+',
r'.+\.js\.map'
]
}
}
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'server_status',
'social_django',
# WAGTAIL
'wagtail.contrib.forms',
'wagtail.contrib.redirects',
'wagtail.contrib.table_block',
'wagtail.contrib.legacy.richtext',
'wagtail.embeds',
'wagtail.sites',
'wagtail.users',
'wagtail.snippets',
'wagtail.documents',
'wagtail.images',
'wagtail.search',
'wagtail.admin',
'wagtail.core',
'modelcluster',
'taggit',
# Hijack
'hijack',
'compat',
'hijack_admin',
# other third party APPS
'rolepermissions',
'corsheaders',
# Our INSTALLED_APPS
'backends',
'cms',
'courses',
'dashboard',
'discussions',
'ecommerce',
'exams',
'financialaid',
'grades',
'mail',
'profiles',
'roles',
'search',
'ui',
'seed_data',
'selenium_tests',
)
DISABLE_WEBPACK_LOADER_STATS = get_bool("DISABLE_WEBPACK_LOADER_STATS", False)
if not DISABLE_WEBPACK_LOADER_STATS:
INSTALLED_APPS += ('webpack_loader',)
MIDDLEWARE = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.contrib.redirects.middleware.RedirectMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
)
# enable the nplusone profiler only in debug mode
if DEBUG:
INSTALLED_APPS += (
'nplusone.ext.django',
)
MIDDLEWARE += (
'nplusone.ext.django.NPlusOneMiddleware',
)
AUTHENTICATION_BACKENDS = (
'backends.edxorg.EdxOrgOAuth2',
# the following needs to stay here to allow login of local users
'django.contrib.auth.backends.ModelBackend',
)
SESSION_ENGINE = get_string('SESSION_ENGINE', 'django.contrib.sessions.backends.signed_cookies')
SESSION_COOKIE_NAME = get_string('SESSION_COOKIE_NAME', 'sessionid')
EDXORG_BASE_URL = get_string('EDXORG_BASE_URL', 'https://courses.edx.org/')
SOCIAL_AUTH_EDXORG_KEY = get_string('EDXORG_CLIENT_ID', '')
SOCIAL_AUTH_EDXORG_SECRET = get_string('EDXORG_CLIENT_SECRET', '')
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'backends.pipeline_api.check_edx_verified_email',
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
# the following custom pipeline func goes before load_extra_data
'backends.pipeline_api.set_last_update',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details',
'backends.pipeline_api.update_profile_from_edx',
)
SOCIAL_AUTH_EDXORG_AUTH_EXTRA_ARGUMENTS = {
'access_type': 'offline',
'approval_prompt': 'auto'
}
SOCIAL_AUTH_EDXORG_EXTRA_DATA = ['updated_at']
LOGIN_REDIRECT_URL = '/dashboard'
LOGOUT_REDIRECT_URL = '/'
LOGIN_URL = '/'
LOGIN_ERROR_URL = '/'
OAUTH_MAINTENANCE_MODE = get_bool('OAUTH_MAINTENANCE_MODE', False)
ROOT_URLCONF = 'micromasters.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
BASE_DIR + '/templates/'
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
'ui.context_processors.api_keys',
'ui.context_processors.do_not_track',
],
},
},
]
WSGI_APPLICATION = 'micromasters.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Uses DATABASE_URL to configure with sqlite default:
# For URL structure:
# https://github.com/kennethreitz/dj-database-url
DEFAULT_DATABASE_CONFIG = dj_database_url.parse(
get_string(
'DATABASE_URL',
'sqlite:///{0}'.format(os.path.join(BASE_DIR, 'db.sqlite3'))
)
)
DEFAULT_DATABASE_CONFIG['CONN_MAX_AGE'] = get_int('MICROMASTERS_DB_CONN_MAX_AGE', 0)
# If True, disables server-side database cursors to prevent invalid cursor errors when using pgbouncer
DEFAULT_DATABASE_CONFIG["DISABLE_SERVER_SIDE_CURSORS"] = get_bool(
"MICROMASTERS_DB_DISABLE_SS_CURSORS", True
)
if get_bool('MICROMASTERS_DB_DISABLE_SSL', False):
DEFAULT_DATABASE_CONFIG['OPTIONS'] = {}
else:
DEFAULT_DATABASE_CONFIG['OPTIONS'] = {'sslmode': 'require'}
DATABASES = {
'default': DEFAULT_DATABASE_CONFIG
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
# Serve static files with dj-static
STATIC_URL = '/static/'
CLOUDFRONT_DIST = get_string('CLOUDFRONT_DIST', None)
if CLOUDFRONT_DIST:
STATIC_URL = urljoin('https://{dist}.cloudfront.net'.format(dist=CLOUDFRONT_DIST), STATIC_URL)
# Configure Django Storages to use Cloudfront distribution for S3 assets
AWS_S3_CUSTOM_DOMAIN = '{dist}.cloudfront.net'.format(dist=CLOUDFRONT_DIST)
STATIC_ROOT = 'staticfiles'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticatedOrReadOnly',
],
'EXCEPTION_HANDLER': 'micromasters.utils.custom_exception_handler'
}
# Request files from the webpack dev server
USE_WEBPACK_DEV_SERVER = get_bool('MICROMASTERS_USE_WEBPACK_DEV_SERVER', False)
WEBPACK_DEV_SERVER_HOST = get_string('WEBPACK_DEV_SERVER_HOST', '')
WEBPACK_DEV_SERVER_PORT = get_int('WEBPACK_DEV_SERVER_PORT', 8078)
# Important to define this so DEBUG works properly
INTERNAL_IPS = (get_string('HOST_IP', '127.0.0.1'), )
# Configure e-mail settings
EMAIL_BACKEND = get_string('MICROMASTERS_EMAIL_BACKEND', 'django.core.mail.backends.smtp.EmailBackend')
EMAIL_HOST = get_string('MICROMASTERS_EMAIL_HOST', 'localhost')
EMAIL_PORT = get_int('MICROMASTERS_EMAIL_PORT', 25)
EMAIL_HOST_USER = get_string('MICROMASTERS_EMAIL_USER', '')
EMAIL_HOST_PASSWORD = get_string('MICROMASTERS_EMAIL_PASSWORD', '')
EMAIL_USE_TLS = get_bool('MICROMASTERS_EMAIL_TLS', False)
EMAIL_SUPPORT = get_string('MICROMASTERS_SUPPORT_EMAIL', '[email protected]')
DEFAULT_FROM_EMAIL = get_string('MICROMASTERS_FROM_EMAIL', 'webmaster@localhost')
ECOMMERCE_EMAIL = get_string('MICROMASTERS_ECOMMERCE_EMAIL', '[email protected]')
MAILGUN_URL = get_string('MAILGUN_URL', None)
if not MAILGUN_URL:
raise ImproperlyConfigured("MAILGUN_URL not set")
MAILGUN_KEY = get_string('MAILGUN_KEY', None)
if not MAILGUN_KEY:
raise ImproperlyConfigured("MAILGUN_KEY not set")
MAILGUN_BATCH_CHUNK_SIZE = get_int('MAILGUN_BATCH_CHUNK_SIZE', 1000)
MAILGUN_RECIPIENT_OVERRIDE = get_string('MAILGUN_RECIPIENT_OVERRIDE', None)
MAILGUN_FROM_EMAIL = get_string('MAILGUN_FROM_EMAIL', '[email protected]')
MAILGUN_BCC_TO_EMAIL = get_string('MAILGUN_BCC_TO_EMAIL', '[email protected]')
# e-mail configurable admins
ADMIN_EMAIL = get_string('MICROMASTERS_ADMIN_EMAIL', '')
if ADMIN_EMAIL != '':
ADMINS = (('Admins', ADMIN_EMAIL),)
else:
ADMINS = ()
# Logging configuration
LOG_LEVEL = get_string('MICROMASTERS_LOG_LEVEL', 'INFO')
DJANGO_LOG_LEVEL = get_string('DJANGO_LOG_LEVEL', 'INFO')
ES_LOG_LEVEL = get_string('ES_LOG_LEVEL', 'INFO')
# For logging to a remote syslog host
LOG_HOST = get_string('MICROMASTERS_LOG_HOST', 'localhost')
LOG_HOST_PORT = get_int('MICROMASTERS_LOG_HOST_PORT', 514)
HOSTNAME = platform.node().split('.')[0]
# nplusone profiler logger configuration
NPLUSONE_LOGGER = logging.getLogger('nplusone')
NPLUSONE_LOG_LEVEL = logging.ERROR
# paramiko logger configuration
# default log level to critical to silence everything
PARAMIKO_LOG_LEVEL = get_string('PARAMIKO_LOG_LEVEL', 'CRITICAL')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
}
},
'formatters': {
'verbose': {
'format': (
'[%(asctime)s] %(levelname)s %(process)d [%(name)s] '
'%(filename)s:%(lineno)d - '
'[{hostname}] - %(message)s'
).format(hostname=HOSTNAME),
'datefmt': '%Y-%m-%d %H:%M:%S'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'syslog': {
'level': LOG_LEVEL,
'class': 'logging.handlers.SysLogHandler',
'facility': 'local7',
'formatter': 'verbose',
'address': (LOG_HOST, LOG_HOST_PORT)
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
},
'loggers': {
'django': {
'propagate': True,
'level': DJANGO_LOG_LEVEL,
'handlers': ['console', 'syslog'],
},
'django.request': {
'handlers': ['mail_admins'],
'level': DJANGO_LOG_LEVEL,
'propagate': True,
},
'urllib3': {
'level': ES_LOG_LEVEL,
},
'elasticsearch': {
'level': ES_LOG_LEVEL,
},
'nplusone': {
'handlers': ['console'],
'level': 'ERROR',
},
'paramiko': {
'level': PARAMIKO_LOG_LEVEL,
},
},
'root': {
'handlers': ['console', 'syslog'],
'level': LOG_LEVEL,
},
}
# CORS
CORS_ORIGIN_WHITELIST = get_list_of_str("MICROMASTERS_CORS_ORIGIN_WHITELIST", [])
CORS_ALLOW_CREDENTIALS = True
# server-status
STATUS_TOKEN = get_string("STATUS_TOKEN", "")
HEALTH_CHECK = ['CELERY', 'REDIS', 'POSTGRES', 'ELASTIC_SEARCH']
ADWORDS_CONVERSION_ID = get_string("ADWORDS_CONVERSION_ID", "")
GA_TRACKING_ID = get_string("GA_TRACKING_ID", "")
GOOGLE_API_KEY = get_string("GOOGLE_API_KEY", "")
GTM_CONTAINER_ID = get_string("GTM_CONTAINER_ID", "")
SL_TRACKING_ID = get_string("SL_TRACKING_ID", "")
REACT_GA_DEBUG = get_bool("REACT_GA_DEBUG", False)
# Hijack
HIJACK_ALLOW_GET_REQUESTS = True
HIJACK_LOGOUT_REDIRECT_URL = '/admin/auth/user'
# Wagtail
WAGTAIL_SITE_NAME = "MIT MicroMasters"
WAGTAILIMAGES_MAX_UPLOAD_SIZE = get_int('WAGTAILIMAGES_MAX_UPLOAD_SIZE', 20971620) # default 25 MB
MEDIA_ROOT = get_string('MEDIA_ROOT', '/var/media/')
MEDIA_URL = '/media/'
MICROMASTERS_USE_S3 = get_bool('MICROMASTERS_USE_S3', False)
AWS_ACCESS_KEY_ID = get_string('AWS_ACCESS_KEY_ID', False)
AWS_SECRET_ACCESS_KEY = get_string('AWS_SECRET_ACCESS_KEY', False)
AWS_STORAGE_BUCKET_NAME = get_string('AWS_STORAGE_BUCKET_NAME', False)
AWS_S3_FILE_OVERWRITE = get_bool('AWS_S3_FILE_OVERWRITE', False)
AWS_QUERYSTRING_AUTH = get_string('AWS_QUERYSTRING_AUTH', False)
# Provide nice validation of the configuration
if (
MICROMASTERS_USE_S3 and
(not AWS_ACCESS_KEY_ID or
not AWS_SECRET_ACCESS_KEY or
not AWS_STORAGE_BUCKET_NAME)
):
raise ImproperlyConfigured(
'You have enabled S3 support, but are missing one of '
'AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, or '
'AWS_STORAGE_BUCKET_NAME'
)
if MICROMASTERS_USE_S3:
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
else:
# by default use django.core.files.storage.FileSystemStorage with
# overwrite feature
DEFAULT_FILE_STORAGE = 'storages.backends.overwrite.OverwriteStorage'
# Celery
USE_CELERY = True
# For the following variables, keep backward compatibility with the old
# environment variable names; the part after "or" can be removed once the
# environment variables are replaced in production.
CELERY_BROKER_URL = get_string(
"CELERY_BROKER_URL", get_string("REDISCLOUD_URL", None)
) or get_string("BROKER_URL", get_string("REDISCLOUD_URL", None))
CELERY_RESULT_BACKEND = get_string(
"CELERY_RESULT_BACKEND", get_string("REDISCLOUD_URL", None)
)
CELERY_TASK_ALWAYS_EAGER = get_bool("CELERY_TASK_ALWAYS_EAGER", False) or get_bool("CELERY_ALWAYS_EAGER", False)
CELERY_TASK_EAGER_PROPAGATES = (get_bool("CELERY_TASK_EAGER_PROPAGATES", True) or
get_bool("CELERY_EAGER_PROPAGATES_EXCEPTIONS", True))
CELERY_BEAT_SCHEDULE = {
'batch-update-user-data-every-6-hrs': {
'task': 'dashboard.tasks.batch_update_user_data',
'schedule': crontab(minute=0, hour='*/6')
},
'update-currency-exchange-rates-every-24-hrs': {
'task': 'financialaid.tasks.sync_currency_exchange_rates',
'schedule': crontab(minute=0, hour='3')
},
'authorize_exam_runs-every-1-hrs': {
'task': 'exams.tasks.authorize_exam_runs',
'schedule': crontab(minute=0, hour='*')
},
'generate-mm-course-certificates-every-1-hrs': {
'task': 'grades.tasks.generate_course_certificates_for_fa_students',
'schedule': crontab(minute=0, hour='*')
},
'discussions-sync-memberships-every-minute': {
'task': 'discussions.tasks.sync_channel_memberships',
'schedule': crontab(minute='*', hour='*')
},
'freeze-final-grades-every-24-hrs-few-times': {
'task': 'grades.tasks.find_course_runs_and_freeze_grades',
'schedule': crontab(minute='*/15', hour='16')
},
'create-combined-final-grade-every-1-hrs': {
'task': 'grades.tasks.create_combined_final_grades',
'schedule': crontab(minute=40, hour='*')
},
}
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TIMEZONE = 'UTC'
# Celery parallel rate limit for batch_update_user_data.
# This is the number of tasks per minute; each task updates data for 20 users.
BATCH_UPDATE_RATE_LIMIT = get_string('BATCH_UPDATE_RATE_LIMIT', '5/m')
# django cache back-ends
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'local-in-memory-cache',
},
'redis': {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": CELERY_BROKER_URL,
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"COMPRESSOR": "django_redis.compressors.zlib.ZlibCompressor",
},
},
}
# Elasticsearch
ELASTICSEARCH_DEFAULT_PAGE_SIZE = get_int('ELASTICSEARCH_DEFAULT_PAGE_SIZE', 50)
ELASTICSEARCH_URL = get_string("ELASTICSEARCH_URL", None)
if get_string("HEROKU_PARENT_APP_NAME", None) is not None:
ELASTICSEARCH_INDEX = get_string('HEROKU_APP_NAME', None)
else:
ELASTICSEARCH_INDEX = get_string('ELASTICSEARCH_INDEX', None)
if not ELASTICSEARCH_INDEX:
raise ImproperlyConfigured("Missing ELASTICSEARCH_INDEX")
ELASTICSEARCH_HTTP_AUTH = get_string("ELASTICSEARCH_HTTP_AUTH", None)
ELASTICSEARCH_SHARD_COUNT = get_int('ELASTICSEARCH_SHARD_COUNT', 5)
# django-role-permissions
ROLEPERMISSIONS_MODULE = 'roles.roles'
# edx
EDX_BATCH_UPDATES_ENABLED = get_bool("EDX_BATCH_UPDATES_ENABLED", True)
# Cybersource
CYBERSOURCE_ACCESS_KEY = get_string("CYBERSOURCE_ACCESS_KEY", None)
CYBERSOURCE_SECURITY_KEY = get_string("CYBERSOURCE_SECURITY_KEY", None)
CYBERSOURCE_SECURE_ACCEPTANCE_URL = get_string("CYBERSOURCE_SECURE_ACCEPTANCE_URL", None)
CYBERSOURCE_PROFILE_ID = get_string("CYBERSOURCE_PROFILE_ID", None)
CYBERSOURCE_REFERENCE_PREFIX = get_string("CYBERSOURCE_REFERENCE_PREFIX", None)
# Open Exchange Rates
OPEN_EXCHANGE_RATES_URL = get_string("OPEN_EXCHANGE_RATES_URL", "https://openexchangerates.org/api/")
OPEN_EXCHANGE_RATES_APP_ID = get_string("OPEN_EXCHANGE_RATES_APP_ID", "")
# Open Discussions
OPEN_DISCUSSIONS_API_USERNAME = get_string('OPEN_DISCUSSIONS_API_USERNAME', None)
OPEN_DISCUSSIONS_BASE_URL = get_string('OPEN_DISCUSSIONS_BASE_URL', None)
OPEN_DISCUSSIONS_COOKIE_DOMAIN = get_string('OPEN_DISCUSSIONS_COOKIE_DOMAIN', None)
OPEN_DISCUSSIONS_JWT_EXPIRES_DELTA = get_int('OPEN_DISCUSSIONS_JWT_EXPIRES_DELTA', 60*60)
OPEN_DISCUSSIONS_COOKIE_NAME = get_string('OPEN_DISCUSSIONS_COOKIE_NAME', None)
OPEN_DISCUSSIONS_JWT_SECRET = get_string('OPEN_DISCUSSIONS_JWT_SECRET', None)
OPEN_DISCUSSIONS_REDIRECT_URL = get_string('OPEN_DISCUSSIONS_REDIRECT_URL', None)
OPEN_DISCUSSIONS_REDIRECT_COMPLETE_URL = get_string('OPEN_DISCUSSIONS_REDIRECT_COMPLETE_URL', '/')
OPEN_DISCUSSIONS_SITE_KEY = get_string('OPEN_DISCUSSIONS_SITE_KEY', None)
if not OPEN_DISCUSSIONS_SITE_KEY:
raise ImproperlyConfigured("OPEN_DISCUSSIONS_SITE_KEY must be specified")
# feature flags
def get_all_config_keys():
"""Returns all the configuration keys from both environment and configuration files"""
return list(os.environ.keys())
MM_FEATURES_PREFIX = get_string('MM_FEATURES_PREFIX', 'FEATURE_')
FEATURES = {
key[len(MM_FEATURES_PREFIX):]: get_any(key, None) for key
in get_all_config_keys() if key.startswith(MM_FEATURES_PREFIX)
}
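# Illustrative example (hypothetical flag name): with the default prefix,
# exporting FEATURE_EXAMS=True in the environment yields
# FEATURES == {"EXAMS": True}, which application code can then check via
# settings.FEATURES.get("EXAMS").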
MIDDLEWARE_FEATURE_FLAG_QS_PREFIX = get_string("MIDDLEWARE_FEATURE_FLAG_QS_PREFIX", None)
MIDDLEWARE_FEATURE_FLAG_COOKIE_NAME = get_string('MIDDLEWARE_FEATURE_FLAG_COOKIE_NAME', 'MM_FEATURE_FLAGS')
MIDDLEWARE_FEATURE_FLAG_COOKIE_MAX_AGE_SECONDS = get_int('MIDDLEWARE_FEATURE_FLAG_COOKIE_MAX_AGE_SECONDS', 60 * 60)
if MIDDLEWARE_FEATURE_FLAG_QS_PREFIX:
MIDDLEWARE = MIDDLEWARE + (
'ui.middleware.QueryStringFeatureFlagMiddleware',
'ui.middleware.CookieFeatureFlagMiddleware',
)
# django debug toolbar only in debug mode
if DEBUG:
INSTALLED_APPS += ('debug_toolbar', )
# it needs to be enabled before other middlewares
MIDDLEWARE = (
'debug_toolbar.middleware.DebugToolbarMiddleware',
) + MIDDLEWARE
def show_toolbar(request):
"""
Custom function needed because of bug in wagtail.
Theoretically this bug has been fixed in django 1.10 and wagtail 1.6.3
so if we upgrade we should be able to change this function to just
return True.
"""
request.META["wsgi.multithread"] = True
request.META["wsgi.multiprocess"] = True
excluded_urls = ['/pages/preview/', '/pages/preview_loading/', '/edit/preview/']
excluded = any(request.path.endswith(url) for url in excluded_urls)
return not excluded
DEBUG_TOOLBAR_CONFIG = {"SHOW_TOOLBAR_CALLBACK": show_toolbar, }
# Travis
IS_CI_ENV = get_bool('CI', False)
HUBSPOT_CONFIG = {
"HUBSPOT_ORGANIZATIONS_FORM_GUID": get_string(
name="HUBSPOT_ORGANIZATIONS_FORM_GUID",
default="1b63db1a-eb3a-45d6-82f1-c4b8f01835dc",
),
"HUBSPOT_PORTAL_ID": get_string(
name="HUBSPOT_PORTAL_ID", default="8677455"
),
}
| bsd-3-clause | -1,614,179,403,656,989,200 | 33.208861 | 115 | 0.673312 | false |
flavour/tldrmp | modules/s3db/survey.py | 1 | 135539 | # -*- coding: utf-8 -*-
""" Sahana Eden Survey Tool
@copyright: 2011-2013 (c) Sahana Software Foundation
@license: MIT
ADAT - Assessment Data Analysis Tool
For more details see the blueprint at:
http://eden.sahanafoundation.org/wiki/BluePrint/SurveyTool/ADAT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3SurveyTemplateModel",
"S3SurveyQuestionModel",
"S3SurveyFormatterModel",
"S3SurveySeriesModel",
"S3SurveyCompleteModel",
"S3SurveyTranslateModel",
"survey_template_represent",
"survey_series_represent",
"survey_answer_list_represent",
"survey_template_rheader",
"survey_series_rheader",
"survey_getAllSectionsForTemplate",
"survey_getAllQuestionsForTemplate",
"survey_buildQuestionnaireFromTemplate",
"survey_buildQuestionnaireFromSeries",
"survey_getTemplateFromSeries",
"survey_getAllTemplates",
"survey_getAllWidgetsForTemplate",
"survey_getWidgetFromQuestion",
"survey_getAllSectionsForSeries",
"survey_getAllSectionsForTemplate",
"survey_getQuestionFromCode",
"survey_getAllQuestionsForTemplate",
"survey_getAllQuestionsForSeries",
"survey_getAllQuestionsForComplete",
"survey_save_answers_for_series",
"survey_updateMetaData",
"survey_getAllAnswersForQuestionInSeries",
"survey_getQstnLayoutRules",
"survey_getSeries",
"survey_getSeriesName",
"survey_getAllSeries",
"survey_getAllTranslationsForTemplate",
"survey_getAllTranslationsForSeries",
"survey_build_template_summary",
"survey_serieslist_dataTable_post",
"survey_answerlist_dataTable_pre",
"survey_answerlist_dataTable_post",
"survey_json2py",
"survey_json2list",
]
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
from gluon.dal import Row
from gluon.storage import Storage
from ..s3 import *
from s3chart import S3Chart
from s3survey import survey_question_type, \
survey_analysis_type, \
_debug
# =============================================================================
def json2py(jsonstr):
"""
        Utility function to convert a JSON string into a Python structure
"""
from xml.sax.saxutils import unescape
if not isinstance(jsonstr, str):
return jsonstr
try:
jsonstr = unescape(jsonstr, {"u'": '"'})
jsonstr = unescape(jsonstr, {"'": '"'})
pythonStructure = json.loads(jsonstr)
except:
_debug("ERROR: attempting to convert %s using modules/s3db/survey/json2py.py" % (jsonstr))
return jsonstr
else:
return pythonStructure
survey_json2py = json2py
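# Illustrative behaviour (values are examples only):
#   json2py('{"name": "value"}') -> {"name": "value"}
#   json2py("[u'a', u'b']")      -> ["a", "b"]   (repr-style quoting is unescaped first)
#   json2py("not json")          -> "not json"   (returned unchanged on failure)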
# =============================================================================
def json2list(jsonstr):
"""
        Used to convert a JSON string into a Python list.
"""
if jsonstr == "":
valueList = []
else:
if jsonstr[0] == "[":
valueList = json2py(jsonstr)
else:
valueList = jsonstr.split(",")
if not isinstance(valueList, list):
valueList = [valueList]
return valueList
survey_json2list = json2list
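# Illustrative behaviour (values are examples only):
#   json2list("")        -> []
#   json2list("a,b,c")   -> ["a", "b", "c"]
#   json2list("[u'a']")  -> ["a"]   (JSON-style input is delegated to json2py)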
# =============================================================================
class S3SurveyTemplateModel(S3Model):
"""
Template model
The template model is a container for the question model
"""
names = ["survey_template",
"survey_template_id",
"survey_section",
"survey_template_status",
]
def model(self):
T = current.T
db = current.db
template_status = {
1: T("Pending"),
2: T("Active"),
3: T("Closed"),
4: T("Master")
}
add_component = self.add_component
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# survey_template
#
# The template is the root table and acts as a container for
# the questions that will be used in a survey.
tablename = "survey_template"
table = define_table(tablename,
Field("name", "string", length=120,
notnull=True, unique=True,
label = T("Template Name"),
default="",
),
Field("description", "text", length=500,
label = T("Description"),
default=""),
Field("status", "integer",
label = T("Status"),
requires = IS_IN_SET(template_status,
zero=None),
default=1,
represent = lambda index: \
template_status[index],
readable=True,
writable=False),
# Standard questions which may belong to all template
# competion_qstn: who completed the assessment
Field("competion_qstn", "string", length=200,
label = T("Completion Question"),
),
# date_qstn: when it was completed (date)
Field("date_qstn", "string", length=200,
label = T("Date Question"),
),
# time_qstn: when it was completed (time)
Field("time_qstn", "string", length=200,
label = T("Time Question"),
),
# location_detail: json of the location question
# May consist of any of the following:
# L0, L1, L2, L3, L4, Lat, Lon
Field("location_detail", "string", length=200,
label = T("Location Detail"),
),
# The priority question is the default question used
# to determine the priority of each point on the map.
# The data is stored as the question code.
Field("priority_qstn", "string", length=16,
label = T("Default map question"),
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
title_create = T("Add Assessment Template"),
title_display = T("Assessment Template Details"),
title_list = T("Assessment Templates"),
title_analysis_summary = T("Template Summary"),
title_update = T("Edit Assessment Template"),
title_question_details = T("Details of each question in the Template"),
subtitle_create = T("Add a new Assessment Template"),
subtitle_analysis_summary = T("Summary by Question Type - (The fewer text questions the better the analysis can be)"),
label_list_button = T("List Assessment Templates"),
label_create_button = T("Add a new Assessment Template"),
label_delete_button = T("Delete this Assessment Template"),
msg_record_created = T("Assessment Template added"),
msg_record_modified = T("Assessment Template updated"),
msg_record_deleted = T("Assessment Template deleted"),
msg_list_empty = T("No Assessment Templates"))
template_id = S3ReusableField("template_id", table,
sortby="name",
label=T("Template"),
requires = IS_ONE_OF(db,
"survey_template.id",
self.survey_template_represent,
),
represent = self.survey_template_represent,
ondelete = "CASCADE")
# Components
add_component("survey_series", survey_template="template_id")
add_component("survey_translate", survey_template = "template_id")
configure(tablename,
onvalidation = self.template_onvalidate,
onaccept = self.template_onaccept,
deduplicate = self.survey_template_duplicate,
)
# ---------------------------------------------------------------------
# survey_sections
#
        # The questions can be grouped into sections; this provides
# the description of the section and
# the position of the section within the template
tablename = "survey_section"
table = define_table(tablename,
Field("name", "string", length=120,
notnull=True,
default="",
),
Field("description", "text", length=500,
default="",
),
Field("posn", "integer",
),
Field("cloned_section_id", "integer",
readable=False,
writable=False,
),
template_id(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
title_create = T("Add Template Section"),
title_display = T("Template Section Details"),
title_list = T("Template Sections"),
title_update = T("Edit Template Section"),
subtitle_create = T("Add a new Template Section"),
label_list_button = T("List Template Sections"),
label_create_button = T("Add a new Template Section"),
label_delete_button = T("Delete this Template Section"),
msg_record_created = T("Template Section added"),
msg_record_modified = T("Template Section updated"),
msg_record_deleted = T("Template Section deleted"),
msg_list_empty = T("No Template Sections"))
configure(tablename, orderby = tablename+".posn",
deduplicate=self.survey_section_duplicate
)
# Pass names back to global scope (s3.*)
return Storage(
survey_template_id = template_id,
survey_template_status = template_status,
)
# -------------------------------------------------------------------------
@staticmethod
def template_onvalidate(form):
"""
It is not valid to re-import a template that already has a
status of Active or higher
"""
template_id = form.vars.id
table = current.s3db.survey_template
row = current.db(table.id == template_id).select(table.status,
limitby=(0, 1)
).first()
if row is not None and row.status > 1:
return False
return True
# -------------------------------------------------------------------------
@staticmethod
def addQuestion(template_id, name, code, notes, type, posn, metadata={}):
"""
"""
db = current.db
s3db = current.s3db
# Add the question to the database if it's not already there
qstntable = s3db.survey_question
query = (qstntable.name == name) & \
(qstntable.code == code)
record = db(query).select(qstntable.id, limitby=(0, 1)).first()
if record:
qstn_id = record.id
else:
qstn_id = qstntable.insert(name = name,
code = code,
notes = notes,
type = type
)
qstn_metadata_table = s3db.survey_question_metadata
for (descriptor, value) in metadata.items():
qstn_metadata_table.insert(question_id = qstn_id,
descriptor = descriptor,
value = value
)
# Add these questions to the section: "Background Information"
sectable = s3db.survey_section
section_name = "Background Information"
query = (sectable.name == section_name) & \
(sectable.template_id == template_id)
record = db(query).select(sectable.id, limitby=(0, 1)).first()
if record:
section_id = record.id
else:
section_id = sectable.insert(name = section_name,
template_id = template_id,
posn = 0 # special section with no position
)
# Add the question to the list of questions in the template
qstn_list_table = s3db.survey_question_list
query = (qstn_list_table.question_id == qstn_id) & \
(qstn_list_table.template_id == template_id)
record = db(query).select(qstntable.id, limitby=(0, 1)).first()
if not record:
qstn_list_table.insert(question_id = qstn_id,
template_id = template_id,
section_id = section_id,
posn = posn
)
# -------------------------------------------------------------------------
@staticmethod
def template_onaccept(form):
"""
All of the standard questions will now be generated
competion_qstn: who completed the assessment
date_qstn: when it was completed (date)
time_qstn: when it was completed (time)
location_detail: json of the location question
May consist of any of the following:
L0, L1, L2, L3, L4, Lat, Lon
for json entry a question will be generated
The code for each question will start with "STD-" followed by
the type of question.
"""
vars = form.vars
if vars.id:
template_id = vars.id
else:
return
        addQuestion = S3SurveyTemplateModel.addQuestion
        # Negative positions force these standard questions to appear first.
        # posn is initialised here (rather than inside the first block) so
        # the blocks below still work when the completion question is absent.
        posn = -10
        if vars.competion_qstn != None:
            name = vars.competion_qstn
            code = "STD-WHO"
            notes = "Who completed the assessment"
            type = "String"
            addQuestion(template_id, name, code, notes, type, posn)
if vars.date_qstn != None:
name = vars.date_qstn
code = "STD-DATE"
notes = "Date the assessment was completed"
type = "Date"
posn += 1
addQuestion(template_id, name, code, notes, type, posn)
if vars.time_qstn != None:
name = vars.time_qstn
code = "STD-TIME"
notes = "Time the assessment was completed"
type = "Time"
posn += 1
addQuestion(template_id, name, code, notes, type, posn)
if vars.location_detail != None:
locationList = json2py(vars.location_detail)
if len(locationList) > 0:
name = "The location P-code"
code = "STD-P-Code"
type = "String"
posn += 1
addQuestion(template_id, name, code, None, type, posn)
for loc in locationList:
if loc == "Lat":
name = "Latitude"
elif loc == "Lon":
name = "Longitude"
else:
name = loc
code = "STD-%s" % loc
if loc == "Lat" or loc == "Lon":
type = "Numeric"
metadata = {"Format": "nnn.nnnnnn"}
else:
type = "Location"
metadata = {}
posn += 1
addQuestion(template_id, name, code, "", type, posn, metadata)
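    # Illustrative outcome (a sketch with made-up field values): saving a
    # template whose location_detail is '["L0", "Lat", "Lon"]' generates the
    # standard questions STD-P-Code (String), STD-L0 (Location) and
    # STD-Lat / STD-Lon (Numeric, format "nnn.nnnnnn"), all attached to the
    # "Background Information" section.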
# -------------------------------------------------------------------------
@staticmethod
def survey_template_duplicate(job):
"""
Rules for finding a duplicate:
- Look for a record with a similar name, ignoring case
"""
if job.tablename == "survey_template":
table = job.table
data = job.data
name = "name" in data and data.name
query = table.name.lower().like('%%%s%%' % name.lower())
return duplicator(job, query)
# -------------------------------------------------------------------------
@staticmethod
def survey_section_duplicate(job):
"""
Rules for finding a duplicate:
- Look for a record with the same name
- the same template
- and the same position within the template
           - however, if there is a record with a position of zero then that record should be updated
"""
if job.tablename == "survey_section":
table = job.table
data = job.data
name = "name" in data and data.name
template = "template_id" in data and data.template_id
query = (table.name == name) & \
(table.template_id == template)
return duplicator(job, query)
# =============================================================================
def survey_template_represent(id, row=None):
"""
Display the template name rather than the id
"""
if row:
return row.name
elif not id:
return current.messages["NONE"]
table = current.s3db.survey_template
query = (table.id == id)
record = current.db(query).select(table.name,
limitby=(0, 1)).first()
try:
return record.name
except:
return current.messages.UNKNOWN_OPT
# =============================================================================
def survey_template_rheader(r, tabs=[]):
"""
The template rheader
"""
if r.representation == "html":
tablename, record = s3_rheader_resource(r)
if tablename == "survey_template" and record:
T = current.T
s3db = current.s3db
# Tabs
tabs = [(T("Basic Details"), "read"),
(T("Question Details"),"templateRead/"),
(T("Question Summary"),"templateSummary/"),
#(T("Sections"), "section"),
]
if current.auth.s3_has_permission("create", "survey_translate"):
tabs.append((T("Translate"),"translate"))
rheader_tabs = s3_rheader_tabs(r, tabs)
sectionTable = s3db.survey_section
qlistTable = s3db.survey_question_list
viewing = current.request.get_vars.get("viewing", None)
if viewing:
dummy, template_id = viewing.split(".")
else:
template_id = r.id
query = (qlistTable.template_id == template_id) & \
(qlistTable.section_id == sectionTable.id)
rows = current.db(query).select(sectionTable.id,
sectionTable.name,
orderby = qlistTable.posn)
tsection = TABLE(_class="survey-section-list")
lblSection = SPAN(T("Sections that are part of this template"),
_style="font-weight:bold;")
            if len(rows) == 0:
rsection = SPAN(T("As of yet, no sections have been added to this template."))
else:
rsection = TR()
count = 0
lastSection = ""
for section in rows:
if section.name == lastSection:
continue
rsection.append(TD(section.name))
# Comment out the following until templates can be built online
#rsection.append(TD(A(section.name,
# _href=URL(c="survey",
# f="section",
# args="%s" % section.id))))
lastSection = section.name
count += 1
if count % 4 == 0:
tsection.append(rsection)
rsection=TR()
tsection.append(rsection)
rheader = DIV(TABLE(
TR(
TH("%s: " % T("Name")),
record.name,
TH("%s: " % T("Status")),
s3db.survey_template_status[record.status],
),
),
lblSection,
tsection,
rheader_tabs)
return rheader
return None
# =============================================================================
def survey_getTemplateFromSeries(series_id):
"""
Return the template data from the series_id passed in
@ToDo: Remove wrapper
"""
stable = current.s3db.survey_series
ttable = current.s3db.survey_template
query = (stable.id == series_id) & \
(ttable.id == stable.template_id)
row = current.db(query).select(ttable.ALL,
limitby=(0, 1)).first()
return row
# =============================================================================
def survey_getAllTemplates():
"""
Function to return all the templates on the database
@ToDo: Remove wrapper
"""
table = current.s3db.survey_template
rows = current.db(table).select()
return rows
# =============================================================================
def survey_getAllWidgetsForTemplate(template_id):
"""
Function to return the widgets for each question for the given
template. The widgets are returned in a dict with the key being
the question code.
"""
s3db = current.s3db
q_ltable = s3db.survey_question_list
qsntable = s3db.survey_question
query = (q_ltable.template_id == template_id) & \
(q_ltable.question_id == qsntable.id)
rows = current.db(query).select(qsntable.id,
qsntable.code,
qsntable.type,
q_ltable.posn,
)
widgets = {}
for row in rows:
sqrow = row.survey_question
qstnType = sqrow.type
qstn_id = sqrow.id
qstn_code = sqrow.code
qstn_posn = row.survey_question_list.posn
widgetObj = survey_question_type[qstnType](qstn_id)
widgets[qstn_code] = widgetObj
widgetObj.question["posn"] = qstn_posn
return widgets
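# Illustrative return value (codes and widget class names are examples): a
# dict keyed by question code, e.g.
#   {"STD-WHO": <StringWidget>, "STD-DATE": <DateWidget>}
# where each widget also carries its position via widgetObj.question["posn"].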
# =============================================================================
def survey_getAllSectionsForSeries(series_id):
"""
        Function to return the list of sections for the given series.
The sections are returned in the order of their position in the
template.
The data on each section is held in a dict and is as follows:
section_id, name, template_id, and posn
"""
row = survey_getSeries(series_id)
template_id = row.template_id
return survey_getAllSectionsForTemplate(template_id)
# =============================================================================
def survey_buildQuestionnaireFromTemplate(template_id):
"""
Build a form displaying all the questions for a given template_id
@ToDo: Remove wrapper
"""
questions = survey_getAllQuestionsForTemplate(template_id)
return buildQuestionsForm(questions, readOnly=True)
# =============================================================================
def survey_getAllSectionsForTemplate(template_id):
"""
        Function to return the list of sections for the given template.
The sections are returned in the order of their position in the
template.
The data on each section is held in a dict and is as follows:
section_id, name, template_id, and posn
"""
sectable = current.s3db.survey_section
query = (sectable.template_id == template_id)
rows = current.db(query).select(sectable.id,
sectable.name,
sectable.template_id,
sectable.posn,
orderby = sectable.posn)
sections = []
for sec in rows:
sections.append({"section_id": sec.id,
"name" : sec.name,
"template_id": sec.template_id,
"posn" : sec.posn
}
)
return sections
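# Illustrative return value (field values are examples):
#   [{"section_id": 3, "name": "Background Information",
#     "template_id": 1, "posn": 0}, ...]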
# =============================================================================
def survey_getWidgetFromQuestion(question_id):
"""
Function that gets the right widget for the question
"""
qtable = current.s3db.survey_question
query = (qtable.id == question_id)
question = current.db(query).select(qtable.type,
limitby=(0, 1)).first()
qstnType = question.type
widgetObj = survey_question_type[qstnType](question_id)
return widgetObj
# =============================================================================
def buildQuestionsForm(questions, complete_id=None, readOnly=False):
"""
Create the form, hard-coded table layout :(
"""
form = FORM()
table = None
sectionTitle = ""
for question in questions:
if sectionTitle != question["section"]:
if sectionTitle != "":
form.append(P())
form.append(HR(_width="90%"))
form.append(P())
div = DIV(_class="survey_scrollable")
table = TABLE()
div.append(table)
form.append(div)
table.append(TR(TH(question["section"],
_colspan="2"),
_class="survey_section"))
sectionTitle = question["section"]
widgetObj = survey_getWidgetFromQuestion(question["qstn_id"])
if readOnly:
table.append(TR(TD(question["code"]),
TD(widgetObj.type_represent()),
TD(question["name"])
)
)
else:
if complete_id != None:
widgetObj.loadAnswer(complete_id, question["qstn_id"])
widget = widgetObj.display(question_id = question["qstn_id"])
if widget != None:
if isinstance(widget, TABLE):
table.append(TR(TD(widget, _colspan=2)))
else:
table.append(widget)
if not readOnly:
button = INPUT(_type="submit", _name="Save", _value=current.T("Save"))
form.append(button)
return form
# =============================================================================
def survey_build_template_summary(template_id):
"""
"""
from s3.s3data import S3DataTable
T = current.T
table = TABLE(_id="template_summary",
_class="dataTable display")
hr = TR(TH(T("Position")), TH(T("Section")))
qstnTypeList = {}
posn = 1
for (key, type) in survey_question_type.items():
if key == "Grid" or key == "GridChild":
continue
hr.append(TH(type().type_represent()))
qstnTypeList[key] = posn
posn += 1
hr.append(TH(T("Total")))
header = THEAD(hr)
numOfQstnTypes = len(survey_question_type) - 1 # exclude the grid questions
questions = survey_getAllQuestionsForTemplate(template_id)
sectionTitle = ""
line = []
body = TBODY()
section = 0
total = ["", T("Total")] + [0]*numOfQstnTypes
for question in questions:
if sectionTitle != question["section"]:
if line != []:
br = TR()
for cell in line:
br.append(cell)
body.append(br)
section += 1
sectionTitle = question["section"]
line = [section, sectionTitle] + [0]*numOfQstnTypes
if question["type"] == "Grid":
continue
if question["type"] == "GridChild":
# get the real grid question type
widgetObj = survey_getWidgetFromQuestion(question["qstn_id"])
question["type"] = widgetObj.typeDescription
line[qstnTypeList[question["type"]]+1] += 1
line[numOfQstnTypes+1] += 1
total[qstnTypeList[question["type"]]+1] += 1
total[numOfQstnTypes+1] += 1
# Add the trailing row
br = TR()
for cell in line:
br.append(cell)
body.append(br)
# Add the footer to the table
foot = TFOOT()
tr = TR()
for cell in total:
tr.append(TD(B(cell))) # don't use TH() otherwise dataTables will fail
foot.append(tr)
table.append(header)
table.append(body)
table.append(foot)
# Turn off server side pagination
s3 = current.response.s3
s3.no_sspag = True
s3.no_formats = True
s3.dataTableID = None
attr = S3DataTable.getConfigData()
form = S3DataTable.htmlConfig(table,
"template_summary",
[[0, 'asc']], # order by
"", # the filter string
None, # the rfields
dt_action_col = -1,
**attr
)
return form
# =============================================================================
class S3SurveyQuestionModel(S3Model):
"""
Question Model
"""
names = ["survey_question",
"survey_question_metadata",
"survey_question_list",
"survey_qstn_name_represent"
]
def model(self):
T = current.T
s3 = current.response.s3
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# survey_question
# Defines a question that will appear within a section, and thus belong
# to the template.
#
# This holds the actual question and
# A string code (unique within the template) is used to identify the question.
#
# It will have a type from the questionType dictionary.
# This type will determine the options that can be associated with it.
# A question can belong to many different sections.
# The notes are to help the enumerator and will typically appear as a
# footnote in the printed form.
tablename = "survey_question"
table = define_table(tablename,
Field("name", "string", length=200,
notnull=True,
represent = self.qstn_name_represent,
),
Field("code", "string", length=16,
notnull=True,
),
Field("notes", "string", length=400
),
Field("type", "string", length=40,
notnull=True,
),
Field("metadata", "text",
),
*s3_meta_fields()
)
# CRUD Strings
crud_strings[tablename] = Storage(
title_create = T("Add an Assessment Question"),
title_display = T("Assessment Question Details"),
title_list = T("Assessment Questions"),
title_update = T("Edit Assessment Question"),
subtitle_create = T("Add a new Assessment Question"),
label_list_button = T("List Assessment Questions"),
label_create_button = T("Add a new Assessment Question"),
label_delete_button = T("Delete this Assessment Question"),
msg_record_created = T("Assessment Question added"),
msg_record_modified = T("Assessment Question updated"),
msg_record_deleted = T("Assessment Question deleted"),
msg_list_empty = T("No Assessment Questions"))
configure(tablename,
onvalidation = self.question_onvalidate,
onaccept = self.question_onaccept,
deduplicate = self.survey_question_duplicate,
)
# ---------------------------------------------------------------------
# survey_question_metadata
# referenced by
# the survey_question table and is used to manage
# the metadata that will be associated with a question type.
# For example: if the question type is option, then valid metadata
# might be:
# count: the number of options that will be presented: 3
# 1 : the first option : Female
# 2 : the second option : Male
# 3 : the third option : Not Specified
# So in the above case a question record will be associated with four
# question_metadata records.
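        # For example, the metadata for the Option question above might be
        # held as the following JSON string (illustrative only):
        #
        #     {"count": "3", "1": "Female", "2": "Male", "3": "Not Specified"}
        #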
tablename = "survey_question_metadata"
table = define_table(tablename,
Field("question_id",
"reference survey_question",
readable=False,
writable=False
),
Field("descriptor",
"string",
length=20,
notnull=True,
),
Field("value",
"text",
notnull=True,
),
*s3_meta_fields()
)
# CRUD Strings
crud_strings[tablename] = Storage(
title_create = T("Add Question Meta-Data"),
title_display = T("Question Meta-Data Details"),
title_list = T("Question Meta-Data"),
title_update = T("Edit Question Meta-Data"),
subtitle_create = T("Add new Question Meta-Data"),
label_list_button = T("List Question Meta-Data"),
label_create_button = T("Add new Question Meta-Data"),
label_delete_button = T("Delete this Question Meta-Data"),
msg_record_created = T("Question Meta-Data added"),
msg_record_modified = T("Question Meta-Data updated"),
msg_record_deleted = T("Question Meta-Data deleted"),
msg_list_empty = T("No Question Meta-Data"),
title_upload = T("Upload a Question List import file")
)
configure(tablename,
deduplicate = self.survey_question_metadata_duplicate
)
# -------------------------------------------------------------------------
# The survey_question_list table is a resolver between
# the survey_question and the survey_section tables.
#
# Along with ids mapping back to these tables
# it will have a code that can be used to reference the question
# it will have the position that the question will appear in the template
tablename = "survey_question_list"
table = define_table(tablename,
Field("posn",
"integer",
notnull=True,
),
self.survey_template_id(),
Field("question_id",
"reference survey_question",
readable=False,
writable=False
),
Field("section_id",
"reference survey_section",
readable=False,
writable=False
),
*s3_meta_fields()
)
# CRUD Strings
crud_strings[tablename] = Storage(
title_upload = T("Upload an Assessment Template import file")
)
configure(tablename,
onaccept = self.question_list_onaccept,
deduplicate = self.survey_question_list_duplicate,
)
# Pass names back to global scope (s3.*)
# ---------------------------------------------------------------------
return Storage(
survey_qstn_name_represent = self.qstn_name_represent
)
# -------------------------------------------------------------------------
@staticmethod
def qstn_name_represent(value):
"""
            Return the question name; for locations in the GIS hierarchy
            the localised name will be returned
"""
if value == "L0" or value == "L1" or \
value == "L2" or value == "L3" or value == "L4":
return current.gis.get_location_hierarchy(value)
else:
return value
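        # For example (the exact label depends on the configured GIS
        # hierarchy, so the first value is illustrative only):
        #
        #     qstn_name_represent("L0")     # -> e.g. "Country"
        #     qstn_name_represent("Damage") # -> "Damage" (returned unchanged)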
# -------------------------------------------------------------------------
@staticmethod
def question_onvalidate(form):
"""
            Imported metadata text may be held in single quotes rather
            than the double quotes required by JSON, so these need to be
            converted to make the metadata valid JSON
        """
        from xml.sax.saxutils import unescape
        metadata = form.vars.metadata
        if metadata != None:
            # Store the converted (now valid JSON) metadata back on the form
            form.vars.metadata = unescape(metadata, {"'":'"'})
return True
# -------------------------------------------------------------------------
@staticmethod
def question_onaccept(form):
"""
            All of the question metadata will be stored in the metadata
            field in JSON format.
            Each (descriptor, value) pair will then be inserted into the
            survey_question_metadata table as a record on that table.
"""
vars = form.vars
if vars.metadata is None:
return
if vars.id:
record = current.s3db.survey_question[vars.id]
else:
return
if vars.metadata and \
vars.metadata != "":
survey_updateMetaData(record,
vars.type,
vars.metadata
)
# -------------------------------------------------------------------------
@staticmethod
def survey_question_duplicate(job):
"""
Rules for finding a duplicate:
- Look for the question code
"""
if job.tablename == "survey_question":
table = job.table
data = job.data
code = "code" in data and data.code
query = (table.code == code)
return duplicator(job, query)
# -------------------------------------------------------------------------
@staticmethod
def survey_question_metadata_duplicate(job):
"""
Rules for finding a duplicate:
- Look for the question_id and descriptor
"""
if job.tablename == "survey_question_metadata":
table = job.table
data = job.data
question = "question_id" in data and data.question_id
descriptor = "descriptor" in data and data.descriptor
query = (table.descriptor == descriptor) & \
(table.question_id == question)
return duplicator(job, query)
# -------------------------------------------------------------------------
@staticmethod
def question_list_onaccept(form):
"""
            If a grid question is added to the list then all of the
grid children will need to be added as well
"""
qstntable = current.s3db.survey_question
try:
vars = form.vars
question_id = vars.question_id
template_id = vars.template_id
section_id = vars.section_id
posn = vars.posn
except:
return
record = qstntable[question_id]
try:
type = record.type
except:
_debug("survey question missing type: %s" % record)
return
if type == "Grid":
widgetObj = survey_question_type["Grid"]()
widgetObj.insertChildrenToList(question_id,
template_id,
section_id,
posn,
)
if type == "Location":
widgetObj = survey_question_type["Location"]()
widgetObj.insertChildrenToList(question_id,
template_id,
section_id,
posn,
)
# -------------------------------------------------------------------------
@staticmethod
def survey_question_list_duplicate(job):
"""
Rules for finding a duplicate:
- The template_id, question_id and section_id are the same
"""
if job.tablename == "survey_question_list":
table = job.table
data = job.data
tid = "template_id" in data and data.template_id
qid = "question_id" in data and data.question_id
sid = "section_id" in data and data.section_id
query = (table.template_id == tid) & \
(table.question_id == qid) & \
(table.section_id == sid)
return duplicator(job, query)
# =============================================================================
def survey_getQuestionFromCode(code, series_id=None):
"""
Function to return the question for the given series
with the code that matches the one passed in
"""
s3db = current.s3db
sertable = s3db.survey_series
q_ltable = s3db.survey_question_list
qsntable = s3db.survey_question
if series_id != None:
query = (sertable.id == series_id) & \
(q_ltable.template_id == sertable.template_id) & \
(q_ltable.question_id == qsntable.id) & \
(qsntable.code == code)
else:
query = (q_ltable.template_id == sertable.template_id) & \
(q_ltable.question_id == qsntable.id) & \
(qsntable.code == code)
record = current.db(query).select(qsntable.id,
qsntable.code,
qsntable.name,
qsntable.type,
q_ltable.posn,
limitby=(0, 1)).first()
question = {}
if record != None:
sq = record.survey_question
question["qstn_id"] = sq.id
question["code"] = sq.code
question["name"] = sq.name
question["type"] = sq.type
question["posn"] = record.survey_question_list.posn
return question
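# When a match is found the returned dict has this shape (illustrative
# values only):
#
#     {"qstn_id": 42, "code": "STD-WHO", "name": "Name of assessor",
#      "type": "String", "posn": 1}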
# =============================================================================
def survey_getAllQuestionsForTemplate(template_id):
"""
Function to return the list of questions for the given template
The questions are returned in the order of their position in the
template.
The data on a question that it returns is as follows:
qstn_id, code, name, type, posn, section
"""
s3db = current.s3db
sectable = s3db.survey_section
q_ltable = s3db.survey_question_list
qsntable = s3db.survey_question
query = (q_ltable.template_id == template_id) & \
(q_ltable.section_id == sectable.id) & \
(q_ltable.question_id == qsntable.id)
rows = current.db(query).select(qsntable.id,
qsntable.code,
qsntable.name,
qsntable.type,
sectable.name,
q_ltable.posn,
orderby=(q_ltable.posn))
questions = []
for row in rows:
question = {}
sq = row.survey_question
question["qstn_id"] = sq.id
question["code"] = sq.code
question["name"] = s3db.survey_qstn_name_represent(sq.name)
question["type"] = sq.type
question["posn"] = row.survey_question_list.posn
question["section"] = row.survey_section.name
questions.append(question)
return questions
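# A minimal usage sketch (template_id is a hypothetical value), using the
# keys documented above:
#
#     for qstn in survey_getAllQuestionsForTemplate(template_id):
#         row = [qstn["section"], qstn["posn"], qstn["code"], qstn["type"]]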
# =============================================================================
def survey_getAllQuestionsForSeries(series_id):
"""
Function to return the list of questions for the given series
        The questions are returned in the order of their position in the
        template.
        The data returned for each question is as follows:
qstn_id, code, name, type, posn, section
"""
table = current.s3db.survey_series
row = current.db(table.id == series_id).select(table.template_id,
limitby=(0, 1)).first()
template_id = row.template_id
questions = survey_getAllQuestionsForTemplate(template_id)
return questions
# =============================================================================
def survey_getAllQuestionsForComplete(complete_id):
"""
Function to return a tuple of the list of questions and series_id
        for the given complete_id
        The questions are returned in the order of their position in the
        template.
        The data returned for each question is as follows:
qstn_id, code, name, type, posn, section
"""
table = current.s3db.survey_complete
row = current.db(table.id == complete_id).select(table.series_id,
limitby=(0, 1)).first()
series_id = row.series_id
questions = survey_getAllQuestionsForSeries(series_id)
return (questions, series_id)
# =============================================================================
def survey_get_series_questions_of_type(questionList, type):
"""
"""
if isinstance(type, (list, tuple)):
types = type
else:
        # Wrap in a tuple so that the membership tests below are exact matches
        types = (type,)
questions = []
for question in questionList:
if question["type"] in types:
questions.append(question)
elif question["type"] == "Link" or \
question["type"] == "GridChild":
widgetObj = survey_getWidgetFromQuestion(question["qstn_id"])
if widgetObj.getParentType() in types:
question["name"] = widgetObj.fullName()
questions.append(question)
return questions
# =============================================================================
def survey_getQuestionFromName(name, series_id):
"""
Function to return the question for the given series
with the name that matches the one passed in
"""
s3db = current.s3db
sertable = s3db.survey_series
q_ltable = s3db.survey_question_list
qsntable = s3db.survey_question
query = (sertable.id == series_id) & \
(q_ltable.template_id == sertable.template_id) & \
(q_ltable.question_id == qsntable.id) & \
(qsntable.name == name)
record = current.db(query).select(qsntable.id,
qsntable.code,
qsntable.name,
qsntable.type,
q_ltable.posn,
limitby=(0, 1)).first()
if record == None:
# Unable to get the record from the question name
# It could be because the question is a location
# So get the location names and then check
locList = current.gis.get_all_current_levels()
for row in locList.items():
if row[1] == name:
return survey_getQuestionFromName(row[0],series_id)
question = {}
sq = record.survey_question
question["qstn_id"] = sq.id
question["code"] = sq.code
question["name"] = sq.name
question["type"] = sq.type
question["posn"] = record.survey_question_list.posn
return question
# =============================================================================
def survey_updateMetaData (record, type, metadata):
"""
"""
metatable = current.s3db.survey_question_metadata
id = record.id
    # The metadata can either be passed in as a JSON string or as an
    # already-parsed dict; if it is a string then parse it first
if isinstance(metadata, str):
metadataList = json2py(metadata)
else:
metadataList = metadata
for (desc, value) in metadataList.items():
desc = desc.strip()
if not isinstance(value, str):
# web2py stomps all over a list so convert back to a string
# before inserting it on the database
value = json.dumps(value)
value = value.strip()
metatable.insert(question_id = id,
descriptor = desc,
value = value
)
if type == "Grid":
widgetObj = survey_question_type["Grid"]()
widgetObj.insertChildren(record, metadataList)
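# A hedged example call (question_record is a hypothetical survey_question
# record); the metadata may be a JSON string or an already-parsed dict:
#
#     survey_updateMetaData(question_record, "Option",
#                           {"count": "2", "1": "Yes", "2": "No"})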
# =============================================================================
class S3SurveyFormatterModel(S3Model):
"""
The survey_formatter table defines the order in which the questions
will be laid out when a formatted presentation is used.
The idea is to be able to present the questions in a format that
best uses the available space and is familiar to those using the
tool.
Examples of formatted presentation are the spreadsheet and the web
form. This may be extended to PDF documents.
The rules are held as a JSON record and describe where each question
within the section should appear in terms of rows and columns. Each
question is referenced by the question code.
For example assume a section with the following eight questions:
QSTN_1, QSTN_2, QSTN_3, QSTN_4, QSTN_5, QSTN_6, QSTN_7, QSTN_8
Then to display them in three rows:
[[QSTN_1, QSTN_2, QSTN_3], [QSTN_4, QSTN_5, QSTN_6], [QSTN_7, QSTN_8]]
would present it as follows:
QSTN_1, QSTN_2, QSTN_3,
QSTN_4, QSTN_5, QSTN_6,
QSTN_7, QSTN_8
The order of the questions does not need to be preserved, thus:
[[QSTN_1, QSTN_2], [QSTN_4, QSTN_5, QSTN_3], [QSTN_7, QSTN_8, QSTN_6]]
would be valid, and give:
QSTN_1, QSTN_2,
QSTN_4, QSTN_5, QSTN_3,
QSTN_7, QSTN_8, QSTN_6,
***NOTE***
When importing this record with a CSV file the question code will be
single quoted, rather than double quoted which JSON requires.
This is because the whole rule needs to be double quoted. Code that
extracts the records from the table will then need to change all
single quotes to double quotes. This can be done as follows:
rowList = json2py(rules)
"""
names = ["survey_formatter"]
def model(self):
T = current.T
survey_formatter_methods = {
1: T("Default"),
2: T("Web Form"),
3: T("Spreadsheet"),
4: T("PDF"),
}
# ---------------------------------------------------------------------
tablename = "survey_formatter"
table = self.define_table(tablename,
self.survey_template_id(),
Field("section_id", "reference survey_section",
readable=False,
writable=False
),
Field("method", "integer",
requires = IS_IN_SET(survey_formatter_methods,
zero=None),
default=1,
represent = lambda index: \
survey_formatter_methods[index],
readable=True,
writable=False),
Field("rules", "text", default=""),
*s3_meta_fields()
)
self.configure(tablename,
onaccept = self.formatter_onaccept,
deduplicate=self.survey_formatter_duplicate
)
# ---------------------------------------------------------------------
return Storage()
# -------------------------------------------------------------------------
@staticmethod
def formatter_onaccept(form):
"""
            If these are the formatter rules for the Background Information
section then add the standard questions to the layout
"""
s3db = current.s3db
section_id = form.vars.section_id
sectionTbl = s3db.survey_section
section_name = sectionTbl[section_id].name
if section_name == "Background Information":
col1 = []
# Add the default layout
templateTbl = s3db.survey_template
template = templateTbl[form.vars.template_id]
if template.competion_qstn != "":
col1.append("STD-WHO")
if template.date_qstn != "":
col1.append("STD-DATE")
if template.time_qstn != "":
col1.append("STD-TIME")
if "location_detail" in template:
col2 = ["STD-P-Code"]
locationList = json2py(template.location_detail)
for loc in locationList:
col2.append("STD-%s" % loc)
col = [col1, col2]
rule = [{"columns":col}]
ruleList = json2py(form.vars.rules)
                ruleList[:0] = rule
rules = json.dumps(ruleList)
db = current.db
ftable = db.survey_formatter
db(ftable.id == form.vars.id).update(rules = rules)
# -------------------------------------------------------------------------
@staticmethod
def survey_formatter_duplicate(job):
"""
Rules for finding a duplicate:
- Look for a record with the same template_id and section_id
"""
if job.tablename == "survey_formatter":
table = job.table
data = job.data
tid = "template_id" in data and data.template_id
sid = "section_id" in data and data.section_id
query = (table.template_id == tid) & \
(table.section_id == sid)
return duplicator(job, query)
# =============================================================================
def survey_getQstnLayoutRules(template_id,
section_id,
method = 1
):
"""
This will return the rules for laying out the questions for
the given section within the template.
This is used when generating a formatted layout.
First it will look for a survey_formatter record that matches
the method given. Failing that it will look for a default
survey_formatter record. If no appropriate survey_formatter
record exists for the section then it will use the posn
field found in the survey_question_list record.
The function will return a list of rows. Each row is a list
of question codes.
"""
db = current.db
s3db = current.s3db
# search for layout rules on the survey_formatter table
fmttable = s3db.survey_formatter
query = (fmttable.template_id == template_id) & \
(fmttable.section_id == section_id)
rows = db(query).select(fmttable.method,
fmttable.rules)
rules = None
drules = None # default rules
for row in rows:
if row.method == method:
rules = row.rules
break
elif row.method == 1:
drules = row.rules
if rules == None and drules != None:
rules = drules
rowList = []
if rules is None or rules == "":
# get the rules from survey_question_list
q_ltable = s3db.survey_question_list
qsntable = s3db.survey_question
query = (q_ltable.template_id == template_id) & \
(q_ltable.section_id == section_id) & \
(q_ltable.question_id == qsntable.id)
rows = db(query).select(qsntable.code,
q_ltable.posn,
orderby=(q_ltable.posn))
append = rowList.append
for qstn in rows:
append([qstn.survey_question.code])
else:
# convert the JSON rules to python
rowList = json2py(rules)
return rowList
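# The returned value is a list of rows, each row being a list of question
# codes, e.g. (illustrative codes only):
#
#     [["QSTN_1", "QSTN_2", "QSTN_3"], ["QSTN_4"]]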
# =============================================================================
class S3SurveySeriesModel(S3Model):
"""
Series Model
"""
names = ["survey_series",
"survey_series_status",
]
def model(self):
T = current.T
person_id = self.pr_person_id
pr_person_comment = self.pr_person_comment
organisation_id = self.org_organisation_id
s3_date_represent = S3DateTime.date_represent
s3_date_format = current.deployment_settings.get_L10n_date_format()
crud_strings = current.response.s3.crud_strings
set_method = self.set_method
if current.deployment_settings.get_org_autocomplete():
org_widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
else:
org_widget = None
# ---------------------------------------------------------------------
# The survey_series table is used to hold all uses of a template
#
# When a series is first created the template status will change from
# Pending to Active and at the stage no further changes to the
# template can be made.
#
# Typically a series will be created for an event, which may be a
# response to a natural disaster, an exercise,
# or regular data collection activity.
#
# The series is a container for all the responses for the event
series_status = {
1: T("Active"),
2: T("Closed"),
}
tablename = "survey_series"
table = self.define_table(tablename,
Field("name", "string", length=120,
default="",
requires = IS_NOT_EMPTY()),
Field("description", "text", default="", length=500),
Field("status", "integer",
requires = IS_IN_SET(series_status,
zero=None),
default=1,
represent = lambda index: series_status[index],
readable=True,
writable=False),
self.survey_template_id(empty=False,
ondelete="RESTRICT"),
person_id(),
organisation_id(widget = org_widget),
Field("logo", "string", default="", length=512),
Field("language", "string", default="en", length=8),
Field("start_date", "date",
requires = IS_EMPTY_OR(IS_DATE(format = s3_date_format)),
represent = s3_date_represent,
widget = S3DateWidget(),
default=None),
Field("end_date", "date",
requires = IS_EMPTY_OR(IS_DATE(format = s3_date_format)),
represent = s3_date_represent,
widget = S3DateWidget(),
default=None),
#self.super_link("source_id", "doc_source_entity"),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
title_create = T("Conduct a Disaster Assessment"),
title_display = T("Details of Disaster Assessment"),
title_list = T("Disaster Assessments"),
title_update = T("Edit this Disaster Assessment"),
title_analysis_summary = T("Disaster Assessment Summary"),
title_analysis_chart = T("Disaster Assessment Chart"),
title_map = T("Disaster Assessment Map"),
subtitle_create = T("Add a new Disaster Assessment"),
subtitle_analysis_summary = T("Summary of Completed Assessment Forms"),
help_analysis_summary = T("Click on questions below to select them, then click 'Display Selected Questions' button to view the selected questions for all Completed Assessment Forms"),
subtitle_analysis_chart = T("Select a label question and at least one numeric question to display the chart."),
subtitle_map = T("Disaster Assessment Map"),
label_list_button = T("List Disaster Assessments"),
label_create_button = T("Add a new Disaster Assessment"),
label_delete_button = T("Delete this Disaster Assessment"),
msg_record_created = T("Disaster Assessment added"),
msg_record_modified = T("Disaster Assessment updated"),
msg_record_deleted = T("Disaster Assessment deleted"),
msg_list_empty = T("No Disaster Assessments"))
self.configure(tablename,
create_next = URL(f="newAssessment",
vars={"viewing":"survey_series.[id]"}),
onaccept = self.series_onaccept,
deduplicate = self.survey_series_duplicate,
)
# Components
self.add_component("survey_complete", survey_series="series_id")
# Custom Methods
set_method("survey", "series", method="summary", action=self.seriesSummary)
set_method("survey", "series", method="graph", action=self.seriesGraph)
set_method("survey", "series", method="map", action=self.seriesMap)
set_method("survey", "series",
method="series_chart_download",
action=self.seriesChartDownload
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return Storage(
survey_series_status = series_status,
)
# -------------------------------------------------------------------------
@staticmethod
def series_onaccept(form):
"""
Ensure that the template status is set to Active
"""
if form.vars.template_id:
template_id = form.vars.template_id
else:
return
table = current.s3db.survey_template
current.db(table.id == template_id).update(status = 2)
# -------------------------------------------------------------------------
@staticmethod
def survey_series_duplicate(job):
"""
Rules for finding a duplicate:
- Look for a record with a similar name, ignoring case
"""
if job.tablename == "survey_series":
table = job.table
data = job.data
name = "name" in data and data.name
query = table.name.lower().like('%%%s%%' % name.lower())
return duplicator(job, query)
# -------------------------------------------------------------------------
@staticmethod
def seriesSummary(r, **attr):
"""
"""
db = current.db
s3db = current.s3db
request = current.request
s3 = current.response.s3
posn_offset = 11
# Retain the rheader
rheader = attr.get("rheader", None)
if rheader:
rheader = rheader(r)
output = dict(rheader=rheader)
else:
output = dict()
if request.env.request_method == "POST" \
or "mode" in request.vars:
# This means that the user has selected the questions and
            # wants to display the details of the selected questions
crud_strings = s3.crud_strings["survey_complete"]
question_ids = []
vars = request.vars
if "mode" in vars:
mode = vars["mode"]
series_id = r.id
if "selected" in vars:
selected = vars["selected"].split(",")
else:
selected = []
q_ltable = s3db.survey_question_list
sertable = s3db.survey_series
query = (sertable.id == series_id) & \
(sertable.template_id == q_ltable.template_id)
questions = db(query).select(q_ltable.posn,
q_ltable.question_id,
orderby = q_ltable.posn)
for question in questions:
qstn_posn = question.posn + posn_offset
if mode == "Inclusive":
if str(qstn_posn) in selected:
question_ids.append(str(question.question_id))
elif mode == "Exclusive":
if str(qstn_posn) not in selected:
question_ids.append(str(question.question_id))
items = buildCompletedList(series_id, question_ids)
if r.representation == "xls":
from ..s3.codecs.xls import S3XLS
exporter = S3XLS()
return exporter.encode(items,
title=crud_strings.title_selected,
use_colour=False
)
if r.representation == "html":
table = buildTableFromCompletedList(items)
#exporter = S3Exporter()
#table = exporter.html(items)
output["items"] = table
output["sortby"] = [[0, "asc"]]
url_pdf = URL(c="survey", f="series",
args=[series_id, "summary.pdf"],
vars = {"mode": mode,
"selected": vars["selected"]}
)
url_xls = URL(c="survey", f="series",
args=[series_id, "summary.xls"],
vars = {"mode": mode,
"selected": vars["selected"]}
)
s3.formats["pdf"] = url_pdf
s3.formats["xls"] = url_xls
else:
output["items"] = None
output["title"] = crud_strings.title_selected
output["subtitle"] = crud_strings.subtitle_selected
output["help"] = ""
else:
crud_strings = s3.crud_strings["survey_series"]
viewing = request.get_vars.get("viewing", None)
if viewing:
dummy, series_id = viewing.split(".")
else:
series_id = request.get_vars.get("series", None)
if not series_id:
series_id = r.id
form = buildSeriesSummary(series_id, posn_offset)
output["items"] = form
output["sortby"] = [[0, "asc"]]
output["title"] = crud_strings.title_analysis_summary
output["subtitle"] = crud_strings.subtitle_analysis_summary
output["help"] = crud_strings.help_analysis_summary
s3.dataTableBulkActionPosn = "top"
s3.actions = None
current.response.view = "survey/series_summary.html"
return output
# -------------------------------------------------------------------------
@staticmethod
def getChartName():
"""
Create a Name for a Chart
"""
import hashlib
vars = current.request.vars
end_part = "%s_%s" % (vars.numericQuestion,
vars.labelQuestion)
h = hashlib.sha256()
h.update(end_part)
encoded_part = h.hexdigest()
chartName = "survey_series_%s_%s" % (vars.series, encoded_part)
return chartName
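        # For example (illustrative values only), with request vars
        # series=42, numericQuestion="QSTN_1" and labelQuestion="QSTN_2",
        # the name is "survey_series_42_" followed by the hex SHA-256
        # digest of "QSTN_1_QSTN_2"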
# -------------------------------------------------------------------------
@staticmethod
def seriesChartDownload(r, **attr):
"""
"""
from gluon.contenttype import contenttype
series_id = r.id
seriesName = survey_getSeriesName(series_id)
filename = "%s_chart.png" % seriesName
response = current.response
response.headers["Content-Type"] = contenttype(".png")
response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename
chartFile = S3SurveySeriesModel.getChartName()
cached = S3Chart.getCachedFile(chartFile)
if cached:
return cached
# The cached version doesn't exist so regenerate it
output = dict()
vars = current.request.get_vars
if "labelQuestion" in vars:
labelQuestion = vars.labelQuestion
if "numericQuestion" in vars:
numQstnList = vars.numericQuestion
if not isinstance(numQstnList, (list, tuple)):
numQstnList = [numQstnList]
if (numQstnList != None) and (labelQuestion != None):
S3SurveySeriesModel.drawChart(output, series_id, numQstnList,
labelQuestion, outputFormat="png")
return output["chart"]
# -------------------------------------------------------------------------
@staticmethod
def seriesGraph(r, **attr):
"""
Allows the user to select one string question and multiple numeric
questions. The string question is used to group the numeric data,
with the result displayed as a bar chart.
For example:
The string question can be Geographic area, and the numeric
questions could be people injured and families displaced.
Then the results will be grouped by each geographical area.
"""
T = current.T
request = current.request
s3 = current.response.s3
output = dict()
# Draw the chart
vars = request.vars
if "viewing" in vars:
dummy, series_id = vars.viewing.split(".")
elif "series" in vars:
series_id = vars.series
else:
series_id = r.id
chartFile = S3SurveySeriesModel.getChartName()
cachePath = S3Chart.getCachedPath(chartFile)
if cachePath and request.ajax:
return IMG(_src=cachePath)
else:
numQstnList = None
labelQuestion = None
post_vars = request.post_vars
if post_vars is not None:
if "labelQuestion" in post_vars:
labelQuestion = post_vars.labelQuestion
if "numericQuestion" in post_vars:
numQstnList = post_vars.numericQuestion
if not isinstance(numQstnList, (list, tuple)):
numQstnList = [numQstnList]
if (numQstnList != None) and (labelQuestion != None):
S3SurveySeriesModel.drawChart(output, series_id, numQstnList,
labelQuestion)
if request.ajax == True and "chart" in output:
return output["chart"]
# retain the rheader
rheader = attr.get("rheader", None)
if rheader:
rheader = rheader(r)
output["rheader"] = rheader
# ---------------------------------------------------------------------
def addQstnChkboxToTR(numQstnList, qstn):
"""
Build the form
"""
tr = TR()
if numQstnList != None and qstn["code"] in numQstnList:
tr.append(INPUT(_type="checkbox",
_name="numericQuestion",
_value=qstn["code"],
value=True,
)
)
else:
tr.append(INPUT(_type="checkbox",
_name="numericQuestion",
_value=qstn["code"],
)
)
tr.append(LABEL(qstn["name"]))
return tr
if series_id == None:
return output
allQuestions = survey_getAllQuestionsForSeries(series_id)
labelTypeList = ("String",
"Option",
"YesNo",
"YesNoDontKnow",
"Location",
)
labelQuestions = survey_get_series_questions_of_type (allQuestions, labelTypeList)
lblQstns = []
for question in labelQuestions:
lblQstns.append(question["name"])
        numericTypeList = ("Numeric",) # a tuple (note the comma), not a string
form = FORM(_id="mapGraphForm")
table = TABLE()
labelQstn = SELECT(lblQstns, _name="labelQuestion", value=labelQuestion)
table.append(TR(TH("%s:" % T("Select Label Question")), _class="survey_question"))
table.append(labelQstn)
table.append(TR(TH(T("Select Numeric Questions (one or more):")), _class="survey_question"))
# First add the special questions
specialQuestions = [{"code":"Count", "name" : T("Number of Completed Assessment Forms")}]
innerTable = TABLE()
for qstn in specialQuestions:
tr = addQstnChkboxToTR(numQstnList, qstn)
innerTable.append(tr)
table.append(innerTable)
# Now add the numeric questions
numericQuestions = survey_get_series_questions_of_type (allQuestions, numericTypeList)
innerTable = TABLE()
for qstn in numericQuestions:
tr = addQstnChkboxToTR(numQstnList, qstn)
innerTable.append(tr)
table.append(innerTable)
form.append(table)
series = INPUT(_type="hidden",
_id="selectSeriesID",
_name="series",
_value="%s" % series_id
)
button = INPUT(_type="button", _id="chart_btn", _name="Chart", _value=T("Display Chart"))
form.append(series)
form.append(button)
# Set up the javascript code for ajax interaction
jurl = URL(r=request, c=r.prefix, f=r.function, args=request.args)
s3.jquery_ready.append('''
$('#chart_btn').click(function(){
var data=$('#mapGraphForm').serialize()
var url='<a class="action-btn" href=series_chart_download?' + data + '>Download Chart</a>'
$.post('%s',data,function(data){
$('#survey_chart').empty();
$('#survey_chart').append(data);
$('#survey_chart_download').empty();
$('#survey_chart_download').append(url);
});
});
''' % jurl)
output["showForm"] = P(T("Click on the chart to show/hide the form."))
output["form"] = form
output["title"] = s3.crud_strings["survey_series"].title_analysis_chart
current.response.view = "survey/series_analysis.html"
return output
# -------------------------------------------------------------------------
@staticmethod
def drawChart(output, series_id, numQstnList, labelQuestion, outputFormat=None):
"""
"""
T = current.T
getAnswers = survey_getAllAnswersForQuestionInSeries
gqstn = survey_getQuestionFromName(labelQuestion, series_id)
gqstn_id = gqstn["qstn_id"]
ganswers = getAnswers(gqstn_id, series_id)
dataList = []
legendLabels = []
for numericQuestion in numQstnList:
if numericQuestion == "Count":
# get the count of replies for the label question
gqstn_type = gqstn["type"]
analysisTool = survey_analysis_type[gqstn_type](gqstn_id, ganswers)
map = analysisTool.uniqueCount()
label = map.keys()
data = map.values()
legendLabels.append(T("Count of Question"))
else:
qstn = survey_getQuestionFromCode(numericQuestion, series_id)
qstn_id = qstn["qstn_id"]
qstn_type = qstn["type"]
answers = getAnswers(qstn_id, series_id)
analysisTool = survey_analysis_type[qstn_type](qstn_id, answers)
label = analysisTool.qstnWidget.fullName()
if len(label) > 20:
label = "%s..." % label[0:20]
legendLabels.append(label)
grouped = analysisTool.groupData(ganswers)
aggregate = "Sum"
filtered = analysisTool.filter(aggregate, grouped)
(label, data) = analysisTool.splitGroupedData(filtered)
if data != []:
dataList.append(data)
if dataList == []:
output["chart"] = H4(T("There is insufficient data to draw a chart from the questions selected"))
else:
chartFile = S3SurveySeriesModel.getChartName()
chart = S3Chart(path=chartFile, width=7.2)
chart.asInt = True
chart.survey_bar(labelQuestion,
dataList,
label,
legendLabels)
if outputFormat == None:
image = chart.draw()
else:
image = chart.draw(output=outputFormat)
output["chart"] = image
request = current.request
chartLink = A(T("Download"),
_href=URL(c="survey",
f="series",
args=request.args,
vars=request.vars
)
)
output["chartDownload"] = chartLink
# -------------------------------------------------------------------------
@staticmethod
def seriesMap(r, **attr):
"""
"""
import math
from s3survey import S3AnalysisPriority
T = current.T
response = current.response
s3 = response.s3
request = current.request
gis = current.gis
# retain the rheader
rheader = attr.get("rheader", None)
if rheader:
rheader = rheader(r)
output = dict(rheader=rheader)
else:
output = dict()
crud_strings = s3.crud_strings["survey_series"]
viewing = request.get_vars.get("viewing", None)
if viewing:
dummy, series_id = viewing.split(".")
else:
series_id = request.get_vars.get("series", None)
if not series_id:
series_id = r.id
if series_id == None:
seriesList = []
append = seriesList.append
records = survey_getAllSeries()
for row in records:
append(row.id)
else:
seriesList = [series_id]
pqstn = {}
pqstn_name = request.post_vars.get("pqstn_name", None)
if pqstn_name is None:
pqstn = survey_getPriorityQuestionForSeries(series_id)
if "name" in pqstn:
pqstn_name = pqstn["name"]
feature_queries = []
bounds = {}
# Build the drop down list of priority questions
allQuestions = survey_getAllQuestionsForSeries(series_id)
        numericTypeList = ("Numeric",) # a tuple (note the comma), not a string
numericQuestions = survey_get_series_questions_of_type(allQuestions,
numericTypeList)
numQstns = []
for question in numericQuestions:
numQstns.append(question["name"])
form = FORM(_id="mapQstnForm")
table = TABLE()
if pqstn:
priorityQstn = SELECT(numQstns, _name="pqstn_name",
value=pqstn_name)
else:
priorityQstn = None
# Set up the legend
priorityObj = S3AnalysisPriority(range=[-.66, .66],
colour={-1:"#888888", # grey
0:"#008000", # green
1:"#FFFF00", # yellow
2:"#FF0000", # red
},
# Make Higher-priority show up more clearly
opacity={-1:0.5,
0:0.6,
1:0.7,
2:0.8,
},
image={-1:"grey",
0:"green",
1:"yellow",
2:"red",
},
desc={-1:"No Data",
0:"Low",
1:"Average",
2:"High",
},
zero = True)
for series_id in seriesList:
series_name = survey_getSeriesName(series_id)
response_locations = getLocationList(series_id)
if pqstn == {} and pqstn_name:
for question in numericQuestions:
if pqstn_name == question["name"]:
pqstn = question
if pqstn != {}:
pqstn_id = pqstn["qstn_id"]
answers = survey_getAllAnswersForQuestionInSeries(pqstn_id,
series_id)
analysisTool = survey_analysis_type["Numeric"](pqstn_id,
answers)
analysisTool.advancedResults()
else:
analysisTool = None
if analysisTool != None and not math.isnan(analysisTool.mean):
pBand = analysisTool.priorityBand(priorityObj)
legend = TABLE(
TR (TH(T("Marker Levels"), _colspan=3),
_class= "survey_question"),
)
for key in priorityObj.image.keys():
tr = TR(TD(priorityObj.imageURL(request.application,
key)),
TD(priorityObj.desc(key)),
TD(priorityObj.rangeText(key, pBand)),
)
legend.append(tr)
output["legend"] = legend
if len(response_locations) > 0:
                for i in range(len(response_locations)):
location = response_locations[i]
complete_id = location.complete_id
# Insert how we want this to appear on the map
url = URL(c="survey",
f="series",
args=[series_id,
"complete",
complete_id,
"read"
]
)
location.shape = "circle"
location.size = 5
if analysisTool is None:
priority = -1
else:
priority = analysisTool.priority(complete_id,
priorityObj)
location.colour = priorityObj.colour[priority]
location.opacity = priorityObj.opacity[priority]
location.popup_url = url
location.popup_label = response_locations[i].name
feature_queries.append({ "name": "%s: Assessments" % series_name,
"query": response_locations,
"active": True })
if bounds == {}:
bounds = (gis.get_bounds(features=response_locations))
else:
new_bounds = gis.get_bounds(features=response_locations)
# Where is merge_bounds defined!?
bounds = merge_bounds([bounds, new_bounds])
if bounds == {}:
bounds = gis.get_bounds()
map = gis.show_map(feature_queries = feature_queries,
#height = 600,
#width = 720,
bbox = bounds,
#collapsed = True,
catalogue_layers = True,
)
series = INPUT(_type="hidden",
_id="selectSeriesID",
_name="series",
_value="%s" % series_id
)
table.append(TR(TH("%s:" % T("Display Question on Map")),
_class="survey_question"))
table.append(priorityQstn)
table.append(series)
form.append(table)
button = INPUT(_type="submit", _name="Chart",
_value=T("Update Map"))
# REMOVED until we have dynamic loading of maps.
#button = INPUT(_type="button", _id="map_btn", _name="Map_Btn", _value=T("Select the Question"))
#jurl = URL(r=request, c=r.prefix, f=r.function, args=request.args)
#s3.jquery_ready.append('''
#$('#map_btn').click(function(){
# $.post('%s',$('#mapQstnForm').serialize(),function(data){
# obj = jQuery.parseJSON(data);
# $('#survey_map-legend').empty();
# $('#survey_map-legend').append(obj.legend);
# $('#survey_map-container').empty();
# $('#survey_map-container').append(obj.map);
# });
#});
#''' % jurl)
form.append(button)
output["title"] = crud_strings.title_map
output["subtitle"] = crud_strings.subtitle_map
output["instructions"] = T("Click on a marker to see the Completed Assessment Form")
output["form"] = form
output["map"] = map
response.view = "survey/series_map.html"
return output
# =============================================================================
def survey_serieslist_dataTable_post(r):
"""
Replace the Action Buttons
"""
#S3CRUD.action_buttons(r)
current.response.s3.actions = [
dict(label=current.messages.UPDATE,
_class="action-btn edit",
url=URL(c="survey", f="series",
args=["[id]", "summary"]
)
),
]
# =============================================================================
def survey_series_represent(value):
"""
This will display the series name, rather than the id
"""
table = current.s3db.survey_series
row = current.db(table.id == value).select(table.name,
limitby=(0, 1)).first()
return row.name
# =============================================================================
def survey_series_rheader(r):
"""
The series rheader
"""
if r.representation == "html":
tablename, record = s3_rheader_resource(r)
if not record:
series_id = current.request.vars.series
record = survey_getSeries(series_id)
if record != None:
T = current.T
s3db = current.s3db
# Tabs
tabs = [(T("Details"), None),
(T("Completed Assessments"), "complete"),
(T("Summary"), "summary"),
(T("Chart"), "graph"),
(T("Map"), "map"),
]
if current.auth.s3_has_permission("create", "survey_complete"):
tabs.insert(1, (T("Enter Completed Assessment"), "newAssessment/"))
rheader_tabs = s3_rheader_tabs(r, tabs)
completeTable = s3db.survey_complete
qty = current.db(completeTable.series_id == record.id).count()
tsection = TABLE(_class="survey-complete-list")
lblSection = T("Number of Completed Assessment Forms")
rsection = TR(TH(lblSection), TD(qty))
tsection.append(rsection)
urlexport = URL(c="survey", f="series_export_formatted",
args=[record.id])
tranForm = FORM(_action=urlexport)
translationList = survey_getAllTranslationsForSeries(record.id)
if len(translationList) > 0:
tranTable = TABLE()
tr = TR(INPUT(_type='radio',
_name='translationLanguage',
_value="Default",
_checked=True,
),
LABEL("Default"))
colCnt = 1
for translation in translationList:
# include a maximum of 4 translation languages per row
if colCnt == 4:
tranTable.append(tr)
tr = TR()
colCnt = 0
tr.append(INPUT(_type="radio",
_name="translationLanguage",
_value=translation["code"],
))
tr.append(LABEL(translation["language"]))
colCnt += 1
if colCnt != 0:
tranTable.append(tr)
tranForm.append(tranTable)
export_xls_btn = INPUT(_type="submit",
_id="export_xls_btn",
_name="Export_Spreadsheet",
_value=T("Download Assessment Form Spreadsheet"),
_class="action-btn"
)
tranForm.append(export_xls_btn)
try:
                # only add the Export to Word button if PyRTF is installed
from PyRTF import Document
export_rtf_btn = INPUT(_type="submit",
_id="export_rtf_btn",
_name="Export_Word",
_value=T("Download Assessment Form Document"),
_class="action-btn"
)
tranForm.append(export_rtf_btn)
except:
pass
urlimport = URL(c="survey",
f="export_all_responses",
args=[record.id],
)
buttons = DIV(A(T("Export all Completed Assessment Data"),
_href=urlimport,
_id="All_resposnes",
_class="action-btn"
),
)
rheader = DIV(TABLE(
TR(TH("%s: " % T("Template")),
survey_template_represent(record.template_id),
TH("%s: " % T("Name")),
record.name,
TH("%s: " % T("Status")),
s3db.survey_series_status[record.status],
),
),
tsection,
tranForm,
buttons,
rheader_tabs)
return rheader
return None
# =============================================================================
def survey_getSeries(series_id):
"""
function to return the series from a series id
"""
table = current.s3db.survey_series
row = current.db(table.id == series_id).select(limitby=(0, 1)).first()
return row
# =============================================================================
def survey_getSeriesName(series_id):
"""
function to return the Series Name from the id
"""
table = current.s3db.survey_series
row = current.db(table.id == series_id).select(table.name,
limitby=(0, 1)).first()
try:
return row.name
except:
return ""
# =============================================================================
def survey_getAllSeries():
"""
function to return all the series on the database
"""
table = current.s3db.survey_series
row = current.db(table.id > 0).select()
return row
# =============================================================================
def survey_buildQuestionnaireFromSeries(series_id, complete_id=None):
"""
build a form displaying all the questions for a given series_id
If the complete_id is also provided then the responses to each
completed question will also be displayed
"""
questions = survey_getAllQuestionsForSeries(series_id)
return buildQuestionsForm(questions, complete_id)
# =============================================================================
def survey_save_answers_for_series(series_id, complete_id, vars):
"""
function to save the list of answers for a completed series
"""
questions = survey_getAllQuestionsForSeries(series_id)
return saveAnswers(questions, series_id, complete_id, vars)
# =============================================================================
def saveAnswers(questions, series_id, complete_id, vars):
"""
"""
text = ""
table = current.s3db.survey_complete
for question in questions:
code = question["code"]
if (code in vars) and vars[code] != "":
line = '"%s","%s"\n' % (code, vars[code])
text += line
if complete_id == None:
# Insert into database
id = table.insert(series_id = series_id, answer_list = text)
S3SurveyCompleteModel.completeOnAccept(id)
return id
else:
# Update the complete_id record
current.db(table.id == complete_id).update(answer_list = text)
S3SurveyCompleteModel.completeOnAccept(complete_id)
return complete_id
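# The answer_list text built above holds one "code","answer" pair per
# line, e.g. (illustrative codes and values only):
#
#     "STD-WHO","Jane Doe"
#     "STD-DATE","2012-06-01"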
# =============================================================================
def survey_getPriorityQuestionForSeries(series_id):
"""
"""
templateRec = survey_getTemplateFromSeries(series_id)
if templateRec != None:
priorityQstnCode = templateRec["priority_qstn"]
question = survey_getQuestionFromCode(priorityQstnCode, series_id)
return question
else:
return None
# =============================================================================
def buildSeriesSummary(series_id, posn_offset):
"""
"""
from s3.s3data import S3DataTable
T = current.T
table = TABLE(_id="series_summary",
_class="dataTable display")
hr = TR(TH(""), # Bulk action column
TH(T("Position")),
TH(T("Question")),
TH(T("Type")),
TH(T("Summary"))
)
header = THEAD(hr)
questions = survey_getAllQuestionsForSeries(series_id)
line = []
body = TBODY()
for question in questions:
if question["type"] == "Grid":
continue
question_id = question["qstn_id"]
widgetObj = survey_getWidgetFromQuestion(question_id)
br = TR()
posn = int(question["posn"])+posn_offset
br.append(TD(INPUT(_id="select%s" % posn,
_type="checkbox",
_class="bulkcheckbox",
)))
br.append(posn) # add an offset to make all id's +ve
br.append(widgetObj.fullName())
#br.append(question["name"])
type = widgetObj.type_represent()
answers = survey_getAllAnswersForQuestionInSeries(question_id,
series_id)
analysisTool = survey_analysis_type[question["type"]](question_id,
answers)
chart = analysisTool.chartButton(series_id)
cell = TD()
cell.append(type)
if chart:
cell.append(chart)
br.append(cell)
analysisTool.count()
br.append(analysisTool.summary())
body.append(br)
table.append(header)
table.append(body)
s3 = current.response.s3
# Turn off server side pagination
s3.no_sspag = True
# Turn multi-select on
s3.dataTableBulkActions = [current.T("Display Selected Questions")]
attr = S3DataTable.getConfigData()
form = S3DataTable.htmlConfig(table,
"series_summary",
[[0, 'asc']], # order by
"", # the filter string
None, # the rfields
**attr
)
series = INPUT(_type="hidden", _id="selectSeriesID", _name="series",
_value="%s" % series_id)
form.append(series)
return form
# =============================================================================
class S3SurveyCompleteModel(S3Model):
"""
Completed Surveys Model
"""
names = ["survey_complete",
"survey_answer",
]
def model(self):
T = current.T
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# The survey_complete table holds all of the answers for a completed
# response. It has a link back to the series this response belongs to.
#
# Whilst this table holds all of the answers in a text field during
# the onaccept each answer is extracted and then stored in the
# survey_answer table. This process of moving the answers to a
# separate table makes it easier to analyse the answers
# for a given question across all responses.
tablename = "survey_complete"
table = define_table(tablename,
Field("series_id", "reference survey_series",
represent = survey_series_represent,
label = T("Series"),
readable=False,
writable=False
),
Field("answer_list", "text",
represent = survey_answer_list_represent
),
Field("location", "text",
readable=False,
writable=False
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
title_create = T("Enter Completed Assessment Form"),
title_display = T("Completed Assessment Form Details"),
title_list = T("Completed Assessment Forms"),
title_update = T("Edit Completed Assessment Form"),
title_selected = T("Selected Questions for all Completed Assessment Forms"),
subtitle_create = T("Enter Completed Assessment Form"),
subtitle_selected = T("Selected Questions for all Completed Assessment Forms"),
label_list_button = T("List Completed Assessment Forms"),
label_create_button = T("Add a new Completed Assessment Form"),
label_delete_button = T("Delete this Completed Assessment Form"),
msg_record_created = T("Completed Assessment Form entered"),
msg_record_modified = T("Completed Assessment Form updated"),
msg_record_deleted = T("Completed Assessment Form deleted"),
msg_list_empty = T("No Completed Assessment Forms"),
title_upload = T("Upload the Completed Assessment Form")
)
configure(tablename,
onvalidation = self.complete_onvalidate,
onaccept = self.complete_onaccept,
deduplicate=self.survey_complete_duplicate,
)
self.add_component("survey_complete",
survey_series = dict(joinby="series_id",
multiple=True)
)
# ---------------------------------------------------------------------
# The survey_answer table holds the answer for a single response
# of a given question.
tablename = "survey_answer"
table = define_table(tablename,
Field("complete_id", "reference survey_complete",
readable=False,
writable=False
),
Field("question_id", "reference survey_question",
readable=True,
writable=False
),
Field("value", "text",
readable=True,
writable=True
),
*s3_meta_fields())
crud_strings[tablename] = Storage(
title_create = T("Add Assessment Answer"),
title_display = T("Assessment Answer Details"),
title_list = T("Assessment Answers"),
title_update = T("Edit Assessment Answer"),
subtitle_create = T("Add a new Assessment Answer"),
label_list_button = T("List Assessment Answers"),
label_create_button = T("Add a new Assessment Answer"),
label_delete_button = T("Delete this Assessment Answer"),
msg_record_created = T("Assessment Answer added"),
msg_record_modified = T("Assessment Answer updated"),
msg_record_deleted = T("Assessment Answer deleted"),
msg_list_empty = T("No Assessment Answers"))
configure(tablename,
onaccept = self.answer_onaccept,
deduplicate = self.survey_answer_duplicate
)
# ---------------------------------------------------------------------
return Storage()
# -------------------------------------------------------------------------
@staticmethod
def extractAnswerFromAnswerList(answerList, qstnCode):
"""
function to extract the answer for the question code
passed in from the list of answers. This is in a CSV
format created by the XSL stylesheet or by the function
saveAnswers()
"""
start = answerList.find(qstnCode)
if start == -1:
return None
start = start + len(qstnCode) + 3
end = answerList.find('"', start)
answer = answerList[start:end]
return answer
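        # For example (illustrative answer list):
        #
        #     aList = '"STD-WHO","Jane Doe"\n"STD-DATE","2012-06-01"\n'
        #     S3SurveyCompleteModel.extractAnswerFromAnswerList(aList,
        #                                                       "STD-DATE")
        #     # returns "2012-06-01"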
# -------------------------------------------------------------------------
@staticmethod
def complete_onvalidate(form):
"""
"""
T = current.T
vars = form.vars
if "series_id" not in vars or vars.series_id == None:
form.errors.series_id = T("Series details missing")
return False
if "answer_list" not in vars or vars.answer_list == None:
form.errors.answer_list = T("The answers are missing")
return False
series_id = vars.series_id
answer_list = vars.answer_list
qstn_list = survey_getAllQuestionsForSeries(series_id)
qstns = []
for qstn in qstn_list:
qstns.append(qstn["code"])
answerList = answer_list.splitlines(True)
for answer in answerList:
qstn_code = answer[1:answer.find('","')]
if qstn_code not in qstns:
msg = "%s: %s" % (T("Unknown question code"), qstn_code)
                if "answer_list" not in form.errors:
form.errors.answer_list = msg
else:
form.errors.answer_list += msg
return True
# -------------------------------------------------------------------------
@staticmethod
def complete_onaccept(form):
"""
            All of the answers will be stored in the answer_list in the
            format "code","answer".
            They will then be inserted into the survey_answer table;
            each item will become a record on that table.
This will also extract the default location question as
defined by the template and store this in the location field
"""
if form.vars.id:
S3SurveyCompleteModel.completeOnAccept(form.vars.id)
# -------------------------------------------------------------------------
@staticmethod
def completeOnAccept(complete_id):
"""
"""
# Get the basic data that is needed
s3db = current.s3db
rtable = s3db.survey_complete
atable = s3db.survey_answer
record = rtable[complete_id]
series_id = record.series_id
purgePrefix = "survey_series_%s" % series_id
S3Chart.purgeCache(purgePrefix)
if series_id == None:
return
# Save all the answers from answerList in the survey_answer table
answerList = record.answer_list
S3SurveyCompleteModel.importAnswers(complete_id, answerList)
# Extract the default template location question and save the
# answer in the location field
templateRec = survey_getTemplateFromSeries(series_id)
locDetails = templateRec["location_detail"]
if not locDetails:
return
widgetObj = get_default_location(complete_id)
if widgetObj:
current.db(rtable.id == complete_id).update(location = widgetObj.repr())
locations = get_location_details(complete_id)
S3SurveyCompleteModel.importLocations(locations)
# -------------------------------------------------------------------------
@staticmethod
    def importAnswers(complete_id, answer_list):
        """
            Private function used to save the answer_list stored in
            survey_complete into answer records held in survey_answer
        """
        import csv
        import os
        try:
            from cStringIO import StringIO # Faster, where available
        except:
            from StringIO import StringIO
        strio = StringIO()
        strio.write(answer_list)
        strio.seek(0)
        answer = []
        append = answer.append
        reader = csv.reader(strio)
        for row in reader:
            if row != None:
                row.insert(0, complete_id)
                append(row)
from tempfile import TemporaryFile
csvfile = TemporaryFile()
writer = csv.writer(csvfile)
writerow = writer.writerow
writerow(["complete_id", "question_code", "value"])
for row in answer:
writerow(row)
csvfile.seek(0)
xsl = os.path.join("applications",
current.request.application,
"static",
"formats",
"s3csv",
"survey",
"answer.xsl")
resource = current.s3db.resource("survey_answer")
resource.import_xml(csvfile, stylesheet = xsl, format="csv",)
# -------------------------------------------------------------------------
@staticmethod
def importLocations(location_dict):
"""
private function used to save the locations to gis.location
"""
import csv
import os
lastLocWidget = None
codeList = ["STD-L0","STD-L1","STD-L2","STD-L3","STD-L4"]
headingList = ["Country",
"ADM1_NAME",
"ADM2_NAME",
"ADM3_NAME",
"ADM4_NAME"
]
cnt = 0
answer = []
headings = []
aappend = answer.append
happend = headings.append
for loc in codeList:
if loc in location_dict:
aappend(location_dict[loc].repr())
lastLocWidget = location_dict[loc]
happend(headingList[cnt])
cnt += 1
# Check that we have at least one location question answered
        if lastLocWidget is None:
return
codeList = ["STD-P-Code","STD-Lat","STD-Lon"]
for loc in codeList:
if loc in location_dict:
aappend(location_dict[loc].repr())
else:
aappend("")
from tempfile import TemporaryFile
csvfile = TemporaryFile()
writer = csv.writer(csvfile)
headings += ["Code2", "Lat", "Lon"]
writer.writerow(headings)
writer.writerow(answer)
csvfile.seek(0)
xsl = os.path.join("applications",
current.request.application,
"static",
"formats",
"s3csv",
"gis",
"location.xsl")
resource = current.s3db.resource("gis_location")
resource.import_xml(csvfile, stylesheet = xsl, format="csv",)
# -------------------------------------------------------------------------
@staticmethod
def survey_complete_duplicate(job):
"""
Rules for finding a duplicate:
- Look for a record with the same name, answer_list
"""
if job.tablename == "survey_complete":
table = job.table
data = job.data
answers = "answer_list" in data and data.answer_list
query = (table.answer_list == answers)
try:
return duplicator(job, query)
except:
# if this is part of an import then the select will throw an error
# if the question code doesn't exist.
# This can happen during an import if the wrong file is used.
return
# -------------------------------------------------------------------------
@staticmethod
def answer_onaccept(form):
"""
Some question types may require additional processing
"""
vars = form.vars
if vars.complete_id and vars.question_id:
atable = current.s3db.survey_answer
complete_id = vars.complete_id
question_id = vars.question_id
value = vars.value
widgetObj = survey_getWidgetFromQuestion(question_id)
newValue = widgetObj.onaccept(value)
if newValue != value:
query = (atable.question_id == question_id) & \
(atable.complete_id == complete_id)
current.db(query).update(value = newValue)
# -------------------------------------------------------------------------
@staticmethod
def survey_answer_duplicate(job):
"""
Rules for finding a duplicate:
- Look for a record with the same complete_id and question_id
"""
if job.tablename == "survey_answer":
table = job.table
data = job.data
qid = "question_id" in data and data.question_id
rid = "complete_id" in data and data.complete_id
query = (table.question_id == qid) & \
(table.complete_id == rid)
return duplicator(job, query)
# =============================================================================
def survey_answerlist_dataTable_pre():
"""
        The answer list has been removed for the moment. Currently it
        displays all answers; for a summary it would be better to be
        able to display just a few selected answers
"""
list_fields = ["created_on", "series_id", "location", "modified_by"]
current.s3db.configure("survey_complete", list_fields=list_fields)
# =============================================================================
def survey_answerlist_dataTable_post(r):
"""
Replace Action Buttons
"""
#S3CRUD.action_buttons(r)
current.response.s3.actions = [
dict(label=current.messages["UPDATE"],
_class="action-btn edit",
url=URL(c="survey", f="series",
args=[r.id, "complete", "[id]", "update"])
),
]
# =============================================================================
def survey_answer_list_represent(value):
"""
Display the answer list in a formatted table.
Displaying the full question (rather than the code)
and the answer.
"""
db = current.db
qtable = current.s3db.survey_question
answer_text = value
    lines = answer_text.splitlines()
    result = TABLE()
    questions = {}
    xml_decode = S3Codec.xml_decode
    for line in lines:
line = xml_decode(line)
(question, answer) = line.split(",",1)
question = question.strip("\" ")
if question in questions:
question = questions[question]
else:
query = (qtable.code == question)
qstn = db(query).select(qtable.name,
limitby=(0, 1)).first()
if not qstn:
continue
questions[question] = qstn.name
question = qstn.name
answer = answer.strip("\" ")
result.append(TR(TD(B(question)), TD(answer)))
return result
# =============================================================================
def get_location_details(complete_id):
"""
It will return a dict of values for all of the standard location
questions that have been answered
"""
db = current.db
s3db = current.s3db
locations = {}
comtable = s3db.survey_complete
qsntable = s3db.survey_question
answtable = s3db.survey_answer
query = (answtable.question_id == qsntable.id) & \
(answtable.complete_id == comtable.id)
codeList = ["STD-P-Code",
"STD-L0", "STD-L1", "STD-L2", "STD-L3", "STD-L4",
"STD-Lat", "STD-Lon"]
for locCode in codeList:
record = db(query & (qsntable.code == locCode)).select(qsntable.id,
limitby=(0, 1)).first()
if record:
widgetObj = survey_getWidgetFromQuestion(record.id)
widgetObj.loadAnswer(complete_id, record.id)
locations[locCode] = widgetObj
return locations
# =============================================================================
def get_default_location(complete_id):
"""
        Check the standard location questions in the hierarchy, from
        most specific to least specific, and return the widget for the
        first one answered (or None if none are answered)
"""
db = current.db
s3db = current.s3db
comtable = s3db.survey_complete
qsntable = s3db.survey_question
answtable = s3db.survey_answer
query = (answtable.question_id == qsntable.id) & \
(answtable.complete_id == comtable.id)
codeList = ["STD-L4", "STD-L3", "STD-L2", "STD-L1", "STD-L0"]
for locCode in codeList:
record = db(query & (qsntable.code == locCode)).select(qsntable.id,
limitby=(0, 1)).first()
if record:
widgetObj = survey_getWidgetFromQuestion(record.id)
break
if record:
widgetObj.loadAnswer(complete_id, record.id)
return widgetObj
else:
return None
# =============================================================================
def survey_getAllAnswersForQuestionInSeries(question_id, series_id):
"""
        Return all the answers for a given question from within a
        specified series
"""
s3db = current.s3db
ctable = s3db.survey_complete
atable = s3db.survey_answer
query = (atable.question_id == question_id) & \
(atable.complete_id == ctable.id) & \
(ctable.series_id == series_id)
rows = current.db(query).select(atable.id,
atable.value,
atable.complete_id)
answers = []
for row in rows:
answer = {}
answer["answer_id"] = row.id
answer["value"] = row.value
answer["complete_id"] = row.complete_id
answers.append(answer)
return answers
# =============================================================================
def buildTableFromCompletedList(dataSource):
"""
"""
headers = dataSource[0]
items = dataSource[2:]
table = TABLE(_id="completed_list",
_class="dataTable display")
hr = TR()
for title in headers:
hr.append(TH(title))
header = THEAD(hr)
body = TBODY()
for row in items:
tr = TR()
for answer in row:
tr.append(TD(answer))
body.append(tr)
table.append(header)
table.append(body)
# Turn off server side pagination
current.response.s3.no_sspag = True
attr = S3DataTable.getConfigData()
form = S3DataTable.htmlConfig(table,
"completed_list",
[[0, 'asc']], # order by
"", # the filter string
None, # the rfields
**attr
)
return form
# =============================================================================
def buildCompletedList(series_id, question_id_list):
"""
build a list of completed items for the series including
just the questions in the list passed in
The list will come in three parts.
1) The first row is the header (list of field labels)
        2) The second row is the type of each column
3) The remaining rows are the data
@param series_id: The id of the series
@param question_id_list: The list of questions to display
"""
db = current.db
qtable = current.s3db.survey_question
headers = []
happend = headers.append
types = []
items = []
qstn_posn = 0
rowLen = len(question_id_list)
complete_lookup = {}
for question_id in question_id_list:
answers = survey_getAllAnswersForQuestionInSeries(question_id,
series_id)
widgetObj = survey_getWidgetFromQuestion(question_id)
question = db(qtable.id == question_id).select(qtable.name,
limitby=(0, 1)).first()
happend(question.name)
types.append(widgetObj.db_type())
for answer in answers:
complete_id = answer["complete_id"]
if complete_id in complete_lookup:
row = complete_lookup[complete_id]
else:
row = len(complete_lookup)
                complete_lookup[complete_id] = row
items.append([''] * rowLen)
items[row][qstn_posn] = widgetObj.repr(answer["value"])
qstn_posn += 1
return [headers] + [types] + items
# =============================================================================
def getLocationList(series_id):
"""
Get a list of the LatLons for each Response in a Series
"""
response_locations = []
rappend = response_locations.append
codeList = ["STD-L4", "STD-L3", "STD-L2", "STD-L1", "STD-L0"]
table = current.s3db.survey_complete
rows = current.db(table.series_id == series_id).select(table.id,
table.answer_list)
for row in rows:
lat = None
lon = None
name = None
answer_list = row.answer_list.splitlines()
answer_dict = {}
for line in answer_list:
(question, answer) = line.split(",", 1)
question = question.strip('"')
if question in codeList:
# Store to get the name
answer_dict[question] = answer.strip('"')
elif question == "STD-Lat":
try:
lat = float(answer.strip('"'))
                except ValueError:
pass
else:
if lat < -90.0 or lat > 90.0:
lat = None
elif question == "STD-Lon":
try:
lon = float(answer.strip('"'))
                except ValueError:
pass
else:
if lon < -180.0 or lon > 180.0:
lon = None
else:
# Not relevant here
continue
for locCode in codeList:
# Retrieve the name of the lowest Lx
if locCode in answer_dict:
name = answer_dict[locCode]
break
if lat and lon:
# We have sufficient data to display on the map
location = Row()
location.lat = lat
location.lon = lon
location.name = name
location.complete_id = row.id
rappend(location)
else:
# The lat & lon were not added to the assessment so try and get one
locWidget = get_default_location(row.id)
if locWidget:
complete_id = locWidget.question["complete_id"]
if "answer" not in locWidget.question:
continue
answer = locWidget.question["answer"]
                record = locWidget.getLocationRecord(complete_id, answer)
                if len(record.records) == 1:
                    location = record.records[0].gis_location
                    location.complete_id = complete_id
                    rappend(location)
return response_locations
# =============================================================================
class S3SurveyTranslateModel(S3Model):
"""
Translations Model
"""
from gluon.languages import read_dict, write_dict
names = ["survey_translate"]
def model(self):
T = current.T
# ---------------------------------------------------------------------
# The survey_translate table holds the details of the language
# for which the template has been translated into.
LANG_HELP = T("This is the full name of the language and will be displayed to the user when selecting the template language.")
CODE_HELP = T("This is the short code of the language and will be used as the name of the file. This should be the ISO 639 code.")
tablename = "survey_translate"
table = self.define_table(tablename,
self.survey_template_id(),
Field("language",
readable=True,
writable=True,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Language"),
LANG_HELP))
),
Field("code",
readable=True,
writable=True,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Language Code"),
CODE_HELP))
),
Field("file", "upload",
autodelete=True),
Field("filename",
readable=False,
writable=False),
*s3_meta_fields())
current.response.s3.crud_strings[tablename] = Storage(
title_create = T("Add new translation language"),
)
self.configure(tablename,
onaccept = self.translate_onaccept,
)
# ---------------------------------------------------------------------
return Storage()
# -------------------------------------------------------------------------
@staticmethod
def translate_onaccept(form):
"""
If the translation spreadsheet has been uploaded then
it needs to be processed.
The translation strings need to be extracted from
the spreadsheet and inserted into the language file.
"""
if "file" in form.vars:
try:
import xlrd
except ImportError:
                import sys
                print >> sys.stderr, "ERROR: xlrd & xlwt modules are needed for importing spreadsheets"
return None
from gluon.languages import read_dict, write_dict
T = current.T
request = current.request
response = current.response
msgNone = T("No translations exist in spreadsheet")
upload_file = request.post_vars.file
upload_file.file.seek(0)
openFile = upload_file.file.read()
lang = form.record.language
code = form.record.code
try:
workbook = xlrd.open_workbook(file_contents=openFile)
except:
msg = T("Unable to open spreadsheet")
response.error = msg
response.flash = None
return
try:
sheetL = workbook.sheet_by_name(lang)
except:
msg = T("Unable to find sheet %(sheet_name)s in uploaded spreadsheet") % \
dict(sheet_name=lang)
response.error = msg
response.flash = None
return
if sheetL.ncols == 1:
response.warning = msgNone
response.flash = None
return
count = 0
lang_fileName = "applications/%s/uploads/survey/translations/%s.py" % \
(request.application, code)
try:
strings = read_dict(lang_fileName)
except:
strings = dict()
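            # Spreadsheet layout assumed by the loop below: column 0
            # holds the original string, column 1 its translation;
            # row 0 is treated as a header (the loop starts at row 1).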
for row in xrange(1, sheetL.nrows):
original = sheetL.cell_value(row, 0)
translation = sheetL.cell_value(row, 1)
if (original not in strings) or translation != "":
strings[original] = translation
count += 1
write_dict(lang_fileName, strings)
if count == 0:
response.warning = msgNone
response.flash = None
else:
response.flash = T("%(count_of)d translations have been imported to the %(language)s language file") % \
dict(count_of=count, language=lang)
# =============================================================================
def survey_getAllTranslationsForTemplate(template_id):
"""
Function to return all the translations for the given template
"""
table = current.s3db.survey_translate
    rows = current.db(table.template_id == template_id).select()
    return rows
# =============================================================================
def survey_getAllTranslationsForSeries(series_id):
"""
Function to return all the translations for the given series
"""
table = current.s3db.survey_series
row = current.db(table.id == series_id).select(table.template_id,
limitby=(0, 1)).first()
template_id = row.template_id
return survey_getAllTranslationsForTemplate(template_id)
# =============================================================================
# Generic function called by the duplicator methods to determine if the
# record already exists on the database.
def duplicator(job, query):
"""
This callback will be called when importing records it will look
to see if the record being imported is a duplicate.
@param job: An S3ImportJob object which includes all the details
of the record being imported
If the record is a duplicate then it will set the job method to update
"""
table = job.table
_duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if _duplicate:
job.id = _duplicate.id
job.data.id = _duplicate.id
job.method = job.METHOD.UPDATE
# END =========================================================================
| mit | 4,581,564,324,637,390,300 | 38.911366 | 195 | 0.471481 | false |
AIFDR/inasafe-django | django_project/realtime/migrations/0050_reporttemplate.py | 2 | 1641 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('realtime', '0049_auto_20180320_0406'),
]
operations = [
migrations.CreateModel(
name='ReportTemplate',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timestamp', models.DateTimeField(help_text='The time the template uploaded.', verbose_name='Timestamp')),
('version', models.CharField(default=None, max_length=10, blank=True, help_text='Version number of the template.', null=True, verbose_name='Template version')),
('notes', models.CharField(default=None, max_length=255, blank=True, help_text='Notes of the report template.', null=True, verbose_name='Template Notes')),
('language', models.CharField(default=b'id', help_text='The language ID of the report', max_length=4, verbose_name='Language ID')),
('hazard', models.CharField(default=None, help_text='The hazard type of the template.', max_length=25, verbose_name='Hazard Type')),
('template_file', models.FileField(help_text='Template file formatted as qgis template file (*.qpt).', upload_to=b'', verbose_name='Template File')),
('owner', models.IntegerField(default=0, help_text='The owner/uploader of the template.', verbose_name='Owner')),
],
options={
'verbose_name_plural': 'Report Templates',
},
),
]
| bsd-2-clause | -3,155,160,990,121,272,300 | 53.7 | 176 | 0.622182 | false |
supernifty/mgsa | mgsa/analyze_bam.py | 1 | 2071 |
import argparse
import collections
import numpy
import sys
import bio
import config
parser = argparse.ArgumentParser(description='Analyze BAM')
parser.add_argument('bam', metavar='bam', help='bam file to analyze')
parser.add_argument('--buckets', metavar='buckets', type=int, default=10, help='number of buckets')
parser.add_argument('--max_sample', metavar='max_sample', type=int, default=-1, help='max number of samples in each group')
parser.add_argument('--skip', metavar='skip', type=int, default=0, help='skip the first reads')
args = parser.parse_args()
bam = bio.BamReaderExternal( config.BAM_TO_SAM, args.bam )
stats = bio.SamStats( bam, max_sample=args.max_sample, skip=args.skip )
# gc
buckets = numpy.linspace(0, 1, args.buckets + 1)
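# linspace with args.buckets + 1 points gives the edges of args.buckets
# equal-width bins spanning [0, 1]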
mapped_buckets = bio.bucket( filter( None, stats.mapped['gc'] ), buckets )
unmapped_buckets = bio.bucket( filter( None, stats.unmapped['gc'] ), buckets )
total_mapped = sum( mapped_buckets )
total_unmapped = sum( unmapped_buckets )
print '========== GC content =========='
print 'GC %%: %s' % '\t'.join( [ '%.2f' % bucket for bucket in buckets ] )
print 'mapped: %s' % '\t'.join( [ '%.1f' % ( 100. * x / total_mapped ) for x in mapped_buckets ] )
print 'unmapped: %s' % '\t'.join( [ '%.1f' % ( 100. * x / total_unmapped ) for x in unmapped_buckets ] )
# entropy
mapped_buckets = bio.bucket( stats.mapped['entropy'], buckets )
unmapped_buckets = bio.bucket( stats.unmapped['entropy'], buckets )
total_mapped = sum( mapped_buckets )
total_unmapped = sum( unmapped_buckets )
print '\n========== Entropy =========='
print 'Mapped: min: %.2f max: %.2f' % ( min( stats.mapped['entropy'] ), max( stats.mapped['entropy'] ) )
print 'Unmapped: min: %.2f max: %.2f' % ( min( stats.unmapped['entropy'] ), max( stats.unmapped['entropy'] ) )
print 'Entropy: %s' % '\t'.join( [ '%.2f' % bucket for bucket in buckets ] )
print 'mapped: %s' % '\t'.join( [ '%.1f' % ( 100. * x / total_mapped ) for x in mapped_buckets ] )
print 'unmapped: %s' % '\t'.join( [ '%.1f' % ( 100. * x / total_unmapped ) for x in unmapped_buckets ] )
| mit | 6,292,994,006,158,052,000 | 45.022222 | 123 | 0.649445 | false |
minlexx/pyevemon | esi_client/models/get_characters_character_id_mail_labels_forbidden.py | 1 | 3097 | # coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class GetCharactersCharacterIdMailLabelsForbidden(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, error=None):
"""
GetCharactersCharacterIdMailLabelsForbidden - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'error': 'str'
}
self.attribute_map = {
'error': 'error'
}
self._error = error
@property
def error(self):
"""
Gets the error of this GetCharactersCharacterIdMailLabelsForbidden.
Forbidden message
:return: The error of this GetCharactersCharacterIdMailLabelsForbidden.
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""
Sets the error of this GetCharactersCharacterIdMailLabelsForbidden.
Forbidden message
:param error: The error of this GetCharactersCharacterIdMailLabelsForbidden.
:type: str
"""
self._error = error
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, GetCharactersCharacterIdMailLabelsForbidden):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| gpl-3.0 | -9,000,179,383,787,860,000 | 25.470085 | 84 | 0.539554 | false |
jortel/gofer | test/unit/messaging/adapter/amqp/test_model.py | 1 | 5194 | # Copyright (c) 2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
from unittest import TestCase
from mock import Mock, patch
from gofer.devel import ipatch
with ipatch('amqp'):
from gofer.messaging.adapter.amqp.model import Exchange, BaseExchange
from gofer.messaging.adapter.amqp.model import Queue, BaseQueue
class TestExchange(TestCase):
def test_init(self):
name = 'test-exchange'
policy = 'direct'
# test
exchange = Exchange(name, policy=policy)
# validation
self.assertTrue(isinstance(exchange, BaseExchange))
self.assertEqual(exchange.name, name)
self.assertEqual(exchange.policy, policy)
self.assertEqual(exchange.auto_delete, False)
@patch('gofer.messaging.adapter.amqp.reliability.Connection.channel')
@patch('gofer.messaging.adapter.amqp.connection.RealConnection', Mock())
def test_declare(self, channel):
url = 'test-url'
# test
exchange = Exchange('test', policy='direct')
exchange.declare(url)
# validation
channel.return_value.exchange_declare.assert_called_once_with(
exchange.name,
exchange.policy,
durable=exchange.durable,
auto_delete=exchange.auto_delete)
@patch('gofer.messaging.adapter.amqp.reliability.Connection.channel')
@patch('gofer.messaging.adapter.amqp.connection.RealConnection', Mock())
def test_delete(self, channel):
url = 'test-url'
# test
exchange = Exchange('test')
exchange.delete(url)
# validation
channel.return_value.exchange_delete.assert_called_once_with(exchange.name, nowait=True)
@patch('gofer.messaging.adapter.amqp.reliability.Connection.channel')
@patch('gofer.messaging.adapter.amqp.connection.RealConnection', Mock())
def test_bind(self, channel):
url = 'test-url'
queue = BaseQueue('test-queue')
# test
exchange = Exchange('test-exchange')
exchange.bind(queue, url)
# validation
channel.return_value.queue_bind.assert_called_once_with(
queue.name,
exchange=exchange.name,
routing_key=queue.name)
@patch('gofer.messaging.adapter.amqp.reliability.Connection.channel')
@patch('gofer.messaging.adapter.amqp.connection.RealConnection', Mock())
def test_unbind(self, channel):
url = 'test-url'
queue = BaseQueue('test-queue')
# test
exchange = Exchange('test-exchange')
exchange.unbind(queue, url)
# validation
channel.return_value.queue_unbind.assert_called_once_with(
queue.name,
exchange=exchange.name,
routing_key=queue.name)
class TestQueue(TestCase):
def test_init(self):
name = 'test-queue'
queue = Queue(name)
self.assertEqual(queue.name, name)
self.assertTrue(isinstance(queue, BaseQueue))
self.assertEqual(queue.exclusive, False)
self.assertEqual(queue.auto_delete, False)
self.assertEqual(queue.expiration, 0)
@patch('gofer.messaging.adapter.amqp.reliability.Connection.channel')
@patch('gofer.messaging.adapter.amqp.connection.RealConnection', Mock())
def test_declare(self, channel):
url = 'test-url'
# test
queue = Queue('test')
queue.declare(url)
# validation
channel.return_value.queue_declare.assert_called_once_with(
queue.name,
durable=queue.durable,
exclusive=queue.exclusive,
auto_delete=queue.auto_delete,
arguments=None)
@patch('gofer.messaging.adapter.amqp.reliability.Connection.channel')
@patch('gofer.messaging.adapter.amqp.connection.RealConnection', Mock())
def test_declare_auto_delete(self, channel):
url = 'test-url'
# test
queue = Queue('test')
queue.auto_delete = True
queue.expiration = 10
queue.declare(url)
# validation
channel.return_value.queue_declare.assert_called_once_with(
queue.name,
durable=queue.durable,
exclusive=queue.exclusive,
auto_delete=queue.auto_delete,
arguments={'x-expires': queue.expiration * 1000})
@patch('gofer.messaging.adapter.amqp.reliability.Connection.channel')
@patch('gofer.messaging.adapter.amqp.connection.RealConnection', Mock())
def test_delete(self, channel):
url = 'test-url'
# test
queue = Queue('test')
queue.delete(url)
# validation
channel.return_value.queue_delete.assert_called_once_with(queue.name, nowait=True)
| lgpl-2.1 | -3,331,366,972,671,112,700 | 32.294872 | 96 | 0.652869 | false |
dafrito/trac-mirror | trac/ticket/default_workflow.py | 1 | 21721 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# Copyright (C) 2006 Alec Thomas
# Copyright (C) 2007 Eli Carter
# Copyright (C) 2007 Christian Boos <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Eli Carter
import pkg_resources
from ConfigParser import RawConfigParser
from StringIO import StringIO
from genshi.builder import tag
from trac.config import Configuration, ConfigSection
from trac.core import *
from trac.env import IEnvironmentSetupParticipant
from trac.perm import PermissionSystem
from trac.ticket.api import ITicketActionController, TicketSystem
from trac.ticket.model import Resolution
from trac.util.text import obfuscate_email_address
from trac.util.translation import _, tag_, cleandoc_
from trac.web.chrome import Chrome, add_script, add_script_data
from trac.wiki.macros import WikiMacroBase
# -- Utilities for the ConfigurableTicketWorkflow
def parse_workflow_config(rawactions):
"""Given a list of options from [ticket-workflow]"""
actions = {}
for option, value in rawactions:
parts = option.split('.')
action = parts[0]
if action not in actions:
actions[action] = {'oldstates': '', 'newstate': ''}
if len(parts) == 1:
# Base name, of the syntax: old,states,here -> newstate
try:
oldstates, newstate = [x.strip() for x in value.split('->')]
except ValueError:
continue # Syntax error, a warning will be logged later
actions[action]['newstate'] = newstate
actions[action]['oldstates'] = oldstates
else:
action, attribute = option.split('.')
actions[action][attribute] = value
# Fill in the defaults for every action, and normalize them to the desired
# types
def as_list(key):
value = attributes.get(key, '')
return [item for item in (x.strip() for x in value.split(',')) if item]
for action, attributes in actions.items():
# Default the 'name' attribute to the name used in the ini file
if 'name' not in attributes:
attributes['name'] = action
# If not specified, an action is not the default.
attributes['default'] = int(attributes.get('default', 0))
# If operations are not specified, that means no operations
attributes['operations'] = as_list('operations')
# If no permissions are specified, then no permissions are needed
attributes['permissions'] = as_list('permissions')
# Normalize the oldstates
attributes['oldstates'] = as_list('oldstates')
return actions
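# Illustrative (hypothetical) round trip through the parser above:
#   parse_workflow_config([('leave', '* -> *'),
#                          ('leave.operations', 'leave_status')])
# returns
#   {'leave': {'name': 'leave', 'default': 0,
#              'oldstates': ['*'], 'newstate': '*',
#              'operations': ['leave_status'], 'permissions': []}}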
def get_workflow_config(config):
"""Usually passed self.config, this will return the parsed ticket-workflow
section.
"""
raw_actions = list(config.options('ticket-workflow'))
actions = parse_workflow_config(raw_actions)
return actions
def load_workflow_config_snippet(config, filename):
"""Loads the ticket-workflow section from the given file (expected to be in
the 'workflows' tree) into the provided config.
"""
filename = pkg_resources.resource_filename('trac.ticket',
'workflows/%s' % filename)
new_config = Configuration(filename)
for name, value in new_config.options('ticket-workflow'):
config.set('ticket-workflow', name, value)
class ConfigurableTicketWorkflow(Component):
"""Ticket action controller which provides actions according to a
workflow defined in trac.ini.
    The workflow is defined in the `[ticket-workflow]` section of the
[wiki:TracIni#ticket-workflow-section trac.ini] configuration file.
"""
ticket_workflow_section = ConfigSection('ticket-workflow',
"""The workflow for tickets is controlled by plugins. By default,
there's only a `ConfigurableTicketWorkflow` component in charge.
That component allows the workflow to be configured via this section
in the `trac.ini` file. See TracWorkflow for more details.
(''since 0.11'')""")
def __init__(self, *args, **kwargs):
self.actions = get_workflow_config(self.config)
if not '_reset' in self.actions:
# Special action that gets enabled if the current status no longer
# exists, as no other action can then change its state. (#5307)
self.actions['_reset'] = {
'default': 0,
'name': 'reset',
'newstate': 'new',
'oldstates': [], # Will not be invoked unless needed
'operations': ['reset_workflow'],
'permissions': []}
self.log.debug('Workflow actions at initialization: %s\n' %
str(self.actions))
for name, info in self.actions.iteritems():
if not info['newstate']:
self.log.warning("Ticket workflow action '%s' doesn't define "
"any transitions", name)
implements(ITicketActionController, IEnvironmentSetupParticipant)
# IEnvironmentSetupParticipant methods
def environment_created(self):
"""When an environment is created, we provide the basic-workflow,
unless a ticket-workflow section already exists.
"""
if not 'ticket-workflow' in self.config.sections():
load_workflow_config_snippet(self.config, 'basic-workflow.ini')
self.config.save()
self.actions = get_workflow_config(self.config)
def environment_needs_upgrade(self, db):
"""The environment needs an upgrade if there is no [ticket-workflow]
section in the config.
"""
return not list(self.config.options('ticket-workflow'))
def upgrade_environment(self, db):
"""Insert a [ticket-workflow] section using the original-workflow"""
load_workflow_config_snippet(self.config, 'original-workflow.ini')
self.config.save()
self.actions = get_workflow_config(self.config)
info_message = """
==== Upgrade Notice ====
The ticket Workflow is now configurable.
Your environment has been upgraded, but configured to use the original
workflow. It is recommended that you look at changing this configuration to use
basic-workflow.
Read TracWorkflow for more information (don't forget to 'wiki upgrade' as well)
"""
self.log.info(info_message.replace('\n', ' ').replace('==', ''))
print info_message
# ITicketActionController methods
def get_ticket_actions(self, req, ticket):
"""Returns a list of (weight, action) tuples that are valid for this
request and this ticket."""
# Get the list of actions that can be performed
# Determine the current status of this ticket. If this ticket is in
# the process of being modified, we need to base our information on the
# pre-modified state so that we don't try to do two (or more!) steps at
# once and get really confused.
status = ticket._old.get('status', ticket['status']) or 'new'
ticket_perm = req.perm(ticket.resource)
allowed_actions = []
for action_name, action_info in self.actions.items():
oldstates = action_info['oldstates']
if oldstates == ['*'] or status in oldstates:
# This action is valid in this state. Check permissions.
required_perms = action_info['permissions']
if self._is_action_allowed(ticket_perm, required_perms):
allowed_actions.append((action_info['default'],
action_name))
if not (status in ['new', 'closed'] or \
status in TicketSystem(self.env).get_all_status()) \
and 'TICKET_ADMIN' in ticket_perm:
# State no longer exists - add a 'reset' action if admin.
allowed_actions.append((0, '_reset'))
return allowed_actions
def _is_action_allowed(self, ticket_perm, required_perms):
if not required_perms:
return True
for permission in required_perms:
if permission in ticket_perm:
return True
return False
def get_all_status(self):
"""Return a list of all states described by the configuration.
"""
all_status = set()
for action_name, action_info in self.actions.items():
all_status.update(action_info['oldstates'])
all_status.add(action_info['newstate'])
all_status.discard('*')
all_status.discard('')
return all_status
def render_ticket_action_control(self, req, ticket, action):
self.log.debug('render_ticket_action_control: action "%s"' % action)
this_action = self.actions[action]
status = this_action['newstate']
operations = this_action['operations']
current_owner_or_empty = ticket._old.get('owner', ticket['owner'])
current_owner = current_owner_or_empty or '(none)'
if not (Chrome(self.env).show_email_addresses
or 'EMAIL_VIEW' in req.perm(ticket.resource)):
format_user = obfuscate_email_address
else:
format_user = lambda address: address
current_owner = format_user(current_owner)
control = [] # default to nothing
hints = []
if 'reset_workflow' in operations:
control.append(tag("from invalid state "))
hints.append(_("Current state no longer exists"))
if 'del_owner' in operations:
hints.append(_("The ticket will be disowned"))
if 'set_owner' in operations:
id = 'action_%s_reassign_owner' % action
selected_owner = req.args.get(id, req.authname)
            if 'set_owner' in this_action:
owners = [x.strip() for x in
this_action['set_owner'].split(',')]
elif self.config.getbool('ticket', 'restrict_owner'):
perm = PermissionSystem(self.env)
owners = perm.get_users_with_permission('TICKET_MODIFY')
owners.sort()
else:
owners = None
            if owners is None:
owner = req.args.get(id, req.authname)
control.append(tag_('to %(owner)s',
owner=tag.input(type='text', id=id,
name=id, value=owner)))
hints.append(_("The owner will be changed from "
"%(current_owner)s to the specified user",
current_owner=current_owner))
elif len(owners) == 1:
owner = tag.input(type='hidden', id=id, name=id,
value=owners[0])
formatted_owner = format_user(owners[0])
control.append(tag_('to %(owner)s ',
owner=tag(formatted_owner, owner)))
if ticket['owner'] != owners[0]:
hints.append(_("The owner will be changed from "
"%(current_owner)s to %(selected_owner)s",
current_owner=current_owner,
selected_owner=formatted_owner))
else:
control.append(tag_('to %(owner)s', owner=tag.select(
[tag.option(x, value=x,
selected=(x == selected_owner or None))
for x in owners],
id=id, name=id)))
hints.append(_("The owner will be changed from "
"%(current_owner)s to the selected user",
current_owner=current_owner))
elif 'set_owner_to_self' in operations and \
ticket._old.get('owner', ticket['owner']) != req.authname:
hints.append(_("The owner will be changed from %(current_owner)s "
"to %(authname)s", current_owner=current_owner,
authname=req.authname))
if 'set_resolution' in operations:
            if 'set_resolution' in this_action:
resolutions = [x.strip() for x in
this_action['set_resolution'].split(',')]
else:
resolutions = [val.name for val in Resolution.select(self.env)]
if not resolutions:
raise TracError(_("Your workflow attempts to set a resolution "
"but none is defined (configuration issue, "
"please contact your Trac admin)."))
id = 'action_%s_resolve_resolution' % action
if len(resolutions) == 1:
resolution = tag.input(type='hidden', id=id, name=id,
value=resolutions[0])
control.append(tag_('as %(resolution)s',
resolution=tag(resolutions[0],
resolution)))
hints.append(_("The resolution will be set to %(name)s",
name=resolutions[0]))
else:
selected_option = req.args.get(id,
TicketSystem(self.env).default_resolution)
control.append(tag_('as %(resolution)s',
resolution=tag.select(
[tag.option(x, value=x,
selected=(x == selected_option or None))
for x in resolutions],
id=id, name=id)))
hints.append(_("The resolution will be set"))
if 'del_resolution' in operations:
hints.append(_("The resolution will be deleted"))
if 'leave_status' in operations:
            control.append(_('as %(status)s ',
                             status=ticket._old.get('status',
                                                    ticket['status'])))
if len(operations) == 1:
hints.append(_("The owner will remain %(current_owner)s",
current_owner=current_owner)
if current_owner_or_empty else
_("The ticket will remain with no owner"))
else:
if status != '*':
hints.append(_("Next status will be '%(name)s'", name=status))
return (this_action['name'], tag(*control), '. '.join(hints) + '.'
if hints else '')
def get_ticket_changes(self, req, ticket, action):
this_action = self.actions[action]
# Enforce permissions
if not self._has_perms_for_action(req, this_action, ticket.resource):
# The user does not have any of the listed permissions, so we won't
# do anything.
return {}
updated = {}
# Status changes
status = this_action['newstate']
if status != '*':
updated['status'] = status
for operation in this_action['operations']:
if operation == 'reset_workflow':
updated['status'] = 'new'
elif operation == 'del_owner':
updated['owner'] = ''
elif operation == 'set_owner':
newowner = req.args.get('action_%s_reassign_owner' % action,
this_action.get('set_owner', '').strip())
# If there was already an owner, we get a list, [new, old],
# but if there wasn't we just get new.
                if isinstance(newowner, list):
newowner = newowner[0]
updated['owner'] = newowner
elif operation == 'set_owner_to_self':
updated['owner'] = req.authname
elif operation == 'del_resolution':
updated['resolution'] = ''
elif operation == 'set_resolution':
newresolution = req.args.get('action_%s_resolve_resolution' % \
action,
this_action.get('set_resolution', '').strip())
updated['resolution'] = newresolution
# leave_status is just a no-op here, so we don't look for it.
return updated
def apply_action_side_effects(self, req, ticket, action):
pass
def _has_perms_for_action(self, req, action, resource):
required_perms = action['permissions']
if required_perms:
for permission in required_perms:
if permission in req.perm(resource):
break
else:
# The user does not have any of the listed permissions
return False
return True
# Public methods (for other ITicketActionControllers that want to use
# our config file and provide an operation for an action)
def get_actions_by_operation(self, operation):
"""Return a list of all actions with a given operation
(for use in the controller's get_all_status())
"""
actions = [(info['default'], action) for action, info
in self.actions.items()
if operation in info['operations']]
return actions
def get_actions_by_operation_for_req(self, req, ticket, operation):
"""Return list of all actions with a given operation that are valid
in the given state for the controller's get_ticket_actions().
If state='*' (the default), all actions with the given operation are
returned.
"""
# Be sure to look at the original status.
status = ticket._old.get('status', ticket['status'])
actions = [(info['default'], action) for action, info
in self.actions.items()
if operation in info['operations'] and
('*' in info['oldstates'] or
status in info['oldstates']) and
self._has_perms_for_action(req, info, ticket.resource)]
return actions
class WorkflowMacro(WikiMacroBase):
_domain = 'messages'
_description = cleandoc_(
"""Render a workflow graph.
This macro accepts a TracWorkflow configuration and renders the states
and transitions as a directed graph. If no parameters are given, the
current ticket workflow is rendered. In WikiProcessors mode the `width`
and `height` arguments can be specified.
    (Defaults: `width = 800` and `height = 600`)
Examples:
{{{
[[Workflow()]]
[[Workflow(go = here -> there; return = there -> here)]]
{{{
#!Workflow width=700 height=700
leave = * -> *
leave.operations = leave_status
leave.default = 1
accept = new,assigned,accepted,reopened -> accepted
accept.permissions = TICKET_MODIFY
accept.operations = set_owner_to_self
resolve = new,assigned,accepted,reopened -> closed
resolve.permissions = TICKET_MODIFY
resolve.operations = set_resolution
reassign = new,assigned,accepted,reopened -> assigned
reassign.permissions = TICKET_MODIFY
reassign.operations = set_owner
reopen = closed -> reopened
reopen.permissions = TICKET_CREATE
reopen.operations = del_resolution
}}}
}}}
""")
def expand_macro(self, formatter, name, text, args):
if not text:
raw_actions = self.config.options('ticket-workflow')
else:
if args is None:
text = '\n'.join([line.lstrip() for line in text.split(';')])
if not '[ticket-workflow]' in text:
text = '[ticket-workflow]\n' + text
parser = RawConfigParser()
parser.readfp(StringIO(text))
raw_actions = list(parser.items('ticket-workflow'))
actions = parse_workflow_config(raw_actions)
states = list(set(
[state for action in actions.itervalues()
for state in action['oldstates']] +
[action['newstate'] for action in actions.itervalues()]))
action_names = actions.keys()
edges = []
for name, action in actions.items():
new_index = states.index(action['newstate'])
name_index = action_names.index(name)
for old_state in action['oldstates']:
old_index = states.index(old_state)
edges.append((old_index, new_index, name_index))
args = args or {}
graph = {'nodes': states, 'actions': action_names, 'edges': edges,
'width': args.get('width', 800),
'height': args.get('height', 600)}
graph_id = '%012x' % id(graph)
req = formatter.req
add_script(req, 'common/js/excanvas.js', ie_if='IE')
add_script(req, 'common/js/workflow_graph.js')
add_script_data(req, {'graph_%s' % graph_id: graph})
return tag.div(_("Enable JavaScript to display the workflow graph."),
class_='trac-workflow-graph system-message',
id='trac-workflow-graph-%s' % graph_id)
| bsd-3-clause | -5,058,617,368,730,896,000 | 42.268924 | 79 | 0.569173 | false |
cs207-project/TimeSeries | procs/_corr.py | 1 | 4794 | import numpy.fft as nfft
import numpy as np
import timeseries as ts
from scipy.stats import norm
# import pyfftw
import sys
#sys.path.append("/Users/yuhantang/CS207/TimeSeries/procs")
from .interface import *
def createfromlist(l):
d = new_darray(len(l))
for i in range(0,len(l)):
darray_set(d,i,l[i])
return d
def tsmaker(m, s, j):
meta={}
meta['order'] = int(np.random.choice([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]))
meta['blarg'] = int(np.random.choice([1, 2]))
t = np.arange(0.0, 1.0, 0.01)
v = norm.pdf(t, m, s) + j*np.random.randn(100)
return meta, ts.TimeSeries(t, v)
def random_ts(a):
t = np.arange(0.0, 1.0, 0.01)
v = a*np.random.random(100)
return ts.TimeSeries(t, v)
def stand(x, m, s):
return (x-m)/s
def ccor(ts1, ts2):
"given two standardized time series, compute their cross-correlation using FFT"
    # Round the length up to the next power of two, e.g. 110 -> 128
    next_2 = int(2**np.ceil(np.log2(len(ts1.values()))))
#
ts1_value = ts1.values()
ts2_value = ts2.values()
ts1_container,ts2_container = [],[]
ts1_zero_container = [0]*len(ts1.values())
ts2_zero_container = [0]*len(ts2.values())
ts1_c_array,ts2_c_array = [None]*(len(ts1.values())*2),[None]*(len(ts2.values())*2)
ts1_c_array[::2] = ts1_value
ts1_c_array[1::2] = ts1_zero_container
ts2_c_array[::2] = ts2_value
ts2_c_array[1::2] = ts2_zero_container
for i in range(len(ts1_c_array)+1,next_2*2):
ts1_c_array.append(np.double(0))
for i in range(len(ts2_c_array)+1,next_2*2):
ts2_c_array.append(np.double(0))
ts1_c_array.insert(0,0)
ts2_c_array.insert(0,0)
ts1_c_array = createfromlist(np.double(ts1_c_array))
ts2_c_array = createfromlist(np.double(ts2_c_array))
four1(ts1_c_array,next_2,1)
four1(ts2_c_array,next_2,1)
for i in range(len(ts2.values())*2+1):
ts1_container.append(darray_get(ts1_c_array,i))
for j in range(len(ts1.values())*2+1):
ts2_container.append(darray_get(ts2_c_array,j))
ts1_fft = np.asarray(ts1_container[1::2]) + 1j * np.asarray(ts1_container[2::2])
ts2_fft = np.asarray(ts2_container[1::2]) + 1j * np.asarray(ts2_container[2::2])
ts1_fft = ts1_fft[:len(ts1)+1]
ts2_fft = ts2_fft[:len(ts2)+1]
# ifft part
ts1_ts2_conj = ts1_fft * np.conj(ts2_fft)
ts1_ts2_ifft_container = [0]*len(ts1_ts2_conj)*2
ts1_ts2_ifft_container[::2] = ts1_ts2_conj.real
ts1_ts2_ifft_container[1::2] = ts1_ts2_conj.imag
for i in range(len(ts1_ts2_conj)+1, next_2 *2):
ts1_ts2_ifft_container.append(0)
ts1_ts2_ifft_container.insert(0,0)
ts1_ts2_ifft_container = createfromlist(ts1_ts2_ifft_container)
four1(ts1_ts2_ifft_container, next_2, -1)
ts1_ts2_ifft_container_python = []
for i in range(len(ts1_ts2_conj)*2+1):
ts1_ts2_ifft_container_python.append(darray_get(ts1_ts2_ifft_container,i))
ccor_value = np.asarray(ts1_ts2_ifft_container_python[1::2])
return 1/len(ts1) * ccor_value
def max_corr_at_phase(ts1, ts2):
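    # The returned idx is the lag (in samples) at which the
    # cross-correlation peaks; maxcorr is the correlation at that lag.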
ccorts = ccor(ts1, ts2)
idx = np.argmax(ccorts)
maxcorr = ccorts[idx]
return idx, maxcorr
#The equation for the kernelized cross correlation is given at
#http://www.cs.tufts.edu/~roni/PUB/ecml09-tskernels.pdf
#normalize the kernel there by np.sqrt(K(x,x)K(y,y)) so that the correlation
#of a time series with itself is 1.
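# Concretely, the kernel computed below is
#   k(x, y) = sum_s exp(mult * ccor(x, y)[s])
#           / sqrt(sum_s exp(mult * ccor(x, x)[s]) * sum_s exp(mult * ccor(y, y)[s]))
# which makes k(x, x) == 1.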
def kernel_corr(ts1, ts2, mult=1):
"compute a kernelized correlation so that we can get a real distance"
#your code here.
cross_correlation = ccor(ts1, ts2) * mult
corr_ts1, corr_ts2 = ccor(ts1, ts1) * mult, ccor(ts2, ts2) * mult
return np.sum(np.exp(cross_correlation))/np.sqrt(np.sum(np.exp(corr_ts1))*np.sum(np.exp(corr_ts2)))
#this is for a quick and dirty test of these functions
#you might need to add procs to pythonpath for this to work
if __name__ == "__main__":
print("HI")
_, t1 = tsmaker(0.5, 0.1, 0.01)
_, t2 = tsmaker(0.5, 0.1, 0.01)
print(t1.mean(), t1.std(), t2.mean(), t2.std())
import matplotlib.pyplot as plt
plt.plot(t1)
plt.plot(t2)
plt.show()
standts1 = stand(t1, t1.mean(), t1.std())
standts2 = stand(t2, t2.mean(), t2.std())
#print(type(standts1),'this is the type=================*********')
#assert 1 == 2
idx, mcorr = max_corr_at_phase(standts1, standts2)
print(idx, mcorr)
sumcorr = kernel_corr(standts1, standts2, mult=10)
print(sumcorr)
t3 = random_ts(2)
t4 = random_ts(3)
plt.plot(t3)
plt.plot(t4)
plt.show()
standts3 = stand(t3, t3.mean(), t3.std())
standts4 = stand(t4, t4.mean(), t4.std())
idx, mcorr = max_corr_at_phase(standts3, standts4)
print(idx, mcorr)
sumcorr = kernel_corr(standts3, standts4, mult=10)
print(sumcorr)
| mit | 2,104,750,453,322,507,300 | 29.929032 | 103 | 0.623905 | false |
benjaminoh1/tensorflowcookbook | Chapter 07/bag_of_words.py | 1 | 6082 | # Working with Bag of Words
#---------------------------------------
#
# In this example, we will download and preprocess the ham/spam
# text data. We will then use a one-hot-encoding to make a
# bag of words set of features to use in logistic regression.
#
# We will use these one-hot-vectors for logistic regression to
# predict if a text is spam or ham.
import tensorflow as tf
import matplotlib.pyplot as plt
import os
import numpy as np
import csv
import string
import requests
import io
from zipfile import ZipFile
from tensorflow.contrib import learn
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Start a graph session
sess = tf.Session()
# Check if data was downloaded, otherwise download it and save for future use
save_file_name = os.path.join('temp','temp_spam_data.csv')
if os.path.isfile(save_file_name):
text_data = []
with open(save_file_name, 'r') as temp_output_file:
reader = csv.reader(temp_output_file)
for row in reader:
text_data.append(row)
else:
zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'
r = requests.get(zip_url)
z = ZipFile(io.BytesIO(r.content))
    file_contents = z.read('SMSSpamCollection')
    # Format Data
    text_data = file_contents.decode()
text_data = text_data.encode('ascii',errors='ignore')
text_data = text_data.decode().split('\n')
text_data = [x.split('\t') for x in text_data if len(x)>=1]
# And write to csv
with open(save_file_name, 'w') as temp_output_file:
writer = csv.writer(temp_output_file)
writer.writerows(text_data)
texts = [x[1] for x in text_data]
target = [x[0] for x in text_data]
# Relabel 'spam' as 1, 'ham' as 0
target = [1 if x=='spam' else 0 for x in target]
# Normalize text
# Lower case
texts = [x.lower() for x in texts]
# Remove punctuation
texts = [''.join(c for c in x if c not in string.punctuation) for x in texts]
# Remove numbers
texts = [''.join(c for c in x if c not in '0123456789') for x in texts]
# Trim extra whitespace
texts = [' '.join(x.split()) for x in texts]
# Plot histogram of text lengths
text_lengths = [len(x.split()) for x in texts]
text_lengths = [x for x in text_lengths if x < 50]
plt.hist(text_lengths, bins=25)
plt.title('Histogram of # of Words in Texts')
# Choose max text word length at 25
sentence_size = 25
min_word_freq = 3
# Setup vocabulary processor
vocab_processor = learn.preprocessing.VocabularyProcessor(sentence_size, min_frequency=min_word_freq)
# Have to fit transform to get length of unique words.
vocab_processor.fit_transform(texts)
embedding_size = len(vocab_processor.vocabulary_)
# Split up data set into train/test
train_indices = np.random.choice(len(texts), round(len(texts)*0.8), replace=False)
test_indices = np.array(list(set(range(len(texts))) - set(train_indices)))
texts_train = [x for ix, x in enumerate(texts) if ix in train_indices]
texts_test = [x for ix, x in enumerate(texts) if ix in test_indices]
target_train = [x for ix, x in enumerate(target) if ix in train_indices]
target_test = [x for ix, x in enumerate(target) if ix in test_indices]
# Setup Index Matrix for one-hot-encoding
identity_mat = tf.diag(tf.ones(shape=[embedding_size]))
# Create variables for logistic regression
A = tf.Variable(tf.random_normal(shape=[embedding_size,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
# Initialize placeholders
x_data = tf.placeholder(shape=[sentence_size], dtype=tf.int32)
y_target = tf.placeholder(shape=[1, 1], dtype=tf.float32)
# Text-Vocab Embedding
x_embed = tf.nn.embedding_lookup(identity_mat, x_data)
x_col_sums = tf.reduce_sum(x_embed, 0)
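# x_embed has shape [sentence_size, embedding_size]: one one-hot row per
# word index, so summing over axis 0 yields the sentence's bag-of-words
# count vector.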
# Declare model operations
x_col_sums_2D = tf.expand_dims(x_col_sums, 0)
model_output = tf.add(tf.matmul(x_col_sums_2D, A), b)
# Declare loss function (Cross Entropy loss)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(model_output, y_target))
# Prediction operation
prediction = tf.sigmoid(model_output)
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.001)
train_step = my_opt.minimize(loss)
# Intitialize Variables
init = tf.initialize_all_variables()
sess.run(init)
# Start Logistic Regression
print('Starting Training Over {} Sentences.'.format(len(texts_train)))
loss_vec = []
train_acc_all = []
train_acc_avg = []
for ix, t in enumerate(vocab_processor.fit_transform(texts_train)):
y_data = [[target_train[ix]]]
sess.run(train_step, feed_dict={x_data: t, y_target: y_data})
temp_loss = sess.run(loss, feed_dict={x_data: t, y_target: y_data})
loss_vec.append(temp_loss)
if (ix+1)%10==0:
print('Training Observation #' + str(ix+1) + ': Loss = ' + str(temp_loss))
# Keep trailing average of past 50 observations accuracy
# Get prediction of single observation
[[temp_pred]] = sess.run(prediction, feed_dict={x_data:t, y_target:y_data})
# Get True/False if prediction is accurate
train_acc_temp = target_train[ix]==np.round(temp_pred)
train_acc_all.append(train_acc_temp)
if len(train_acc_all) >= 50:
train_acc_avg.append(np.mean(train_acc_all[-50:]))
# Get test set accuracy
print('Getting Test Set Accuracy For {} Sentences.'.format(len(texts_test)))
test_acc_all = []
for ix, t in enumerate(vocab_processor.fit_transform(texts_test)):
y_data = [[target_test[ix]]]
if (ix+1)%50==0:
print('Test Observation #' + str(ix+1))
# Keep trailing average of past 50 observations accuracy
# Get prediction of single observation
[[temp_pred]] = sess.run(prediction, feed_dict={x_data:t, y_target:y_data})
# Get True/False if prediction is accurate
test_acc_temp = target_test[ix]==np.round(temp_pred)
test_acc_all.append(test_acc_temp)
print('\nOverall Test Accuracy: {}'.format(np.mean(test_acc_all)))
# Plot training accuracy over time
plt.plot(range(len(train_acc_avg)), train_acc_avg, 'k-', label='Train Accuracy')
plt.title('Avg Training Acc Over Past 50 Generations')
plt.xlabel('Generation')
plt.ylabel('Training Accuracy')
plt.show() | mit | -8,116,889,667,283,109,000 | 33.5625 | 101 | 0.694015 | false |
antiface/ThinkBayes2 | code/cookie3.py | 1 | 1095 | """This file contains code for use with "Think Bayes",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import thinkbayes2
class Cookie(thinkbayes2.Suite):
"""A map from string bowl ID to probablity."""
def Likelihood(self, data, hypo):
"""The likelihood of the data under the hypothesis.
data: string cookie type
hypo: string bowl ID
"""
like = hypo[data] / hypo.Total()
if like:
hypo[data] -= 1
return like
def main():
bowl1 = thinkbayes2.Hist(dict(vanilla=30, chocolate=10))
bowl2 = thinkbayes2.Hist(dict(vanilla=20, chocolate=20))
pmf = Cookie([bowl1, bowl2])
print('After 1 vanilla')
pmf.Update('vanilla')
for hypo, prob in pmf.Items():
print(hypo, prob)
print('\nAfter 1 vanilla, 1 chocolate')
pmf.Update('chocolate')
for hypo, prob in pmf.Items():
print(hypo, prob)
if __name__ == '__main__':
main()
| gpl-2.0 | 7,652,526,682,298,288,000 | 23.333333 | 60 | 0.628311 | false |
dvro/scikit-protopy | protopy/base.py | 1 | 4528 | """Base and mixin classes for instance reduction techniques"""
# Author: Dayvid Victor <[email protected]>
# License: BSD Style
import warnings
from abc import ABCMeta, abstractmethod
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.neighbors.classification import KNeighborsClassifier
from sklearn.utils import check_array
from sklearn.externals import six
class InstanceReductionWarning(UserWarning):
pass
# Make sure that InstanceReductionWarning instances are always displayed
warnings.simplefilter("always", InstanceReductionWarning)
class InstanceReductionBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for instance reduction estimators."""
@abstractmethod
def __init__(self):
pass
class InstanceReductionMixin(InstanceReductionBase, ClassifierMixin):
"""Mixin class for all instance reduction techniques"""
    def set_classifier(self, classifier):
        """Sets the classifier to be used in the instance reduction
        process and classification.
        Parameters
        ----------
        classifier : classifier, following the KNeighborsClassifier style
            (default = KNN)
        """
self.classifier = classifier
def reduce_data(self, X, y):
"""Perform the instance reduction procedure on the given training data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training set.
y : array-like, shape = [n_samples]
Labels for X.
Returns
-------
X_ : array-like, shape = [indeterminated, n_features]
Resulting training set.
y_ : array-like, shape = [indertaminated]
Labels for X_
"""
pass
def fit(self, X, y, reduce_data=True):
"""
Fit the InstanceReduction model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
reduce_data : bool, flag indicating if the reduction would be performed
"""
self.X = X
self.y = y
if reduce_data:
self.reduce_data(X, y)
return self
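    # Typical usage with a (hypothetical) concrete subclass that
    # implements reduce_data:
    #   reducer = SomeReductionTechnique()
    #   reducer.fit(X_train, y_train)   # reduces and stores X_, y_
    #   y_pred = reducer.predict(X_test)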
def predict(self, X, n_neighbors=1):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
The default prediction is using KNeighborsClassifier, if the
instance reducition algorithm is to be performed with another
classifier, it should be explicited overwritten and explained
in the documentation.
"""
X = check_array(X)
if not hasattr(self, "X_") or self.X_ is None:
raise AttributeError("Model has not been trained yet.")
if not hasattr(self, "y_") or self.y_ is None:
raise AttributeError("Model has not been trained yet.")
if self.classifier == None:
self.classifier = KNeighborsClassifier(n_neighbors=n_neighbors)
self.classifier.fit(self.X_, self.y_)
return self.classifier.predict(X)
def predict_proba(self, X):
"""Return probability estimates for the test data X.
after a given prototype selection algorithm.
Parameters
----------
X : array, shape = (n_samples, n_features)
A 2-D array representing the test points.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
self.classifier.fit(self.X_, self.y_)
return self.classifier.predict_proba(X)
| bsd-2-clause | -6,141,545,886,733,990,000 | 28.38961 | 79 | 0.607601 | false |
vguzmanp/cloud-in-one | main_crypto.py | 1 | 1392 | #!/usr/bin/env python3
import getpass
import argparse
import shutil
from core.databaseManager import DatabaseManager
from core.securityModule import SecurityModule
def processFile(file_in_name, file_out_name, encrypt_flag):
user = input("CLOUD-IN-ONE Username: ")
password = getpass.getpass()
databaseManager = DatabaseManager(':memory:')
sec = SecurityModule(databaseManager, user, password)
file_processed = None
with open(file_in_name, 'rb') as f_in:
if encrypt_flag:
file_processed = sec.encrypt(f_in)
else:
file_processed = sec.decrypt(f_in)
with open(file_out_name, 'wb') as f_out:
file_processed.seek(0)
shutil.copyfileobj(file_processed, f_out)
file_processed.close()
def main():
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-d", "--decrypt", action="store_true")
group.add_argument("-e", "--encrypt", action="store_true")
parser.add_argument("file", help="the file to encrypt / decrypt")
parser.add_argument("file_output", help="name of the destination file")
args = parser.parse_args()
encrypt_flag = args.encrypt
if not encrypt_flag:
encrypt_flag = not args.decrypt
processFile(args.file, args.file_output, encrypt_flag)
if __name__ == '__main__':
main()
| mit | 120,298,394,218,790,240 | 28 | 75 | 0.666667 | false |
PaesslerAG/django-performance-testing | settings.py | 1 | 1267 | # Django settings for autodata project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
}
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'mq%31q+sjj^)m^tvy(klwqw6ksv7du2yzdf9-django_performance_testing'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django_performance_testing',
'testapp',
)
STATIC_URL = '/static/'
ROOT_URLCONF = None
| bsd-3-clause | 3,754,765,840,320,147,000 | 27.155556 | 78 | 0.598264 | false |
TestInABox/openstackinabox | openstackinabox/tests/services/cinder/v1/volumes/test_retrieve.py | 1 | 1154 | """
"""
import unittest
import requests
import stackinabox.util.requests_mock.core
from stackinabox.stack import StackInABox
from openstackinabox.services.cinder import CinderV1Service
from openstackinabox.services.keystone import KeystoneV2Service
class TestCinderV1Retrieve(unittest.TestCase):
def setUp(self):
super(TestCinderV1Retrieve, self).setUp()
self.keystone = KeystoneV2Service()
self.cinder = CinderV1Service(self.keystone)
self.headers = {
'x-auth-token': self.keystone.model.tokens.admin_token
}
StackInABox.register_service(self.keystone)
StackInABox.register_service(self.cinder)
def tearDown(self):
super(TestCinderV1Retrieve, self).tearDown()
StackInABox.reset_services()
def test_volume_retrieve(self):
with stackinabox.util.requests_mock.core.activate():
stackinabox.util.requests_mock.core.requests_mock_registration(
'localhost'
)
res = requests.get(
'http://localhost/cinder/v1/volumes'
)
self.assertEqual(res.status_code, 500)
| apache-2.0 | -2,659,424,865,563,916,000 | 30.189189 | 75 | 0.672444 | false |
owlabs/incubator-airflow | airflow/models/taskreschedule.py | 1 | 3374 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TaskReschedule tracks rescheduled task instances."""
from sqlalchemy import Column, ForeignKeyConstraint, Index, Integer, String, asc
from airflow.models.base import Base, ID_LEN
from airflow.utils.db import provide_session
from airflow.utils.sqlalchemy import UtcDateTime
class TaskReschedule(Base):
"""
TaskReschedule tracks rescheduled task instances.
"""
__tablename__ = "task_reschedule"
id = Column(Integer, primary_key=True)
task_id = Column(String(ID_LEN), nullable=False)
dag_id = Column(String(ID_LEN), nullable=False)
execution_date = Column(UtcDateTime, nullable=False)
try_number = Column(Integer, nullable=False)
start_date = Column(UtcDateTime, nullable=False)
end_date = Column(UtcDateTime, nullable=False)
duration = Column(Integer, nullable=False)
reschedule_date = Column(UtcDateTime, nullable=False)
__table_args__ = (
Index('idx_task_reschedule_dag_task_date', dag_id, task_id, execution_date,
unique=False),
ForeignKeyConstraint([task_id, dag_id, execution_date],
['task_instance.task_id', 'task_instance.dag_id',
'task_instance.execution_date'],
name='task_reschedule_dag_task_date_fkey',
ondelete='CASCADE')
)
def __init__(self, task, execution_date, try_number, start_date, end_date,
reschedule_date):
self.dag_id = task.dag_id
self.task_id = task.task_id
self.execution_date = execution_date
self.try_number = try_number
self.start_date = start_date
self.end_date = end_date
self.reschedule_date = reschedule_date
self.duration = (self.end_date - self.start_date).total_seconds()
@staticmethod
@provide_session
def find_for_task_instance(task_instance, session):
"""
Returns all task reschedules for the task instance and try number,
in ascending order.
:param task_instance: the task instance to find task reschedules for
:type task_instance: airflow.models.TaskInstance
"""
TR = TaskReschedule
return (
session
.query(TR)
.filter(TR.dag_id == task_instance.dag_id,
TR.task_id == task_instance.task_id,
TR.execution_date == task_instance.execution_date,
TR.try_number == task_instance.try_number)
.order_by(asc(TR.id))
.all()
)
| apache-2.0 | -3,710,862,593,287,448,000 | 38.694118 | 83 | 0.648785 | false |
cykerway/wmwm | setup.py | 1 | 7315 | #!/usr/bin/env python3
'''
setuptools based setup module;
see <https://packaging.python.org/en/latest/distributing.html>;
'''
from os import path
from setuptools import find_packages
from setuptools import setup
here = path.abspath(path.dirname(__file__))
## get long description from readme file;
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
## ========================================================================
## required for pypi upload;
## ========================================================================
## project name;
##
## this determines how users install this project:
##
## pip install sampleproject
##
## and where this project lives on pypi:
##
## <https://pypi.org/project/sampleproject/>
##
## this name is registered for you the first time you publish this package;
##
## name specification:
##
## <https://packaging.python.org/specifications/core-metadata/#name>
##
name='awd',
## project version;
##
## version specification (pep 440):
##
## <https://www.python.org/dev/peps/pep-0440/>;
##
## single-sourcing techniques:
##
## <https://packaging.python.org/en/latest/single_source_version.html>
##
version='1.3.4',
## project homepage;
##
## this arg corresponds to "home-page" metadata field:
##
## <https://packaging.python.org/specifications/core-metadata/#home-page-optional>
##
url='https://github.com/cykerway/awd',
## author name;
author='Cyker Way',
## author email address;
author_email='[email protected]',
## packages;
##
## you can provide a list of packages manually or use `find_packages()`;
##
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
## ========================================================================
## optional for pypi upload;
## ========================================================================
## a one-line description;
##
## this arg corresponds to "summary" metadata field:
##
## <https://packaging.python.org/specifications/core-metadata/#summary>
##
description='a window director;',
## a longer description shown on project homepage on pypi;
##
## this is often the same as the readme;
##
## this arg corresponds to "description" metadata field:
##
## <https://packaging.python.org/specifications/core-metadata/#description-optional>
##
long_description=long_description,
## longer description content type;
##
## valid values are: `text/plain`, `text/x-rst`, `text/markdown`;
##
## this arg corresponds to "description-content-type" metadata field:
##
## <https://packaging.python.org/specifications/core-metadata/#description-content-type-optional>
##
long_description_content_type='text/markdown',
## classifiers categorizing this project;
##
## see <https://pypi.org/classifiers/>;
##
classifiers=[
## development status;
# 'Development Status :: 3 - Alpha',
'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
## intended audience;
# 'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
## topic;
'Topic :: Desktop Environment',
# 'Topic :: Games/Entertainment',
# 'Topic :: Multimedia',
# 'Topic :: Office/Business',
# 'Topic :: Scientific/Engineering',
# 'Topic :: Software Development',
# 'Topic :: System',
## license;
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# 'License :: OSI Approved :: BSD License',
# 'License :: OSI Approved :: MIT License',
## supported python versions;
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
## project keywords;
##
## these keywords will appear on the project page;
##
keywords='window layout',
## package data;
##
## this is a dict mapping package names to a list of relative path names
## (or glob patterns) that should be copied into the package when
## installed; the path names are interpreted relative to the package dir;
##
package_data={
# 'sample': ['*.bin'],
},
## additional data files;
##
## this is a sequence of `(dir, files)` pairs; each `(dir, files)` pair
## specifies the install dir and the files to install there; if `dir` is a
## relative path, it is relative to the install prefix (`sys.prefix` or
## `sys.exec_prefix`); each file in `files` is interpreted relative to the
## `setup.py` script;
##
## see <https://docs.python.org/3/distutils/setupscript.html#installing-additional-files>;
##
data_files=[
# ('data_files', ['data/data0.bin', 'data/data1.bin']),
],
## package dependencies;
##
## this is a list of packages that this project depends on; these packages
## will be installed by pip when this project is installed;
##
install_requires=[
'argparse-ext',
'ewmh-ext',
'logging-ext',
'python-xlib',
],
## extra package dependencies;
##
## this is a dict mapping extras (optional features of this project) to a
## list of packages that those extras depend on;
##
## users will be able to install these using the extras syntax:
##
## pip install sampleproject[dev]
##
## see <https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies>
##
extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
},
## to create executable scripts, use entry points:
##
## <https://setuptools.readthedocs.io/en/latest/setuptools.html#automatic-script-creation>
##
## for example, the following would provide a console script `sample-cli`
## which executes the `main` function in package `sample.cli`, and a gui
## script `sample-gui` which executes the `main` function in package
## `sample.gui`;
entry_points={
'console_scripts': [
'awd=awd.__main__:main',
],
# 'gui_scripts': [
# 'sample-gui=sample.gui:main',
# ],
},
## additional urls that are relevant to this project;
##
## examples include: where the package tracks issues, where the source is
## hosted, where to say thanks to the package maintainers, and where to
## support the project financially; the keys are used to render the link
## texts on pypi;
##
## this arg corresponds to "project-url" metadata fields:
##
## <https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use>
##
project_urls={
'Bug Reports': 'https://github.com/cykerway/awd/issues',
# 'Funding': 'https://donate.pypi.org',
# 'Say Thanks!': 'http://saythanks.io/to/example',
'Source': 'https://github.com/cykerway/awd/',
},
)
| gpl-3.0 | -8,959,277,930,186,001,000 | 30.530172 | 136 | 0.579357 | false |
nicko96/Chrome-Infra | glyco/glucose/install.py | 1 | 8024 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import hashlib
import httplib2
import logging
import os
import sys
import urllib
from glucose import util
LOGGER = logging.getLogger(__name__)
DEFAULT_CACHE = os.path.join(os.path.expanduser('~'), '.glyco_wheelcache')
def get_sha1_from_filename(filename, verbose=True):
"""Extract the claimed sha1 from the filename.
Also verify the name matches the wheel convention.
Args:
filename (str): path to a local file.
verbose (bool): print messages only if True.
Returns: claimed_hash(str) or None if no hash can be found.
"""
basename = os.path.split(filename)[-1]
wheel_info = util.WHEEL_FILE_RE.match(basename)
if not wheel_info:
if verbose:
print >> sys.stderr, 'Invalid file name for wheel: %s' % basename
return None
if not wheel_info.group('build'):
if verbose:
print >> sys.stderr, ('No hash could be found in the filename.\n'
'Has this file been generated with Glyco?\n'
'%s' % basename)
return None
return wheel_info.group('build').split('_')[1]
def has_valid_sha1(filename, verbose=True):
"""Verify the hash of a whl file created by Glyco.
Args:
filename (str): path to a whl file.
verbose(bool): print messages only if True.
Returns:
matches (bool): true if the file content and the name match.
"""
claimed_sha = get_sha1_from_filename(filename, verbose=verbose)
if not claimed_sha:
return False
with open(filename, 'rb') as f:
digest = hashlib.sha1(f.read())
actual_sha = digest.hexdigest()
return actual_sha == claimed_sha
def get_install_list(packages):
"""Consolidate the list of things to install.
Args:
packages (list of str): local paths or https/gs URLs.
"""
install_list = []
for package in packages:
location = package
location_type = 'ERROR'
error = None
# Let's support only https. Security matters.
if package.startswith('http://'):
error = 'Non-secure http is not supported, please use https: %s' % package
elif package.startswith('https://'):
location_type = 'http'
elif package.startswith('gs://'):
# TODO(pgervais): handle Cloud Storage properly.
location_type = 'http'
location = 'https://storage.googleapis.com/' + package[len('gs://'):]
elif os.path.isfile(package):
location = 'file://%s' % urllib.pathname2url(os.path.abspath(package))
location_type = 'file'
else:
error = ('Cannot find this file locally: %s\n'
'If you did not specify a file but an URI, '
'then the protocol is probably not supported.'
% os.path.abspath(package))
install_list.append({'location': location,
'location_type': location_type,
'error': error})
return install_list
def fetch_packages(install_list, requester=httplib2.Http(),
cache=DEFAULT_CACHE, verbose=True):
"""Make sure there is a local copy of all packages.
All paths returned by this function point at existing wheel files, with
correct hashes.
Args:
install_list (list of dict): return value of get_install_list.
requester (httplib2.Http): object to use to send http requests.
cache (str): path to a local directory used to store wheel files downloaded
from a remote storage.
verbose(bool): print messages only if True.
Returns:
paths (list of strings): path to each local wheel file.
"""
if not os.path.isdir(cache):
os.mkdir(cache)
paths = []
all_valid = True
for source in install_list:
if source['location_type'] == 'file':
assert source['location'].startswith('file://')
filename = source['location'][len('file://'):]
# FIXME(pgervais): convert to a windows path (/ -> \) and unquote.
if not has_valid_sha1(filename, verbose=verbose):
if verbose:
print >> sys.stderr, ("File content does not match hash for %s"
% filename)
all_valid = False
else:
paths.append(filename)
elif source['location_type'] == 'http':
# This is an URL so the path separator is necessarily /
base_filename = source['location'].split('/')[-1]
filename = os.path.join(cache, base_filename)
if not os.path.exists(filename):
# Try to download file to local cache
resp, content = requester.request(source['location'], 'GET')
if resp['status'] == '200':
temp_filename = os.path.join(cache, base_filename + '.tmp')
try:
with open(temp_filename, 'wb') as f:
f.write(content)
os.rename(temp_filename, filename)
except OSError:
if os.path.isfile(temp_filename):
os.remove(temp_filename)
else:
if verbose:
print >> sys.stderr, ("Got status %s when talking to %s" %
(resp['status'], source['location']))
all_valid = False
# We have to test again for existence since the download
# could have failed.
if os.path.exists(filename) and not has_valid_sha1(filename,
verbose=verbose):
if verbose:
print >> sys.stderr, ("File content does not match hash for %s"
% filename)
all_valid = False
# The file is bad anyway, there's no point in keeping it around.
# Plus we probably want to retry the download some time in the future.
os.remove(filename)
else:
paths.append(filename)
if not all_valid:
raise ValueError('Some errors occurred when getting wheel files.')
return paths
def install(args):
"""Install wheel files"""
if not args.packages:
print 'No packages have been provided on the command-line, doing nothing.'
return
if not args.install_dir:
print >> sys.stderr, ('No destination directory specified, aborting. \n'
'Use the --install-dir option to specify it')
return 2
install_list = get_install_list(args.packages)
error_msgs = [d['error'] for d in install_list if 'error' in d and d['error']]
if error_msgs:
print >> sys.stderr, ('\n'.join(error_msgs))
print >> sys.stderr, 'Aborting (no packages installed)'
return 1
try:
package_paths = fetch_packages(install_list)
except ValueError:
print >> sys.stderr, 'Aborting (no packages installed)'
return 1
if not os.path.isdir(args.install_dir):
os.mkdir(args.install_dir)
with util.Virtualenv() as venv:
cmd = (['pip', 'install', '--no-index', '--target', args.install_dir]
+ package_paths)
LOGGER.debug('Running %s', ' '.join(cmd))
venv.check_call(cmd)
def add_subparser(subparsers):
"""Add the 'install' command.
Also add the 'lysis' command as a synonym (and pun).
Args:
subparsers: output of argparse.ArgumentParser.add_subparsers()
"""
install_parser = subparsers.add_parser('install',
help='Install wheel files to a local '
'directory (synonym of lysis)')
install_parser.set_defaults(command=install)
# Add synonym just for the pun
lysis_parser = subparsers.add_parser('lysis',
help='Install wheel files to a local '
'directory (synonym of install)')
lysis_parser.set_defaults(command=install)
for parser in (install_parser, lysis_parser):
parser.add_argument('--install-dir', '-i',
help='Directory where to install packages')
parser.add_argument('packages', metavar='PACKAGE', nargs='*',
help='Wheel files to install (path)')
| bsd-3-clause | 3,545,463,092,387,777,500 | 32.157025 | 80 | 0.61154 | false |
google-research/google-research | fairness_teaching/baseline/all_real.py | 1 | 5585 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
import numpy as np
import tensorflow as tf
import data
import model
# pylint: skip-file
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=str, default='0', help='GPU to use [default: GPU 0]')
parser.add_argument('--real_path', default='../data/resize128')
parser.add_argument('--fake_path', default='../data/fake')
parser.add_argument('--train_label', default='../data/annotations/train_label.txt')
parser.add_argument('--test_label', default='../data/annotations/test_label.txt')
parser.add_argument('--valid_label', default='../data/annotations/val_label.txt')
parser.add_argument('--max_epoch', type=int, default=20, help='Epoch to run [default: 20]')
parser.add_argument('--batch_size', type=int, default=64, help='Batch Size during training [default: 64]')
parser.add_argument('--n_class', type=int, default=2, help='Number of class [default: 2]')
parser.add_argument('--lr', type=float, default=0.1, help='Initial learning rate [default: 0.1]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='momentum', help='adam or momentum [default: momentum]')
FLAGS = parser.parse_args()
ATT_ID = {'5_o_Clock_Shadow': 0, 'Arched_Eyebrows': 1, 'Attractive': 2,
'Bags_Under_Eyes': 3, 'Bald': 4, 'Bangs': 5, 'Big_Lips': 6,
'Big_Nose': 7, 'Black_Hair': 8, 'Blond_Hair': 9, 'Blurry': 10,
'Brown_Hair': 11, 'Bushy_Eyebrows': 12, 'Chubby': 13,
'Double_Chin': 14, 'Eyeglasses': 15, 'Goatee': 16,
'Gray_Hair': 17, 'Heavy_Makeup': 18, 'High_Cheekbones': 19,
'Male': 20, 'Mouth_Slightly_Open': 21, 'Mustache': 22,
'Narrow_Eyes': 23, 'No_Beard': 24, 'Oval_Face': 25,
'Pale_Skin': 26, 'Pointy_Nose': 27, 'Receding_Hairline': 28,
'Rosy_Cheeks': 29, 'Sideburns': 30, 'Smiling': 31,
'Straight_Hair': 32, 'Wavy_Hair': 33, 'Wearing_Earrings': 34,
'Wearing_Hat': 35, 'Wearing_Lipstick': 36,
'Wearing_Necklace': 37, 'Wearing_Necktie': 38, 'Young': 39}
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=FLAGS.gpu
# tf.set_random_seed(0)# 0 for 512
tf.set_random_seed(100)
(train_images, train_labels, train_att), train_iters = data.data_train(FLAGS.real_path, FLAGS.train_label, 64)
(fake_images, fake_labels, fake_att), fake_iters = data.data_fake(FLAGS.fake_path, FLAGS.train_label, 64)
(valid_images, valid_labels, valid_att), valid_iters = data.data_test(FLAGS.real_path, FLAGS.valid_label, FLAGS.batch_size)
(test_images, test_labels, test_att), test_iters = data.data_test(FLAGS.real_path, FLAGS.test_label, FLAGS.batch_size)
batch_images = tf.placeholder(tf.float32,[None,128,128,3])
batch_labels = tf.placeholder(tf.int32,[None,])
is_training = tf.placeholder(tf.bool)
lr_ph = tf.placeholder(tf.float32)
lr = FLAGS.lr
Y_score = model.vgg(batch_images, FLAGS.n_class, is_training)
Y_hat = tf.nn.softmax(Y_score)
Y_pred = tf.argmax(Y_hat, 1)
Y_label = tf.to_float(tf.one_hot(batch_labels, FLAGS.n_class))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits = Y_score, labels = Y_label)
loss_op = tf.reduce_mean(cross_entropy)
correct_prediction = tf.equal(tf.argmax(Y_hat, 1), tf.argmax(Y_label, 1))
acc_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
update_op = tf.train.MomentumOptimizer(lr_ph, FLAGS.momentum).minimize(loss_op)
init = tf.global_variables_initializer()
print("================\n\n",train_iters, fake_iters)
with tf.Session() as sess:
sess.run(init)
for i in range(FLAGS.max_epoch):
if i == 30:
lr *= 0.1
elif i == 40:
lr *= 0.1
for j in range(train_iters):
co_images, co_labels = sess.run([train_images,train_labels])
# tr_images, tr_labels = sess.run([train_images,train_labels])
# fa_images, fa_labels = sess.run([fake_images,fake_labels])
# co_images = np.concatenate((tr_images,fa_images),axis=0)
# co_labels = np.concatenate((tr_labels,fa_labels),axis=0)
loss, acc, _ = sess.run([loss_op, acc_op, update_op], {batch_images:co_images, batch_labels:co_labels, lr_ph:lr, is_training:True})
if j % 50 == 0:
print('====epoch_%d====iter_%d: loss=%.4f, train_acc=%.4f' % (i, j, loss, acc))
valid_acc = 0.0
y_pred =[]
y_label = []
y_att = []
for k in range(valid_iters):
va_images, va_labels, va_att = sess.run([valid_images, valid_labels, valid_att])
batch_acc, batch_pred = sess.run([acc_op,Y_pred], {batch_images:va_images, batch_labels:va_labels, is_training:False})
valid_acc += batch_acc
y_pred += batch_pred.tolist()
y_label += va_labels.tolist()
y_att += va_att.tolist()
valid_acc = valid_acc / float(valid_iters)
valid_eo = data.cal_eo(y_att, y_label, y_pred)
print('====epoch_%d: valid_acc=%.4f, valid_eo=%.4f' % (i, valid_acc, valid_eo[-1]))
print('eo: ',valid_eo[0],valid_eo[1])
print('eo: ',valid_eo[2],valid_eo[3])
| apache-2.0 | -3,787,352,119,502,906,000 | 45.157025 | 137 | 0.669651 | false |
laumann/servo | components/script/dom/bindings/codegen/CodegenRust.py | 1 | 243619 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Common codegen classes.
from collections import defaultdict
import operator
import re
import string
import textwrap
import functools
from WebIDL import (
BuiltinTypes,
IDLBuiltinType,
IDLNullValue,
IDLType,
IDLInterfaceMember,
IDLUndefinedValue,
)
from Configuration import (
MemberIsUnforgeable,
getModuleFromObject,
getTypesFromCallback,
getTypesFromDescriptor,
getTypesFromDictionary,
)
AUTOGENERATED_WARNING_COMMENT = \
"/* THIS FILE IS AUTOGENERATED - DO NOT EDIT */\n\n"
FINALIZE_HOOK_NAME = '_finalize'
TRACE_HOOK_NAME = '_trace'
CONSTRUCT_HOOK_NAME = '_constructor'
HASINSTANCE_HOOK_NAME = '_hasInstance'
def replaceFileIfChanged(filename, newContents):
"""
Read a copy of the old file, so that we don't touch it if it hasn't changed.
    Returns True if the file was updated, False otherwise.
"""
# XXXjdm This doesn't play well with make right now.
# Force the file to always be updated, or else changing CodegenRust.py
# will cause many autogenerated bindings to be regenerated perpetually
# until the result is actually different.
# oldFileContents = ""
# try:
# with open(filename, 'rb') as oldFile:
# oldFileContents = ''.join(oldFile.readlines())
# except:
# pass
# if newContents == oldFileContents:
# return False
with open(filename, 'wb') as f:
f.write(newContents)
return True
def toStringBool(arg):
return str(not not arg).lower()
def toBindingNamespace(arg):
return re.sub("((_workers)?$)", "Binding\\1", arg)
def stripTrailingWhitespace(text):
tail = '\n' if text.endswith('\n') else ''
lines = text.splitlines()
for i in range(len(lines)):
lines[i] = lines[i].rstrip()
return '\n'.join(lines) + tail
def MakeNativeName(name):
return name[0].upper() + name[1:]
builtinNames = {
IDLType.Tags.bool: 'bool',
IDLType.Tags.int8: 'i8',
IDLType.Tags.int16: 'i16',
IDLType.Tags.int32: 'i32',
IDLType.Tags.int64: 'i64',
IDLType.Tags.uint8: 'u8',
IDLType.Tags.uint16: 'u16',
IDLType.Tags.uint32: 'u32',
IDLType.Tags.uint64: 'u64',
IDLType.Tags.unrestricted_float: 'f32',
IDLType.Tags.float: 'Finite<f32>',
IDLType.Tags.unrestricted_double: 'f64',
IDLType.Tags.double: 'Finite<f64>'
}
numericTags = [
IDLType.Tags.int8, IDLType.Tags.uint8,
IDLType.Tags.int16, IDLType.Tags.uint16,
IDLType.Tags.int32, IDLType.Tags.uint32,
IDLType.Tags.int64, IDLType.Tags.uint64,
IDLType.Tags.unrestricted_float,
IDLType.Tags.unrestricted_double
]
def unwrapCastableObject(descriptor, source, codeOnFailure, conversionFunction):
"""
A function for unwrapping an object named by the "source" argument
based on the passed-in descriptor. Returns the string of the Rust expression of
the appropriate type.
codeOnFailure is the code to run if unwrapping fails.
"""
args = {
"failureCode": CGIndenter(CGGeneric(codeOnFailure), 8).define(),
"function": conversionFunction,
"source": source,
}
return """\
match %(function)s(%(source)s) {
Ok(val) => val,
Err(()) => {
%(failureCode)s
}
}""" % args
# We'll want to insert the indent at the beginnings of lines, but we
# don't want to indent empty lines. So only indent lines that have a
# non-newline character on them.
lineStartDetector = re.compile("^(?=[^\n#])", re.MULTILINE)
def indent(s, indentLevel=2):
"""
    Indent generated code.
Weird secret feature: this doesn't indent lines that start with # (such as
#include lines or #ifdef/#endif).
"""
if s == "":
return s
return re.sub(lineStartDetector, indentLevel * " ", s)
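
# For example (illustrative only), with the default indent level:
#   indent("foo();\n#define BAR\nbaz();\n")
# returns "  foo();\n#define BAR\n  baz();\n" -- the line starting with '#'
# stays flush with the margin.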
# dedent() and fill() are often called on the same string multiple
# times. We want to memoize their return values so we don't keep
# recomputing them all the time.
def memoize(fn):
"""
Decorator to memoize a function of one argument. The cache just
grows without bound.
"""
cache = {}
@functools.wraps(fn)
def wrapper(arg):
retval = cache.get(arg)
if retval is None:
retval = cache[arg] = fn(arg)
return retval
return wrapper
@memoize
def dedent(s):
"""
Remove all leading whitespace from s, and remove a blank line
at the beginning.
"""
if s.startswith('\n'):
s = s[1:]
return textwrap.dedent(s)
# This works by transforming the fill()-template to an equivalent
# string.Template.
fill_multiline_substitution_re = re.compile(r"( *)\$\*{(\w+)}(\n)?")
@memoize
def compile_fill_template(template):
"""
Helper function for fill(). Given the template string passed to fill(),
do the reusable part of template processing and return a pair (t,
argModList) that can be used every time fill() is called with that
template argument.
argsModList is list of tuples that represent modifications to be
made to args. Each modification has, in order: i) the arg name,
ii) the modified name, iii) the indent depth.
"""
t = dedent(template)
assert t.endswith("\n") or "\n" not in t
argModList = []
def replace(match):
"""
Replaces a line like ' $*{xyz}\n' with '${xyz_n}',
where n is the indent depth, and add a corresponding entry to
argModList.
Note that this needs to close over argModList, so it has to be
defined inside compile_fill_template().
"""
indentation, name, nl = match.groups()
depth = len(indentation)
# Check that $*{xyz} appears by itself on a line.
prev = match.string[:match.start()]
if (prev and not prev.endswith("\n")) or nl is None:
raise ValueError("Invalid fill() template: $*{%s} must appear by itself on a line" % name)
# Now replace this whole line of template with the indented equivalent.
modified_name = name + "_" + str(depth)
argModList.append((name, modified_name, depth))
return "${" + modified_name + "}"
t = re.sub(fill_multiline_substitution_re, replace, t)
return (string.Template(t), argModList)
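
# A brief illustration: for the template "if x {\n    $*{body}\n}\n" this
# returns a string.Template over "if x {\n${body_4}}\n" (the marker line,
# trailing newline included, collapses into the placeholder) together with
# the argModList [("body", "body_4", 4)].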
def fill(template, **args):
"""
Convenience function for filling in a multiline template.
`fill(template, name1=v1, name2=v2)` is a lot like
`string.Template(template).substitute({"name1": v1, "name2": v2})`.
However, it's shorter, and has a few nice features:
* If `template` is indented, fill() automatically dedents it!
This makes code using fill() with Python's multiline strings
much nicer to look at.
* If `template` starts with a blank line, fill() strips it off.
(Again, convenient with multiline strings.)
* fill() recognizes a special kind of substitution
of the form `$*{name}`.
Use this to paste in, and automatically indent, multiple lines.
(Mnemonic: The `*` is for "multiple lines").
A `$*` substitution must appear by itself on a line, with optional
preceding indentation (spaces only). The whole line is replaced by the
corresponding keyword argument, indented appropriately. If the
argument is an empty string, no output is generated, not even a blank
line.
"""
t, argModList = compile_fill_template(template)
# Now apply argModList to args
for (name, modified_name, depth) in argModList:
if not (args[name] == "" or args[name].endswith("\n")):
raise ValueError("Argument %s with value %r is missing a newline" % (name, args[name]))
args[modified_name] = indent(args[name], depth)
return t.substitute(args)
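
# A small illustration (not executed at import time):
#
#   fill(
#       """
#       match ${expr} {
#           $*{arms}
#       }
#       """,
#       expr="x",
#       arms="A => 1,\nB => 2,\n")
#
# evaluates to "match x {\n    A => 1,\n    B => 2,\n}\n": each line of
# `arms` is indented to the depth of the $*{arms} marker, and an empty
# `arms` would contribute no line at all.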
class CGThing():
"""
Abstract base class for things that spit out code.
"""
def __init__(self):
pass # Nothing for now
def define(self):
"""Produce code for a Rust file."""
raise NotImplementedError # Override me!
class CGMethodCall(CGThing):
"""
    A class that generates code to select a method signature from a set of
    overloads and to call the selected signature.
"""
def __init__(self, argsPre, nativeMethodName, static, descriptor, method):
CGThing.__init__(self)
methodName = '\\"%s.%s\\"' % (descriptor.interface.identifier.name, method.identifier.name)
def requiredArgCount(signature):
arguments = signature[1]
if len(arguments) == 0:
return 0
requiredArgs = len(arguments)
while requiredArgs and arguments[requiredArgs - 1].optional:
requiredArgs -= 1
return requiredArgs
signatures = method.signatures()
def getPerSignatureCall(signature, argConversionStartsAt=0):
signatureIndex = signatures.index(signature)
return CGPerSignatureCall(signature[0], argsPre, signature[1],
nativeMethodName + '_' * signatureIndex,
static, descriptor,
method, argConversionStartsAt)
if len(signatures) == 1:
# Special case: we can just do a per-signature method call
# here for our one signature and not worry about switching
# on anything.
signature = signatures[0]
self.cgRoot = CGList([getPerSignatureCall(signature)])
requiredArgs = requiredArgCount(signature)
if requiredArgs > 0:
code = (
"if argc < %d {\n"
" throw_type_error(cx, \"Not enough arguments to %s.\");\n"
" return false;\n"
"}" % (requiredArgs, methodName))
self.cgRoot.prepend(
CGWrapper(CGGeneric(code), pre="\n", post="\n"))
return
# Need to find the right overload
maxArgCount = method.maxArgCount
allowedArgCounts = method.allowedArgCounts
argCountCases = []
for argCount in allowedArgCounts:
possibleSignatures = method.signaturesForArgCount(argCount)
if len(possibleSignatures) == 1:
# easy case!
signature = possibleSignatures[0]
argCountCases.append(CGCase(str(argCount), getPerSignatureCall(signature)))
continue
distinguishingIndex = method.distinguishingIndexForArgCount(argCount)
# We can't handle unions at the distinguishing index.
for (returnType, args) in possibleSignatures:
if args[distinguishingIndex].type.isUnion():
raise TypeError("No support for unions as distinguishing "
"arguments yet: %s",
args[distinguishingIndex].location)
# Convert all our arguments up to the distinguishing index.
# Doesn't matter which of the possible signatures we use, since
# they all have the same types up to that point; just use
# possibleSignatures[0]
caseBody = [
CGArgumentConverter(possibleSignatures[0][1][i],
i, "args", "argc", descriptor)
for i in range(0, distinguishingIndex)]
# Select the right overload from our set.
distinguishingArg = "args.get(%d)" % distinguishingIndex
def pickFirstSignature(condition, filterLambda):
sigs = filter(filterLambda, possibleSignatures)
assert len(sigs) < 2
if len(sigs) > 0:
call = getPerSignatureCall(sigs[0], distinguishingIndex)
if condition is None:
caseBody.append(call)
else:
caseBody.append(CGGeneric("if " + condition + " {"))
caseBody.append(CGIndenter(call))
caseBody.append(CGGeneric("}"))
return True
return False
# First check for null or undefined
pickFirstSignature("%s.isNullOrUndefined()" % distinguishingArg,
lambda s: (s[1][distinguishingIndex].type.nullable() or
s[1][distinguishingIndex].type.isDictionary()))
# Now check for distinguishingArg being an object that implements a
# non-callback interface. That includes typed arrays and
# arraybuffers.
interfacesSigs = [
s for s in possibleSignatures
if (s[1][distinguishingIndex].type.isObject() or
s[1][distinguishingIndex].type.isNonCallbackInterface())]
# There might be more than one of these; we need to check
# which ones we unwrap to.
if len(interfacesSigs) > 0:
# The spec says that we should check for "platform objects
# implementing an interface", but it's enough to guard on these
# being an object. The code for unwrapping non-callback
# interfaces and typed arrays will just bail out and move on to
# the next overload if the object fails to unwrap correctly. We
# could even not do the isObject() check up front here, but in
# cases where we have multiple object overloads it makes sense
# to do it only once instead of for each overload. That will
# also allow the unwrapping test to skip having to do codegen
# for the null-or-undefined case, which we already handled
# above.
caseBody.append(CGGeneric("if %s.get().is_object() {" %
(distinguishingArg)))
for idx, sig in enumerate(interfacesSigs):
caseBody.append(CGIndenter(CGGeneric("loop {")))
type = sig[1][distinguishingIndex].type
# The argument at index distinguishingIndex can't possibly
# be unset here, because we've already checked that argc is
# large enough that we can examine this argument.
info = getJSToNativeConversionInfo(
type, descriptor, failureCode="break;", isDefinitelyObject=True)
template = info.template
declType = info.declType
testCode = instantiateJSToNativeConversionTemplate(
template,
{"val": distinguishingArg},
declType,
"arg%d" % distinguishingIndex)
                    # Indent by 4, since we need to indent further than our "loop" statement
caseBody.append(CGIndenter(testCode, 4))
# If we got this far, we know we unwrapped to the right
# interface, so just do the call. Start conversion with
# distinguishingIndex + 1, since we already converted
# distinguishingIndex.
caseBody.append(CGIndenter(
getPerSignatureCall(sig, distinguishingIndex + 1), 4))
caseBody.append(CGIndenter(CGGeneric("}")))
caseBody.append(CGGeneric("}"))
# XXXbz Now we're supposed to check for distinguishingArg being
# an array or a platform object that supports indexed
# properties... skip that last for now. It's a bit of a pain.
pickFirstSignature("%s.get().isObject() && IsArrayLike(cx, &%s.get().toObject())" %
(distinguishingArg, distinguishingArg),
lambda s:
(s[1][distinguishingIndex].type.isArray() or
s[1][distinguishingIndex].type.isSequence() or
s[1][distinguishingIndex].type.isObject()))
# Check for Date objects
# XXXbz Do we need to worry about security wrappers around the Date?
pickFirstSignature("%s.get().isObject() && JS_ObjectIsDate(cx, &%s.get().toObject())" %
(distinguishingArg, distinguishingArg),
lambda s: (s[1][distinguishingIndex].type.isDate() or
s[1][distinguishingIndex].type.isObject()))
# Check for vanilla JS objects
# XXXbz Do we need to worry about security wrappers?
pickFirstSignature("%s.get().is_object() && !is_platform_object(%s.get().to_object())" %
(distinguishingArg, distinguishingArg),
lambda s: (s[1][distinguishingIndex].type.isCallback() or
s[1][distinguishingIndex].type.isCallbackInterface() or
s[1][distinguishingIndex].type.isDictionary() or
s[1][distinguishingIndex].type.isObject()))
# The remaining cases are mutually exclusive. The
# pickFirstSignature calls are what change caseBody
# Check for strings or enums
if pickFirstSignature(None,
lambda s: (s[1][distinguishingIndex].type.isString() or
s[1][distinguishingIndex].type.isEnum())):
pass
# Check for primitives
elif pickFirstSignature(None,
lambda s: s[1][distinguishingIndex].type.isPrimitive()):
pass
# Check for "any"
elif pickFirstSignature(None,
lambda s: s[1][distinguishingIndex].type.isAny()):
pass
else:
# Just throw; we have no idea what we're supposed to
# do with this.
caseBody.append(CGGeneric("return Throw(cx, NS_ERROR_XPC_BAD_CONVERT_JS);"))
argCountCases.append(CGCase(str(argCount),
CGList(caseBody, "\n")))
overloadCGThings = []
overloadCGThings.append(
CGGeneric("let argcount = cmp::min(argc, %d);" %
maxArgCount))
overloadCGThings.append(
CGSwitch("argcount",
argCountCases,
CGGeneric("throw_type_error(cx, \"Not enough arguments to %s.\");\n"
"return false;" % methodName)))
# XXXjdm Avoid unreachable statement warnings
# overloadCGThings.append(
# CGGeneric('panic!("We have an always-returning default case");\n'
# 'return false;'))
self.cgRoot = CGWrapper(CGList(overloadCGThings, "\n"),
pre="\n")
def define(self):
return self.cgRoot.define()
def dictionaryHasSequenceMember(dictionary):
return (any(typeIsSequenceOrHasSequenceMember(m.type) for m in
dictionary.members) or
(dictionary.parent and
dictionaryHasSequenceMember(dictionary.parent)))
def typeIsSequenceOrHasSequenceMember(type):
if type.nullable():
type = type.inner
if type.isSequence():
return True
if type.isArray():
elementType = type.inner
return typeIsSequenceOrHasSequenceMember(elementType)
if type.isDictionary():
return dictionaryHasSequenceMember(type.inner)
if type.isUnion():
return any(typeIsSequenceOrHasSequenceMember(m.type) for m in
type.flatMemberTypes)
return False
def typeNeedsRooting(type, descriptorProvider):
return (type.isGeckoInterface() and
descriptorProvider.getDescriptor(type.unroll().inner.identifier.name).needsRooting)
def union_native_type(t):
name = t.unroll().name
return 'UnionTypes::%s' % name
class JSToNativeConversionInfo():
"""
An object representing information about a JS-to-native conversion.
"""
def __init__(self, template, default=None, declType=None,
needsRooting=False):
"""
template: A string representing the conversion code. This will have
template substitution performed on it as follows:
${val} is a handle to the JS::Value in question
        default: A string or None representing Rust code for the default value (if any).
        declType: A CGThing representing the native Rust type we're converting
to. This is allowed to be None if the conversion code is
supposed to be used as-is.
needsRooting: A boolean indicating whether the caller has to root
the result
"""
assert isinstance(template, str)
assert declType is None or isinstance(declType, CGThing)
self.template = template
self.default = default
self.declType = declType
self.needsRooting = needsRooting
def getJSToNativeConversionInfo(type, descriptorProvider, failureCode=None,
isDefinitelyObject=False,
isMember=False,
isArgument=False,
invalidEnumValueFatal=True,
defaultValue=None,
treatNullAs="Default",
isEnforceRange=False,
isClamp=False,
exceptionCode=None,
allowTreatNonObjectAsNull=False,
isCallbackReturnValue=False,
sourceDescription="value"):
"""
Get a template for converting a JS value to a native object based on the
given type and descriptor. If failureCode is given, then we're actually
testing whether we can convert the argument to the desired type. That
means that failures to convert due to the JS value being the wrong type of
value need to use failureCode instead of throwing exceptions. Failures to
convert that are due to JS exceptions (from toString or valueOf methods) or
out of memory conditions need to throw exceptions no matter what
failureCode is.
If isDefinitelyObject is True, that means we know the value
isObject() and we have no need to recheck that.
if isMember is True, we're being converted from a property of some
JS object, not from an actual method argument, so we can't rely on
our jsval being rooted or outliving us in any way. Any caller
passing true needs to ensure that it is handled correctly in
typeIsSequenceOrHasSequenceMember.
invalidEnumValueFatal controls whether an invalid enum value conversion
attempt will throw (if true) or simply return without doing anything (if
false).
If defaultValue is not None, it's the IDL default value for this conversion
If isEnforceRange is true, we're converting an integer and throwing if the
value is out of range.
If isClamp is true, we're converting an integer and clamping if the
value is out of range.
If allowTreatNonObjectAsNull is true, then [TreatNonObjectAsNull]
extended attributes on nullable callback functions will be honored.
    The return value from this function is a JSToNativeConversionInfo object consisting of four things:
1) A string representing the conversion code. This will have template
substitution performed on it as follows:
${val} replaced by an expression for the JS::Value in question
2) A string or None representing Rust code for the default value (if any).
    3) A CGThing representing the native Rust type we're converting to
(declType). This is allowed to be None if the conversion code is
supposed to be used as-is.
4) A boolean indicating whether the caller has to root the result.
"""
# We should not have a defaultValue if we know we're an object
assert not isDefinitelyObject or defaultValue is None
# If exceptionCode is not set, we'll just rethrow the exception we got.
# Note that we can't just set failureCode to exceptionCode, because setting
# failureCode will prevent pending exceptions from being set in cases when
# they really should be!
if exceptionCode is None:
exceptionCode = "return false;"
needsRooting = typeNeedsRooting(type, descriptorProvider)
def handleOptional(template, declType, default):
assert (defaultValue is None) == (default is None)
return JSToNativeConversionInfo(template, default, declType, needsRooting=needsRooting)
# Unfortunately, .capitalize() on a string will lowercase things inside the
# string, which we do not want.
def firstCap(string):
return string[0].upper() + string[1:]
# Helper functions for dealing with failures due to the JS value being the
# wrong type of value.
def onFailureNotAnObject(failureCode):
return CGWrapper(
CGGeneric(
failureCode or
('throw_type_error(cx, "%s is not an object.");\n'
'%s' % (firstCap(sourceDescription), exceptionCode))),
post="\n")
def onFailureNotCallable(failureCode):
return CGWrapper(
CGGeneric(
failureCode or
('throw_type_error(cx, \"%s is not callable.\");\n'
'%s' % (firstCap(sourceDescription), exceptionCode))))
# A helper function for handling null default values. Checks that the
# default value, if it exists, is null.
def handleDefaultNull(nullValue):
if defaultValue is None:
return None
if not isinstance(defaultValue, IDLNullValue):
raise TypeError("Can't handle non-null default value here")
assert type.nullable() or type.isDictionary()
return nullValue
# A helper function for wrapping up the template body for
# possibly-nullable objecty stuff
def wrapObjectTemplate(templateBody, nullValue, isDefinitelyObject, type,
failureCode=None):
if not isDefinitelyObject:
# Handle the non-object cases by wrapping up the whole
# thing in an if cascade.
templateBody = (
"if ${val}.get().is_object() {\n" +
CGIndenter(CGGeneric(templateBody)).define() + "\n")
if type.nullable():
templateBody += (
"} else if ${val}.get().is_null_or_undefined() {\n"
" %s\n") % nullValue
templateBody += (
"} else {\n" +
CGIndenter(onFailureNotAnObject(failureCode)).define() +
"}")
return templateBody
assert not (isEnforceRange and isClamp) # These are mutually exclusive
if type.isArray():
raise TypeError("Can't handle array arguments yet")
if type.isSequence():
# Use the same type that for return values
declType = getRetvalDeclarationForType(type, descriptorProvider)
config = getConversionConfigForType(type, isEnforceRange, isClamp, treatNullAs)
templateBody = ("match FromJSValConvertible::from_jsval(cx, ${val}, %s) {\n"
" Ok(value) => value,\n"
" Err(()) => { %s },\n"
"}" % (config, exceptionCode))
return handleOptional(templateBody, declType, handleDefaultNull("None"))
if type.isUnion():
declType = CGGeneric(union_native_type(type))
if type.nullable():
declType = CGWrapper(declType, pre="Option<", post=" >")
templateBody = ("match FromJSValConvertible::from_jsval(cx, ${val}, ()) {\n"
" Ok(value) => value,\n"
" Err(()) => { %s },\n"
"}" % exceptionCode)
return handleOptional(templateBody, declType, handleDefaultNull("None"))
if type.isGeckoInterface():
assert not isEnforceRange and not isClamp
descriptor = descriptorProvider.getDescriptor(
type.unroll().inner.identifier.name)
if descriptor.interface.isCallback():
name = descriptor.nativeType
declType = CGWrapper(CGGeneric(name), pre="Rc<", post=">")
template = "%s::new(${val}.get().to_object())" % name
if type.nullable():
declType = CGWrapper(declType, pre="Option<", post=">")
template = wrapObjectTemplate("Some(%s)" % template, "None",
isDefinitelyObject, type,
failureCode)
return handleOptional(template, declType, handleDefaultNull("None"))
conversionFunction = "root_from_handlevalue"
descriptorType = descriptor.returnType
if isMember == "Variadic":
conversionFunction = "native_from_handlevalue"
descriptorType = descriptor.nativeType
elif isArgument:
descriptorType = descriptor.argumentType
templateBody = ""
if descriptor.interface.isConsequential():
raise TypeError("Consequential interface %s being used as an "
"argument" % descriptor.interface.identifier.name)
if failureCode is None:
substitutions = {
"sourceDescription": sourceDescription,
"interface": descriptor.interface.identifier.name,
"exceptionCode": exceptionCode,
}
unwrapFailureCode = string.Template(
'throw_type_error(cx, "${sourceDescription} does not '
'implement interface ${interface}.");\n'
'${exceptionCode}').substitute(substitutions)
else:
unwrapFailureCode = failureCode
templateBody = unwrapCastableObject(
descriptor, "${val}", unwrapFailureCode, conversionFunction)
declType = CGGeneric(descriptorType)
if type.nullable():
templateBody = "Some(%s)" % templateBody
declType = CGWrapper(declType, pre="Option<", post=">")
templateBody = wrapObjectTemplate(templateBody, "None",
isDefinitelyObject, type, failureCode)
return handleOptional(templateBody, declType, handleDefaultNull("None"))
if type.isSpiderMonkeyInterface():
raise TypeError("Can't handle SpiderMonkey interface arguments yet")
if type.isDOMString():
nullBehavior = getConversionConfigForType(type, isEnforceRange, isClamp, treatNullAs)
conversionCode = (
"match FromJSValConvertible::from_jsval(cx, ${val}, %s) {\n"
" Ok(strval) => strval,\n"
" Err(_) => { %s },\n"
"}" % (nullBehavior, exceptionCode))
if defaultValue is None:
default = None
elif isinstance(defaultValue, IDLNullValue):
assert type.nullable()
default = "None"
else:
assert defaultValue.type.tag() == IDLType.Tags.domstring
default = 'DOMString::from("%s")' % defaultValue.value
if type.nullable():
default = "Some(%s)" % default
declType = "DOMString"
if type.nullable():
declType = "Option<%s>" % declType
return handleOptional(conversionCode, CGGeneric(declType), default)
if type.isUSVString():
assert not isEnforceRange and not isClamp
conversionCode = (
"match FromJSValConvertible::from_jsval(cx, ${val}, ()) {\n"
" Ok(strval) => strval,\n"
" Err(_) => { %s },\n"
"}" % exceptionCode)
if defaultValue is None:
default = None
elif isinstance(defaultValue, IDLNullValue):
assert type.nullable()
default = "None"
else:
assert defaultValue.type.tag() in (IDLType.Tags.domstring, IDLType.Tags.usvstring)
default = 'USVString("%s".to_owned())' % defaultValue.value
if type.nullable():
default = "Some(%s)" % default
declType = "USVString"
if type.nullable():
declType = "Option<%s>" % declType
return handleOptional(conversionCode, CGGeneric(declType), default)
if type.isByteString():
assert not isEnforceRange and not isClamp
conversionCode = (
"match FromJSValConvertible::from_jsval(cx, ${val}, ()) {\n"
" Ok(strval) => strval,\n"
" Err(_) => { %s },\n"
"}" % exceptionCode)
declType = CGGeneric("ByteString")
if type.nullable():
declType = CGWrapper(declType, pre="Option<", post=">")
return handleOptional(conversionCode, declType, handleDefaultNull("None"))
if type.isEnum():
assert not isEnforceRange and not isClamp
if type.nullable():
raise TypeError("We don't support nullable enumerated arguments "
"yet")
enum = type.inner.identifier.name
if invalidEnumValueFatal:
handleInvalidEnumValueCode = exceptionCode
else:
handleInvalidEnumValueCode = "return true;"
template = (
"match find_enum_string_index(cx, ${val}, %(values)s) {\n"
" Err(_) => { %(exceptionCode)s },\n"
" Ok(None) => { %(handleInvalidEnumValueCode)s },\n"
" Ok(Some(index)) => {\n"
" //XXXjdm need some range checks up in here.\n"
" mem::transmute(index)\n"
" },\n"
"}" % {"values": enum + "Values::strings",
"exceptionCode": exceptionCode,
"handleInvalidEnumValueCode": handleInvalidEnumValueCode})
if defaultValue is not None:
assert defaultValue.type.tag() == IDLType.Tags.domstring
default = "%s::%s" % (enum, getEnumValueName(defaultValue.value))
else:
default = None
return handleOptional(template, CGGeneric(enum), default)
if type.isCallback():
assert not isEnforceRange and not isClamp
assert not type.treatNonCallableAsNull()
assert not type.treatNonObjectAsNull() or type.nullable()
assert not type.treatNonObjectAsNull() or not type.treatNonCallableAsNull()
callback = type.unroll().callback
declType = CGGeneric('%s::%s' % (getModuleFromObject(callback), callback.identifier.name))
finalDeclType = CGTemplatedType("Rc", declType)
conversion = CGCallbackTempRoot(declType.define())
if type.nullable():
declType = CGTemplatedType("Option", declType)
finalDeclType = CGTemplatedType("Option", finalDeclType)
conversion = CGWrapper(conversion, pre="Some(", post=")")
if allowTreatNonObjectAsNull and type.treatNonObjectAsNull():
if not isDefinitelyObject:
haveObject = "${val}.get().is_object()"
template = CGIfElseWrapper(haveObject,
conversion,
CGGeneric("None")).define()
else:
template = conversion
else:
template = CGIfElseWrapper("IsCallable(${val}.get().to_object())",
conversion,
onFailureNotCallable(failureCode)).define()
template = wrapObjectTemplate(
template,
"None",
isDefinitelyObject,
type,
failureCode)
if defaultValue is not None:
assert allowTreatNonObjectAsNull
assert type.treatNonObjectAsNull()
assert type.nullable()
assert isinstance(defaultValue, IDLNullValue)
default = "None"
else:
default = None
return JSToNativeConversionInfo(template, default, finalDeclType, needsRooting=needsRooting)
if type.isAny():
assert not isEnforceRange and not isClamp
declType = ""
default = ""
if isMember == "Dictionary":
# TODO: Need to properly root dictionaries
# https://github.com/servo/servo/issues/6381
declType = CGGeneric("JSVal")
if defaultValue is None:
default = None
elif isinstance(defaultValue, IDLNullValue):
default = "NullValue()"
elif isinstance(defaultValue, IDLUndefinedValue):
default = "UndefinedValue()"
else:
raise TypeError("Can't handle non-null, non-undefined default value here")
else:
declType = CGGeneric("HandleValue")
if defaultValue is None:
default = None
elif isinstance(defaultValue, IDLNullValue):
default = "HandleValue::null()"
elif isinstance(defaultValue, IDLUndefinedValue):
default = "HandleValue::undefined()"
else:
raise TypeError("Can't handle non-null, non-undefined default value here")
return handleOptional("${val}", declType, default)
if type.isObject():
assert not isEnforceRange and not isClamp
# TODO: Need to root somehow
# https://github.com/servo/servo/issues/6382
declType = CGGeneric("*mut JSObject")
templateBody = wrapObjectTemplate("${val}.get().to_object()",
"ptr::null_mut()",
isDefinitelyObject, type, failureCode)
return handleOptional(templateBody, declType,
handleDefaultNull("ptr::null_mut()"))
if type.isDictionary():
if failureCode is not None:
raise TypeError("Can't handle dictionaries when failureCode is not None")
# There are no nullable dictionaries
assert not type.nullable()
typeName = "%s::%s" % (CGDictionary.makeModuleName(type.inner),
CGDictionary.makeDictionaryName(type.inner))
declType = CGGeneric(typeName)
template = ("match %s::new(cx, ${val}) {\n"
" Ok(dictionary) => dictionary,\n"
" Err(_) => { %s },\n"
"}" % (typeName, exceptionCode))
return handleOptional(template, declType, handleDefaultNull("%s::empty(cx)" % typeName))
if type.isVoid():
# This one only happens for return values, and it's easy: just
# ignore the jsval.
return JSToNativeConversionInfo("", None, None, needsRooting=False)
if not type.isPrimitive():
raise TypeError("Need conversion for argument type '%s'" % str(type))
conversionBehavior = getConversionConfigForType(type, isEnforceRange, isClamp, treatNullAs)
if failureCode is None:
failureCode = 'return false'
declType = CGGeneric(builtinNames[type.tag()])
if type.nullable():
declType = CGWrapper(declType, pre="Option<", post=">")
template = (
"match FromJSValConvertible::from_jsval(cx, ${val}, %s) {\n"
" Ok(v) => v,\n"
" Err(_) => { %s }\n"
"}" % (conversionBehavior, exceptionCode))
if defaultValue is not None:
if isinstance(defaultValue, IDLNullValue):
assert type.nullable()
defaultStr = "None"
else:
tag = defaultValue.type.tag()
if tag in [IDLType.Tags.float, IDLType.Tags.double]:
defaultStr = "Finite::wrap(%s)" % defaultValue.value
elif tag in numericTags:
defaultStr = str(defaultValue.value)
else:
assert tag == IDLType.Tags.bool
defaultStr = toStringBool(defaultValue.value)
if type.nullable():
defaultStr = "Some(%s)" % defaultStr
else:
defaultStr = None
return handleOptional(template, declType, defaultStr)
def instantiateJSToNativeConversionTemplate(templateBody, replacements,
declType, declName):
"""
Take the templateBody and declType as returned by
getJSToNativeConversionInfo, a set of replacements as required by the
strings in such a templateBody, and a declName, and generate code to
convert into a stack Rust binding with that name.
"""
result = CGList([], "\n")
conversion = CGGeneric(string.Template(templateBody).substitute(replacements))
if declType is not None:
newDecl = [
CGGeneric("let "),
CGGeneric(declName),
CGGeneric(": "),
declType,
CGGeneric(" = "),
conversion,
CGGeneric(";"),
]
result.append(CGList(newDecl))
else:
result.append(conversion)
# Add an empty CGGeneric to get an extra newline after the argument
# conversion.
result.append(CGGeneric(""))
return result
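# Illustrative sketch (hypothetical arguments; nothing here is generated
# verbatim): a call along the lines of
#
#   instantiateJSToNativeConversionTemplate(
#       "match FromJSValConvertible::from_jsval(cx, ${val}, ()) {\n"
#       "    Ok(v) => v,\n"
#       "    Err(_) => { return false; }\n"
#       "}",
#       {"val": "args.get(0)"}, CGGeneric("bool"), "arg0")
#
# emits Rust code shaped like:
#
#   let arg0: bool = match FromJSValConvertible::from_jsval(cx, args.get(0), ()) {
#       Ok(v) => v,
#       Err(_) => { return false; }
#   };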
def convertConstIDLValueToJSVal(value):
if isinstance(value, IDLNullValue):
return "NullVal"
tag = value.type.tag()
if tag in [IDLType.Tags.int8, IDLType.Tags.uint8, IDLType.Tags.int16,
IDLType.Tags.uint16, IDLType.Tags.int32]:
return "IntVal(%s)" % (value.value)
if tag == IDLType.Tags.uint32:
return "UintVal(%s)" % (value.value)
if tag in [IDLType.Tags.int64, IDLType.Tags.uint64]:
return "DoubleVal(%s)" % (value.value)
if tag == IDLType.Tags.bool:
return "BoolVal(true)" if value.value else "BoolVal(false)"
if tag in [IDLType.Tags.unrestricted_float, IDLType.Tags.float,
IDLType.Tags.unrestricted_double, IDLType.Tags.double]:
return "DoubleVal(%s)" % (value.value)
raise TypeError("Const value of unhandled type: " + value.type)
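# Illustrative mappings (hypothetical IDL constants):
#
#   const short ANSWER = 42;              -> "IntVal(42)"
#   const unsigned long BIG = 4000000000; -> "UintVal(4000000000)"
#   const boolean FLAG = true;            -> "BoolVal(true)"
#   const double RATIO = 1.5;             -> "DoubleVal(1.5)"
#
# 64-bit integer constants also go through DoubleVal, since JS numbers are
# IEEE doubles.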
class CGArgumentConverter(CGThing):
"""
A class that takes an IDL argument object, its index in the
argument list, and the argv and argc strings and generates code to
unwrap the argument to the right native type.
"""
def __init__(self, argument, index, args, argc, descriptorProvider,
invalidEnumValueFatal=True):
CGThing.__init__(self)
assert not argument.defaultValue or argument.optional
replacer = {
"index": index,
"argc": argc,
"args": args
}
replacementVariables = {
"val": string.Template("${args}.get(${index})").substitute(replacer),
}
info = getJSToNativeConversionInfo(
argument.type,
descriptorProvider,
invalidEnumValueFatal=invalidEnumValueFatal,
defaultValue=argument.defaultValue,
treatNullAs=argument.treatNullAs,
isEnforceRange=argument.enforceRange,
isClamp=argument.clamp,
isMember="Variadic" if argument.variadic else False,
allowTreatNonObjectAsNull=argument.allowTreatNonCallableAsNull())
template = info.template
default = info.default
declType = info.declType
if not argument.variadic:
if argument.optional:
condition = "{args}.get({index}).is_undefined()".format(**replacer)
if argument.defaultValue:
assert default
template = CGIfElseWrapper(condition,
CGGeneric(default),
CGGeneric(template)).define()
else:
assert not default
declType = CGWrapper(declType, pre="Option<", post=">")
template = CGIfElseWrapper(condition,
CGGeneric("None"),
CGGeneric("Some(%s)" % template)).define()
else:
assert not default
self.converter = instantiateJSToNativeConversionTemplate(
template, replacementVariables, declType, "arg%d" % index)
else:
assert argument.optional
variadicConversion = {
"val": string.Template("${args}.get(variadicArg)").substitute(replacer),
}
innerConverter = [instantiateJSToNativeConversionTemplate(
template, variadicConversion, declType, "slot")]
arg = "arg%d" % index
if argument.type.isGeckoInterface():
vec = "RootedVec::new()"
innerConverter.append(CGGeneric("%s.push(JS::from_ref(&*slot));" % arg))
else:
vec = "vec![]"
innerConverter.append(CGGeneric("%s.push(slot);" % arg))
inner = CGIndenter(CGList(innerConverter, "\n"), 8).define()
self.converter = CGGeneric("""\
let mut %(arg)s = %(vec)s;
if %(argc)s > %(index)s {
%(arg)s.reserve(%(argc)s as usize - %(index)s);
for variadicArg in %(index)s..%(argc)s {
%(inner)s
}
}""" % {'arg': arg, 'argc': argc, 'index': index, 'inner': inner, 'vec': vec})
def define(self):
return self.converter.define()
def wrapForType(jsvalRef, result='result', successCode='return true;', pre=''):
"""
Reflect a Rust value into JS.
* 'jsvalRef': a MutableHandleValue in which to store the result
of the conversion;
* 'result': the name of the variable in which the Rust value is stored;
* 'successCode': the code to run once we have done the conversion.
* 'pre': code to run before the conversion if rooting is necessary
"""
wrap = "%s\n(%s).to_jsval(cx, %s);" % (pre, result, jsvalRef)
if successCode:
wrap += "\n%s" % successCode
return wrap
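# Sketch of typical output with the default arguments: wrapForType("args.rval()")
# expands to roughly
#
#   (result).to_jsval(cx, args.rval());
#   return true;
#
# i.e. the Rust value is reflected into the MutableHandleValue and the binding
# signals success.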
def typeNeedsCx(type, retVal=False):
if type is None:
return False
if type.nullable():
type = type.inner
if type.isSequence() or type.isArray():
type = type.inner
if type.isUnion():
return any(typeNeedsCx(t) for t in type.unroll().flatMemberTypes)
if retVal and type.isSpiderMonkeyInterface():
return True
return type.isAny() or type.isObject()
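# In practice this means `any` and `object` (and, for return values,
# SpiderMonkey interfaces) force the generated call to pass `cx`, since
# converting such values to or from a JSVal needs a live JSContext. For
# example, a hypothetical `any f(long x)` needs cx for its return value,
# while `long f(long x)` does not.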
# Returns a conversion behavior suitable for a type
def getConversionConfigForType(type, isEnforceRange, isClamp, treatNullAs):
if type.isSequence():
return getConversionConfigForType(type.unroll(), isEnforceRange, isClamp, treatNullAs)
if type.isDOMString():
assert not isEnforceRange and not isClamp
treatAs = {
"Default": "StringificationBehavior::Default",
"EmptyString": "StringificationBehavior::Empty",
}
if treatNullAs not in treatAs:
raise TypeError("We don't support [TreatNullAs=%s]" % treatNullAs)
if type.nullable():
# Note: the actual behavior passed here doesn't matter for nullable
# strings.
return "StringificationBehavior::Default"
else:
return treatAs[treatNullAs]
if type.isInteger():
if isEnforceRange:
return "ConversionBehavior::EnforceRange"
elif isClamp:
return "ConversionBehavior::Clamp"
else:
return "ConversionBehavior::Default"
assert not isEnforceRange and not isClamp
return "()"
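# Sketch of the mapping for a few hypothetical IDL declarations:
#
#   [EnforceRange] unsigned long        -> "ConversionBehavior::EnforceRange"
#   [Clamp] octet                       -> "ConversionBehavior::Clamp"
#   long                                -> "ConversionBehavior::Default"
#   [TreatNullAs=EmptyString] DOMString -> "StringificationBehavior::Empty"
#   DOMString?                          -> "StringificationBehavior::Default"
#   boolean                             -> "()"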
# Returns a CGThing containing the type of the return value.
def getRetvalDeclarationForType(returnType, descriptorProvider):
if returnType is None or returnType.isVoid():
# Nothing to declare
return CGGeneric("()")
if returnType.isPrimitive() and returnType.tag() in builtinNames:
result = CGGeneric(builtinNames[returnType.tag()])
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
if returnType.isDOMString():
result = CGGeneric("DOMString")
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
if returnType.isUSVString():
result = CGGeneric("USVString")
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
if returnType.isByteString():
result = CGGeneric("ByteString")
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
if returnType.isEnum():
result = CGGeneric(returnType.unroll().inner.identifier.name)
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
if returnType.isGeckoInterface():
descriptor = descriptorProvider.getDescriptor(
returnType.unroll().inner.identifier.name)
result = CGGeneric(descriptor.returnType)
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
if returnType.isCallback():
callback = returnType.unroll().callback
result = CGGeneric('Rc<%s::%s>' % (getModuleFromObject(callback), callback.identifier.name))
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
if returnType.isUnion():
result = CGGeneric(union_native_type(returnType))
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
# TODO: Return the value through a MutableHandleValue outparam
# https://github.com/servo/servo/issues/6307
if returnType.isAny():
return CGGeneric("JSVal")
if returnType.isObject() or returnType.isSpiderMonkeyInterface():
return CGGeneric("*mut JSObject")
if returnType.isSequence():
inner = returnType.unroll()
result = getRetvalDeclarationForType(inner, descriptorProvider)
result = CGWrapper(result, pre="Vec<", post=">")
if returnType.nullable():
result = CGWrapper(result, pre="Option<", post=">")
return result
if returnType.isDictionary():
nullable = returnType.nullable()
dictName = returnType.inner.name if nullable else returnType.name
result = CGGeneric(dictName)
if typeNeedsRooting(returnType, descriptorProvider):
raise TypeError("We don't support rootable dictionaries return values")
if nullable:
result = CGWrapper(result, pre="Option<", post=">")
return result
raise TypeError("Don't know how to declare return value for %s" %
returnType)
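# Illustrative examples of the declared Rust return types (hypothetical IDL):
#
#   long                 -> i32
#   long?                -> Option<i32>
#   DOMString            -> DOMString
#   sequence<DOMString>  -> Vec<DOMString>
#   object               -> *mut JSObject
#   any                  -> JSVal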
class PropertyDefiner:
"""
A common superclass for defining things on prototype objects.
Subclasses should implement generateArray to generate the actual arrays of
things we're defining. They should also set self.regular to the list of
things exposed to web pages.
"""
def __init__(self, descriptor, name):
self.descriptor = descriptor
self.name = name
def variableName(self):
return "s" + self.name
def length(self):
return len(self.regular)
def __str__(self):
# We only need to generate id arrays for things that will end
# up being used via ResolveProperty or EnumerateProperties.
return self.generateArray(self.regular, self.variableName())
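# Sketch of what generatePrefableArray (below) emits for, say, a methods
# array (names illustrative):
#
#   const sMethods: &'static [JSFunctionSpec] = &[
#       JSFunctionSpec { /* one entry per member, from specTemplate */ },
#       JSFunctionSpec { /* all-zero terminator, from specTerminator */ }
#   ];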
def generatePrefableArray(self, array, name, specTemplate, specTerminator,
specType, getDataTuple):
"""
This method generates our various arrays.
array is an array of interface members as passed to generateArray
name is the name as passed to generateArray
specTemplate is a template for each entry of the spec array
specTerminator is a terminator for the spec array (inserted at the end
of the array), or None
specType is the actual typename of our spec
getDataTuple is a callback function that takes an array entry and
returns a tuple suitable for substitution into specTemplate.
"""
assert len(array) != 0
specs = []
for member in array:
specs.append(specTemplate % getDataTuple(member))
if specTerminator:
specs.append(specTerminator)
return (("const %s: &'static [%s] = &[\n" +
",\n".join(specs) + "\n" +
"];\n") % (name, specType))
# The length of a method is the minimum of the lengths of the
# argument lists of all its overloads.
def methodLength(method):
signatures = method.signatures()
return min(
len([arg for arg in arguments if not arg.optional and not arg.variadic])
for (_, arguments) in signatures)
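# For example, given the hypothetical overloads
#
#   void f(long a, long b);
#   void f(DOMString a, optional long b, long... rest);
#
# the per-overload counts are 2 and 1 (optional and variadic arguments are
# excluded), so methodLength reports 1, matching the Function.length that
# WebIDL specifies for overloaded operations.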
class MethodDefiner(PropertyDefiner):
"""
A class for defining methods on a prototype object.
"""
def __init__(self, descriptor, name, static, unforgeable):
assert not (static and unforgeable)
PropertyDefiner.__init__(self, descriptor, name)
# FIXME https://bugzilla.mozilla.org/show_bug.cgi?id=772822
# We should be able to check for special operations without an
# identifier. For now we check if the name starts with __
# Ignore non-static methods for callback interfaces
if not descriptor.interface.isCallback() or static:
methods = [m for m in descriptor.interface.members if
m.isMethod() and m.isStatic() == static and
not m.isIdentifierLess() and
MemberIsUnforgeable(m, descriptor) == unforgeable]
else:
methods = []
self.regular = [{"name": m.identifier.name,
"methodInfo": not m.isStatic(),
"length": methodLength(m)} for m in methods]
# FIXME Check for an existing iterator on the interface first.
if any(m.isGetter() and m.isIndexed() for m in methods):
self.regular.append({"name": '@@iterator',
"methodInfo": False,
"selfHostedName": "ArrayValues",
"length": 0})
isUnforgeableInterface = bool(descriptor.interface.getExtendedAttribute("Unforgeable"))
if not static and unforgeable == isUnforgeableInterface:
stringifier = descriptor.operations['Stringifier']
if stringifier:
self.regular.append({
"name": "toString",
"nativeName": stringifier.identifier.name,
"length": 0,
})
self.unforgeable = unforgeable
def generateArray(self, array, name):
if len(array) == 0:
return ""
flags = "JSPROP_ENUMERATE"
if self.unforgeable:
flags += " | JSPROP_PERMANENT | JSPROP_READONLY"
def specData(m):
# TODO: Use something like JS_FNSPEC
# https://github.com/servo/servo/issues/6391
if "selfHostedName" in m:
selfHostedName = '%s as *const u8 as *const libc::c_char' % str_to_const_array(m["selfHostedName"])
assert not m.get("methodInfo", True)
accessor = "None"
jitinfo = "0 as *const JSJitInfo"
else:
selfHostedName = "0 as *const libc::c_char"
if m.get("methodInfo", True):
identifier = m.get("nativeName", m["name"])
# Go through an intermediate type here, because it's not
# easy to tell whether the methodinfo is a JSJitInfo or
# a JSTypedMethodJitInfo here. The compiler knows, though,
# so let it do the work.
jitinfo = "&%s_methodinfo as *const _ as *const JSJitInfo" % identifier
accessor = "Some(generic_method)"
else:
jitinfo = "0 as *const JSJitInfo"
accessor = 'Some(%s)' % m.get("nativeName", m["name"])
if m["name"].startswith("@@"):
return ('(SymbolCode::%s as i32 + 1)'
% m["name"][2:], accessor, jitinfo, m["length"], flags, selfHostedName)
return (str_to_const_array(m["name"]), accessor, jitinfo, m["length"], flags, selfHostedName)
return self.generatePrefableArray(
array, name,
' JSFunctionSpec {\n'
' name: %s as *const u8 as *const libc::c_char,\n'
' call: JSNativeWrapper { op: %s, info: %s },\n'
' nargs: %s,\n'
' flags: (%s) as u16,\n'
' selfHostedName: %s\n'
' }',
' JSFunctionSpec {\n'
' name: 0 as *const libc::c_char,\n'
' call: JSNativeWrapper { op: None, info: 0 as *const JSJitInfo },\n'
' nargs: 0,\n'
' flags: 0,\n'
' selfHostedName: 0 as *const libc::c_char\n'
' }',
'JSFunctionSpec',
specData)
class AttrDefiner(PropertyDefiner):
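# A PropertyDefiner for the JSPropertySpec array describing an interface's
# attributes; generateArray pairs each attribute with its generated
# getter/setter natives (or the generic_getter/generic_setter wrappers).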
def __init__(self, descriptor, name, static, unforgeable):
assert not (static and unforgeable)
PropertyDefiner.__init__(self, descriptor, name)
self.name = name
self.descriptor = descriptor
self.regular = [
m
for m in descriptor.interface.members if
m.isAttr() and m.isStatic() == static and
MemberIsUnforgeable(m, descriptor) == unforgeable
]
self.static = static
self.unforgeable = unforgeable
def generateArray(self, array, name):
if len(array) == 0:
return ""
flags = "JSPROP_ENUMERATE | JSPROP_SHARED"
if self.unforgeable:
flags += " | JSPROP_READONLY | JSPROP_PERMANENT"
def getter(attr):
if self.static:
accessor = 'get_' + self.descriptor.internalNameFor(attr.identifier.name)
jitinfo = "0 as *const JSJitInfo"
else:
if attr.hasLenientThis():
accessor = "generic_lenient_getter"
else:
accessor = "generic_getter"
jitinfo = "&%s_getterinfo" % self.descriptor.internalNameFor(attr.identifier.name)
return ("JSNativeWrapper { op: Some(%(native)s), info: %(info)s }"
% {"info": jitinfo,
"native": accessor})
def setter(attr):
if attr.readonly and not attr.getExtendedAttribute("PutForwards"):
return "JSNativeWrapper { op: None, info: 0 as *const JSJitInfo }"
if self.static:
accessor = 'set_' + self.descriptor.internalNameFor(attr.identifier.name)
jitinfo = "0 as *const JSJitInfo"
else:
if attr.hasLenientThis():
accessor = "generic_lenient_setter"
else:
accessor = "generic_setter"
jitinfo = "&%s_setterinfo" % self.descriptor.internalNameFor(attr.identifier.name)
return ("JSNativeWrapper { op: Some(%(native)s), info: %(info)s }"
% {"info": jitinfo,
"native": accessor})
def specData(attr):
return (str_to_const_array(attr.identifier.name), flags, getter(attr),
setter(attr))
return self.generatePrefableArray(
array, name,
' JSPropertySpec {\n'
' name: %s as *const u8 as *const libc::c_char,\n'
' flags: ((%s) & 0xFF) as u8,\n'
' getter: %s,\n'
' setter: %s\n'
' }',
' JSPropertySpec {\n'
' name: 0 as *const libc::c_char,\n'
' flags: 0,\n'
' getter: JSNativeWrapper { op: None, info: 0 as *const JSJitInfo },\n'
' setter: JSNativeWrapper { op: None, info: 0 as *const JSJitInfo }\n'
' }',
'JSPropertySpec',
specData)
class ConstDefiner(PropertyDefiner):
"""
A class for defining constants on the interface object
"""
def __init__(self, descriptor, name):
PropertyDefiner.__init__(self, descriptor, name)
self.name = name
self.regular = [m for m in descriptor.interface.members if m.isConst()]
def generateArray(self, array, name):
if len(array) == 0:
return ""
def specData(const):
return (str_to_const_array(const.identifier.name),
convertConstIDLValueToJSVal(const.value))
return self.generatePrefableArray(
array, name,
' ConstantSpec { name: %s, value: %s }',
None,
'ConstantSpec',
specData)
# We'll want to insert the indent at the beginnings of lines, but we
# don't want to indent empty lines. So only indent lines that have a
# non-newline character on them.
lineStartDetector = re.compile("^(?=[^\n])", re.MULTILINE)
class CGIndenter(CGThing):
"""
A class that takes another CGThing and generates code that indents that
CGThing by some number of spaces. The default indent is two spaces.
"""
def __init__(self, child, indentLevel=4):
CGThing.__init__(self)
self.child = child
self.indent = " " * indentLevel
def define(self):
defn = self.child.define()
if defn != "":
return re.sub(lineStartDetector, self.indent, defn)
else:
return defn
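# Sketch: CGIndenter(CGGeneric("a\n\nb")).define() yields "    a\n\n    b".
# The blank line stays unindented because lineStartDetector only matches
# lines that begin with a non-newline character.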
class CGWrapper(CGThing):
"""
Generic CGThing that wraps other CGThings with pre and post text.
"""
def __init__(self, child, pre="", post="", reindent=False):
CGThing.__init__(self)
self.child = child
self.pre = pre
self.post = post
self.reindent = reindent
def define(self):
defn = self.child.define()
if self.reindent:
# We don't use lineStartDetector because we don't want to
# insert whitespace at the beginning of our _first_ line.
defn = stripTrailingWhitespace(
defn.replace("\n", "\n" + (" " * len(self.pre))))
return self.pre + defn + self.post
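# Sketch: CGWrapper(CGGeneric("x,\ny"), pre="foo(", post=")", reindent=True)
# defines "foo(x,\n    y)": continuation lines are pushed right by the width
# of `pre` so multi-line children stay visually aligned with the opening text.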
class CGImports(CGWrapper):
"""
Generates the appropriate import/use statements.
"""
def __init__(self, child, descriptors, callbacks, imports, ignored_warnings=None):
"""
Adds a set of imports.
"""
if ignored_warnings is None:
ignored_warnings = [
'non_camel_case_types',
'non_upper_case_globals',
'unused_imports',
'unused_variables',
'unused_assignments',
]
def componentTypes(type):
if type.nullable():
type = type.unroll()
if type.isUnion():
return type.flatMemberTypes
return [type]
def isImportable(type):
if not type.isType():
assert type.isInterface()
return not type.isCallback()
return type.isNonCallbackInterface() and not type.builtin
def relatedTypesForSignatures(method):
types = []
for (returnType, arguments) in method.signatures():
types += componentTypes(returnType)
for arg in arguments:
types += componentTypes(arg.type)
return types
def getIdentifier(t):
if t.isType():
return t.inner.identifier
assert t.isInterface()
return t.identifier
types = []
for d in descriptors:
types += [d.interface]
members = d.interface.members + d.interface.namedConstructors
constructor = d.interface.ctor()
if constructor:
members += [constructor]
if d.proxy:
members += [o for o in d.operations.values() if o]
for m in members:
if m.isMethod():
types += relatedTypesForSignatures(m)
elif m.isAttr():
types += componentTypes(m.type)
for c in callbacks:
types += relatedTypesForSignatures(c)
imports += ['dom::types::%s' % getIdentifier(t).name for t in types if isImportable(t)]
statements = []
if len(ignored_warnings) > 0:
statements.append('#![allow(%s)]' % ','.join(ignored_warnings))
statements.extend('use %s;' % i for i in sorted(set(imports)))
CGWrapper.__init__(self, child,
pre='\n'.join(statements) + '\n\n')
class CGIfWrapper(CGWrapper):
def __init__(self, condition, child):
pre = CGWrapper(CGGeneric(condition), pre="if ", post=" {\n",
reindent=True)
CGWrapper.__init__(self, CGIndenter(child), pre=pre.define(),
post="\n}")
class CGTemplatedType(CGWrapper):
def __init__(self, templateName, child):
CGWrapper.__init__(self, child, pre=templateName + "<", post=">")
class CGNamespace(CGWrapper):
def __init__(self, namespace, child, public=False):
pre = "%smod %s {\n" % ("pub " if public else "", namespace)
post = "} // mod %s" % namespace
CGWrapper.__init__(self, child, pre=pre, post=post)
@staticmethod
def build(namespaces, child, public=False):
"""
Static helper method to build multiple wrapped namespaces.
"""
if not namespaces:
return child
inner = CGNamespace.build(namespaces[1:], child, public=public)
return CGNamespace(namespaces[0], inner, public=public)
def DOMClassTypeId(desc):
protochain = desc.prototypeChain
inner = ""
if desc.hasDescendants():
if desc.interface.getExtendedAttribute("Abstract"):
return "::dom::bindings::codegen::InheritTypes::TopTypeId::Abstract"
name = desc.interface.identifier.name
inner = "(::dom::bindings::codegen::InheritTypes::%sTypeId::%s)" % (name, name)
elif len(protochain) == 1:
return "::dom::bindings::codegen::InheritTypes::TopTypeId::Alone"
reversed_protochain = list(reversed(protochain))
for (child, parent) in zip(reversed_protochain, reversed_protochain[1:]):
inner = "(::dom::bindings::codegen::InheritTypes::%sTypeId::%s%s)" % (parent, child, inner)
return "::dom::bindings::codegen::InheritTypes::TopTypeId::%s%s" % (protochain[0], inner)
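# Illustrative outputs (hypothetical interfaces): a leaf interface with no
# descendants and a one-entry prototype chain yields
#
#   ::dom::bindings::codegen::InheritTypes::TopTypeId::Alone
#
# while a deeper chain nests one TypeId variant per link, e.g. something
# shaped like
#
#   ...InheritTypes::TopTypeId::Node(NodeTypeId::CharacterData(CharacterDataTypeId::Text))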
def DOMClass(descriptor):
protoList = ['PrototypeList::ID::' + proto for proto in descriptor.prototypeChain]
# Pad out the list to the right length with ID::Last so we
# guarantee that all the lists are the same length. ID::Last
# is never the ID of any prototype, so it's safe to use as
# padding.
protoList.extend(['PrototypeList::ID::Last'] * (descriptor.config.maxProtoChainLength - len(protoList)))
prototypeChainString = ', '.join(protoList)
heapSizeOf = 'heap_size_of_raw_self_and_children::<%s>' % descriptor.interface.identifier.name
return """\
DOMClass {
interface_chain: [ %s ],
type_id: %s,
heap_size_of: %s as unsafe fn(_) -> _,
}""" % (prototypeChainString, DOMClassTypeId(descriptor), heapSizeOf)
class CGDOMJSClass(CGThing):
"""
Generate a DOMJSClass for a given descriptor
"""
def __init__(self, descriptor):
CGThing.__init__(self)
self.descriptor = descriptor
def define(self):
traceHook = 'Some(%s)' % TRACE_HOOK_NAME
if self.descriptor.isGlobal():
assert not self.descriptor.weakReferenceable
traceHook = "Some(js::jsapi::JS_GlobalObjectTraceHook)"
flags = "JSCLASS_IS_GLOBAL | JSCLASS_DOM_GLOBAL"
slots = "JSCLASS_GLOBAL_SLOT_COUNT + 1"
else:
flags = "0"
if self.descriptor.weakReferenceable:
slots = "2"
else:
slots = "1"
return """\
static Class: DOMJSClass = DOMJSClass {
base: js::jsapi::Class {
name: %s as *const u8 as *const libc::c_char,
flags: JSCLASS_IS_DOMJSCLASS | JSCLASS_IMPLEMENTS_BARRIERS | %s |
(((%s) & JSCLASS_RESERVED_SLOTS_MASK) <<
JSCLASS_RESERVED_SLOTS_SHIFT), //JSCLASS_HAS_RESERVED_SLOTS(%s),
addProperty: None,
delProperty: None,
getProperty: None,
setProperty: None,
enumerate: None,
resolve: None,
convert: None,
finalize: Some(%s),
call: None,
hasInstance: None,
construct: None,
trace: %s,
spec: js::jsapi::ClassSpec {
createConstructor: None,
createPrototype: None,
constructorFunctions: 0 as *const js::jsapi::JSFunctionSpec,
constructorProperties: 0 as *const js::jsapi::JSPropertySpec,
prototypeFunctions: 0 as *const js::jsapi::JSFunctionSpec,
prototypeProperties: 0 as *const js::jsapi::JSPropertySpec,
finishInit: None,
flags: 0,
},
ext: js::jsapi::ClassExtension {
outerObject: %s,
innerObject: None,
isWrappedNative: false,
weakmapKeyDelegateOp: None,
objectMovedOp: None,
},
ops: js::jsapi::ObjectOps {
lookupProperty: None,
defineProperty: None,
hasProperty: None,
getProperty: None,
setProperty: None,
getOwnPropertyDescriptor: None,
deleteProperty: None,
watch: None,
unwatch: None,
getElements: None,
enumerate: None,
thisObject: %s,
funToString: None,
},
},
dom_class: %s
};""" % (str_to_const_array(self.descriptor.interface.identifier.name),
flags, slots, slots,
FINALIZE_HOOK_NAME, traceHook,
self.descriptor.outerObjectHook,
self.descriptor.outerObjectHook,
CGGeneric(DOMClass(self.descriptor)).define())
def str_to_const_array(s):
return "b\"%s\\0\"" % s
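# Sketch: str_to_const_array("Node") returns the string 'b"Node\0"', i.e. a
# NUL-terminated Rust byte-string literal that generated code can cast to a
# *const libc::c_char.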
class CGPrototypeJSClass(CGThing):
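# Generates the plain JSClass used for an interface's prototype object,
# reserving a slot for the unforgeable-properties holder when the interface
# has unforgeable members.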
def __init__(self, descriptor):
CGThing.__init__(self)
self.descriptor = descriptor
def define(self):
name = str_to_const_array(self.descriptor.interface.identifier.name + "Prototype")
slotCount = 0
if self.descriptor.hasUnforgeableMembers:
slotCount += 1
return """\
static PrototypeClass: JSClass = JSClass {
name: %(name)s as *const u8 as *const libc::c_char,
flags:
// JSCLASS_HAS_RESERVED_SLOTS(%(slotCount)s)
(%(slotCount)s & JSCLASS_RESERVED_SLOTS_MASK) << JSCLASS_RESERVED_SLOTS_SHIFT,
addProperty: None,
delProperty: None,
getProperty: None,
setProperty: None,
enumerate: None,
resolve: None,
convert: None,
finalize: None,
call: None,
hasInstance: None,
construct: None,
trace: None,
reserved: [0 as *mut libc::c_void; 26]
};
""" % {'name': name, 'slotCount': slotCount}
class CGInterfaceObjectJSClass(CGThing):
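# Generates the NonCallbackInterfaceObjectClass for the interface object
# itself, i.e. the constructor function exposed on the global.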
def __init__(self, descriptor):
assert descriptor.interface.hasInterfaceObject() and not descriptor.interface.isCallback()
CGThing.__init__(self)
self.descriptor = descriptor
def define(self):
if self.descriptor.interface.ctor():
constructor = CONSTRUCT_HOOK_NAME
else:
constructor = "throwing_constructor"
args = {
"constructor": constructor,
"hasInstance": HASINSTANCE_HOOK_NAME,
"name": self.descriptor.interface.identifier.name,
}
return """\
static InterfaceObjectClass: NonCallbackInterfaceObjectClass =
NonCallbackInterfaceObjectClass::new(%(constructor)s, %(hasInstance)s,
fun_to_string);
""" % args
class CGList(CGThing):
"""
Generate code for a list of CGThings. Just concatenates them together, with
an optional joiner string. "\n" is a common joiner.
"""
def __init__(self, children, joiner=""):
CGThing.__init__(self)
self.children = children
self.joiner = joiner
def append(self, child):
self.children.append(child)
def prepend(self, child):
self.children.insert(0, child)
def join(self, generator):
return self.joiner.join(filter(lambda s: len(s) > 0, (child for child in generator)))
def define(self):
return self.join(child.define() for child in self.children if child is not None)
class CGIfElseWrapper(CGList):
def __init__(self, condition, ifTrue, ifFalse):
kids = [CGIfWrapper(condition, ifTrue),
CGWrapper(CGIndenter(ifFalse), pre=" else {\n", post="\n}")]
CGList.__init__(self, kids)
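# Sketch: CGIfElseWrapper("x.is_null()", CGGeneric("a();"), CGGeneric("b();"))
# defines roughly:
#
#   if x.is_null() {
#       a();
#   } else {
#       b();
#   }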
class CGGeneric(CGThing):
"""
A class that spits out a fixed string into the codegen.
"""
def __init__(self, text):
self.text = text
def define(self):
return self.text
class CGCallbackTempRoot(CGGeneric):
def __init__(self, name):
CGGeneric.__init__(self, "%s::new(${val}.get().to_object())" % name)
def getAllTypes(descriptors, dictionaries, callbacks):
"""
Generate all the types we're dealing with. For each type, a tuple
containing type, descriptor, dictionary is yielded. The
descriptor and dictionary can be None if the type does not come
from a descriptor or dictionary; they will never both be non-None.
"""
for d in descriptors:
for t in getTypesFromDescriptor(d):
yield (t, d, None)
for dictionary in dictionaries:
for t in getTypesFromDictionary(dictionary):
yield (t, None, dictionary)
for callback in callbacks:
for t in getTypesFromCallback(callback):
yield (t, None, None)
def UnionTypes(descriptors, dictionaries, callbacks, config):
"""
Returns the CGUnionStructs and CGUnionConversionStructs for every union,
wrapped in a CGImports carrying the imports they need.
"""
imports = [
'dom::bindings::codegen::PrototypeList',
'dom::bindings::conversions::FromJSValConvertible',
'dom::bindings::conversions::ToJSValConvertible',
'dom::bindings::conversions::ConversionBehavior',
'dom::bindings::conversions::root_from_handlevalue',
'dom::bindings::conversions::StringificationBehavior',
'dom::bindings::error::throw_not_in_union',
'dom::bindings::js::Root',
'dom::bindings::str::USVString',
'dom::types::*',
'js::jsapi::JSContext',
'js::jsapi::{HandleValue, MutableHandleValue}',
'js::jsval::JSVal',
'util::str::DOMString',
]
# Now find all the things we'll need as arguments and return values because
# we need to wrap or unwrap them.
unionStructs = dict()
for (t, descriptor, dictionary) in getAllTypes(descriptors, dictionaries, callbacks):
assert not descriptor or not dictionary
t = t.unroll()
if not t.isUnion():
continue
name = str(t)
if name not in unionStructs:
provider = descriptor or config.getDescriptorProvider()
unionStructs[name] = CGList([
CGUnionStruct(t, provider),
CGUnionConversionStruct(t, provider)
])
# Sort unionStructs by key, retrieve value
unionStructs = (i[1] for i in sorted(unionStructs.items(), key=operator.itemgetter(0)))
return CGImports(CGList(unionStructs, "\n\n"), [], [], imports, ignored_warnings=[])
class Argument():
"""
A class for outputting the type and name of an argument
"""
def __init__(self, argType, name, default=None, mutable=False):
self.argType = argType
self.name = name
self.default = default
self.mutable = mutable
def declare(self):
string = ('mut ' if self.mutable else '') + self.name + ((': ' + self.argType) if self.argType else '')
# XXXjdm Support default arguments somehow :/
# if self.default is not None:
# string += " = " + self.default
return string
def define(self):
return self.argType + ' ' + self.name
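# Sketch: Argument('*mut JSContext', 'cx').declare() yields
# "cx: *mut JSContext", and Argument('u32', 'depth', mutable=True).declare()
# yields "mut depth: u32" (the `depth` argument is purely illustrative).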
class CGAbstractMethod(CGThing):
"""
An abstract class for generating code for a method. Subclasses
should override definition_body to create the actual code.
descriptor is the descriptor for the interface the method is associated with
name is the name of the method as a string
returnType is the IDLType of the return value
args is a list of Argument objects
inline should be True to generate an inline method, whose body is
part of the declaration.
alwaysInline should be True to generate an inline method annotated with
MOZ_ALWAYS_INLINE.
If templateArgs is not None it should be a list of strings containing
template arguments, and the function will be templatized using those
arguments.
docs is None or documentation for the method in a string.
"""
def __init__(self, descriptor, name, returnType, args, inline=False,
alwaysInline=False, extern=False, pub=False, templateArgs=None,
unsafe=False, docs=None):
CGThing.__init__(self)
self.descriptor = descriptor
self.name = name
self.returnType = returnType
self.args = args
self.alwaysInline = alwaysInline
self.extern = extern
self.templateArgs = templateArgs
self.pub = pub
self.unsafe = unsafe
self.docs = docs
def _argstring(self):
return ', '.join([a.declare() for a in self.args])
def _template(self):
if self.templateArgs is None:
return ''
return '<%s>\n' % ', '.join(self.templateArgs)
def _docs(self):
if self.docs is None:
return ''
lines = self.docs.splitlines()
return ''.join('/// %s\n' % line for line in lines)
def _decorators(self):
decorators = []
if self.alwaysInline:
decorators.append('#[inline]')
if self.extern:
decorators.append('unsafe')
decorators.append('extern')
if self.pub:
decorators.append('pub')
if not decorators:
return ''
return ' '.join(decorators) + ' '
def _returnType(self):
return (" -> %s" % self.returnType) if self.returnType != "void" else ""
def define(self):
body = self.definition_body()
# Method will already be marked `unsafe` if `self.extern == True`
if self.unsafe and not self.extern:
body = CGWrapper(CGIndenter(body), pre="unsafe {\n", post="\n}")
return CGWrapper(CGIndenter(body),
pre=self.definition_prologue(),
post=self.definition_epilogue()).define()
def definition_prologue(self):
return "%s%sfn %s%s(%s)%s {\n" % (self._docs(), self._decorators(),
self.name, self._template(),
self._argstring(), self._returnType())
def definition_epilogue(self):
return "\n}\n"
def definition_body(self):
raise NotImplementedError # Override me!
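# Sketch of the emitted skeleton: a subclass constructed with pub=True and
# unsafe=True whose definition_body() returns CGGeneric("body()") defines
# roughly
#
#   pub fn Name(cx: *mut JSContext) -> RetTy {
#       unsafe {
#           body()
#       }
#   }
#
# (the name, argument list, return type and decorators all vary with the
# constructor arguments documented above).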
def CreateBindingJSObject(descriptor, parent=None):
create = "let raw = Box::into_raw(object);\nlet _rt = RootedTraceable::new(&*raw);\n"
if descriptor.proxy:
assert not descriptor.isGlobal()
create += """
let handler = RegisterBindings::proxy_handlers[PrototypeList::Proxies::%s as usize];
let private = RootedValue::new(cx, PrivateValue(raw as *const libc::c_void));
let obj = NewProxyObject(cx, handler,
private.handle(),
proto.ptr, %s.get(),
ptr::null_mut(), ptr::null_mut());
assert!(!obj.is_null());
let obj = RootedObject::new(cx, obj);\
""" % (descriptor.name, parent)
elif descriptor.isGlobal():
create += ("let obj = RootedObject::new(\n"
" cx,\n"
" create_dom_global(\n"
" cx,\n"
" &Class.base as *const js::jsapi::Class as *const JSClass,\n"
" raw as *const libc::c_void,\n"
" Some(%s))\n"
");\n"
"assert!(!obj.ptr.is_null());" % TRACE_HOOK_NAME)
else:
create += ("let obj = RootedObject::new(cx, JS_NewObjectWithGivenProto(\n"
" cx, &Class.base as *const js::jsapi::Class as *const JSClass, proto.handle()));\n"
"assert!(!obj.ptr.is_null());\n"
"\n"
"JS_SetReservedSlot(obj.ptr, DOM_OBJECT_SLOT,\n"
" PrivateValue(raw as *const libc::c_void));")
if descriptor.weakReferenceable:
create += """
JS_SetReservedSlot(obj.ptr, DOM_WEAK_SLOT, PrivateValue(ptr::null()));"""
return create
def InitUnforgeablePropertiesOnHolder(descriptor, properties):
"""
Define the unforgeable properties on the unforgeable holder for
the interface represented by descriptor.
properties is a PropertyArrays instance.
"""
unforgeables = []
defineUnforgeableAttrs = "define_properties(cx, unforgeable_holder.handle(), %s).unwrap();"
defineUnforgeableMethods = "define_methods(cx, unforgeable_holder.handle(), %s).unwrap();"
unforgeableMembers = [
(defineUnforgeableAttrs, properties.unforgeable_attrs),
(defineUnforgeableMethods, properties.unforgeable_methods),
]
for template, array in unforgeableMembers:
if array.length() > 0:
unforgeables.append(CGGeneric(template % array.variableName()))
return CGList(unforgeables, "\n")
def CopyUnforgeablePropertiesToInstance(descriptor):
"""
Copy the unforgeable properties from the unforgeable holder for
this interface to the instance object we have.
"""
if not descriptor.hasUnforgeableMembers:
return ""
copyCode = ""
# For proxies, we want to define on the expando object, not directly on the
# reflector, so we can make sure we don't get confused by named getters.
if descriptor.proxy:
copyCode += """\
let expando = RootedObject::new(cx, ensure_expando_object(cx, obj.handle()));
"""
obj = "expando"
else:
obj = "obj"
# We can't do the fast copy for globals, because we can't allocate the
# unforgeable holder for those with the right JSClass. Luckily, there
# aren't too many globals being created.
if descriptor.isGlobal():
copyFunc = "JS_CopyPropertiesFrom"
else:
copyFunc = "JS_InitializePropertiesFromCompatibleNativeObject"
copyCode += """\
let mut unforgeable_holder = RootedObject::new(cx, ptr::null_mut());
unforgeable_holder.handle_mut().set(
JS_GetReservedSlot(proto.ptr, DOM_PROTO_UNFORGEABLE_HOLDER_SLOT).to_object());
assert!(%(copyFunc)s(cx, %(obj)s.handle(), unforgeable_holder.handle()));
""" % {'copyFunc': copyFunc, 'obj': obj}
return copyCode
class CGWrapMethod(CGAbstractMethod):
"""
Class that generates the FooBinding::Wrap function for non-callback
interfaces.
"""
def __init__(self, descriptor):
assert not descriptor.interface.isCallback()
if not descriptor.isGlobal():
args = [Argument('*mut JSContext', 'cx'), Argument('GlobalRef', 'scope'),
Argument("Box<%s>" % descriptor.concreteType, 'object')]
else:
args = [Argument('*mut JSContext', 'cx'),
Argument("Box<%s>" % descriptor.concreteType, 'object')]
retval = 'Root<%s>' % descriptor.concreteType
CGAbstractMethod.__init__(self, descriptor, 'Wrap', retval, args,
pub=True, unsafe=True)
def definition_body(self):
unforgeable = CopyUnforgeablePropertiesToInstance(self.descriptor)
if not self.descriptor.isGlobal():
create = CreateBindingJSObject(self.descriptor, "scope")
return CGGeneric("""\
let _ar = JSAutoRequest::new(cx);
let scope = scope.reflector().get_jsobject();
assert!(!scope.get().is_null());
assert!(((*JS_GetClass(scope.get())).flags & JSCLASS_IS_GLOBAL) != 0);
let mut proto = RootedObject::new(cx, ptr::null_mut());
let _ac = JSAutoCompartment::new(cx, scope.get());
GetProtoObject(cx, scope, scope, proto.handle_mut());
assert!(!proto.ptr.is_null());
%(createObject)s
%(copyUnforgeable)s
(*raw).init_reflector(obj.ptr);
Root::from_ref(&*raw)""" % {'copyUnforgeable': unforgeable, 'createObject': create})
else:
create = CreateBindingJSObject(self.descriptor)
return CGGeneric("""\
let _ar = JSAutoRequest::new(cx);
%(createObject)s
let _ac = JSAutoCompartment::new(cx, obj.ptr);
let mut proto = RootedObject::new(cx, ptr::null_mut());
GetProtoObject(cx, obj.handle(), obj.handle(), proto.handle_mut());
JS_SetPrototype(cx, obj.handle(), proto.handle());
%(copyUnforgeable)s
(*raw).init_reflector(obj.ptr);
let ret = Root::from_ref(&*raw);
RegisterBindings::Register(cx, obj.handle());
ret""" % {'copyUnforgeable': unforgeable, 'createObject': create})
class CGIDLInterface(CGThing):
"""
Class for codegen of an implementation of the IDLInterface trait.
"""
def __init__(self, descriptor):
CGThing.__init__(self)
self.descriptor = descriptor
def define(self):
interface = self.descriptor.interface
name = self.descriptor.name
if (interface.getUserData("hasConcreteDescendant", False) or
interface.getUserData("hasProxyDescendant", False)):
depth = self.descriptor.prototypeDepth
check = "class.interface_chain[%s] == PrototypeList::ID::%s" % (depth, name)
elif self.descriptor.proxy:
check = "class as *const _ == &Class as *const _"
else:
check = "class as *const _ == &Class.dom_class as *const _"
return """\
impl IDLInterface for %(name)s {
#[inline]
fn derives(class: &'static DOMClass) -> bool {
%(check)s
}
}
impl PartialEq for %(name)s {
fn eq(&self, other: &%(name)s) -> bool {
self as *const %(name)s == &*other
}
}
""" % {'check': check, 'name': name}
class CGAbstractExternMethod(CGAbstractMethod):
"""
Abstract base class for codegen of implementation-only (no
declaration) static methods.
"""
def __init__(self, descriptor, name, returnType, args):
CGAbstractMethod.__init__(self, descriptor, name, returnType, args,
inline=False, extern=True)
class PropertyArrays():
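# Groups every PropertyDefiner for one descriptor: methods, attributes and
# constants, each split into static / regular / unforgeable variants, so
# callers can iterate over them uniformly via arrayNames().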
def __init__(self, descriptor):
self.static_methods = MethodDefiner(descriptor, "StaticMethods",
static=True, unforgeable=False)
self.static_attrs = AttrDefiner(descriptor, "StaticAttributes",
static=True, unforgeable=False)
self.methods = MethodDefiner(descriptor, "Methods", static=False, unforgeable=False)
self.unforgeable_methods = MethodDefiner(descriptor, "UnforgeableMethods",
static=False, unforgeable=True)
self.attrs = AttrDefiner(descriptor, "Attributes", static=False, unforgeable=False)
self.unforgeable_attrs = AttrDefiner(descriptor, "UnforgeableAttributes",
static=False, unforgeable=True)
self.consts = ConstDefiner(descriptor, "Constants")
@staticmethod
def arrayNames():
return [
"static_methods",
"static_attrs",
"methods",
"unforgeable_methods",
"attrs",
"unforgeable_attrs",
"consts",
]
def variableNames(self):
names = {}
for array in self.arrayNames():
names[array] = getattr(self, array).variableName()
return names
def __str__(self):
define = ""
for array in self.arrayNames():
define += str(getattr(self, array))
return define
class CGCreateInterfaceObjectsMethod(CGAbstractMethod):
"""
Generate the CreateInterfaceObjects method for an interface descriptor.
properties should be a PropertyArrays instance.
"""
def __init__(self, descriptor, properties):
args = [Argument('*mut JSContext', 'cx')]
if not descriptor.interface.isCallback():
args += [Argument('HandleObject', 'global'),
Argument('*mut ProtoOrIfaceArray', 'cache')]
args.append(Argument('HandleObject', 'receiver'))
CGAbstractMethod.__init__(self, descriptor, 'CreateInterfaceObjects', 'void', args,
unsafe=True)
self.properties = properties
def definition_body(self):
name = self.descriptor.interface.identifier.name
if self.descriptor.interface.isCallback():
assert not self.descriptor.interface.ctor() and self.descriptor.interface.hasConstants()
return CGGeneric("""\
create_callback_interface_object(cx, receiver, sConstants, %s);""" % str_to_const_array(name))
protoChain = self.descriptor.prototypeChain
if len(protoChain) == 1:
getPrototypeProto = "prototype_proto.ptr = JS_GetObjectPrototype(cx, global)"
else:
getPrototypeProto = ("%s::GetProtoObject(cx, global, receiver, prototype_proto.handle_mut())" %
toBindingNamespace(self.descriptor.prototypeChain[-2]))
code = [CGGeneric("""\
let mut prototype_proto = RootedObject::new(cx, ptr::null_mut());
%s;
assert!(!prototype_proto.ptr.is_null());""" % getPrototypeProto)]
properties = {"id": name}
for arrayName in self.properties.arrayNames():
array = getattr(self.properties, arrayName)
if arrayName == "consts":
if array.length():
properties[arrayName] = array.variableName()
else:
properties[arrayName] = "&[]"
elif array.length():
properties[arrayName] = "Some(%s)" % array.variableName()
else:
properties[arrayName] = "None"
code.append(CGGeneric("""
let mut prototype = RootedObject::new(cx, ptr::null_mut());
create_interface_prototype_object(cx,
prototype_proto.handle(),
&PrototypeClass,
%(methods)s,
%(attrs)s,
%(consts)s,
prototype.handle_mut());
assert!(!prototype.ptr.is_null());
(*cache)[PrototypeList::ID::%(id)s as usize] = prototype.ptr;
if <*mut JSObject>::needs_post_barrier(prototype.ptr) {
<*mut JSObject>::post_barrier((*cache).as_mut_ptr().offset(PrototypeList::ID::%(id)s as isize));
}""" % properties))
if self.descriptor.interface.hasInterfaceObject():
properties["name"] = str_to_const_array(name)
if self.descriptor.interface.ctor():
properties["constructor"] = CONSTRUCT_HOOK_NAME
properties["length"] = methodLength(self.descriptor.interface.ctor())
else:
properties["constructor"] = "throwing_constructor"
properties["length"] = 0
if self.descriptor.interface.parent:
parentName = toBindingNamespace(self.descriptor.getParentName())
code.append(CGGeneric("""
let mut interface_proto = RootedObject::new(cx, ptr::null_mut());
%s::GetConstructorObject(cx, global, receiver, interface_proto.handle_mut());""" % parentName))
else:
code.append(CGGeneric("""
let interface_proto = RootedObject::new(cx, JS_GetFunctionPrototype(cx, global));"""))
code.append(CGGeneric("""\
assert!(!interface_proto.ptr.is_null());
let mut interface = RootedObject::new(cx, ptr::null_mut());
create_noncallback_interface_object(cx,
receiver,
interface_proto.handle(),
&InterfaceObjectClass,
%(static_methods)s,
%(static_attrs)s,
%(consts)s,
prototype.handle(),
%(name)s,
%(length)s,
interface.handle_mut());
assert!(!interface.ptr.is_null());""" % properties))
if self.descriptor.hasDescendants():
code.append(CGGeneric("""\
(*cache)[PrototypeList::Constructor::%(id)s as usize] = interface.ptr;
if <*mut JSObject>::needs_post_barrier(prototype.ptr) {
<*mut JSObject>::post_barrier((*cache).as_mut_ptr().offset(PrototypeList::Constructor::%(id)s as isize));
}""" % properties))
constructors = self.descriptor.interface.namedConstructors
if constructors:
decl = "let named_constructors: [(NonNullJSNative, &'static [u8], u32); %d]" % len(constructors)
specs = []
for constructor in constructors:
hook = CONSTRUCT_HOOK_NAME + "_" + constructor.identifier.name
name = str_to_const_array(constructor.identifier.name)
length = methodLength(constructor)
specs.append(CGGeneric("(%s as NonNullJSNative, %s, %d)" % (hook, name, length)))
values = CGIndenter(CGList(specs, "\n"), 4)
code.append(CGWrapper(values, pre="%s = [\n" % decl, post="\n];"))
code.append(CGGeneric("create_named_constructors(cx, receiver, &named_constructors, prototype.handle());"))
if self.descriptor.hasUnforgeableMembers:
# We want to use the same JSClass and prototype as the object we'll
# end up defining the unforgeable properties on in the end, so that
# we can use JS_InitializePropertiesFromCompatibleNativeObject to do
# a fast copy. In the case of proxies that's null, because the
# expando object is a vanilla object, but in the case of other DOM
# objects it's whatever our class is.
#
# Also, for a global we can't use the global's class; just use
# nullptr and when we do the copy off the holder we'll take a slower
# path. This also means that we don't need to worry about matching
# the prototype.
if self.descriptor.proxy or self.descriptor.isGlobal():
holderClass = "ptr::null()"
holderProto = "HandleObject::null()"
else:
holderClass = "&Class.base as *const js::jsapi::Class as *const JSClass"
holderProto = "prototype.handle()"
code.append(CGGeneric("""
let mut unforgeable_holder = RootedObject::new(cx, ptr::null_mut());
unforgeable_holder.handle_mut().set(
JS_NewObjectWithoutMetadata(cx, %(holderClass)s, %(holderProto)s));
assert!(!unforgeable_holder.ptr.is_null());
""" % {'holderClass': holderClass, 'holderProto': holderProto}))
code.append(InitUnforgeablePropertiesOnHolder(self.descriptor, self.properties))
code.append(CGGeneric("""\
JS_SetReservedSlot(prototype.ptr, DOM_PROTO_UNFORGEABLE_HOLDER_SLOT,
ObjectValue(&*unforgeable_holder.ptr))"""))
return CGList(code, "\n")
class CGGetPerInterfaceObject(CGAbstractMethod):
"""
A method for getting a per-interface object (a prototype object or interface
constructor object).
"""
def __init__(self, descriptor, name, idPrefix="", pub=False):
args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', 'global'),
Argument('HandleObject', 'receiver'),
Argument('MutableHandleObject', 'rval')]
CGAbstractMethod.__init__(self, descriptor, name,
'void', args, pub=pub, unsafe=True)
self.id = idPrefix + "::" + self.descriptor.name
def definition_body(self):
return CGGeneric("""
/* global and receiver are usually the same, but they can be different
too. For example a sandbox often has an xray wrapper for a window as the
prototype of the sandbox's global. In that case receiver is the xray
wrapper and global is the sandbox's global.
*/
assert!(((*JS_GetClass(global.get())).flags & JSCLASS_DOM_GLOBAL) != 0);
/* Check to see whether the interface objects are already installed */
let proto_or_iface_array = get_proto_or_iface_array(global.get());
rval.set((*proto_or_iface_array)[%(id)s as usize]);
if !rval.get().is_null() {
return;
}
CreateInterfaceObjects(cx, global, proto_or_iface_array, receiver);
rval.set((*proto_or_iface_array)[%(id)s as usize]);
assert!(!rval.get().is_null());
""" % {"id": self.id})
class CGGetProtoObjectMethod(CGGetPerInterfaceObject):
"""
A method for getting the interface prototype object.
"""
def __init__(self, descriptor):
CGGetPerInterfaceObject.__init__(self, descriptor, "GetProtoObject",
"PrototypeList::ID", pub=descriptor.hasDescendants())
def definition_body(self):
return CGList([
CGGeneric("""\
/* Get the interface prototype object for this class. This will create the
object as needed. */"""),
CGGetPerInterfaceObject.definition_body(self),
])
class CGGetConstructorObjectMethod(CGGetPerInterfaceObject):
"""
A method for getting the interface constructor object.
"""
def __init__(self, descriptor):
CGGetPerInterfaceObject.__init__(self, descriptor, "GetConstructorObject",
"PrototypeList::Constructor",
pub=descriptor.hasDescendants())
def definition_body(self):
return CGList([
CGGeneric("""\
/* Get the interface object for this class. This will create the object as
needed. */"""),
CGGetPerInterfaceObject.definition_body(self),
])
class CGDefineProxyHandler(CGAbstractMethod):
"""
A method to create and cache the proxy trap for a given interface.
"""
def __init__(self, descriptor):
assert descriptor.proxy
CGAbstractMethod.__init__(self, descriptor, 'DefineProxyHandler',
'*const libc::c_void', [],
pub=True, unsafe=True)
def define(self):
return CGAbstractMethod.define(self)
def definition_body(self):
customDefineProperty = 'proxyhandler::define_property'
if self.descriptor.operations['IndexedSetter'] or self.descriptor.operations['NamedSetter']:
customDefineProperty = 'defineProperty'
customDelete = 'proxyhandler::delete'
if self.descriptor.operations['NamedDeleter']:
customDelete = 'delete'
body = """\
let traps = ProxyTraps {
enter: None,
getOwnPropertyDescriptor: Some(getOwnPropertyDescriptor),
defineProperty: Some(%s),
ownPropertyKeys: Some(own_property_keys),
delete_: Some(%s),
enumerate: None,
preventExtensions: Some(proxyhandler::prevent_extensions),
isExtensible: Some(proxyhandler::is_extensible),
has: None,
get: Some(get),
set: None,
call: None,
construct: None,
getPropertyDescriptor: Some(get_property_descriptor),
hasOwn: Some(hasOwn),
getOwnEnumerablePropertyKeys: None,
nativeCall: None,
hasInstance: None,
objectClassIs: None,
className: Some(className),
fun_toString: None,
boxedValue_unbox: None,
defaultValue: None,
trace: Some(%s),
finalize: Some(%s),
objectMoved: None,
isCallable: None,
isConstructor: None,
};
CreateProxyHandler(&traps, &Class as *const _ as *const _)\
""" % (customDefineProperty, customDelete, TRACE_HOOK_NAME, FINALIZE_HOOK_NAME)
return CGGeneric(body)
class CGDefineDOMInterfaceMethod(CGAbstractMethod):
"""
A method for resolve hooks to try to lazily define the interface object for
a given interface.
"""
def __init__(self, descriptor):
assert descriptor.interface.hasInterfaceObject()
args = [
Argument('*mut JSContext', 'cx'),
Argument('HandleObject', 'global'),
]
CGAbstractMethod.__init__(self, descriptor, 'DefineDOMInterface', 'void', args, pub=True)
def define(self):
return CGAbstractMethod.define(self)
def definition_body(self):
if self.descriptor.interface.isCallback():
code = "CreateInterfaceObjects(cx, global);"
else:
code = """\
let mut proto = RootedObject::new(cx, ptr::null_mut());
GetProtoObject(cx, global, global, proto.handle_mut());
assert!(!proto.ptr.is_null());
"""
return CGGeneric("assert!(!global.get().is_null());\n" + code)
def needCx(returnType, arguments, considerTypes):
return (considerTypes and
(typeNeedsCx(returnType, True) or
any(typeNeedsCx(a.type) for a in arguments)))
class CGCallGenerator(CGThing):
"""
A class to generate an actual call to a Rust object. Assumes that the Rust
object is stored in a variable whose name is given by the |object| argument.
errorResult should be a string for the value to return in case of an
exception from the native code, or None if no error reporting is needed.
"""
def __init__(self, errorResult, arguments, argsPre, returnType,
extendedAttributes, descriptorProvider, nativeMethodName,
static, object="this"):
CGThing.__init__(self)
assert errorResult is None or isinstance(errorResult, str)
isFallible = errorResult is not None
result = getRetvalDeclarationForType(returnType, descriptorProvider)
if isFallible:
result = CGWrapper(result, pre="Result<", post=", Error>")
args = CGList([CGGeneric(arg) for arg in argsPre], ", ")
for (a, name) in arguments:
# XXXjdm Perhaps we should pass all nontrivial types by borrowed pointer
if a.type.isDictionary():
name = "&" + name
args.append(CGGeneric(name))
needsCx = needCx(returnType, (a for (a, _) in arguments), True)
if "cx" not in argsPre and needsCx:
args.prepend(CGGeneric("cx"))
# Build up our actual call
self.cgRoot = CGList([], "\n")
call = CGGeneric(nativeMethodName)
if static:
call = CGWrapper(call, pre="%s::" % descriptorProvider.interface.identifier.name)
else:
call = CGWrapper(call, pre="%s." % object)
call = CGList([call, CGWrapper(args, pre="(", post=")")])
self.cgRoot.append(CGList([
CGGeneric("let result: "),
result,
CGGeneric(" = "),
call,
CGGeneric(";"),
]))
if isFallible:
if static:
glob = ""
else:
glob = " let global = global_root_from_reflector(this);\n"
self.cgRoot.append(CGGeneric(
"let result = match result {\n"
" Ok(result) => result,\n"
" Err(e) => {\n"
"%s"
" throw_dom_exception(cx, global.r(), e);\n"
" return%s;\n"
" },\n"
"};" % (glob, errorResult)))
def define(self):
return self.cgRoot.define()
class CGPerSignatureCall(CGThing):
"""
This class handles the guts of generating code for a particular
call signature. A call signature consists of four things:
1) A return type, which can be None to indicate that there is no
actual return value (e.g. this is an attribute setter) or an
IDLType if there's an IDL type involved (including |void|).
2) An argument list, which is allowed to be empty.
3) A name of a native method to call.
4) Whether or not this method is static.
We also need to know whether this is a method or a getter/setter
to do error reporting correctly.
The idlNode parameter can be either a method or an attr. We can query
|idlNode.identifier| in both cases, so we can be agnostic between the two.
"""
# XXXbz For now each entry in the argument list is either an
# IDLArgument or a FakeArgument, but longer-term we may want to
# have ways of flagging things like JSContext* or optional_argc in
# there.
def __init__(self, returnType, argsPre, arguments, nativeMethodName, static,
descriptor, idlNode, argConversionStartsAt=0,
getter=False, setter=False):
CGThing.__init__(self)
self.returnType = returnType
self.descriptor = descriptor
self.idlNode = idlNode
self.extendedAttributes = descriptor.getExtendedAttributes(idlNode,
getter=getter,
setter=setter)
self.argsPre = argsPre
self.arguments = arguments
self.argCount = len(arguments)
cgThings = []
cgThings.extend([CGArgumentConverter(arguments[i], i, self.getArgs(),
self.getArgc(), self.descriptor,
invalidEnumValueFatal=not setter) for
i in range(argConversionStartsAt, self.argCount)])
errorResult = None
if self.isFallible():
errorResult = " false"
cgThings.append(CGCallGenerator(
errorResult,
self.getArguments(), self.argsPre, returnType,
self.extendedAttributes, descriptor, nativeMethodName,
static))
self.cgRoot = CGList(cgThings, "\n")
def getArgs(self):
return "args" if self.argCount > 0 else ""
def getArgc(self):
return "argc"
def getArguments(self):
def process(arg, i):
argVal = "arg" + str(i)
if arg.type.isGeckoInterface() and not arg.type.unroll().inner.isCallback():
argVal += ".r()"
return argVal
return [(a, process(a, i)) for (i, a) in enumerate(self.arguments)]
def isFallible(self):
return 'infallible' not in self.extendedAttributes
def wrap_return_value(self):
return wrapForType('args.rval()')
def define(self):
return (self.cgRoot.define() + "\n" + self.wrap_return_value())
class CGSwitch(CGList):
"""
A class to generate code for a switch statement.
Takes three constructor arguments: an expression, a list of cases,
and an optional default.
Each case is a CGCase. The default is a CGThing for the body of
the default case, if any.
"""
def __init__(self, expression, cases, default=None):
CGList.__init__(self, [CGIndenter(c) for c in cases], "\n")
self.prepend(CGWrapper(CGGeneric(expression),
pre="match ", post=" {"))
if default is not None:
self.append(
CGIndenter(
CGWrapper(
CGIndenter(default),
pre="_ => {\n",
post="\n}"
)
)
)
self.append(CGGeneric("}"))
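# Sketch: CGSwitch("v", [CGCase("0", CGGeneric("a();"))], CGGeneric("b();"))
# defines a Rust match roughly like:
#
#   match v {
#       0 => {
#           a();
#       }
#       _ => {
#           b();
#       }
#   }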
class CGCase(CGList):
"""
A class to generate code for a case statement.
Takes three constructor arguments: an expression, a CGThing for
the body (allowed to be None if there is no body), and an optional
argument (defaulting to False) for whether to fall through.
"""
def __init__(self, expression, body, fallThrough=False):
CGList.__init__(self, [], "\n")
self.append(CGWrapper(CGGeneric(expression), post=" => {"))
bodyList = CGList([body], "\n")
if fallThrough:
raise TypeError("fall through required but unsupported")
# bodyList.append(CGGeneric('panic!("fall through unsupported"); /* Fall through */'))
self.append(CGIndenter(bodyList))
self.append(CGGeneric("}"))
class CGGetterCall(CGPerSignatureCall):
"""
A class to generate a native object getter call for a particular IDL
getter.
"""
def __init__(self, argsPre, returnType, nativeMethodName, descriptor, attr):
CGPerSignatureCall.__init__(self, returnType, argsPre, [],
nativeMethodName, attr.isStatic(), descriptor,
attr, getter=True)
class FakeArgument():
"""
A class that quacks like an IDLArgument. This is used to make
setters look like method calls or for special operations.
"""
def __init__(self, type, interfaceMember, allowTreatNonObjectAsNull=False):
self.type = type
self.optional = False
self.variadic = False
self.defaultValue = None
self._allowTreatNonObjectAsNull = allowTreatNonObjectAsNull
self.treatNullAs = interfaceMember.treatNullAs
self.enforceRange = False
self.clamp = False
def allowTreatNonCallableAsNull(self):
return self._allowTreatNonObjectAsNull
class CGSetterCall(CGPerSignatureCall):
"""
A class to generate a native object setter call for a particular IDL
setter.
"""
def __init__(self, argsPre, argType, nativeMethodName, descriptor, attr):
CGPerSignatureCall.__init__(self, None, argsPre,
[FakeArgument(argType, attr, allowTreatNonObjectAsNull=True)],
nativeMethodName, attr.isStatic(), descriptor, attr,
setter=True)
def wrap_return_value(self):
# We have no return value
return "\nreturn true;"
def getArgc(self):
return "1"
class CGAbstractStaticBindingMethod(CGAbstractMethod):
"""
Common class to generate the JSNatives for all our static methods, getters
and setters. This will generate the function declaration and unwrap the
global object. Subclasses are expected to override the generate_code
function to do the rest of the work. This function should return a
CGThing which is already properly indented.
"""
def __init__(self, descriptor, name):
args = [
Argument('*mut JSContext', 'cx'),
Argument('libc::c_uint', 'argc'),
Argument('*mut JSVal', 'vp'),
]
CGAbstractMethod.__init__(self, descriptor, name, "bool", args, extern=True)
def definition_body(self):
preamble = CGGeneric("""\
let global = global_root_from_object(JS_CALLEE(cx, vp).to_object());
""")
return CGList([preamble, self.generate_code()])
def generate_code(self):
raise NotImplementedError # Override me!
class CGSpecializedMethod(CGAbstractExternMethod):
"""
A class for generating the Rust code for a specialized method that the JIT
can call with lower overhead.
"""
def __init__(self, descriptor, method):
self.method = method
name = method.identifier.name
args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', '_obj'),
Argument('*const %s' % descriptor.concreteType, 'this'),
Argument('*const JSJitMethodCallArgs', 'args')]
CGAbstractExternMethod.__init__(self, descriptor, name, 'bool', args)
def definition_body(self):
nativeName = CGSpecializedMethod.makeNativeName(self.descriptor,
self.method)
return CGWrapper(CGMethodCall([], nativeName, self.method.isStatic(),
self.descriptor, self.method),
pre="let this = &*this;\n"
"let args = &*args;\n"
"let argc = args._base.argc_;\n")
@staticmethod
def makeNativeName(descriptor, method):
name = method.identifier.name
nativeName = descriptor.binaryNameFor(name)
if nativeName == name:
nativeName = descriptor.internalNameFor(name)
return MakeNativeName(nativeName)
class CGStaticMethod(CGAbstractStaticBindingMethod):
"""
A class for generating the Rust code for an IDL static method.
"""
def __init__(self, descriptor, method):
self.method = method
name = method.identifier.name
CGAbstractStaticBindingMethod.__init__(self, descriptor, name)
def generate_code(self):
nativeName = CGSpecializedMethod.makeNativeName(self.descriptor,
self.method)
setupArgs = CGGeneric("let args = CallArgs::from_vp(vp, argc);\n")
call = CGMethodCall(["global.r()"], nativeName, True, self.descriptor, self.method)
return CGList([setupArgs, call])
class CGSpecializedGetter(CGAbstractExternMethod):
"""
A class for generating the code for a specialized attribute getter
that the JIT can call with lower overhead.
"""
def __init__(self, descriptor, attr):
self.attr = attr
name = 'get_' + descriptor.internalNameFor(attr.identifier.name)
args = [Argument('*mut JSContext', 'cx'),
Argument('HandleObject', '_obj'),
Argument('*const %s' % descriptor.concreteType, 'this'),
Argument('JSJitGetterCallArgs', 'args')]
CGAbstractExternMethod.__init__(self, descriptor, name, "bool", args)
def definition_body(self):
nativeName = CGSpecializedGetter.makeNativeName(self.descriptor,
self.attr)
return CGWrapper(CGGetterCall([], self.attr.type, nativeName,
self.descriptor, self.attr),
pre="let this = &*this;\n")
@staticmethod
def makeNativeName(descriptor, attr):
name = attr.identifier.name
nativeName = descriptor.binaryNameFor(name)
if nativeName == name:
nativeName = descriptor.internalNameFor(name)
nativeName = MakeNativeName(nativeName)
infallible = ('infallible' in
descriptor.getExtendedAttributes(attr, getter=True))
if attr.type.nullable() or not infallible:
return "Get" + nativeName
return nativeName
class CGStaticGetter(CGAbstractStaticBindingMethod):
"""
A class for generating the Rust code for an IDL static attribute getter.
"""
def __init__(self, descriptor, attr):
self.attr = attr
name = 'get_' + attr.identifier.name
CGAbstractStaticBindingMethod.__init__(self, descriptor, name)
def generate_code(self):
nativeName = CGSpecializedGetter.makeNativeName(self.descriptor,
self.attr)
setupArgs = CGGeneric("let args = CallArgs::from_vp(vp, argc);\n")
call = CGGetterCall(["global.r()"], self.attr.type, nativeName, self.descriptor,
self.attr)
return CGList([setupArgs, call])
class CGSpecializedSetter(CGAbstractExternMethod):
"""
A class for generating the code for a specialized attribute setter
that the JIT can call with lower overhead.
"""
def __init__(self, descriptor, attr):
self.attr = attr
name = 'set_' + descriptor.internalNameFor(attr.identifier.name)
args = [Argument('*mut JSContext', 'cx'),
Argument('HandleObject', 'obj'),
Argument('*const %s' % descriptor.concreteType, 'this'),
Argument('JSJitSetterCallArgs', 'args')]
CGAbstractExternMethod.__init__(self, descriptor, name, "bool", args)
def definition_body(self):
nativeName = CGSpecializedSetter.makeNativeName(self.descriptor,
self.attr)
return CGWrapper(CGSetterCall([], self.attr.type, nativeName,
self.descriptor, self.attr),
pre="let this = &*this;\n")
@staticmethod
def makeNativeName(descriptor, attr):
name = attr.identifier.name
nativeName = descriptor.binaryNameFor(name)
if nativeName == name:
nativeName = descriptor.internalNameFor(name)
return "Set" + MakeNativeName(nativeName)
class CGStaticSetter(CGAbstractStaticBindingMethod):
"""
A class for generating the Rust code for an IDL static attribute setter.
"""
def __init__(self, descriptor, attr):
self.attr = attr
name = 'set_' + attr.identifier.name
CGAbstractStaticBindingMethod.__init__(self, descriptor, name)
def generate_code(self):
nativeName = CGSpecializedSetter.makeNativeName(self.descriptor,
self.attr)
checkForArg = CGGeneric(
"let args = CallArgs::from_vp(vp, argc);\n"
"if argc == 0 {\n"
" throw_type_error(cx, \"Not enough arguments to %s setter.\");\n"
" return false;\n"
"}" % self.attr.identifier.name)
call = CGSetterCall(["global.r()"], self.attr.type, nativeName, self.descriptor,
self.attr)
return CGList([checkForArg, call])
class CGSpecializedForwardingSetter(CGSpecializedSetter):
"""
A class for generating the code for an IDL attribute forwarding setter.
"""
def __init__(self, descriptor, attr):
CGSpecializedSetter.__init__(self, descriptor, attr)
def definition_body(self):
attrName = self.attr.identifier.name
forwardToAttrName = self.attr.getExtendedAttribute("PutForwards")[0]
# JS_GetProperty and JS_SetProperty can only deal with ASCII
assert all(ord(c) < 128 for c in attrName)
assert all(ord(c) < 128 for c in forwardToAttrName)
return CGGeneric("""\
let mut v = RootedValue::new(cx, UndefinedValue());
if !JS_GetProperty(cx, obj, %s as *const u8 as *const libc::c_char, v.handle_mut()) {
return false;
}
if !v.ptr.is_object() {
throw_type_error(cx, "Value.%s is not an object.");
return false;
}
let target_obj = RootedObject::new(cx, v.ptr.to_object());
JS_SetProperty(cx, target_obj.handle(), %s as *const u8 as *const libc::c_char, args.get(0))
""" % (str_to_const_array(attrName), attrName, str_to_const_array(forwardToAttrName)))
class CGMemberJITInfo(CGThing):
"""
A class for generating the JITInfo for a property that points to
our specialized getter and setter.
"""
def __init__(self, descriptor, member):
self.member = member
self.descriptor = descriptor
def defineJitInfo(self, infoName, opName, opType, infallible, movable,
aliasSet, alwaysInSlot, lazilyInSlot, slotIndex,
returnTypes, args):
"""
aliasSet is a JSJitInfo::AliasSet value, without the "JSJitInfo::" bit.
args is None if we don't want to output argTypes for some
reason (e.g. we have overloads or we're not a method) and
otherwise an iterable of the arguments for this method.
"""
assert not movable or aliasSet != "AliasEverything" # Can't move write-aliasing things
assert not alwaysInSlot or movable # Things always in slots had better be movable
def jitInfoInitializer(isTypedMethod):
initializer = fill(
"""
JSJitInfo {
call: ${opName} as *const ::libc::c_void,
protoID: PrototypeList::ID::${name} as u16,
depth: ${depth},
_bitfield_1:
JSJitInfo::new_bitfield_1(
OpType::${opType} as u8,
AliasSet::${aliasSet} as u8,
JSValueType::${returnType} as u8,
${isInfallible},
${isMovable},
${isAlwaysInSlot},
${isLazilyCachedInSlot},
${isTypedMethod},
${slotIndex} as u16,
)
}
""",
opName=opName,
name=self.descriptor.name,
depth=self.descriptor.interface.inheritanceDepth(),
opType=opType,
aliasSet=aliasSet,
returnType=reduce(CGMemberJITInfo.getSingleReturnType, returnTypes,
""),
isInfallible=toStringBool(infallible),
isMovable=toStringBool(movable),
isAlwaysInSlot=toStringBool(alwaysInSlot),
isLazilyCachedInSlot=toStringBool(lazilyInSlot),
isTypedMethod=toStringBool(isTypedMethod),
slotIndex=slotIndex)
return initializer.rstrip()
if args is not None:
argTypes = "%s_argTypes" % infoName
args = [CGMemberJITInfo.getJSArgType(arg.type) for arg in args]
args.append("ArgType::ArgTypeListEnd as i32")
argTypesDecl = (
"const %s: [i32; %d] = [ %s ];\n" %
(argTypes, len(args), ", ".join(args)))
return fill(
"""
$*{argTypesDecl}
const ${infoName}: JSTypedMethodJitInfo = JSTypedMethodJitInfo {
base: ${jitInfo},
argTypes: &${argTypes} as *const _ as *const ArgType,
};
""",
argTypesDecl=argTypesDecl,
infoName=infoName,
jitInfo=indent(jitInfoInitializer(True)),
argTypes=argTypes)
return ("\n"
"const %s: JSJitInfo = %s;\n"
% (infoName, jitInfoInitializer(False)))
def define(self):
if self.member.isAttr():
internalMemberName = self.descriptor.internalNameFor(self.member.identifier.name)
getterinfo = ("%s_getterinfo" % internalMemberName)
getter = ("get_%s" % internalMemberName)
getterinfal = "infallible" in self.descriptor.getExtendedAttributes(self.member, getter=True)
movable = self.mayBeMovable() and getterinfal
aliasSet = self.aliasSet()
isAlwaysInSlot = self.member.getExtendedAttribute("StoreInSlot")
if self.member.slotIndex is not None:
assert isAlwaysInSlot or self.member.getExtendedAttribute("Cached")
isLazilyCachedInSlot = not isAlwaysInSlot
slotIndex = memberReservedSlot(self.member) # noqa:FIXME: memberReservedSlot is not defined
# We'll statically assert that this is not too big in
# CGUpdateMemberSlotsMethod, in the case when
# isAlwaysInSlot is true.
else:
isLazilyCachedInSlot = False
slotIndex = "0"
result = self.defineJitInfo(getterinfo, getter, "Getter",
getterinfal, movable, aliasSet,
isAlwaysInSlot, isLazilyCachedInSlot,
slotIndex,
[self.member.type], None)
if (not self.member.readonly or self.member.getExtendedAttribute("PutForwards")):
setterinfo = ("%s_setterinfo" % internalMemberName)
setter = ("set_%s" % internalMemberName)
# Setters are always fallible, since they have to do a typed unwrap.
result += self.defineJitInfo(setterinfo, setter, "Setter",
False, False, "AliasEverything",
False, False, "0",
[BuiltinTypes[IDLBuiltinType.Types.void]],
None)
return result
if self.member.isMethod():
methodinfo = ("%s_methodinfo" % self.member.identifier.name)
method = ("%s" % self.member.identifier.name)
# Methods are treated as infallible only when the native implementation
# is infallible, there are no arguments to unwrap, and the return type
# is infallible to wrap up for return.
sigs = self.member.signatures()
if len(sigs) != 1:
# Don't handle overloading. If there's more than one signature,
# one of them must take arguments.
methodInfal = False
args = None
movable = False
else:
sig = sigs[0]
# For methods that affect nothing, it's OK to set movable to our
# notion of infallible on the C++ side, without considering
# argument conversions, since argument conversions that can
# reliably throw would be effectful anyway and the jit doesn't
# move effectful things.
hasInfallibleImpl = "infallible" in self.descriptor.getExtendedAttributes(self.member)
movable = self.mayBeMovable() and hasInfallibleImpl
# XXXbz can we move the smarts about fallibility due to arg
# conversions into the JIT, using our new args stuff?
if (len(sig[1]) != 0):
# We have arguments, so argument conversion can fail
methodInfal = False
else:
methodInfal = hasInfallibleImpl
# For now, only bother to output args if we're side-effect-free.
if self.member.affects == "Nothing":
args = sig[1]
else:
args = None
aliasSet = self.aliasSet()
result = self.defineJitInfo(methodinfo, method, "Method",
methodInfal, movable, aliasSet,
False, False, "0",
[s[0] for s in sigs], args)
return result
raise TypeError("Illegal member type to CGPropertyJITInfo")
def mayBeMovable(self):
"""
Returns whether this attribute or method may be movable, just
based on Affects/DependsOn annotations.
"""
affects = self.member.affects
dependsOn = self.member.dependsOn
assert affects in IDLInterfaceMember.AffectsValues
assert dependsOn in IDLInterfaceMember.DependsOnValues
# Things that are DependsOn=DeviceState are not movable, because we
# don't want them coalesced with each other or loop-hoisted, since
# their return value can change even if nothing is going on from our
# point of view.
return (affects == "Nothing" and
(dependsOn != "Everything" and dependsOn != "DeviceState"))
def aliasSet(self):
"""Returns the alias set to store in the jitinfo. This may not be the
effective alias set the JIT uses, depending on whether we have enough
information about our args to allow the JIT to prove that effectful
argument conversions won't happen.
"""
dependsOn = self.member.dependsOn
assert dependsOn in IDLInterfaceMember.DependsOnValues
if dependsOn == "Nothing" or dependsOn == "DeviceState":
assert self.member.affects == "Nothing"
return "AliasNone"
if dependsOn == "DOMState":
assert self.member.affects == "Nothing"
return "AliasDOMSets"
return "AliasEverything"
@staticmethod
def getJSReturnTypeTag(t):
if t.nullable():
# Sometimes it might return null, sometimes not
return "JSVAL_TYPE_UNKNOWN"
if t.isVoid():
# No return, every time
return "JSVAL_TYPE_UNDEFINED"
if t.isArray():
# No idea yet
assert False
if t.isSequence():
return "JSVAL_TYPE_OBJECT"
if t.isMozMap():
return "JSVAL_TYPE_OBJECT"
if t.isGeckoInterface():
return "JSVAL_TYPE_OBJECT"
if t.isString():
return "JSVAL_TYPE_STRING"
if t.isEnum():
return "JSVAL_TYPE_STRING"
if t.isCallback():
return "JSVAL_TYPE_OBJECT"
if t.isAny():
# The whole point is to return various stuff
return "JSVAL_TYPE_UNKNOWN"
if t.isObject():
return "JSVAL_TYPE_OBJECT"
if t.isSpiderMonkeyInterface():
return "JSVAL_TYPE_OBJECT"
if t.isUnion():
u = t.unroll()
if u.hasNullableType:
# Might be null or not
return "JSVAL_TYPE_UNKNOWN"
return reduce(CGMemberJITInfo.getSingleReturnType,
u.flatMemberTypes, "")
if t.isDictionary():
return "JSVAL_TYPE_OBJECT"
if t.isDate():
return "JSVAL_TYPE_OBJECT"
if not t.isPrimitive():
raise TypeError("No idea what type " + str(t) + " is.")
tag = t.tag()
if tag == IDLType.Tags.bool:
return "JSVAL_TYPE_BOOLEAN"
if tag in [IDLType.Tags.int8, IDLType.Tags.uint8,
IDLType.Tags.int16, IDLType.Tags.uint16,
IDLType.Tags.int32]:
return "JSVAL_TYPE_INT32"
if tag in [IDLType.Tags.int64, IDLType.Tags.uint64,
IDLType.Tags.unrestricted_float, IDLType.Tags.float,
IDLType.Tags.unrestricted_double, IDLType.Tags.double]:
# These all use JS_NumberValue, which can return int or double.
# But TI treats "double" as meaning "int or double", so we're
# good to return JSVAL_TYPE_DOUBLE here.
return "JSVAL_TYPE_DOUBLE"
if tag != IDLType.Tags.uint32:
raise TypeError("No idea what type " + str(t) + " is.")
# uint32 is sometimes int and sometimes double.
return "JSVAL_TYPE_DOUBLE"
@staticmethod
def getSingleReturnType(existingType, t):
type = CGMemberJITInfo.getJSReturnTypeTag(t)
if existingType == "":
# First element of the list; just return its type
return type
if type == existingType:
return existingType
if ((type == "JSVAL_TYPE_DOUBLE" and
existingType == "JSVAL_TYPE_INT32") or
(existingType == "JSVAL_TYPE_DOUBLE" and
type == "JSVAL_TYPE_INT32")):
# Promote INT32 to DOUBLE as needed
return "JSVAL_TYPE_DOUBLE"
# Different types
return "JSVAL_TYPE_UNKNOWN"
@staticmethod
def getJSArgType(t):
assert not t.isVoid()
if t.nullable():
# Sometimes it might return null, sometimes not
return "ArgType::Null as i32 | %s" % CGMemberJITInfo.getJSArgType(t.inner)
if t.isArray():
# No idea yet
assert False
if t.isSequence():
return "ArgType::Object as i32"
if t.isGeckoInterface():
return "ArgType::Object as i32"
if t.isString():
return "ArgType::String as i32"
if t.isEnum():
return "ArgType::String as i32"
if t.isCallback():
return "ArgType::Object as i32"
if t.isAny():
# The whole point is to return various stuff
return "ArgType::Any as i32"
if t.isObject():
return "ArgType::Object as i32"
if t.isSpiderMonkeyInterface():
return "ArgType::Object as i32"
if t.isUnion():
u = t.unroll()
type = "JSJitInfo::Null as i32" if u.hasNullableType else ""
return reduce(CGMemberJITInfo.getSingleArgType,
u.flatMemberTypes, type)
if t.isDictionary():
return "ArgType::Object as i32"
if t.isDate():
return "ArgType::Object as i32"
if not t.isPrimitive():
raise TypeError("No idea what type " + str(t) + " is.")
tag = t.tag()
if tag == IDLType.Tags.bool:
return "ArgType::Boolean as i32"
if tag in [IDLType.Tags.int8, IDLType.Tags.uint8,
IDLType.Tags.int16, IDLType.Tags.uint16,
IDLType.Tags.int32]:
return "ArgType::Integer as i32"
if tag in [IDLType.Tags.int64, IDLType.Tags.uint64,
IDLType.Tags.unrestricted_float, IDLType.Tags.float,
IDLType.Tags.unrestricted_double, IDLType.Tags.double]:
# These all use JS_NumberValue, which can return int or double.
# But TI treats "double" as meaning "int or double", so we're
# good to return JSVAL_TYPE_DOUBLE here.
return "ArgType::Double as i32"
if tag != IDLType.Tags.uint32:
raise TypeError("No idea what type " + str(t) + " is.")
# uint32 is sometimes int and sometimes double.
return "ArgType::Double as i32"
@staticmethod
def getSingleArgType(existingType, t):
type = CGMemberJITInfo.getJSArgType(t)
if existingType == "":
# First element of the list; just return its type
return type
if type == existingType:
return existingType
return "%s | %s" % (existingType, type)
def getEnumValueName(value):
# Some enum values can be empty strings. Others might have weird
# characters in them. Deal with the former by returning "_empty",
# deal with possible name collisions from that by throwing if the
# enum value is actually "_empty", and throw on any value
# containing non-ASCII chars for now. Replace all chars other than
# [0-9A-Za-z_] with '_'.
if re.match("[^\x20-\x7E]", value):
raise SyntaxError('Enum value "' + value + '" contains non-ASCII characters')
if re.match("^[0-9]", value):
raise SyntaxError('Enum value "' + value + '" starts with a digit')
value = re.sub(r'[^0-9A-Za-z_]', '_', value)
if re.match("^_[A-Z]|__", value):
raise SyntaxError('Enum value "' + value + '" is reserved by the C++ spec')
if value == "_empty":
raise SyntaxError('"_empty" is not an IDL enum value we support yet')
if value == "":
return "_empty"
return MakeNativeName(value)
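# Illustrative mappings (MakeNativeName capitalizes the first character):
#   ""        -> "_empty"
#   "no-cors" -> "No_cors"
#   "2d"      -> SyntaxError (leading digit)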
class CGEnum(CGThing):
def __init__(self, enum):
CGThing.__init__(self)
decl = """\
#[repr(usize)]
#[derive(JSTraceable, PartialEq, Copy, Clone, HeapSizeOf)]
pub enum %s {
%s
}
""" % (enum.identifier.name, ",\n ".join(map(getEnumValueName, enum.values())))
inner = """\
use dom::bindings::conversions::ToJSValConvertible;
use js::jsapi::{JSContext, MutableHandleValue};
use js::jsval::JSVal;
pub const strings: &'static [&'static str] = &[
%s,
];
impl ToJSValConvertible for super::%s {
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
strings[*self as usize].to_jsval(cx, rval);
}
}
""" % (",\n ".join(['"%s"' % val for val in enum.values()]), enum.identifier.name)
self.cgRoot = CGList([
CGGeneric(decl),
CGNamespace.build([enum.identifier.name + "Values"],
CGIndenter(CGGeneric(inner)), public=True),
])
def define(self):
return self.cgRoot.define()
def convertConstIDLValueToRust(value):
tag = value.type.tag()
if tag in [IDLType.Tags.int8, IDLType.Tags.uint8,
IDLType.Tags.int16, IDLType.Tags.uint16,
IDLType.Tags.int32, IDLType.Tags.uint32,
IDLType.Tags.int64, IDLType.Tags.uint64,
IDLType.Tags.unrestricted_float, IDLType.Tags.float,
IDLType.Tags.unrestricted_double, IDLType.Tags.double]:
return str(value.value)
if tag == IDLType.Tags.bool:
return toStringBool(value.value)
raise TypeError("Const value of unhandled type: " + value.type)
class CGConstant(CGThing):
def __init__(self, constants):
CGThing.__init__(self)
self.constants = constants
def define(self):
def stringDecl(const):
name = const.identifier.name
value = convertConstIDLValueToRust(const.value)
return CGGeneric("pub const %s: %s = %s;\n" % (name, builtinNames[const.value.type.tag()], value))
return CGIndenter(CGList(stringDecl(m) for m in self.constants)).define()
def getUnionTypeTemplateVars(type, descriptorProvider):
# For dictionaries and sequences we need to pass None as the failureCode
# for getJSToNativeConversionInfo.
# Also, for dictionaries we would need to handle conversion of
# null/undefined to the dictionary correctly.
if type.isDictionary() or type.isSequence():
raise TypeError("Can't handle dictionaries or sequences in unions")
if type.isGeckoInterface():
name = type.inner.identifier.name
typeName = descriptorProvider.getDescriptor(name).returnType
elif type.isEnum():
name = type.inner.identifier.name
typeName = name
elif type.isArray() or type.isSequence():
name = str(type)
# XXXjdm dunno about typeName here
typeName = "/*" + type.name + "*/"
elif type.isDOMString():
name = type.name
typeName = "DOMString"
elif type.isUSVString():
name = type.name
typeName = "USVString"
elif type.isPrimitive():
name = type.name
typeName = builtinNames[type.tag()]
else:
name = type.name
typeName = "/*" + type.name + "*/"
info = getJSToNativeConversionInfo(
type, descriptorProvider, failureCode="return Ok(None);",
exceptionCode='return Err(());',
isDefinitelyObject=True)
template = info.template
assert not type.isObject()
jsConversion = string.Template(template).substitute({
"val": "value",
})
jsConversion = CGWrapper(CGGeneric(jsConversion), pre="Ok(Some(", post="))")
return {
"name": name,
"typeName": typeName,
"jsConversion": jsConversion,
}
class CGUnionStruct(CGThing):
def __init__(self, type, descriptorProvider):
assert not type.nullable()
assert not type.hasNullableType
CGThing.__init__(self)
self.type = type
self.descriptorProvider = descriptorProvider
def define(self):
templateVars = map(lambda t: getUnionTypeTemplateVars(t, self.descriptorProvider),
self.type.flatMemberTypes)
enumValues = [
" e%s(%s)," % (v["name"], v["typeName"]) for v in templateVars
]
enumConversions = [
" %s::e%s(ref inner) => inner.to_jsval(cx, rval),"
% (self.type, v["name"]) for v in templateVars
]
return ("""\
pub enum %s {
%s
}
impl ToJSValConvertible for %s {
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
match *self {
%s
}
}
}
""") % (self.type, "\n".join(enumValues), self.type, "\n".join(enumConversions))
class CGUnionConversionStruct(CGThing):
def __init__(self, type, descriptorProvider):
assert not type.nullable()
assert not type.hasNullableType
CGThing.__init__(self)
self.type = type
self.descriptorProvider = descriptorProvider
def from_jsval(self):
memberTypes = self.type.flatMemberTypes
names = []
conversions = []
interfaceMemberTypes = filter(lambda t: t.isNonCallbackInterface(), memberTypes)
if len(interfaceMemberTypes) > 0:
def get_name(memberType):
if memberType.isGeckoInterface():
return memberType.inner.identifier.name
return memberType.name
def get_match(name):
return (
"match %s::TryConvertTo%s(cx, value) {\n"
" Err(_) => return Err(()),\n"
" Ok(Some(value)) => return Ok(%s::e%s(value)),\n"
" Ok(None) => (),\n"
"}\n") % (self.type, name, self.type, name)
typeNames = [get_name(memberType) for memberType in interfaceMemberTypes]
interfaceObject = CGList(CGGeneric(get_match(typeName)) for typeName in typeNames)
names.extend(typeNames)
else:
interfaceObject = None
arrayObjectMemberTypes = filter(lambda t: t.isArray() or t.isSequence(), memberTypes)
if len(arrayObjectMemberTypes) > 0:
assert len(arrayObjectMemberTypes) == 1
raise TypeError("Can't handle arrays or sequences in unions.")
else:
arrayObject = None
dateObjectMemberTypes = filter(lambda t: t.isDate(), memberTypes)
if len(dateObjectMemberTypes) > 0:
assert len(dateObjectMemberTypes) == 1
raise TypeError("Can't handle dates in unions.")
else:
dateObject = None
callbackMemberTypes = filter(lambda t: t.isCallback() or t.isCallbackInterface(), memberTypes)
if len(callbackMemberTypes) > 0:
assert len(callbackMemberTypes) == 1
raise TypeError("Can't handle callbacks in unions.")
else:
callbackObject = None
dictionaryMemberTypes = filter(lambda t: t.isDictionary(), memberTypes)
if len(dictionaryMemberTypes) > 0:
raise TypeError("No support for unwrapping dictionaries as member "
"of a union")
else:
dictionaryObject = None
if callbackObject or dictionaryObject:
assert False, "Not currently supported"
else:
nonPlatformObject = None
objectMemberTypes = filter(lambda t: t.isObject(), memberTypes)
if len(objectMemberTypes) > 0:
raise TypeError("Can't handle objects in unions.")
else:
object = None
hasObjectTypes = interfaceObject or arrayObject or dateObject or nonPlatformObject or object
if hasObjectTypes:
assert interfaceObject
templateBody = CGList([interfaceObject], "\n")
conversions.append(CGIfWrapper("value.get().is_object()", templateBody))
otherMemberTypes = [
t for t in memberTypes if t.isPrimitive() or t.isString() or t.isEnum()
]
if len(otherMemberTypes) > 0:
assert len(otherMemberTypes) == 1
memberType = otherMemberTypes[0]
if memberType.isEnum():
name = memberType.inner.identifier.name
else:
name = memberType.name
match = (
"match %s::TryConvertTo%s(cx, value) {\n"
" Err(_) => return Err(()),\n"
" Ok(Some(value)) => return Ok(%s::e%s(value)),\n"
" Ok(None) => (),\n"
"}\n") % (self.type, name, self.type, name)
conversions.append(CGGeneric(match))
names.append(name)
conversions.append(CGGeneric(
"throw_not_in_union(cx, \"%s\");\n"
"Err(())" % ", ".join(names)))
method = CGWrapper(
CGIndenter(CGList(conversions, "\n\n")),
pre="unsafe fn from_jsval(cx: *mut JSContext,\n"
" value: HandleValue, _option: ()) -> Result<%s, ()> {\n" % self.type,
post="\n}")
return CGWrapper(
CGIndenter(CGList([
CGGeneric("type Config = ();"),
method,
], "\n")),
pre="impl FromJSValConvertible for %s {\n" % self.type,
post="\n}")
def try_method(self, t):
templateVars = getUnionTypeTemplateVars(t, self.descriptorProvider)
returnType = "Result<Option<%s>, ()>" % templateVars["typeName"]
jsConversion = templateVars["jsConversion"]
return CGWrapper(
CGIndenter(jsConversion, 4),
pre="unsafe fn TryConvertTo%s(cx: *mut JSContext, value: HandleValue) -> %s {\n" % (t.name, returnType),
post="\n}")
def define(self):
from_jsval = self.from_jsval()
methods = CGIndenter(CGList([
self.try_method(t) for t in self.type.flatMemberTypes
], "\n\n"))
return """
%s
impl %s {
%s
}
""" % (from_jsval.define(), self.type, methods.define())
class ClassItem:
""" Use with CGClass """
def __init__(self, name, visibility):
self.name = name
self.visibility = visibility
def declare(self, cgClass):
assert False
def define(self, cgClass):
assert False
class ClassBase(ClassItem):
def __init__(self, name, visibility='pub'):
ClassItem.__init__(self, name, visibility)
def declare(self, cgClass):
return '%s %s' % (self.visibility, self.name)
def define(self, cgClass):
# Only in the header
return ''
class ClassMethod(ClassItem):
def __init__(self, name, returnType, args, inline=False, static=False,
virtual=False, const=False, bodyInHeader=False,
templateArgs=None, visibility='public', body=None,
breakAfterReturnDecl="\n",
breakAfterSelf="\n", override=False):
"""
override indicates whether to flag the method as MOZ_OVERRIDE
"""
assert not override or virtual
assert not (override and static)
self.returnType = returnType
self.args = args
self.inline = False  # note: the 'inline' argument is ignored
self.static = static
self.virtual = virtual
self.const = const
self.bodyInHeader = True  # bodies are always emitted inline; the argument is ignored
self.templateArgs = templateArgs
self.body = body
self.breakAfterReturnDecl = breakAfterReturnDecl
self.breakAfterSelf = breakAfterSelf
self.override = override
ClassItem.__init__(self, name, visibility)
def getDecorators(self, declaring):
decorators = []
if self.inline:
decorators.append('inline')
if declaring:
if self.static:
decorators.append('static')
if self.virtual:
decorators.append('virtual')
if decorators:
return ' '.join(decorators) + ' '
return ''
def getBody(self):
# Override me or pass a string to constructor
assert self.body is not None
return self.body
def declare(self, cgClass):
templateClause = '<%s>' % ', '.join(self.templateArgs) \
if self.bodyInHeader and self.templateArgs else ''
args = ', '.join([a.declare() for a in self.args])
if self.bodyInHeader:
body = CGIndenter(CGGeneric(self.getBody())).define()
body = ' {\n' + body + '\n}'
else:
body = ';'
return string.Template(
"${decorators}%s"
"${visibility}fn ${name}${templateClause}(${args})${returnType}${const}${override}${body}%s" %
(self.breakAfterReturnDecl, self.breakAfterSelf)
).substitute({
'templateClause': templateClause,
'decorators': self.getDecorators(True),
'returnType': (" -> %s" % self.returnType) if self.returnType else "",
'name': self.name,
'const': ' const' if self.const else '',
'override': ' MOZ_OVERRIDE' if self.override else '',
'args': args,
'body': body,
'visibility': self.visibility + ' ' if self.visibility != 'priv' else ''
})
def define(self, cgClass):
pass
class ClassConstructor(ClassItem):
"""
Used for adding a constructor to a CGClass.
args is a list of Argument objects that are the arguments taken by the
constructor.
inline should be True if the constructor should be marked inline.
bodyInHeader should be True if the body should be placed in the class
declaration in the header.
visibility determines the visibility of the constructor (public,
protected, private), defaults to private.
explicit should be True if the constructor should be marked explicit.
baseConstructors is a list of strings containing calls to base constructors,
defaults to None.
body contains a string with the code for the constructor, defaults to empty.
"""
def __init__(self, args, inline=False, bodyInHeader=False,
visibility="priv", explicit=False, baseConstructors=None,
body=""):
self.args = args
self.inline = False  # note: the 'inline' argument is ignored
self.bodyInHeader = bodyInHeader
self.explicit = explicit
self.baseConstructors = baseConstructors or []
self.body = body
ClassItem.__init__(self, None, visibility)
def getDecorators(self, declaring):
decorators = []
if self.explicit:
decorators.append('explicit')
if self.inline and declaring:
decorators.append('inline')
if decorators:
return ' '.join(decorators) + ' '
return ''
def getInitializationList(self, cgClass):
items = [str(c) for c in self.baseConstructors]
for m in cgClass.members:
if not m.static:
initialize = m.body
if initialize:
items.append(m.name + "(" + initialize + ")")
if len(items) > 0:
return '\n : ' + ',\n '.join(items)
return ''
def getBody(self, cgClass):
initializers = [" parent: %s" % str(self.baseConstructors[0])]
return (self.body + (
"let mut ret = Rc::new(%s {\n"
"%s\n"
"});\n"
"// Note: callback cannot be moved after calling init.\n"
"match Rc::get_mut(&mut ret) {\n"
" Some(ref mut callback) => callback.parent.init(%s),\n"
" None => unreachable!(),\n"
"};\n"
"ret") % (cgClass.name, '\n'.join(initializers), self.args[0].name))
def declare(self, cgClass):
args = ', '.join([a.declare() for a in self.args])
body = ' ' + self.getBody(cgClass)
body = stripTrailingWhitespace(body.replace('\n', '\n '))
if len(body) > 0:
body += '\n'
body = ' {\n' + body + '}'
return string.Template("""\
pub fn ${decorators}new(${args}) -> Rc<${className}>${body}
""").substitute({'decorators': self.getDecorators(True),
'className': cgClass.getNameString(),
'args': args,
'body': body})
def define(self, cgClass):
if self.bodyInHeader:
return ''
args = ', '.join([a.define() for a in self.args])
body = ' ' + self.getBody(cgClass)
body = '\n' + stripTrailingWhitespace(body.replace('\n', '\n '))
if len(body) > 0:
body += '\n'
return string.Template("""\
${decorators}
${className}::${className}(${args})${initializationList}
{${body}}
""").substitute({'decorators': self.getDecorators(False),
'className': cgClass.getNameString(),
'args': args,
'initializationList': self.getInitializationList(cgClass),
'body': body})
class ClassMember(ClassItem):
def __init__(self, name, type, visibility="priv", static=False,
body=None):
self.type = type
self.static = static
self.body = body
ClassItem.__init__(self, name, visibility)
def declare(self, cgClass):
return '%s %s: %s,\n' % (self.visibility, self.name, self.type)
def define(self, cgClass):
if not self.static:
return ''
if self.body:
body = " = " + self.body
else:
body = ""
return '%s %s::%s%s;\n' % (self.type, cgClass.getNameString(),
self.name, body)
class CGClass(CGThing):
def __init__(self, name, bases=[], members=[], constructors=[],
destructor=None, methods=[],
typedefs=[], enums=[], unions=[], templateArgs=[],
templateSpecialization=[],
disallowCopyConstruction=False, indent='',
decorators='',
extradeclarations=''):
CGThing.__init__(self)
self.name = name
self.bases = bases
self.members = members
self.constructors = constructors
# We store our single destructor in a list, since all of our
# code wants lists of members.
self.destructors = [destructor] if destructor else []
self.methods = methods
self.typedefs = typedefs
self.enums = enums
self.unions = unions
self.templateArgs = templateArgs
self.templateSpecialization = templateSpecialization
self.disallowCopyConstruction = disallowCopyConstruction
self.indent = indent
self.decorators = decorators
self.extradeclarations = extradeclarations
def getNameString(self):
className = self.name
if self.templateSpecialization:
className = className + \
'<%s>' % ', '.join([str(a) for a
in self.templateSpecialization])
return className
def define(self):
result = ''
if self.templateArgs:
templateArgs = [a.declare() for a in self.templateArgs]
templateArgs = templateArgs[len(self.templateSpecialization):]
result = result + self.indent + 'template <%s>\n' % ','.join([str(a) for a in templateArgs])
if self.templateSpecialization:
specialization = \
'<%s>' % ', '.join([str(a) for a in self.templateSpecialization])
else:
specialization = ''
myself = ''
if self.decorators != '':
myself += self.decorators + '\n'
myself += '%spub struct %s%s' % (self.indent, self.name, specialization)
result += myself
assert len(self.bases) == 1  # XXXjdm Can we support multiple inheritance?
result += ' {\n'
if self.bases:
self.members = [ClassMember("parent", self.bases[0].name, "pub")] + self.members
result += CGIndenter(CGGeneric(self.extradeclarations),
len(self.indent)).define()
def declareMembers(cgClass, memberList):
result = ''
for member in memberList:
declaration = member.declare(cgClass)
declaration = CGIndenter(CGGeneric(declaration)).define()
result = result + declaration
return result
if self.disallowCopyConstruction:
class DisallowedCopyConstructor(object):
def __init__(self):
self.visibility = "private"
def declare(self, cgClass):
name = cgClass.getNameString()
return ("%s(const %s&) MOZ_DELETE;\n"
"void operator=(const %s) MOZ_DELETE;\n" % (name, name, name))
disallowedCopyConstructors = [DisallowedCopyConstructor()]
else:
disallowedCopyConstructors = []
order = [(self.enums, ''), (self.unions, ''),
(self.typedefs, ''), (self.members, '')]
for (memberList, separator) in order:
memberString = declareMembers(self, memberList)
if self.indent:
memberString = CGIndenter(CGGeneric(memberString),
len(self.indent)).define()
result = result + memberString
result += self.indent + '}\n\n'
result += 'impl %s {\n' % self.name
order = [(self.constructors + disallowedCopyConstructors, '\n'),
(self.destructors, '\n'), (self.methods, '\n')]
for (memberList, separator) in order:
memberString = declareMembers(self, memberList)
if self.indent:
memberString = CGIndenter(CGGeneric(memberString),
len(self.indent)).define()
result = result + memberString
result += "}"
return result
class CGProxySpecialOperation(CGPerSignatureCall):
"""
Base class for classes for calling an indexed or named special operation
(don't use this directly, use the derived classes below).
"""
def __init__(self, descriptor, operation):
nativeName = MakeNativeName(descriptor.binaryNameFor(operation))
operation = descriptor.operations[operation]
assert len(operation.signatures()) == 1
signature = operation.signatures()[0]
(returnType, arguments) = signature
# We pass len(arguments) as the final argument so that the
# CGPerSignatureCall won't do any argument conversion of its own.
CGPerSignatureCall.__init__(self, returnType, "", arguments, nativeName,
False, descriptor, operation,
len(arguments))
if operation.isSetter() or operation.isCreator():
# arguments[0] is the index or name of the item that we're setting.
argument = arguments[1]
info = getJSToNativeConversionInfo(
argument.type, descriptor, treatNullAs=argument.treatNullAs,
exceptionCode="return false;")
template = info.template
declType = info.declType
templateValues = {
"val": "value.handle()",
}
self.cgRoot.prepend(instantiateJSToNativeConversionTemplate(
template, templateValues, declType, argument.identifier.name))
self.cgRoot.prepend(CGGeneric("let value = RootedValue::new(cx, desc.get().value);"))
elif operation.isGetter():
self.cgRoot.prepend(CGGeneric("let mut found = false;"))
def getArguments(self):
def process(arg):
argVal = arg.identifier.name
if arg.type.isGeckoInterface() and not arg.type.unroll().inner.isCallback():
argVal += ".r()"
return argVal
args = [(a, process(a)) for a in self.arguments]
if self.idlNode.isGetter():
args.append((FakeArgument(BuiltinTypes[IDLBuiltinType.Types.boolean],
self.idlNode),
"&mut found"))
return args
def wrap_return_value(self):
if not self.idlNode.isGetter() or self.templateValues is None:
return ""
wrap = CGGeneric(wrapForType(**self.templateValues))
wrap = CGIfWrapper("found", wrap)
return "\n" + wrap.define()
class CGProxyIndexedGetter(CGProxySpecialOperation):
"""
Class to generate a call to an indexed getter. If templateValues is not None
the returned value will be wrapped with wrapForType using templateValues.
"""
def __init__(self, descriptor, templateValues=None):
self.templateValues = templateValues
CGProxySpecialOperation.__init__(self, descriptor, 'IndexedGetter')
class CGProxyIndexedSetter(CGProxySpecialOperation):
"""
Class to generate a call to an indexed setter.
"""
def __init__(self, descriptor):
CGProxySpecialOperation.__init__(self, descriptor, 'IndexedSetter')
class CGProxyNamedOperation(CGProxySpecialOperation):
"""
Class to generate a call to a named operation.
"""
def __init__(self, descriptor, name):
CGProxySpecialOperation.__init__(self, descriptor, name)
def define(self):
# Our first argument is the id we're getting.
argName = self.arguments[0].identifier.name
return ("let %s = jsid_to_str(cx, id);\n"
"let this = UnwrapProxy(proxy);\n"
"let this = &*this;\n" % argName +
CGProxySpecialOperation.define(self))
class CGProxyNamedGetter(CGProxyNamedOperation):
"""
Class to generate a call to a named getter. If templateValues is not None
the returned value will be wrapped with wrapForType using templateValues.
"""
def __init__(self, descriptor, templateValues=None):
self.templateValues = templateValues
CGProxySpecialOperation.__init__(self, descriptor, 'NamedGetter')
class CGProxyNamedPresenceChecker(CGProxyNamedGetter):
"""
Class to generate a call that checks whether a named property exists.
For now, we just delegate to CGProxyNamedGetter.
"""
def __init__(self, descriptor):
CGProxyNamedGetter.__init__(self, descriptor)
class CGProxyNamedSetter(CGProxyNamedOperation):
"""
Class to generate a call to a named setter.
"""
def __init__(self, descriptor):
CGProxySpecialOperation.__init__(self, descriptor, 'NamedSetter')
class CGProxyNamedDeleter(CGProxyNamedOperation):
"""
Class to generate a call to a named deleter.
"""
def __init__(self, descriptor):
CGProxySpecialOperation.__init__(self, descriptor, 'NamedDeleter')
class CGProxyUnwrap(CGAbstractMethod):
def __init__(self, descriptor):
args = [Argument('HandleObject', 'obj')]
CGAbstractMethod.__init__(self, descriptor, "UnwrapProxy",
'*const ' + descriptor.concreteType, args,
alwaysInline=True, unsafe=True)
def definition_body(self):
return CGGeneric("""\
/*if (xpc::WrapperFactory::IsXrayWrapper(obj)) {
obj = js::UnwrapObject(obj);
}*/
//MOZ_ASSERT(IsProxy(obj));
let box_ = GetProxyPrivate(*obj.ptr).to_private() as *const %s;
return box_;""" % self.descriptor.concreteType)
class CGDOMJSProxyHandler_getOwnPropertyDescriptor(CGAbstractExternMethod):
def __init__(self, descriptor):
args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', 'proxy'),
Argument('HandleId', 'id'),
Argument('MutableHandle<JSPropertyDescriptor>', 'desc')]
CGAbstractExternMethod.__init__(self, descriptor, "getOwnPropertyDescriptor",
"bool", args)
self.descriptor = descriptor
def getBody(self):
indexedGetter = self.descriptor.operations['IndexedGetter']
indexedSetter = self.descriptor.operations['IndexedSetter']
get = ""
if indexedGetter or indexedSetter:
get = "let index = get_array_index_from_id(cx, id);\n"
if indexedGetter:
readonly = toStringBool(self.descriptor.operations['IndexedSetter'] is None)
fillDescriptor = ("desc.get().value = result_root.ptr;\n"
"fill_property_descriptor(&mut *desc.ptr, *proxy.ptr, %s);\n"
"return true;" % readonly)
templateValues = {
'jsvalRef': 'result_root.handle_mut()',
'successCode': fillDescriptor,
'pre': 'let mut result_root = RootedValue::new(cx, UndefinedValue());'
}
get += ("if let Some(index) = index {\n" +
" let this = UnwrapProxy(proxy);\n" +
" let this = &*this;\n" +
CGIndenter(CGProxyIndexedGetter(self.descriptor, templateValues)).define() + "\n" +
"}\n")
namedGetter = self.descriptor.operations['NamedGetter']
if namedGetter:
readonly = toStringBool(self.descriptor.operations['NamedSetter'] is None)
fillDescriptor = ("desc.get().value = result_root.ptr;\n"
"fill_property_descriptor(&mut *desc.ptr, *proxy.ptr, %s);\n"
"return true;" % readonly)
templateValues = {
'jsvalRef': 'result_root.handle_mut()',
'successCode': fillDescriptor,
'pre': 'let mut result_root = RootedValue::new(cx, UndefinedValue());'
}
# Once we start supporting OverrideBuiltins we need to make
# ResolveOwnProperty or EnumerateOwnProperties filter out named
# properties that shadow prototype properties.
namedGet = ("\n" +
"if RUST_JSID_IS_STRING(id) && !has_property_on_prototype(cx, proxy, id) {\n" +
CGIndenter(CGProxyNamedGetter(self.descriptor, templateValues)).define() + "\n" +
"}\n")
else:
namedGet = ""
return get + """\
let expando = RootedObject::new(cx, get_expando_object(proxy));
//if (!xpc::WrapperFactory::IsXrayWrapper(proxy) && (expando = GetExpandoObject(proxy))) {
if !expando.ptr.is_null() {
if !JS_GetPropertyDescriptorById(cx, expando.handle(), id, desc) {
return false;
}
if !desc.get().obj.is_null() {
// Pretend the property lives on the wrapper.
desc.get().obj = *proxy.ptr;
return true;
}
}
""" + namedGet + """\
desc.get().obj = ptr::null_mut();
return true;"""
def definition_body(self):
return CGGeneric(self.getBody())
# TODO(Issue 5876)
class CGDOMJSProxyHandler_defineProperty(CGAbstractExternMethod):
def __init__(self, descriptor):
args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', 'proxy'),
Argument('HandleId', 'id'),
Argument('Handle<JSPropertyDescriptor>', 'desc'),
Argument('*mut ObjectOpResult', 'opresult')]
CGAbstractExternMethod.__init__(self, descriptor, "defineProperty", "bool", args)
self.descriptor = descriptor
def getBody(self):
set = ""
indexedSetter = self.descriptor.operations['IndexedSetter']
if indexedSetter:
set += ("let index = get_array_index_from_id(cx, id);\n" +
"if let Some(index) = index {\n" +
" let this = UnwrapProxy(proxy);\n" +
" let this = &*this;\n" +
CGIndenter(CGProxyIndexedSetter(self.descriptor)).define() +
" return true;\n" +
"}\n")
elif self.descriptor.operations['IndexedGetter']:
set += ("if get_array_index_from_id(cx, id).is_some() {\n" +
" return false;\n" +
" //return ThrowErrorMessage(cx, MSG_NO_PROPERTY_SETTER, \"%s\");\n" +
"}\n") % self.descriptor.name
namedSetter = self.descriptor.operations['NamedSetter']
if namedSetter:
if self.descriptor.hasUnforgeableMembers:
raise TypeError("Can't handle a named setter on an interface that has "
"unforgeables. Figure out how that should work!")
set += ("if RUST_JSID_IS_STRING(id) {\n" +
CGIndenter(CGProxyNamedSetter(self.descriptor)).define() +
" (*opresult).code_ = 0; /* SpecialCodes::OkCode */\n" +
" return true;\n" +
"} else {\n" +
" return false;\n" +
"}\n")
else:
set += ("if RUST_JSID_IS_STRING(id) {\n" +
CGIndenter(CGProxyNamedGetter(self.descriptor)).define() +
" if (found) {\n"
# TODO(Issue 5876)
" //return js::IsInNonStrictPropertySet(cx)\n" +
" // ? opresult.succeed()\n" +
" // : ThrowErrorMessage(cx, MSG_NO_NAMED_SETTER, \"${name}\");\n" +
" (*opresult).code_ = 0; /* SpecialCodes::OkCode */\n" +
" return true;\n" +
" }\n" +
" (*opresult).code_ = 0; /* SpecialCodes::OkCode */\n" +
" return true;\n"
"}\n") % (self.descriptor.name, self.descriptor.name)
set += "return proxyhandler::define_property(%s);" % ", ".join(a.name for a in self.args)
return set
def definition_body(self):
return CGGeneric(self.getBody())
class CGDOMJSProxyHandler_delete(CGAbstractExternMethod):
def __init__(self, descriptor):
args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', 'proxy'),
Argument('HandleId', 'id'),
Argument('*mut ObjectOpResult', 'res')]
CGAbstractExternMethod.__init__(self, descriptor, "delete", "bool", args)
self.descriptor = descriptor
def getBody(self):
set = ""
if self.descriptor.operations['NamedDeleter']:
if self.descriptor.hasUnforgeableMembers:
raise TypeError("Can't handle a deleter on an interface that has "
"unforgeables. Figure out how that should work!")
set += CGProxyNamedDeleter(self.descriptor).define()
set += "return proxyhandler::delete(%s);" % ", ".join(a.name for a in self.args)
return set
def definition_body(self):
return CGGeneric(self.getBody())
class CGDOMJSProxyHandler_ownPropertyKeys(CGAbstractExternMethod):
def __init__(self, descriptor):
args = [Argument('*mut JSContext', 'cx'),
Argument('HandleObject', 'proxy'),
Argument('*mut AutoIdVector', 'props')]
CGAbstractExternMethod.__init__(self, descriptor, "own_property_keys", "bool", args)
self.descriptor = descriptor
def getBody(self):
body = dedent(
"""
let unwrapped_proxy = UnwrapProxy(proxy);
""")
if self.descriptor.operations['IndexedGetter']:
body += dedent(
"""
for i in 0..(*unwrapped_proxy).Length() {
let rooted_jsid = RootedId::new(cx, int_to_jsid(i as i32));
AppendToAutoIdVector(props, rooted_jsid.handle().get());
}
""")
if self.descriptor.operations['NamedGetter']:
body += dedent(
"""
for name in (*unwrapped_proxy).SupportedPropertyNames() {
let cstring = CString::new(name).unwrap();
let jsstring = JS_InternString(cx, cstring.as_ptr());
let rooted = RootedString::new(cx, jsstring);
let jsid = INTERNED_STRING_TO_JSID(cx, rooted.handle().get());
let rooted_jsid = RootedId::new(cx, jsid);
AppendToAutoIdVector(props, rooted_jsid.handle().get());
}
""")
body += dedent(
"""
let expando = get_expando_object(proxy);
if !expando.is_null() {
let rooted_expando = RootedObject::new(cx, expando);
GetPropertyKeys(cx, rooted_expando.handle(), JSITER_OWNONLY | JSITER_HIDDEN | JSITER_SYMBOLS, props);
}
return true;
""")
return body
def definition_body(self):
return CGGeneric(self.getBody())
class CGDOMJSProxyHandler_hasOwn(CGAbstractExternMethod):
def __init__(self, descriptor):
args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', 'proxy'),
Argument('HandleId', 'id'), Argument('*mut bool', 'bp')]
CGAbstractExternMethod.__init__(self, descriptor, "hasOwn", "bool", args)
self.descriptor = descriptor
def getBody(self):
indexedGetter = self.descriptor.operations['IndexedGetter']
if indexedGetter:
indexed = ("let index = get_array_index_from_id(cx, id);\n" +
"if let Some(index) = index {\n" +
" let this = UnwrapProxy(proxy);\n" +
" let this = &*this;\n" +
CGIndenter(CGProxyIndexedGetter(self.descriptor)).define() + "\n" +
" *bp = found;\n" +
" return true;\n" +
"}\n\n")
else:
indexed = ""
namedGetter = self.descriptor.operations['NamedGetter']
if namedGetter:
named = ("if RUST_JSID_IS_STRING(id) && !has_property_on_prototype(cx, proxy, id) {\n" +
CGIndenter(CGProxyNamedGetter(self.descriptor)).define() + "\n" +
" *bp = found;\n"
" return true;\n"
"}\n" +
"\n")
else:
named = ""
return indexed + """\
let expando = RootedObject::new(cx, get_expando_object(proxy));
if !expando.ptr.is_null() {
let mut b = true;
let ok = JS_HasPropertyById(cx, expando.handle(), id, &mut b);
*bp = b;
if !ok || *bp {
return ok;
}
}
""" + named + """\
*bp = false;
return true;"""
def definition_body(self):
return CGGeneric(self.getBody())
class CGDOMJSProxyHandler_get(CGAbstractExternMethod):
def __init__(self, descriptor):
args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', 'proxy'),
Argument('HandleObject', 'receiver'), Argument('HandleId', 'id'),
Argument('MutableHandleValue', 'vp')]
CGAbstractExternMethod.__init__(self, descriptor, "get", "bool", args)
self.descriptor = descriptor
def getBody(self):
getFromExpando = """\
let expando = RootedObject::new(cx, get_expando_object(proxy));
if !expando.ptr.is_null() {
let mut hasProp = false;
if !JS_HasPropertyById(cx, expando.handle(), id, &mut hasProp) {
return false;
}
if hasProp {
return JS_ForwardGetPropertyTo(cx, expando.handle(), id, receiver, vp);
}
}"""
templateValues = {
'jsvalRef': 'vp',
'successCode': 'return true;',
}
indexedGetter = self.descriptor.operations['IndexedGetter']
if indexedGetter:
getIndexedOrExpando = ("let index = get_array_index_from_id(cx, id);\n" +
"if let Some(index) = index {\n" +
" let this = UnwrapProxy(proxy);\n" +
" let this = &*this;\n" +
CGIndenter(CGProxyIndexedGetter(self.descriptor, templateValues)).define())
getIndexedOrExpando += """\
// Even if we don't have this index, we don't forward the
// get on to our expando object.
} else {
%s
}
""" % (stripTrailingWhitespace(getFromExpando.replace('\n', '\n ')))
else:
getIndexedOrExpando = getFromExpando + "\n"
namedGetter = self.descriptor.operations['NamedGetter']
if namedGetter:
getNamed = ("if RUST_JSID_IS_STRING(id) {\n" +
CGIndenter(CGProxyNamedGetter(self.descriptor, templateValues)).define() +
"}\n")
else:
getNamed = ""
return """\
//MOZ_ASSERT(!xpc::WrapperFactory::IsXrayWrapper(proxy),
//"Should not have a XrayWrapper here");
%s
let mut found = false;
if !get_property_on_prototype(cx, proxy, id, &mut found, vp) {
return false;
}
if found {
return true;
}
%s
*vp.ptr = UndefinedValue();
return true;""" % (getIndexedOrExpando, getNamed)
def definition_body(self):
return CGGeneric(self.getBody())
class CGDOMJSProxyHandler_className(CGAbstractExternMethod):
def __init__(self, descriptor):
args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', '_proxy')]
CGAbstractExternMethod.__init__(self, descriptor, "className", "*const i8", args)
self.descriptor = descriptor
def getBody(self):
return '%s as *const u8 as *const i8' % str_to_const_array(self.descriptor.name)
def definition_body(self):
return CGGeneric(self.getBody())
class CGAbstractClassHook(CGAbstractExternMethod):
"""
Meant for implementing JSClass hooks, like Finalize or Trace. Does very raw
'this' unwrapping as it assumes that the unwrapped type is always known.
"""
def __init__(self, descriptor, name, returnType, args):
CGAbstractExternMethod.__init__(self, descriptor, name, returnType,
args)
def definition_body_prologue(self):
return CGGeneric("""
let this = native_from_object::<%s>(obj).unwrap();
""" % self.descriptor.concreteType)
def definition_body(self):
return CGList([
self.definition_body_prologue(),
self.generate_code(),
])
def generate_code(self):
raise NotImplementedError # Override me!
def finalizeHook(descriptor, hookName, context):
release = ""
if descriptor.isGlobal():
release += """\
finalize_global(obj);
"""
elif descriptor.weakReferenceable:
release += """\
let weak_box_ptr = JS_GetReservedSlot(obj, DOM_WEAK_SLOT).to_private() as *mut WeakBox<%s>;
if !weak_box_ptr.is_null() {
let count = {
let weak_box = &*weak_box_ptr;
assert!(weak_box.value.get().is_some());
assert!(weak_box.count.get() > 0);
weak_box.value.set(None);
let count = weak_box.count.get() - 1;
weak_box.count.set(count);
count
};
if count == 0 {
mem::drop(Box::from_raw(weak_box_ptr));
}
}
""" % descriptor.concreteType
release += """\
if !this.is_null() {
// The pointer can be null if the object is the unforgeable holder of that interface.
let _ = Box::from_raw(this as *mut %s);
}
debug!("%s finalize: {:p}", this);\
""" % (descriptor.concreteType, descriptor.concreteType)
return release
class CGClassTraceHook(CGAbstractClassHook):
"""
A hook to trace through our native object; used for GC and CC
"""
def __init__(self, descriptor):
args = [Argument('*mut JSTracer', 'trc'), Argument('*mut JSObject', 'obj')]
CGAbstractClassHook.__init__(self, descriptor, TRACE_HOOK_NAME, 'void',
args)
self.traceGlobal = descriptor.isGlobal()
def generate_code(self):
body = [CGGeneric("if this.is_null() { return; } // GC during obj creation\n"
"(*this).trace(%s);" % self.args[0].name)]
if self.traceGlobal:
body += [CGGeneric("trace_global(trc, obj);")]
return CGList(body, "\n")
class CGClassConstructHook(CGAbstractExternMethod):
"""
JS-visible constructor for our objects
"""
def __init__(self, descriptor, constructor=None):
args = [Argument('*mut JSContext', 'cx'), Argument('u32', 'argc'), Argument('*mut JSVal', 'vp')]
name = CONSTRUCT_HOOK_NAME
if constructor:
name += "_" + constructor.identifier.name
else:
constructor = descriptor.interface.ctor()
assert constructor
CGAbstractExternMethod.__init__(self, descriptor, name, 'bool', args)
self.constructor = constructor
def definition_body(self):
preamble = CGGeneric("""\
let global = global_root_from_object(JS_CALLEE(cx, vp).to_object());
let args = CallArgs::from_vp(vp, argc);
""")
name = self.constructor.identifier.name
nativeName = MakeNativeName(self.descriptor.binaryNameFor(name))
callGenerator = CGMethodCall(["global.r()"], nativeName, True,
self.descriptor, self.constructor)
return CGList([preamble, callGenerator])
class CGClassHasInstanceHook(CGAbstractExternMethod):
def __init__(self, descriptor):
args = [Argument('*mut JSContext', 'cx'),
Argument('HandleObject', 'obj'),
Argument('MutableHandleValue', 'value'),
Argument('*mut bool', 'rval')]
assert descriptor.interface.hasInterfaceObject() and not descriptor.interface.isCallback()
CGAbstractExternMethod.__init__(self, descriptor, HASINSTANCE_HOOK_NAME,
'bool', args)
def definition_body(self):
id = "PrototypeList::ID::%s" % self.descriptor.interface.identifier.name
return CGGeneric("""\
match has_instance(cx, obj, value.handle(), %(id)s, %(index)s) {
Ok(result) => {
*rval = result;
true
}
Err(()) => false,
}
""" % {"id": id, "index": self.descriptor.prototypeDepth})
class CGClassFunToStringHook(CGAbstractExternMethod):
"""
A hook to convert functions to strings.
"""
def __init__(self, descriptor):
args = [Argument('*mut JSContext', 'cx'), Argument('HandleObject', '_obj'),
Argument('u32', '_indent')]
CGAbstractExternMethod.__init__(self, descriptor, "fun_to_string", '*mut JSString', args)
def definition_body(self):
name = self.descriptor.interface.identifier.name
string = str_to_const_array("function %s() {\\n [native code]\\n}" % name)
return CGGeneric("JS_NewStringCopyZ(cx, %s as *const _ as *const libc::c_char)" % string)
class CGClassFinalizeHook(CGAbstractClassHook):
"""
A hook for finalize, used to release our native object.
"""
def __init__(self, descriptor):
args = [Argument('*mut FreeOp', '_fop'), Argument('*mut JSObject', 'obj')]
CGAbstractClassHook.__init__(self, descriptor, FINALIZE_HOOK_NAME,
'void', args)
def generate_code(self):
return CGGeneric(finalizeHook(self.descriptor, self.name, self.args[0].name))
class CGDOMJSProxyHandlerDOMClass(CGThing):
def __init__(self, descriptor):
CGThing.__init__(self)
self.descriptor = descriptor
def define(self):
return "static Class: DOMClass = " + DOMClass(self.descriptor) + ";\n"
class CGInterfaceTrait(CGThing):
def __init__(self, descriptor):
CGThing.__init__(self)
def attribute_arguments(needCx, argument=None):
if needCx:
yield "cx", "*mut JSContext"
if argument:
yield "value", argument_type(descriptor, argument)
def members():
for m in descriptor.interface.members:
if (m.isMethod() and not m.isStatic() and
(not m.isIdentifierLess() or m.isStringifier())):
name = CGSpecializedMethod.makeNativeName(descriptor, m)
infallible = 'infallible' in descriptor.getExtendedAttributes(m)
for idx, (rettype, arguments) in enumerate(m.signatures()):
arguments = method_arguments(descriptor, rettype, arguments)
rettype = return_type(descriptor, rettype, infallible)
yield name + ('_' * idx), arguments, rettype
elif m.isAttr() and not m.isStatic():
name = CGSpecializedGetter.makeNativeName(descriptor, m)
infallible = 'infallible' in descriptor.getExtendedAttributes(m, getter=True)
yield (name,
attribute_arguments(typeNeedsCx(m.type, True)),
return_type(descriptor, m.type, infallible))
if not m.readonly:
name = CGSpecializedSetter.makeNativeName(descriptor, m)
infallible = 'infallible' in descriptor.getExtendedAttributes(m, setter=True)
if infallible:
rettype = "()"
else:
rettype = "ErrorResult"
yield name, attribute_arguments(typeNeedsCx(m.type, False), m.type), rettype
if descriptor.proxy:
for name, operation in descriptor.operations.iteritems():
if not operation or operation.isStringifier():
continue
assert len(operation.signatures()) == 1
rettype, arguments = operation.signatures()[0]
infallible = 'infallible' in descriptor.getExtendedAttributes(operation)
if operation.isGetter():
arguments = method_arguments(descriptor, rettype, arguments, trailing=("found", "&mut bool"))
# If this interface 'supports named properties', then we
# should be able to access 'supported property names'
#
# WebIDL, Second Draft, section 3.2.4.5
# https://heycam.github.io/webidl/#idl-named-properties
if operation.isNamed():
yield "SupportedPropertyNames", [], "Vec<DOMString>"
else:
arguments = method_arguments(descriptor, rettype, arguments)
rettype = return_type(descriptor, rettype, infallible)
yield name, arguments, rettype
def fmt(arguments):
return "".join(", %s: %s" % argument for argument in arguments)
methods = [
CGGeneric("fn %s(&self%s) -> %s;\n" % (name, fmt(arguments), rettype))
for name, arguments, rettype in members()
]
if methods:
self.cgRoot = CGWrapper(CGIndenter(CGList(methods, "")),
pre="pub trait %sMethods {\n" % descriptor.interface.identifier.name,
post="}")
else:
self.cgRoot = CGGeneric("")
def define(self):
return self.cgRoot.define()
class CGWeakReferenceableTrait(CGThing):
def __init__(self, descriptor):
CGThing.__init__(self)
assert descriptor.weakReferenceable
self.code = "impl WeakReferenceable for %s {}" % descriptor.interface.identifier.name
def define(self):
return self.code
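# Aggregates everything generated for one interface: method/getter/setter
# wrappers and their JIT info, class hooks, property arrays, the
# CreateInterfaceObjects method, proxy handler pieces, and the Methods trait.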
class CGDescriptor(CGThing):
def __init__(self, descriptor):
CGThing.__init__(self)
assert not descriptor.concrete or not descriptor.interface.isCallback()
cgThings = []
if not descriptor.interface.isCallback():
cgThings.append(CGGetProtoObjectMethod(descriptor))
if descriptor.interface.hasInterfaceObject() and descriptor.hasDescendants():
cgThings.append(CGGetConstructorObjectMethod(descriptor))
for m in descriptor.interface.members:
if (m.isMethod() and
(not m.isIdentifierLess() or m == descriptor.operations["Stringifier"])):
if m.isStatic():
assert descriptor.interface.hasInterfaceObject()
cgThings.append(CGStaticMethod(descriptor, m))
elif not descriptor.interface.isCallback():
cgThings.append(CGSpecializedMethod(descriptor, m))
cgThings.append(CGMemberJITInfo(descriptor, m))
elif m.isAttr():
if m.stringifier:
raise TypeError("Stringifier attributes not supported yet. "
"See https://github.com/servo/servo/issues/7590\n"
"%s" % m.location)
if m.isStatic():
assert descriptor.interface.hasInterfaceObject()
cgThings.append(CGStaticGetter(descriptor, m))
elif not descriptor.interface.isCallback():
cgThings.append(CGSpecializedGetter(descriptor, m))
if not m.readonly:
if m.isStatic():
assert descriptor.interface.hasInterfaceObject()
cgThings.append(CGStaticSetter(descriptor, m))
elif not descriptor.interface.isCallback():
cgThings.append(CGSpecializedSetter(descriptor, m))
elif m.getExtendedAttribute("PutForwards"):
cgThings.append(CGSpecializedForwardingSetter(descriptor, m))
if (not m.isStatic() and not descriptor.interface.isCallback()):
cgThings.append(CGMemberJITInfo(descriptor, m))
if descriptor.concrete:
cgThings.append(CGClassFinalizeHook(descriptor))
cgThings.append(CGClassTraceHook(descriptor))
if descriptor.interface.hasInterfaceObject():
if descriptor.interface.ctor():
cgThings.append(CGClassConstructHook(descriptor))
for ctor in descriptor.interface.namedConstructors:
cgThings.append(CGClassConstructHook(descriptor, ctor))
if not descriptor.interface.isCallback():
cgThings.append(CGInterfaceObjectJSClass(descriptor))
cgThings.append(CGClassHasInstanceHook(descriptor))
cgThings.append(CGClassFunToStringHook(descriptor))
if not descriptor.interface.isCallback():
cgThings.append(CGPrototypeJSClass(descriptor))
properties = PropertyArrays(descriptor)
cgThings.append(CGGeneric(str(properties)))
cgThings.append(CGCreateInterfaceObjectsMethod(descriptor, properties))
cgThings.append(CGNamespace.build([descriptor.name + "Constants"],
CGConstant(m for m in descriptor.interface.members if m.isConst()),
public=True))
if descriptor.interface.hasInterfaceObject():
cgThings.append(CGDefineDOMInterfaceMethod(descriptor))
if descriptor.proxy:
cgThings.append(CGDefineProxyHandler(descriptor))
if descriptor.concrete:
if descriptor.proxy:
# cgThings.append(CGProxyIsProxy(descriptor))
cgThings.append(CGProxyUnwrap(descriptor))
cgThings.append(CGDOMJSProxyHandlerDOMClass(descriptor))
cgThings.append(CGDOMJSProxyHandler_ownPropertyKeys(descriptor))
cgThings.append(CGDOMJSProxyHandler_getOwnPropertyDescriptor(descriptor))
cgThings.append(CGDOMJSProxyHandler_className(descriptor))
cgThings.append(CGDOMJSProxyHandler_get(descriptor))
cgThings.append(CGDOMJSProxyHandler_hasOwn(descriptor))
if descriptor.operations['IndexedSetter'] or descriptor.operations['NamedSetter']:
cgThings.append(CGDOMJSProxyHandler_defineProperty(descriptor))
# We want to prevent indexed deleters from compiling at all.
assert not descriptor.operations['IndexedDeleter']
if descriptor.operations['NamedDeleter']:
cgThings.append(CGDOMJSProxyHandler_delete(descriptor))
# cgThings.append(CGDOMJSProxyHandler(descriptor))
# cgThings.append(CGIsMethod(descriptor))
else:
cgThings.append(CGDOMJSClass(descriptor))
cgThings.append(CGWrapMethod(descriptor))
if not descriptor.interface.isCallback():
if descriptor.concrete or descriptor.hasDescendants():
cgThings.append(CGIDLInterface(descriptor))
cgThings.append(CGInterfaceTrait(descriptor))
if descriptor.weakReferenceable:
cgThings.append(CGWeakReferenceableTrait(descriptor))
cgThings = CGList(cgThings, "\n")
# self.cgRoot = CGWrapper(CGNamespace(toBindingNamespace(descriptor.name),
# cgThings),
# post='\n')
self.cgRoot = cgThings
def define(self):
return self.cgRoot.define()
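# Illustrative output (hypothetical names), e.g. for
# CGNonNamespacedEnum('ID', ['Blob', 'Event'], 0,
#                     deriving="PartialEq, Copy, Clone", repr="u16"):
#
#     #[derive(PartialEq, Copy, Clone)]
#     #[repr(u16)]
#     pub enum ID {
#         Blob = 0,
#         Event,
#         Last = 2
#     }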
class CGNonNamespacedEnum(CGThing):
def __init__(self, enumName, names, first, comment="", deriving="", repr=""):
# Account for first value
entries = ["%s = %s" % (names[0], first)] + names[1:]
# Append a Last.
entries.append('Last = ' + str(first + len(entries)))
# Indent.
entries = [' ' + e for e in entries]
# Build the enum body.
enumstr = comment + 'pub enum %s {\n%s\n}\n' % (enumName, ',\n'.join(entries))
if repr:
enumstr = ('#[repr(%s)]\n' % repr) + enumstr
if deriving:
enumstr = ('#[derive(%s)]\n' % deriving) + enumstr
curr = CGGeneric(enumstr)
# Add some whitespace padding.
curr = CGWrapper(curr, pre='\n', post='\n')
# Add the typedef
# typedef = '\ntypedef %s::%s %s;\n\n' % (namespace, enumName, enumName)
# curr = CGList([curr, CGGeneric(typedef)])
# Save the result.
self.node = curr
def define(self):
return self.node.define()
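# Generates a Rust struct for a WebIDL dictionary plus its conversions:
# `new()` reads each member off a JS value with get_dictionary_property,
# and the ToJSValConvertible impl writes them back. Illustrative shape
# (hypothetical dictionary with one boolean member):
#
#     pub struct EventInit {
#         pub bubbles: bool,
#     }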
class CGDictionary(CGThing):
def __init__(self, dictionary, descriptorProvider):
self.dictionary = dictionary
self.generatable = all(
CGDictionary(d, descriptorProvider).generatable
for d in CGDictionary.getDictionaryDependencies(dictionary))
if not self.generatable:
# Nothing else to do here
return
self.memberInfo = [
(member,
getJSToNativeConversionInfo(member.type,
descriptorProvider,
isMember="Dictionary",
defaultValue=member.defaultValue,
exceptionCode="return Err(());"))
for member in dictionary.members]
def define(self):
if not self.generatable:
return ""
return self.struct() + "\n" + self.impl()
def struct(self):
d = self.dictionary
if d.parent:
inheritance = " pub parent: %s::%s,\n" % (self.makeModuleName(d.parent),
self.makeClassName(d.parent))
else:
inheritance = ""
memberDecls = [" pub %s: %s," %
(self.makeMemberName(m[0].identifier.name), self.getMemberType(m))
for m in self.memberInfo]
return (string.Template(
"pub struct ${selfName} {\n" +
"${inheritance}" +
"\n".join(memberDecls) + "\n" +
"}").substitute({"selfName": self.makeClassName(d),
"inheritance": inheritance}))
def impl(self):
d = self.dictionary
if d.parent:
initParent = "parent: try!(%s::%s::new(cx, val)),\n" % (
self.makeModuleName(d.parent),
self.makeClassName(d.parent))
else:
initParent = ""
def memberInit(memberInfo):
member, _ = memberInfo
name = self.makeMemberName(member.identifier.name)
conversion = self.getMemberConversion(memberInfo, member.type)
return CGGeneric("%s: %s,\n" % (name, conversion.define()))
def memberInsert(memberInfo):
member, _ = memberInfo
name = self.makeMemberName(member.identifier.name)
insertion = ("let mut %s = RootedValue::new(cx, UndefinedValue());\n"
"self.%s.to_jsval(cx, %s.handle_mut());\n"
"set_dictionary_property(cx, obj.handle(), \"%s\", %s.handle()).unwrap();"
% (name, name, name, name, name))
return CGGeneric("%s\n" % insertion)
memberInits = CGList([memberInit(m) for m in self.memberInfo])
memberInserts = CGList([memberInsert(m) for m in self.memberInfo])
return string.Template(
"impl ${selfName} {\n"
" pub unsafe fn empty(cx: *mut JSContext) -> ${selfName} {\n"
" ${selfName}::new(cx, HandleValue::null()).unwrap()\n"
" }\n"
" pub unsafe fn new(cx: *mut JSContext, val: HandleValue) -> Result<${selfName}, ()> {\n"
" let object = if val.get().is_null_or_undefined() {\n"
" RootedObject::new(cx, ptr::null_mut())\n"
" } else if val.get().is_object() {\n"
" RootedObject::new(cx, val.get().to_object())\n"
" } else {\n"
" throw_type_error(cx, \"Value not an object.\");\n"
" return Err(());\n"
" };\n"
" Ok(${selfName} {\n"
"${initParent}"
"${initMembers}"
" })\n"
" }\n"
"}\n"
"\n"
"impl ToJSValConvertible for ${selfName} {\n"
" unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {\n"
" let obj = RootedObject::new(cx, JS_NewObject(cx, ptr::null()));\n"
"${insertMembers}"
" rval.set(ObjectOrNullValue(obj.ptr))\n"
" }\n"
"}\n").substitute({
"selfName": self.makeClassName(d),
"initParent": CGIndenter(CGGeneric(initParent), indentLevel=12).define(),
"initMembers": CGIndenter(memberInits, indentLevel=12).define(),
"insertMembers": CGIndenter(memberInserts, indentLevel=8).define(),
})
@staticmethod
def makeDictionaryName(dictionary):
return dictionary.identifier.name
def makeClassName(self, dictionary):
return self.makeDictionaryName(dictionary)
@staticmethod
def makeModuleName(dictionary):
return getModuleFromObject(dictionary)
def getMemberType(self, memberInfo):
member, info = memberInfo
declType = info.declType
if member.optional and not member.defaultValue:
declType = CGWrapper(info.declType, pre="Option<", post=">")
return declType.define()
def getMemberConversion(self, memberInfo, memberType):
def indent(s):
return CGIndenter(CGGeneric(s), 8).define()
member, info = memberInfo
templateBody = info.template
default = info.default
replacements = {"val": "rval.handle()"}
conversion = string.Template(templateBody).substitute(replacements)
if memberType.isAny():
conversion = "%s.get()" % conversion
assert (member.defaultValue is None) == (default is None)
if not member.optional:
assert default is None
default = ("throw_type_error(cx, \"Missing required member \\\"%s\\\".\");\n"
"return Err(());") % member.identifier.name
elif not default:
default = "None"
conversion = "Some(%s)" % conversion
conversion = (
"{\n"
"let mut rval = RootedValue::new(cx, UndefinedValue());\n"
"match try!(get_dictionary_property(cx, object.handle(), \"%s\", rval.handle_mut())) {\n"
" true => {\n"
"%s\n"
" },\n"
" false => {\n"
"%s\n"
" },\n"
"}\n}") % (member.identifier.name, indent(conversion), indent(default))
return CGGeneric(conversion)
@staticmethod
def makeMemberName(name):
# Can't use Rust keywords as member names.
if name == "type":
return name + "_"
return name
@staticmethod
def getDictionaryDependencies(dictionary):
deps = set()
if dictionary.parent:
deps.add(dictionary.parent)
for member in dictionary.members:
if member.type.isDictionary():
deps.add(member.type.unroll().inner)
return deps
class CGRegisterProtos(CGAbstractMethod):
def __init__(self, config):
arguments = [
Argument('*mut JSContext', 'cx'),
Argument('HandleObject', 'global'),
]
CGAbstractMethod.__init__(self, None, 'Register', 'void', arguments,
unsafe=False, pub=True)
self.config = config
def definition_body(self):
return CGList([
CGGeneric("codegen::Bindings::%sBinding::DefineDOMInterface(cx, global);" % desc.name)
for desc in self.config.getDescriptors(hasInterfaceObject=True, register=True)
], "\n")
class CGRegisterProxyHandlersMethod(CGAbstractMethod):
def __init__(self, descriptors):
docs = "Create the global vtables used by the generated DOM bindings to implement JS proxies."
CGAbstractMethod.__init__(self, None, 'RegisterProxyHandlers', 'void', [],
unsafe=True, pub=True, docs=docs)
self.descriptors = descriptors
def definition_body(self):
return CGList([
CGGeneric("proxy_handlers[Proxies::%s as usize] = codegen::Bindings::%sBinding::DefineProxyHandler();"
% (desc.name, desc.name))
for desc in self.descriptors
], "\n")
class CGRegisterProxyHandlers(CGThing):
def __init__(self, config):
descriptors = config.getDescriptors(proxy=True)
length = len(descriptors)
self.root = CGList([
CGGeneric("pub static mut proxy_handlers: [*const libc::c_void; %d] = [0 as *const libc::c_void; %d];"
% (length, length)),
CGRegisterProxyHandlersMethod(descriptors),
], "\n")
def define(self):
return self.root.define()
class CGBindingRoot(CGThing):
"""
Root codegen class for binding generation. Instantiate the class and
call define() to generate the Rust binding code for the WebIDL file.
"""
def __init__(self, config, prefix, webIDLFile):
descriptors = config.getDescriptors(webIDLFile=webIDLFile,
hasInterfaceObject=True)
# We also want descriptors that have an interface prototype object
# (isCallback=False), but we don't want to include a second copy
# of descriptors that we also matched in the previous line
# (hence hasInterfaceObject=False).
descriptors.extend(config.getDescriptors(webIDLFile=webIDLFile,
hasInterfaceObject=False,
isCallback=False))
dictionaries = config.getDictionaries(webIDLFile=webIDLFile)
mainCallbacks = config.getCallbacks(webIDLFile=webIDLFile)
callbackDescriptors = config.getDescriptors(webIDLFile=webIDLFile,
isCallback=True)
enums = config.getEnums(webIDLFile)
if not (descriptors or dictionaries or mainCallbacks or callbackDescriptors or enums):
self.root = None
return
# Do codegen for all the enums.
cgthings = [CGEnum(e) for e in enums]
# Do codegen for all the dictionaries.
cgthings.extend([CGDictionary(d, config.getDescriptorProvider())
for d in dictionaries])
# Do codegen for all the callbacks.
cgthings.extend(CGList([CGCallbackFunction(c, config.getDescriptorProvider()),
CGCallbackFunctionImpl(c)], "\n")
for c in mainCallbacks)
# Do codegen for all the descriptors
cgthings.extend([CGDescriptor(x) for x in descriptors])
# Do codegen for all the callback interfaces.
cgthings.extend(CGList([CGCallbackInterface(x),
CGCallbackFunctionImpl(x.interface)], "\n")
for x in callbackDescriptors)
# And make sure we have the right number of newlines at the end
curr = CGWrapper(CGList(cgthings, "\n\n"), post="\n\n")
# Add imports
curr = CGImports(curr, descriptors + callbackDescriptors, mainCallbacks, [
'js',
'js::{JSCLASS_GLOBAL_SLOT_COUNT, JSCLASS_IMPLEMENTS_BARRIERS}',
'js::{JSCLASS_IS_DOMJSCLASS, JSCLASS_IS_GLOBAL, JSCLASS_RESERVED_SLOTS_MASK}',
'js::{JSCLASS_RESERVED_SLOTS_SHIFT, JSITER_HIDDEN, JSITER_OWNONLY}',
'js::{JSITER_SYMBOLS, JSPROP_ENUMERATE, JSPROP_PERMANENT, JSPROP_READONLY}',
'js::{JSPROP_SHARED, JS_CALLEE}',
'js::error::throw_type_error',
'js::jsapi::{AliasSet, ArgType, AutoIdVector, CallArgs, FreeOp}',
'js::jsapi::{GetGlobalForObjectCrossCompartment, GetPropertyKeys, Handle}',
'js::jsapi::{HandleId, HandleObject, HandleValue, HandleValueArray}',
'js::jsapi::{INTERNED_STRING_TO_JSID, IsCallable, JS_CallFunctionValue}',
'js::jsapi::{JS_ComputeThis, JS_CopyPropertiesFrom, JS_ForwardGetPropertyTo}',
'js::jsapi::{JS_GetClass, JS_GetFunctionPrototype, JS_GetGlobalForObject}',
'js::jsapi::{JS_GetObjectPrototype, JS_GetProperty, JS_GetPropertyById}',
'js::jsapi::{JS_GetPropertyDescriptorById, JS_GetReservedSlot, JS_HasProperty}',
'js::jsapi::{JS_HasPropertyById, JS_InitializePropertiesFromCompatibleNativeObject}',
'js::jsapi::{JS_InternString, JS_IsExceptionPending, JS_NewObject, JS_NewObjectWithGivenProto}',
'js::jsapi::{JS_NewObjectWithoutMetadata, JS_NewStringCopyZ, JS_SetProperty}',
'js::jsapi::{JS_SetPrototype, JS_SetReservedSlot, JS_WrapValue, JSAutoCompartment}',
'js::jsapi::{JSAutoRequest, JSContext, JSClass, JSFreeOp, JSFunctionSpec}',
'js::jsapi::{JSJitGetterCallArgs, JSJitInfo, JSJitMethodCallArgs, JSJitSetterCallArgs}',
'js::jsapi::{JSNative, JSObject, JSNativeWrapper, JSPropertyDescriptor, JSPropertySpec}',
'js::jsapi::{JSString, JSTracer, JSType, JSTypedMethodJitInfo, JSValueType}',
'js::jsapi::{ObjectOpResult, OpType, MutableHandle, MutableHandleObject}',
'js::jsapi::{MutableHandleValue, RootedId, RootedObject, RootedString}',
'js::jsapi::{RootedValue, SymbolCode, jsid}',
'js::jsval::JSVal',
'js::jsval::{ObjectValue, ObjectOrNullValue, PrivateValue}',
'js::jsval::{NullValue, UndefinedValue}',
'js::glue::{CallJitMethodOp, CallJitGetterOp, CallJitSetterOp, CreateProxyHandler}',
'js::glue::{GetProxyPrivate, NewProxyObject, ProxyTraps}',
'js::glue::{RUST_FUNCTION_VALUE_TO_JITINFO}',
'js::glue::{RUST_JS_NumberValue, RUST_JSID_IS_STRING, int_to_jsid}',
'js::glue::AppendToAutoIdVector',
'js::rust::{GCMethods, define_methods, define_properties}',
'dom::bindings',
'dom::bindings::global::{GlobalRef, global_root_from_object, global_root_from_reflector}',
'dom::bindings::interface::{NonCallbackInterfaceObjectClass, create_callback_interface_object}',
'dom::bindings::interface::{create_interface_prototype_object, create_named_constructors}',
'dom::bindings::interface::{create_noncallback_interface_object, has_instance}',
'dom::bindings::js::{JS, Root, RootedReference}',
'dom::bindings::js::{OptionalRootedReference}',
'dom::bindings::reflector::{Reflectable}',
'dom::bindings::utils::{ConstantSpec, DOMClass, DOMJSClass}',
'dom::bindings::utils::{DOM_PROTO_UNFORGEABLE_HOLDER_SLOT, JSCLASS_DOM_GLOBAL}',
'dom::bindings::utils::{NonNullJSNative, ProtoOrIfaceArray, create_dom_global}',
'dom::bindings::utils::{finalize_global, find_enum_string_index, generic_getter}',
'dom::bindings::utils::{generic_lenient_getter, generic_lenient_setter}',
'dom::bindings::utils::{generic_method, generic_setter, get_array_index_from_id}',
'dom::bindings::utils::{get_dictionary_property, get_property_on_prototype}',
'dom::bindings::utils::{get_proto_or_iface_array, has_property_on_prototype}',
'dom::bindings::utils::{is_platform_object, set_dictionary_property}',
'dom::bindings::utils::{throwing_constructor, trace_global}',
'dom::bindings::utils::ConstantVal::{IntVal, UintVal}',
'dom::bindings::trace::{JSTraceable, RootedTraceable}',
'dom::bindings::callback::{CallbackContainer, CallbackInterface, CallbackFunction}',
'dom::bindings::callback::{CallSetup, ExceptionHandling}',
'dom::bindings::callback::wrap_call_this_object',
'dom::bindings::conversions::{ConversionBehavior, DOM_OBJECT_SLOT, IDLInterface}',
'dom::bindings::conversions::{FromJSValConvertible, StringificationBehavior}',
'dom::bindings::conversions::{ToJSValConvertible, jsid_to_str, native_from_handlevalue}',
'dom::bindings::conversions::{native_from_object, private_from_object, root_from_object}',
'dom::bindings::conversions::{root_from_handleobject, root_from_handlevalue}',
'dom::bindings::codegen::{PrototypeList, RegisterBindings, UnionTypes}',
'dom::bindings::codegen::Bindings::*',
'dom::bindings::error::{Fallible, Error, ErrorResult}',
'dom::bindings::error::Error::JSFailed',
'dom::bindings::error::throw_dom_exception',
'dom::bindings::proxyhandler',
'dom::bindings::proxyhandler::{ensure_expando_object, fill_property_descriptor}',
'dom::bindings::proxyhandler::{get_expando_object, get_property_descriptor}',
'dom::bindings::num::Finite',
'dom::bindings::str::ByteString',
'dom::bindings::str::USVString',
'dom::bindings::trace::RootedVec',
'dom::bindings::weakref::{DOM_WEAK_SLOT, WeakBox, WeakReferenceable}',
'mem::heap_size_of_raw_self_and_children',
'libc',
'util::str::DOMString',
'std::borrow::ToOwned',
'std::cmp',
'std::mem',
'std::num',
'std::ptr',
'std::str',
'std::rc',
'std::rc::Rc',
'std::default::Default',
'std::ffi::CString',
])
# Add the auto-generated comment.
curr = CGWrapper(curr, pre=AUTOGENERATED_WARNING_COMMENT)
# Store the final result.
self.root = curr
def define(self):
if not self.root:
return None
return stripTrailingWhitespace(self.root.define())
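# Helpers shared by CGInterfaceTrait and CGNativeMember: they map WebIDL
# types onto the Rust parameter/return types of the generated trait --
# Vec<...> (or &[...] for interfaces) for variadics, Option<...> for
# optionals without defaults, and Fallible<...> for fallible returns.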
def argument_type(descriptorProvider, ty, optional=False, defaultValue=None, variadic=False):
info = getJSToNativeConversionInfo(
ty, descriptorProvider, isArgument=True)
declType = info.declType
if variadic:
if ty.isGeckoInterface():
declType = CGWrapper(declType, pre="&[", post="]")
else:
declType = CGWrapper(declType, pre="Vec<", post=">")
elif optional and not defaultValue:
declType = CGWrapper(declType, pre="Option<", post=">")
if ty.isDictionary():
declType = CGWrapper(declType, pre="&")
return declType.define()
def method_arguments(descriptorProvider, returnType, arguments, passJSBits=True, trailing=None):
if needCx(returnType, arguments, passJSBits):
yield "cx", "*mut JSContext"
for argument in arguments:
ty = argument_type(descriptorProvider, argument.type, argument.optional,
argument.defaultValue, argument.variadic)
yield CGDictionary.makeMemberName(argument.identifier.name), ty
if trailing:
yield trailing
def return_type(descriptorProvider, rettype, infallible):
result = getRetvalDeclarationForType(rettype, descriptorProvider)
if not infallible:
result = CGWrapper(result, pre="Fallible<", post=">")
return result.define()
class CGNativeMember(ClassMethod):
def __init__(self, descriptorProvider, member, name, signature, extendedAttrs,
breakAfter=True, passJSBitsAsNeeded=True, visibility="public"):
"""
If passJSBitsAsNeeded is false, we don't automatically pass in a
JSContext* or a JSObject* based on the return and argument types.
"""
self.descriptorProvider = descriptorProvider
self.member = member
self.extendedAttrs = extendedAttrs
self.passJSBitsAsNeeded = passJSBitsAsNeeded
breakAfterSelf = "\n" if breakAfter else ""
ClassMethod.__init__(self, name,
self.getReturnType(signature[0]),
self.getArgs(signature[0], signature[1]),
static=member.isStatic(),
# Mark our getters, which are attrs that
# have a non-void return type, as const.
const=(not member.isStatic() and member.isAttr() and
not signature[0].isVoid()),
breakAfterSelf=breakAfterSelf,
visibility=visibility)
def getReturnType(self, type):
infallible = 'infallible' in self.extendedAttrs
typeDecl = return_type(self.descriptorProvider, type, infallible)
return typeDecl
def getArgs(self, returnType, argList):
return [Argument(arg[1], arg[0]) for arg in method_arguments(self.descriptorProvider,
returnType,
argList,
self.passJSBitsAsNeeded)]
class CGCallback(CGClass):
def __init__(self, idlObject, descriptorProvider, baseName, methods,
getters=[], setters=[]):
self.baseName = baseName
self._deps = idlObject.getDeps()
name = idlObject.identifier.name
# For our public methods that needThisHandling we want most of the
# same args and the same return type as what CallbackMember
# generates. So we want to take advantage of all its
# CGNativeMember infrastructure, but that infrastructure can't deal
# with templates and most especially template arguments. So just
# cheat and have CallbackMember compute all those things for us.
realMethods = []
for method in methods:
if not method.needThisHandling:
realMethods.append(method)
else:
realMethods.extend(self.getMethodImpls(method))
CGClass.__init__(self, name,
bases=[ClassBase(baseName)],
constructors=self.getConstructors(),
methods=realMethods + getters + setters,
decorators="#[derive(JSTraceable, PartialEq)]")
def getConstructors(self):
return [ClassConstructor(
[Argument("*mut JSObject", "aCallback")],
bodyInHeader=True,
visibility="pub",
explicit=False,
baseConstructors=[
"%s::new()" % self.baseName
])]
def getMethodImpls(self, method):
assert method.needThisHandling
args = list(method.args)
# Strip out the JSContext*/JSObject* args
# that got added.
assert args[0].name == "cx" and args[0].argType == "*mut JSContext"
assert args[1].name == "aThisObj" and args[1].argType == "HandleObject"
args = args[2:]
# Record the names of all the arguments, so we can use them when we call
# the private method.
argnames = [arg.name for arg in args]
argnamesWithThis = ["s.get_context()", "thisObjJS.handle()"] + argnames
argnamesWithoutThis = ["s.get_context()", "thisObjJS.handle()"] + argnames
# Now that we've recorded the argnames for our call to our private
# method, insert our optional argument for deciding whether the
# CallSetup should re-throw exceptions on aRv.
args.append(Argument("ExceptionHandling", "aExceptionHandling",
"ReportExceptions"))
# And now insert our template argument.
argsWithoutThis = list(args)
args.insert(0, Argument("&T", "thisObj"))
# And the self argument
method.args.insert(0, Argument(None, "&self"))
args.insert(0, Argument(None, "&self"))
argsWithoutThis.insert(0, Argument(None, "&self"))
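# Three methods are emitted per callback operation: `name_` (generic over
# T: Reflectable, the caller supplies the `this` object), `name__` (a null
# `this` object is passed), and the original private implementation.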
setupCall = ("let s = CallSetup::new(self, aExceptionHandling);\n"
"if s.get_context().is_null() {\n"
" return Err(JSFailed);\n"
"}\n")
bodyWithThis = string.Template(
setupCall +
"let mut thisObjJS = RootedObject::new(s.get_context(), ptr::null_mut());\n"
"wrap_call_this_object(s.get_context(), thisObj, thisObjJS.handle_mut());\n"
"if thisObjJS.ptr.is_null() {\n"
" return Err(JSFailed);\n"
"}\n"
"return ${methodName}(${callArgs});").substitute({
"callArgs": ", ".join(argnamesWithThis),
"methodName": 'self.' + method.name,
})
bodyWithoutThis = string.Template(
setupCall +
"let thisObjJS = RootedObject::new(s.get_context(), ptr::null_mut());\n"
"return ${methodName}(${callArgs});").substitute({
"callArgs": ", ".join(argnamesWithoutThis),
"methodName": 'self.' + method.name,
})
return [ClassMethod(method.name + '_', method.returnType, args,
bodyInHeader=True,
templateArgs=["T: Reflectable"],
body=bodyWithThis,
visibility='pub'),
ClassMethod(method.name + '__', method.returnType, argsWithoutThis,
bodyInHeader=True,
body=bodyWithoutThis,
visibility='pub'),
method]
def deps(self):
return self._deps
# We're always fallible
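# (callback glue passes empty extendedAttrs to CGNativeMember, so
# return_type always wraps callback results in Fallible<...>)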
def callbackGetterName(attr, descriptor):
return "Get" + MakeNativeName(
descriptor.binaryNameFor(attr.identifier.name))
def callbackSetterName(attr, descriptor):
return "Set" + MakeNativeName(
descriptor.binaryNameFor(attr.identifier.name))
class CGCallbackFunction(CGCallback):
def __init__(self, callback, descriptorProvider):
CGCallback.__init__(self, callback, descriptorProvider,
"CallbackFunction",
methods=[CallCallback(callback, descriptorProvider)])
def getConstructors(self):
return CGCallback.getConstructors(self)
class CGCallbackFunctionImpl(CGGeneric):
def __init__(self, callback):
impl = string.Template("""\
impl CallbackContainer for ${type} {
fn new(callback: *mut JSObject) -> Rc<${type}> {
${type}::new(callback)
}
fn callback(&self) -> *mut JSObject {
self.parent.callback()
}
}
impl ToJSValConvertible for ${type} {
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
self.callback().to_jsval(cx, rval);
}
}\
""").substitute({"type": callback.identifier.name})
CGGeneric.__init__(self, impl)
class CGCallbackInterface(CGCallback):
def __init__(self, descriptor):
iface = descriptor.interface
attrs = [m for m in iface.members if m.isAttr() and not m.isStatic()]
getters = [CallbackGetter(a, descriptor) for a in attrs]
setters = [CallbackSetter(a, descriptor) for a in attrs
if not a.readonly]
methods = [m for m in iface.members
if m.isMethod() and not m.isStatic() and not m.isIdentifierLess()]
methods = [CallbackOperation(m, sig, descriptor) for m in methods
for sig in m.signatures()]
assert not iface.isJSImplemented() or not iface.ctor()
CGCallback.__init__(self, iface, descriptor, "CallbackInterface",
methods, getters=getters, setters=setters)
class FakeMember():
def __init__(self):
self.treatNullAs = "Default"
def isStatic(self):
return False
def isAttr(self):
return False
def isMethod(self):
return False
def getExtendedAttribute(self, name):
return None
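# Base class for generated callback glue: getImpl() stitches together the
# CallSetup, the rooted argv vector, back-to-front argument conversion,
# the JS call itself, and conversion of the result back to a Rust value.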
class CallbackMember(CGNativeMember):
def __init__(self, sig, name, descriptorProvider, needThisHandling):
"""
needThisHandling is True if we need to be able to accept a specified
thisObj, False otherwise.
"""
self.retvalType = sig[0]
self.originalSig = sig
args = sig[1]
self.argCount = len(args)
if self.argCount > 0:
# Check for variadic arguments
lastArg = args[self.argCount - 1]
if lastArg.variadic:
self.argCountStr = (
"(%d - 1) + %s.len()" % (self.argCount,
lastArg.identifier.name))
else:
self.argCountStr = "%d" % self.argCount
self.needThisHandling = needThisHandling
# If needThisHandling, we generate ourselves as private and the caller
# will handle generating public versions that handle the "this" stuff.
visibility = "priv" if needThisHandling else "pub"
# We don't care, for callback codegen, whether our original member was
# a method or attribute or whatnot. Just always pass FakeMember()
# here.
CGNativeMember.__init__(self, descriptorProvider, FakeMember(),
name, (self.retvalType, args),
extendedAttrs={},
passJSBitsAsNeeded=False,
visibility=visibility)
# We have to do all the generation of our body now, because
# the caller relies on us throwing if we can't manage it.
self.exceptionCode = "return Err(JSFailed);"
self.body = self.getImpl()
def getImpl(self):
replacements = {
"declRval": self.getRvalDecl(),
"returnResult": self.getResultConversion(),
"convertArgs": self.getArgConversions(),
"doCall": self.getCall(),
"setupCall": self.getCallSetup(),
}
if self.argCount > 0:
replacements["argCount"] = self.argCountStr
replacements["argvDecl"] = string.Template(
"let mut argv = vec![UndefinedValue(); ${argCount}];\n"
).substitute(replacements)
else:
# Avoid weird 0-sized arrays
replacements["argvDecl"] = ""
# Newlines and semicolons are in the values
pre = string.Template(
"${setupCall}"
"${declRval}"
"${argvDecl}").substitute(replacements)
body = string.Template(
"${convertArgs}"
"${doCall}"
"${returnResult}").substitute(replacements)
return CGWrapper(CGIndenter(CGList([
CGGeneric(pre),
CGGeneric(body),
], "\n"), 4), pre="unsafe {\n", post="\n}").define()
def getResultConversion(self):
replacements = {
"val": "rval.handle()",
}
info = getJSToNativeConversionInfo(
self.retvalType,
self.descriptorProvider,
exceptionCode=self.exceptionCode,
isCallbackReturnValue="Callback",
# XXXbz we should try to do better here
sourceDescription="return value")
template = info.template
declType = info.declType
convertType = instantiateJSToNativeConversionTemplate(
template, replacements, declType, "rvalDecl")
if self.retvalType is None or self.retvalType.isVoid():
retval = "()"
elif self.retvalType.isAny():
retval = "rvalDecl.get()"
else:
retval = "rvalDecl"
return "%s\nOk(%s)\n" % (convertType.define(), retval)
def getArgConversions(self):
# Just reget the arglist from self.originalSig, because our superclasses
# have way too many members they like to clobber, so I can't find a
# safe member name to store it in.
argConversions = [self.getArgConversion(i, arg) for (i, arg)
in enumerate(self.originalSig[1])]
# Do them back to front, so our argc modifications will work
# correctly, because we examine trailing arguments first.
argConversions.reverse()
argConversions = [CGGeneric(c) for c in argConversions]
if self.argCount > 0:
argConversions.insert(0, self.getArgcDecl())
# And slap them together.
return CGList(argConversions, "\n\n").define() + "\n\n"
def getArgConversion(self, i, arg):
argval = arg.identifier.name
if arg.variadic:
argval = argval + "[idx].get()"
jsvalIndex = "%d + idx" % i
else:
jsvalIndex = "%d" % i
if arg.optional and not arg.defaultValue:
argval += ".clone().unwrap()"
conversion = wrapForType(
"argv_root.handle_mut()", result=argval,
successCode="argv[%s] = argv_root.ptr;" % jsvalIndex,
pre="let mut argv_root = RootedValue::new(cx, UndefinedValue());")
if arg.variadic:
conversion = string.Template(
"for idx in 0..${arg}.len() {\n" +
CGIndenter(CGGeneric(conversion)).define() + "\n"
"}"
).substitute({"arg": arg.identifier.name})
elif arg.optional and not arg.defaultValue:
conversion = (
CGIfWrapper("%s.is_some()" % arg.identifier.name,
CGGeneric(conversion)).define() +
" else if argc == %d {\n"
" // This is our current trailing argument; reduce argc\n"
" argc -= 1;\n"
"} else {\n"
" argv[%d] = UndefinedValue();\n"
"}" % (i + 1, i))
return conversion
def getArgs(self, returnType, argList):
args = CGNativeMember.getArgs(self, returnType, argList)
if not self.needThisHandling:
# Since we don't need this handling, we're the actual method that
# will be called, so we need an aRethrowExceptions argument.
args.append(Argument("ExceptionHandling", "aExceptionHandling",
"ReportExceptions"))
return args
# We want to allow the caller to pass in a "this" object, as
# well as a JSContext.
return [Argument("*mut JSContext", "cx"),
Argument("HandleObject", "aThisObj")] + args
def getCallSetup(self):
if self.needThisHandling:
# It's been done for us already
return ""
return (
# Mirror the Rust setupCall idiom used in CGCallback.getMethodImpls
# above; the C++-style CallSetup/JSContext* form does not compile as
# Rust in the generated bindings.
"let s = CallSetup::new(self, aExceptionHandling);\n"
"let cx = s.get_context();\n"
"if cx.is_null() {\n"
"    return Err(JSFailed);\n"
"}\n")
def getArgcDecl(self):
if self.argCount <= 1:
return CGGeneric("let argc = %s;" % self.argCountStr)
return CGGeneric("let mut argc = %s;" % self.argCountStr)
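# Note: a single optional trailing argument would still make
# getArgConversion emit `argc -= 1`, which needs the mutable binding;
# this assumes no such callback signature occurs in practice.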
@staticmethod
def ensureASCIIName(idlObject):
type = "attribute" if idlObject.isAttr() else "operation"
# Use re.search, not re.match, so offending characters are caught
# anywhere in the name rather than only at the start.
if re.search("[^\x20-\x7E]", idlObject.identifier.name):
raise SyntaxError('Callback %s name "%s" contains non-ASCII '
"characters. We can't handle that. %s" %
(type, idlObject.identifier.name,
idlObject.location))
if re.search('"', idlObject.identifier.name):
raise SyntaxError("Callback %s name '%s' contains "
"double-quote character. We can't handle "
"that. %s" %
(type, idlObject.identifier.name,
idlObject.location))
class CallbackMethod(CallbackMember):
def __init__(self, sig, name, descriptorProvider, needThisHandling):
CallbackMember.__init__(self, sig, name, descriptorProvider,
needThisHandling)
def getRvalDecl(self):
return "let mut rval = RootedValue::new(cx, UndefinedValue());\n"
def getCall(self):
replacements = {
"thisObj": self.getThisObj(),
"getCallable": self.getCallableDecl()
}
if self.argCount > 0:
replacements["argv"] = "argv.as_ptr()"
replacements["argc"] = "argc"
else:
replacements["argv"] = "ptr::null_mut()"
replacements["argc"] = "0"
return string.Template(
"${getCallable}"
"let rootedThis = RootedObject::new(cx, ${thisObj});\n"
"let ok = JS_CallFunctionValue(\n"
" cx, rootedThis.handle(), callable.handle(),\n"
" &HandleValueArray {\n"
" length_: ${argc} as ::libc::size_t,\n"
" elements_: ${argv}\n"
" }, rval.handle_mut());\n"
"if !ok {\n"
" return Err(JSFailed);\n"
"}\n").substitute(replacements)
class CallCallback(CallbackMethod):
def __init__(self, callback, descriptorProvider):
CallbackMethod.__init__(self, callback.signatures()[0], "Call",
descriptorProvider, needThisHandling=True)
def getThisObj(self):
return "aThisObj.get()"
def getCallableDecl(self):
return "let callable = RootedValue::new(cx, ObjectValue(&*self.parent.callback()));\n"
class CallbackOperationBase(CallbackMethod):
"""
Common class for implementing various callback operations.
"""
def __init__(self, signature, jsName, nativeName, descriptor, singleOperation):
self.singleOperation = singleOperation
self.methodName = jsName
CallbackMethod.__init__(self, signature, nativeName, descriptor, singleOperation)
def getThisObj(self):
if not self.singleOperation:
return "self.parent.callback()"
# This relies on getCallableDecl declaring a boolean
# isCallable in the case when we're a single-operation
# interface.
return "if isCallable { aThisObj.get() } else { self.parent.callback() }"
def getCallableDecl(self):
replacements = {
"methodName": self.methodName
}
getCallableFromProp = string.Template(
'RootedValue::new(cx, try!(self.parent.get_callable_property(cx, "${methodName}")))'
).substitute(replacements)
if not self.singleOperation:
# Root the callable with the same RootedValue idiom as the
# single-operation branch below.
return 'let callable = ' + getCallableFromProp + ';\n'
return (
'let isCallable = IsCallable(self.parent.callback());\n'
'let callable =\n' +
CGIndenter(
CGIfElseWrapper('isCallable',
CGGeneric('RootedValue::new(cx, ObjectValue(&*self.parent.callback()))'),
CGGeneric(getCallableFromProp))).define() + ';\n')
class CallbackOperation(CallbackOperationBase):
"""
Codegen actual WebIDL operations on callback interfaces.
"""
def __init__(self, method, signature, descriptor):
self.ensureASCIIName(method)
jsName = method.identifier.name
CallbackOperationBase.__init__(self, signature,
jsName,
MakeNativeName(descriptor.binaryNameFor(jsName)),
descriptor, descriptor.interface.isSingleOperationInterface())
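# NOTE: the getCall bodies of CallbackGetter and CallbackSetter below
# emit C++-style JSAPI (JS_GetProperty/JS_SetProperty on mCallback,
# MOZ_ASSERT); they appear to be unported remnants of the Gecko codegen
# rather than valid Rust.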
class CallbackGetter(CallbackMember):
def __init__(self, attr, descriptor):
self.ensureASCIIName(attr)
self.attrName = attr.identifier.name
CallbackMember.__init__(self,
(attr.type, []),
callbackGetterName(attr),
descriptor,
needThisHandling=False)
def getRvalDecl(self):
# Same Rust rooting idiom as CallbackMethod.getRvalDecl above.
return "let mut rval = RootedValue::new(cx, UndefinedValue());\n"
def getCall(self):
replacements = {
"attrName": self.attrName
}
return string.Template(
'if (!JS_GetProperty(cx, mCallback, "${attrName}", &rval)) {\n'
' return Err(JSFailed);\n'
'}\n').substitute(replacements)
class CallbackSetter(CallbackMember):
def __init__(self, attr, descriptor):
self.ensureASCIIName(attr)
self.attrName = attr.identifier.name
CallbackMember.__init__(self,
(BuiltinTypes[IDLBuiltinType.Types.void],
[FakeArgument(attr.type, attr)]),
callbackSetterName(attr),
descriptor,
needThisHandling=False)
def getRvalDecl(self):
# We don't need an rval
return ""
def getCall(self):
replacements = {
"attrName": self.attrName,
"argv": "argv.handleAt(0)",
}
return string.Template(
'MOZ_ASSERT(argv.length() == 1);\n'
'if (!JS_SetProperty(cx, mCallback, "${attrName}", ${argv})) {\n'
' return Err(JSFailed);\n'
'}\n').substitute(replacements)
def getArgcDecl(self):
return None
class GlobalGenRoots():
"""
Roots for global codegen.
To generate code, call the method associated with the target, and then
call the appropriate define/declare method.
"""
@staticmethod
def PrototypeList(config):
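# Emits the PrototypeList module: the ID and Constructor enums (which
# together size PROTO_OR_IFACE_LENGTH), the INTERFACES name table behind
# proto_id_to_name, and the Proxies enum.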
# Prototype ID enum.
interfaces = config.getDescriptors(isCallback=False)
protos = [d.name for d in interfaces]
constructors = [d.name for d in interfaces if d.hasDescendants()]
proxies = [d.name for d in config.getDescriptors(proxy=True)]
return CGList([
CGGeneric(AUTOGENERATED_WARNING_COMMENT),
CGGeneric("pub const PROTO_OR_IFACE_LENGTH: usize = %d;\n" % (len(protos) + len(constructors))),
CGGeneric("pub const MAX_PROTO_CHAIN_LENGTH: usize = %d;\n\n" % config.maxProtoChainLength),
CGNonNamespacedEnum('ID', protos, 0, deriving="PartialEq, Copy, Clone", repr="u16"),
CGNonNamespacedEnum('Constructor', constructors, len(protos),
deriving="PartialEq, Copy, Clone", repr="u16"),
CGWrapper(CGIndenter(CGList([CGGeneric('"' + name + '"') for name in protos],
",\n"),
indentLevel=4),
pre="static INTERFACES: [&'static str; %d] = [\n" % len(protos),
post="\n];\n\n"),
CGGeneric("pub fn proto_id_to_name(proto_id: u16) -> &'static str {\n"
" debug_assert!(proto_id < ID::Last as u16);\n"
" INTERFACES[proto_id as usize]\n"
"}\n\n"),
CGNonNamespacedEnum('Proxies', proxies, 0, deriving="PartialEq, Copy, Clone"),
])
@staticmethod
def RegisterBindings(config):
# TODO - Generate the methods we want
code = CGList([
CGRegisterProtos(config),
CGRegisterProxyHandlers(config),
], "\n")
return CGImports(code, [], [], [
'dom::bindings::codegen',
'dom::bindings::codegen::PrototypeList::Proxies',
'js::jsapi::JSContext',
'js::jsapi::HandleObject',
'libc',
], ignored_warnings=[])
@staticmethod
def InterfaceTypes(config):
descriptors = [d.name for d in config.getDescriptors(register=True, isCallback=False)]
curr = CGList([CGGeneric("pub use dom::%s::%s;\n" % (name.lower(), name)) for name in descriptors])
curr = CGWrapper(curr, pre=AUTOGENERATED_WARNING_COMMENT)
return curr
@staticmethod
def Bindings(config):
descriptors = (set(d.name + "Binding" for d in config.getDescriptors(register=True)) |
set(getModuleFromObject(d) for d in config.callbacks) |
set(getModuleFromObject(d) for d in config.getDictionaries()))
curr = CGList([CGGeneric("pub mod %s;\n" % name) for name in sorted(descriptors)])
curr = CGWrapper(curr, pre=AUTOGENERATED_WARNING_COMMENT)
return curr
@staticmethod
def InheritTypes(config):
descriptors = config.getDescriptors(register=True, isCallback=False)
imports = [CGGeneric("use dom::types::*;\n"),
CGGeneric("use dom::bindings::conversions::{DerivedFrom, get_dom_class};\n"),
CGGeneric("use dom::bindings::inheritance::Castable;\n"),
CGGeneric("use dom::bindings::js::{JS, LayoutJS, Root};\n"),
CGGeneric("use dom::bindings::trace::JSTraceable;\n"),
CGGeneric("use dom::bindings::reflector::Reflectable;\n"),
CGGeneric("use js::jsapi::JSTracer;\n\n"),
CGGeneric("use std::mem;\n\n")]
allprotos = []
topTypes = []
hierarchy = defaultdict(list)
for descriptor in descriptors:
name = descriptor.name
chain = descriptor.prototypeChain
upcast = descriptor.hasDescendants()
downcast = len(chain) != 1
if upcast and not downcast:
topTypes.append(name)
if not upcast:
# No other interface will implement DerivedFrom<Foo> for this Foo, so
# avoid implementing it for itself.
chain = chain[:-1]
# Implement `DerivedFrom<Bar>` for `Foo`, for all `Bar` that `Foo` inherits from.
if chain:
allprotos.append(CGGeneric("impl Castable for %s {}\n" % name))
for baseName in chain:
allprotos.append(CGGeneric("impl DerivedFrom<%s> for %s {}\n" % (baseName, name)))
if chain:
allprotos.append(CGGeneric("\n"))
if downcast:
hierarchy[descriptor.getParentName()].append(name)
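# Build the TopTypeId enum: Abstract/Alone plus one variant per root
# interface that has descendants; each castable hierarchy then gets its
# own FooTypeId enum below.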
typeIdCode = []
topTypeVariants = [
("ID used by abstract interfaces.", "Abstract"),
("ID used by interfaces that are not castable.", "Alone"),
]
topTypeVariants += [
("ID used by interfaces that derive from %s." % typeName, "%s(%sTypeId)" % (typeName, typeName))
for typeName in topTypes
]
topTypeVariantsAsStrings = [CGGeneric("/// %s\n%s," % variant) for variant in topTypeVariants]
typeIdCode.append(CGWrapper(CGIndenter(CGList(topTypeVariantsAsStrings, "\n"), 4),
pre="#[derive(Clone, Copy, Debug)]\npub enum TopTypeId {\n",
post="\n}\n\n"))
def type_id_variant(name):
# If `name` is present in the hierarchy keys, that means some other
# interfaces derive from it and this enum variant should have an
# argument with its own TypeId enum.
return "%s(%sTypeId)" % (name, name) if name in hierarchy else name
for base, derived in hierarchy.iteritems():
variants = []
if not config.getInterface(base).getExtendedAttribute("Abstract"):
variants.append(CGGeneric(base))
variants += [CGGeneric(type_id_variant(derivedName)) for derivedName in derived]
derives = "Clone, Copy, Debug"
if base != 'EventTarget' and base != 'HTMLElement':
derives += ", PartialEq"
typeIdCode.append(CGWrapper(CGIndenter(CGList(variants, ",\n"), 4),
pre="#[derive(%s)]\npub enum %sTypeId {\n" % (derives, base),
post="\n}\n\n"))
if base in topTypes:
typeIdCode.append(CGGeneric("""\
impl %(base)s {
pub fn type_id(&self) -> &'static %(base)sTypeId {
let domclass = unsafe {
get_dom_class(self.reflector().get_jsobject().get()).unwrap()
};
match domclass.type_id {
TopTypeId::%(base)s(ref type_id) => type_id,
_ => unreachable!(),
}
}
}
""" % {'base': base}))
curr = CGList(imports + typeIdCode + allprotos)
curr = CGWrapper(curr, pre=AUTOGENERATED_WARNING_COMMENT)
return curr
@staticmethod
def UnionTypes(config):
curr = UnionTypes(config.getDescriptors(),
config.getDictionaries(),
config.getCallbacks(),
config)
# Add the auto-generated comment.
curr = CGWrapper(curr, pre=AUTOGENERATED_WARNING_COMMENT)
# Done.
return curr
| mpl-2.0 | 7,539,845,005,135,834,000 | 38.54212 | 119 | 0.575087 | false |
rwl/puddle | puddle/resource/action/properties_action.py | 1 | 3749 | #------------------------------------------------------------------------------
# Copyright (C) 2009 Richard W. Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#------------------------------------------------------------------------------
""" Defines an action for viewing resource properties.
"""
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
from enthought.io.api import File
from enthought.traits.ui.api import View, Item
from enthought.pyface.action.api import Action
#------------------------------------------------------------------------------
# "PropertiesAction" class:
#------------------------------------------------------------------------------
class PropertiesAction(Action):
""" Defines an action for viewing resource properties.
"""
#--------------------------------------------------------------------------
# "Action" interface:
#--------------------------------------------------------------------------
# The action's name (displayed on menus/tool bar tools etc):
name = "P&roperties"
# Keyboard accelerator:
accelerator = "Alt+Enter"
#--------------------------------------------------------------------------
# "Action" interface:
#--------------------------------------------------------------------------
def perform(self, event):
""" Perform the action.
"""
selections = self.window.selection
if selections:
selection = selections[0]
if isinstance(selection, File):
selection.edit_traits( parent=self.window.control,
view=self._create_resource_view(selection),
kind="livemodal" )
def _create_resource_view(self, selection):
""" Creates a resource view.
"""
resource_view = View(
Item(name="absolute_path", style="readonly"),
# FIXME: Readonly boolean editor is just blank
# Item(name="exists", style="readonly"),
# Item(name="is_file", style="readonly"),
# Item(name="is_folder", style="readonly"),
# Item(name="is_package", style="readonly"),
# Item(name="is_readonly", style="readonly"),
Item(name="mime_type", style="readonly"),
Item(name="url", style="readonly"),
title="Properties for %s%s" % (selection.name, selection.ext),
icon=self.window.application.icon)
return resource_view
# EOF -------------------------------------------------------------------------
| mit | -6,371,690,640,377,739,000 | 41.123596 | 79 | 0.51107 | false |